summaryrefslogtreecommitdiff
path: root/chromium/gpu
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2017-07-12 14:07:37 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2017-07-17 10:29:26 +0000
commitec02ee4181c49b61fce1c8fb99292dbb8139cc90 (patch)
tree25cde714b2b71eb639d1cd53f5a22e9ba76e14ef /chromium/gpu
parentbb09965444b5bb20b096a291445170876225268d (diff)
downloadqtwebengine-chromium-ec02ee4181c49b61fce1c8fb99292dbb8139cc90.tar.gz
BASELINE: Update Chromium to 59.0.3071.134
Change-Id: Id02ef6fb2204c5fd21668a1c3e6911c83b17585a Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/gpu')
-rw-r--r--chromium/gpu/BUILD.gn53
-rw-r--r--chromium/gpu/GLES2/OWNERS2
-rw-r--r--chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt248
-rw-r--r--chromium/gpu/GLES2/gl2chromium_autogen.h4
-rw-r--r--chromium/gpu/OWNERS2
-rw-r--r--chromium/gpu/PRESUBMIT.py1
-rw-r--r--chromium/gpu/command_buffer/OWNERS2
-rwxr-xr-xchromium/gpu/command_buffer/build_gles2_cmd_buffer.py24
-rw-r--r--chromium/gpu/command_buffer/client/client_test_helper.h3
-rw-r--r--chromium/gpu/command_buffer/client/context_support.h6
-rw-r--r--chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h47
-rw-r--r--chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h44
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation.cc138
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation.h6
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_autogen.h25
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h45
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc26
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h11
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_autogen.h24
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h22
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h26
-rw-r--r--chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h22
-rw-r--r--chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h42
-rw-r--r--chromium/gpu/command_buffer/client/gpu_control.h17
-rw-r--r--chromium/gpu/command_buffer/cmd_buffer_functions.txt11
-rw-r--r--chromium/gpu/command_buffer/common/BUILD.gn5
-rw-r--r--chromium/gpu/command_buffer/common/DEPS3
-rw-r--r--chromium/gpu/command_buffer/common/activity_flags.cc75
-rw-r--r--chromium/gpu/command_buffer/common/activity_flags.h81
-rw-r--r--chromium/gpu/command_buffer/common/activity_flags_unittest.cc43
-rw-r--r--chromium/gpu/command_buffer/common/capabilities.h8
-rw-r--r--chromium/gpu/command_buffer/common/discardable_handle.cc117
-rw-r--r--chromium/gpu/command_buffer/common/discardable_handle.h105
-rw-r--r--chromium/gpu/command_buffer/common/discardable_handle_unittest.cc129
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h185
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h70
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h71
-rw-r--r--chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc9
-rw-r--r--chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h5
-rw-r--r--chromium/gpu/command_buffer/common/sync_token.h12
-rw-r--r--chromium/gpu/command_buffer/service/BUILD.gn2
-rw-r--r--chromium/gpu/command_buffer/service/command_executor.cc30
-rw-r--r--chromium/gpu/command_buffer/service/command_executor.h36
-rw-r--r--chromium/gpu/command_buffer/service/command_executor_unittest.cc3
-rw-r--r--chromium/gpu/command_buffer/service/common_decoder.cc3
-rw-r--r--chromium/gpu/command_buffer/service/common_decoder.h5
-rw-r--r--chromium/gpu/command_buffer/service/context_group.cc4
-rw-r--r--chromium/gpu/command_buffer/service/context_group.h4
-rw-r--r--chromium/gpu/command_buffer/service/context_state.h14
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.cc6
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.h2
-rw-r--r--chromium/gpu/command_buffer/service/gl_state_restorer_impl.cc20
-rw-r--r--chromium/gpu/command_buffer/service/gl_state_restorer_impl.h4
-rw-r--r--chromium/gpu/command_buffer/service/gl_utils.cc18
-rw-r--r--chromium/gpu/command_buffer/service/gl_utils.h2
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc163
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h1
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc262
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h13
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc438
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder.h2
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h25
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h2
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc42
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h18
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h47
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc686
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc324
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc68
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h9
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc5
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h2
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc100
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc27
-rw-r--r--chromium/gpu/command_buffer/service/gpu_service_test.cc6
-rw-r--r--chromium/gpu/command_buffer/service/mailbox_manager_sync.cc35
-rw-r--r--chromium/gpu/command_buffer/service/mailbox_manager_sync.h3
-rw-r--r--chromium/gpu/command_buffer/service/memory_program_cache.cc20
-rw-r--r--chromium/gpu/command_buffer/service/memory_program_cache.h6
-rw-r--r--chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc12
-rw-r--r--chromium/gpu/command_buffer/service/preemption_flag.h33
-rw-r--r--chromium/gpu/command_buffer/service/query_manager.cc3
-rw-r--r--chromium/gpu/command_buffer/service/query_manager_unittest.cc30
-rw-r--r--chromium/gpu/command_buffer/service/sequence_id.h17
-rw-r--r--chromium/gpu/command_buffer/service/shader_translator.cc4
-rw-r--r--chromium/gpu/command_buffer/service/sync_point_manager.cc276
-rw-r--r--chromium/gpu/command_buffer/service/sync_point_manager.h182
-rw-r--r--chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc109
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager.cc21
-rw-r--r--chromium/gpu/config/BUILD.gn83
-rw-r--r--chromium/gpu/config/DEPS4
-rw-r--r--chromium/gpu/config/OWNERS2
-rw-r--r--chromium/gpu/config/gpu_blacklist.cc38
-rw-r--r--chromium/gpu/config/gpu_blacklist.h7
-rw-r--r--chromium/gpu/config/gpu_blacklist_unittest.cc106
-rw-r--r--chromium/gpu/config/gpu_control_list.cc1576
-rw-r--r--chromium/gpu/config/gpu_control_list.h573
-rw-r--r--chromium/gpu/config/gpu_control_list_entry_unittest.cc1451
-rw-r--r--chromium/gpu/config/gpu_control_list_jsons.h18
-rw-r--r--chromium/gpu/config/gpu_control_list_number_info_unittest.cc226
-rw-r--r--chromium/gpu/config/gpu_control_list_os_info_unittest.cc153
-rw-r--r--chromium/gpu/config/gpu_control_list_testing.json722
-rw-r--r--chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h558
-rw-r--r--chromium/gpu/config/gpu_control_list_testing_autogen.cc1533
-rw-r--r--chromium/gpu/config/gpu_control_list_testing_autogen.h22
-rw-r--r--chromium/gpu/config/gpu_control_list_testing_data.h15
-rw-r--r--chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h77
-rw-r--r--chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h150
-rw-r--r--chromium/gpu/config/gpu_control_list_unittest.cc591
-rw-r--r--chromium/gpu/config/gpu_control_list_version_unittest.cc (renamed from chromium/gpu/config/gpu_control_list_version_info_unittest.cc)146
-rw-r--r--chromium/gpu/config/gpu_crash_keys.cc23
-rw-r--r--chromium/gpu/config/gpu_crash_keys.h30
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list.README7
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list.cc19
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list.h10
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list.json (renamed from chromium/gpu/config/gpu_driver_bug_list_json.cc)311
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list_unittest.cc154
-rw-r--r--chromium/gpu/config/gpu_driver_bug_workaround_type.h12
-rw-r--r--chromium/gpu/config/gpu_feature_type.h3
-rw-r--r--chromium/gpu/config/gpu_finch_features.cc4
-rw-r--r--chromium/gpu/config/gpu_info.cc13
-rw-r--r--chromium/gpu/config/gpu_info.h6
-rw-r--r--chromium/gpu/config/gpu_info_collector.cc41
-rw-r--r--chromium/gpu/config/gpu_info_collector.h13
-rw-r--r--chromium/gpu/config/gpu_info_collector_android.cc7
-rw-r--r--chromium/gpu/config/gpu_info_collector_linux.cc235
-rw-r--r--chromium/gpu/config/gpu_info_collector_linux.h17
-rw-r--r--chromium/gpu/config/gpu_info_collector_mac.mm194
-rw-r--r--chromium/gpu/config/gpu_info_collector_ozone.cc14
-rw-r--r--chromium/gpu/config/gpu_info_collector_win.cc25
-rw-r--r--chromium/gpu/config/gpu_info_collector_x11.cc43
-rw-r--r--chromium/gpu/config/gpu_test_config.cc3
-rw-r--r--chromium/gpu/config/gpu_util.cc61
-rw-r--r--chromium/gpu/config/gpu_util.h2
-rw-r--r--chromium/gpu/config/gpu_util_unittest.cc2
-rwxr-xr-xchromium/gpu/config/process_json.py803
-rw-r--r--chromium/gpu/config/software_rendering_list.README7
-rw-r--r--chromium/gpu/config/software_rendering_list.json (renamed from chromium/gpu/config/software_rendering_list_json.cc)229
-rw-r--r--chromium/gpu/gles2_conform_support/BUILD.gn6
-rw-r--r--chromium/gpu/gles2_conform_support/egl/context.cc4
-rw-r--r--chromium/gpu/gles2_conform_support/egl/context.h3
-rw-r--r--chromium/gpu/ipc/client/BUILD.gn12
-rw-r--r--chromium/gpu/ipc/client/DEPS2
-rw-r--r--chromium/gpu/ipc/client/command_buffer_proxy_impl.cc56
-rw-r--r--chromium/gpu/ipc/client/command_buffer_proxy_impl.h23
-rw-r--r--chromium/gpu/ipc/client/gpu_channel_host.cc7
-rw-r--r--chromium/gpu/ipc/client/gpu_channel_host.h6
-rw-r--r--chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc10
-rw-r--r--chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc2
-rw-r--r--chromium/gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap.cc (renamed from chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc)59
-rw-r--r--chromium/gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap.h (renamed from chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.h)23
-rw-r--r--chromium/gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap_unittest.cc (renamed from chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap_unittest.cc)6
-rw-r--r--chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc1
-rw-r--r--chromium/gpu/ipc/common/BUILD.gn5
-rw-r--r--chromium/gpu/ipc/common/DEPS2
-rw-r--r--chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h4
-rw-r--r--chromium/gpu/ipc/common/gpu_feature_info.mojom23
-rw-r--r--chromium/gpu/ipc/common/gpu_feature_info.typemap17
-rw-r--r--chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h76
-rw-r--r--chromium/gpu/ipc/common/gpu_info.mojom1
-rw-r--r--chromium/gpu/ipc/common/gpu_info_struct_traits.cc1
-rw-r--r--chromium/gpu/ipc/common/gpu_info_struct_traits.h4
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_support.cc17
-rw-r--r--chromium/gpu/ipc/common/gpu_messages.h9
-rw-r--r--chromium/gpu/ipc/common/gpu_param_traits_macros.h1
-rw-r--r--chromium/gpu/ipc/common/mailbox_holder.typemap4
-rw-r--r--chromium/gpu/ipc/common/mailbox_holder_for_blink.typemap13
-rw-r--r--chromium/gpu/ipc/common/struct_traits_unittest.cc24
-rw-r--r--chromium/gpu/ipc/common/typemaps.gni1
-rw-r--r--chromium/gpu/ipc/host/gpu_memory_buffer_support.cc16
-rw-r--r--chromium/gpu/ipc/host/shader_disk_cache.cc11
-rw-r--r--chromium/gpu/ipc/host/shader_disk_cache.h7
-rw-r--r--chromium/gpu/ipc/in_process_command_buffer.cc57
-rw-r--r--chromium/gpu/ipc/in_process_command_buffer.h10
-rw-r--r--chromium/gpu/ipc/service/BUILD.gn18
-rw-r--r--chromium/gpu/ipc/service/DEPS4
-rw-r--r--chromium/gpu/ipc/service/child_window_surface_win.cc13
-rw-r--r--chromium/gpu/ipc/service/child_window_surface_win.h3
-rw-r--r--chromium/gpu/ipc/service/child_window_win.cc5
-rw-r--r--chromium/gpu/ipc/service/child_window_win.h3
-rw-r--r--chromium/gpu/ipc/service/direct_composition_surface_win.cc1075
-rw-r--r--chromium/gpu/ipc/service/direct_composition_surface_win.h73
-rw-r--r--chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc487
-rw-r--r--chromium/gpu/ipc/service/gpu_channel.cc506
-rw-r--r--chromium/gpu/ipc/service/gpu_channel.h233
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_manager.cc76
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_manager.h81
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc13
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_test_common.cc236
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_test_common.h105
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_unittest.cc154
-rw-r--r--chromium/gpu/ipc/service/gpu_command_buffer_stub.cc142
-rw-r--r--chromium/gpu/ipc/service/gpu_command_buffer_stub.h44
-rw-r--r--chromium/gpu/ipc/service/gpu_init.cc24
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory.cc9
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory.h3
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc (renamed from chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc)71
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h (renamed from chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h)23
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc (renamed from chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap_unittest.cc)6
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread.cc65
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread.h5
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_android.cc7
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_delegate.h2
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_linux.cc12
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_mac.mm3
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h2
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm33
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_win.cc30
-rw-r--r--chromium/gpu/ipc/service/pass_through_image_transport_surface.cc68
-rw-r--r--chromium/gpu/ipc/service/pass_through_image_transport_surface.h25
-rw-r--r--chromium/gpu/ipc/service/stream_texture_android.cc2
-rw-r--r--chromium/gpu/khronos_glcts_support/BUILD.gn1
-rw-r--r--chromium/gpu/skia_bindings/BUILD.gn1
-rw-r--r--chromium/gpu/skia_bindings/grcontext_for_gles2_interface.cc30
-rw-r--r--chromium/gpu/skia_bindings/grcontext_for_gles2_interface.h5
-rw-r--r--chromium/gpu/test_message_loop_type.h30
-rw-r--r--chromium/gpu/tools/compositor_model_bench/BUILD.gn5
-rw-r--r--chromium/gpu/vulkan/BUILD.gn14
-rw-r--r--chromium/gpu/vulkan/OWNERS4
-rw-r--r--chromium/gpu/vulkan/features.gni10
220 files changed, 11989 insertions, 7547 deletions
diff --git a/chromium/gpu/BUILD.gn b/chromium/gpu/BUILD.gn
index 6b0dc6cfab9..87a9e5fdd1f 100644
--- a/chromium/gpu/BUILD.gn
+++ b/chromium/gpu/BUILD.gn
@@ -107,8 +107,9 @@ static_library("test_support") {
"command_buffer/client/gles2_interface_stub.cc",
"command_buffer/client/gles2_interface_stub.h",
"command_buffer/service/error_state_mock.cc",
+ "command_buffer/service/error_state_mock.h",
"command_buffer/service/gles2_cmd_decoder_mock.cc",
- "test_message_loop_type.h",
+ "command_buffer/service/gles2_cmd_decoder_mock.h",
]
public_deps = [
@@ -120,9 +121,6 @@ static_library("test_support") {
"//testing/gtest",
"//ui/gl:gl_unittest_utils",
]
- if (use_ozone) {
- deps += [ "//ui/ozone" ]
- }
}
test("gl_tests") {
@@ -208,6 +206,11 @@ test("gl_tests") {
deps += [ "//ui/android:ui_java" ]
} else if (is_mac) {
libs += [ "IOSurface.framework" ]
+ } else if (is_win) {
+ deps += [
+ "//ui/platform_window",
+ "//ui/platform_window:platform_impls",
+ ]
}
}
@@ -225,11 +228,13 @@ test("gpu_unittests") {
"command_buffer/client/ring_buffer_test.cc",
"command_buffer/client/transfer_buffer_unittest.cc",
"command_buffer/client/vertex_array_object_manager_unittest.cc",
+ "command_buffer/common/activity_flags_unittest.cc",
"command_buffer/common/bitfield_helpers_test.cc",
"command_buffer/common/command_buffer_mock.cc",
"command_buffer/common/command_buffer_mock.h",
"command_buffer/common/command_buffer_shared_test.cc",
"command_buffer/common/debug_marker_manager_unittest.cc",
+ "command_buffer/common/discardable_handle_unittest.cc",
"command_buffer/common/gles2_cmd_format_test.cc",
"command_buffer/common/gles2_cmd_format_test_autogen.h",
"command_buffer/common/gles2_cmd_utils_unittest.cc",
@@ -297,10 +302,14 @@ test("gpu_unittests") {
"command_buffer/service/vertex_attrib_manager_unittest.cc",
"config/gpu_blacklist_unittest.cc",
"config/gpu_control_list_entry_unittest.cc",
- "config/gpu_control_list_number_info_unittest.cc",
- "config/gpu_control_list_os_info_unittest.cc",
+ "config/gpu_control_list_testing_arrays_and_structs_autogen.h",
+ "config/gpu_control_list_testing_autogen.cc",
+ "config/gpu_control_list_testing_autogen.h",
+ "config/gpu_control_list_testing_data.h",
+ "config/gpu_control_list_testing_entry_enums_autogen.h",
+ "config/gpu_control_list_testing_exceptions_autogen.h",
"config/gpu_control_list_unittest.cc",
- "config/gpu_control_list_version_info_unittest.cc",
+ "config/gpu_control_list_version_unittest.cc",
"config/gpu_driver_bug_list_unittest.cc",
"config/gpu_info_collector_unittest.cc",
"config/gpu_info_unittest.cc",
@@ -317,6 +326,10 @@ test("gpu_unittests") {
sources += [ "ipc/client/gpu_memory_buffer_impl_io_surface_unittest.cc" ]
}
+ if (is_linux) {
+ sources += [ "ipc/client/gpu_memory_buffer_impl_native_pixmap_unittest.cc" ]
+ }
+
configs += [
"//build/config:precompiled_headers",
@@ -359,14 +372,13 @@ test("gpu_unittests") {
if (use_ozone) {
deps += [ "//ui/ozone" ]
- sources +=
- [ "ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap_unittest.cc" ]
}
}
test("gpu_perftests") {
sources = [
"perftests/measurements.cc",
+ "perftests/measurements.h",
"perftests/run_all_tests.cc",
"perftests/texture_upload_perftest.cc",
]
@@ -433,4 +445,27 @@ if (is_linux) {
libfuzzer_options = [ "max_len=16384" ]
}
+
+ fuzzer_test("gpu_angle_passthrough_fuzzer") {
+ sources = [
+ "command_buffer/tests/fuzzer_main.cc",
+ ]
+
+ defines = [
+ "GPU_FUZZER_USE_ANGLE",
+ "GPU_FUZZER_USE_PASSTHROUGH_CMD_DECODER",
+ ]
+
+ deps = [
+ ":gpu",
+ "//base",
+ "//base/third_party/dynamic_annotations",
+ "//gpu/command_buffer/common:gles2_utils",
+ "//ui/gfx/geometry",
+ "//ui/gl",
+ "//ui/gl:test_support",
+ ]
+
+ libfuzzer_options = [ "max_len=16384" ]
+ }
}
diff --git a/chromium/gpu/GLES2/OWNERS b/chromium/gpu/GLES2/OWNERS
index dbd228d1861..ca3ddd4c12e 100644
--- a/chromium/gpu/GLES2/OWNERS
+++ b/chromium/gpu/GLES2/OWNERS
@@ -4,4 +4,4 @@ bajones@chromium.org
zmo@chromium.org
vmiura@chromium.org
-# COMPONENT: Internals>GPU
+# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt
index 0c85ec5e878..5e7fad8c993 100644
--- a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt
@@ -8,7 +8,7 @@ Name Strings
Version
- Last Modifed Date: July 16, 2014
+ Last Modifed Date: March 24, 2017
Dependencies
@@ -31,122 +31,120 @@ Overview
New Procedures and Functions
+ void CopyTextureCHROMIUM(uint sourceId,
+ int sourceLevel,
+ enum destTarget,
+ uint destId,
+ int destLevel,
+ int internalFormat,
+ enum destType,
+ boolean unpackFlipY,
+ boolean unpackPremultiplyAlpha,
+ boolean unpackUnmultiplyAlpha)
+
+
+ void CopySubTextureCHROMIUM(uint sourceId,
+ int sourceLevel,
+ enum destTarget,
+ uint destId,
+ int destLevel,
+ int xoffset,
+ int yoffset,
+ int x,
+ int y,
+ sizei width,
+ sizei height,
+ boolean unpackFlipY,
+ boolean unpackPremultiplyAlpha,
+ boolean unpackUnmultiplyAlpha)
+
+Additions to the OpenGL ES 2.0 Specification
+
The command
- void glCopyTextureCHROMIUM (GLenum source_id,
- GLint source_level,
- GLenum dest_target,
- GLenum dest_id,
- GLint dest_level,
- GLint internal_format, GLenum dest_type,
- GLboolean unpack_flip_y,
- GLboolean unpack_premultiply_alpha,
- GLboolean unpack_unmultiply_alpha)
-
- Copies the contents of <source_id> texture to <dest_id> texture.
-
- <source_level> specifies the level of the source texture which is copied
- from.
- <dest_level> specifies the level of the destination texture which is copied
- to.
-
- <dest_target> specifies the target of destination texture. Must be
- GL_TEXTURE_2D,
- GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
- GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
- GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
- GL_TEXTURE_RECTANGLE_ARB.
+ CopyTextureCHROMIUM
+
+ Copies the contents of <sourceLevel> level of <sourceId> texture to
+ <destLevel> level and <destTarget> target of <destId> texture.
+
+ <destTarget> must be TEXTURE_2D,
+ TEXTURE_CUBE_MAP_POSITIVE_X, TEXTURE_CUBE_MAP_NEGATIVE_X,
+ TEXTURE_CUBE_MAP_POSITIVE_Y, TEXTURE_CUBE_MAP_NEGATIVE_Y,
+ TEXTURE_CUBE_MAP_POSITIVE_Z, TEXTURE_CUBE_MAP_NEGATIVE_Z,
+ TEXTURE_RECTANGLE_ARB.
The internal format of the destination texture is converted to that
- specified by <internal_format>. Must be one of the following symbolic
- constants: GL_RGB, GL_RGBA, GL_RGB8, GL_RGBA8, GL_BGRA_EXT, GL_BGRA8_EXT,
- GL_SRGB_EXT, GL_SRGB_ALPHA_EXT, GL_R8, GL_R8UI, GL_RG8, GL_RG8UI, GL_SRGB8,
- GL_RGB565, GL_RGB8UI, GL_SRGB8_ALPHA8, GL_RGB5_A1, GL_RGBA4, GL_RGBA4,
- GL_RGBA8UI, GL_RGB9_E5, GL_R16F, GL_R32F, GL_RG16F, GL_RG32F, GL_RGB16F,
- GL_RGB32F, GL_RGBA16F, GL_RGBA32F, GL_R11F_G11F_B10F.
- The internal format of source texture must be one of the following
- symbolic constants: GL_RED, GL_ALPHA, GL_LUMINANCE, GL_LUMINANCE_ALPHA,
- GL_RGB, GL_RGBA, GL_RGB8, GL_RGBA8, GL_BGRA_EXT, GL_BGRA8_EXT,
- GL_RGB_YCBCR_420V_CHROMIUM, GL_RGB_YCBCR_422_CHROMIUM.
+ specified by <internalFormat>.
+
When source texture doens't contain a superset of the component
- required by <internal_format>, fill the components by following rules.
-
- source format color components
- =====================================================
- GL_ALPHA (0, 0, 0, A)
- GL_RED (R, 0, 0, 1)
- GL_LUMINANCE (L, L, L, 1)
- GL_LUMINANCE_ALPHA (L, L, L, A)
- GL_RGB (R, G, B, 1)
- GL_RGB8 (R, G, B, 1)
- GL_RGBA (R, G, B, A)
- GL_RGBA8 (R, G, B, A)
- GL_BGRA_EXT (R, G, B, A)
- GL_BGRA8_EXT (R, G, B, A)
- GL_RGB_YCBCR_420V_CHROMIUM (R, G, B, 1)
- GL_RGB_YCBCR_422_CHROMIUM (R, G, B, 1)
+ required by <internalFormat>, fill the components by following rules.
+
+ source format color components
+ ----------------------------------------
+ ALPHA (0, 0, 0, A)
+ RED (R, 0, 0, 1)
+ LUMINANCE (L, L, L, 1)
+ LUMINANCE_ALPHA (L, L, L, A)
+ RGB (R, G, B, 1)
+ RGB8 (R, G, B, 1)
+ RGBA (R, G, B, A)
+ RGBA8 (R, G, B, A)
+ BGRA_EXT (R, G, B, A)
+ BGRA8_EXT (R, G, B, A)
+ RGB_YCBCR_420V_CHROMIUM (R, G, B, 1)
+ RGB_YCBCR_422_CHROMIUM (R, G, B, 1)
The format type of the destination texture is converted to that specified
- by <dest_type>.
+ by <destType>.
- If <flip_y> is true, vertically flip texture image data.
+ If <flipY> is true, vertically flip texture image data.
- If <unpack_premultiply_alpha> and <unpack_unmultiply_alpha> are true,
+ If <unpackPremultiplyAlpha> and <unpackUnmultiplyAlpha> are true,
no alpha processing occurs. This is the equivalent of having neither flag
set.
- When <source_id> refers to a stream texture, the texture matrix will be
+ When <sourceId> refers to a stream texture, the texture matrix will be
applied as part of the copy operation.
- INVALID_OPERATION is generated if <internal_format> is not one of the valid
- formats described above.
+ INVALID_OPERATION is generated if <internalFormat> is not one of the
+ formats in Table 1.0.
- INVALID_OPERATION is generated if the internal format of <source_id> is not
- one of formats from the table above.
+ INVALID_OPERATION is generated if the internal format of <sourceId> is not
+ one of formats in Table 1.1.
- INVALID_VALUE is generated if <source_id> or <dest_id> are not valid texture
+ INVALID_VALUE is generated if <sourceId> or <destId> are not valid texture
objects.
- INVALID_ENUM is generated if <dest_target> is not one of the valid targets
+ INVALID_ENUM is generated if <destTarget> is not one of the valid targets
described above.
INVALID_OPERATION is generated if the bound target of destination texture
does not match <target>.
- INVALID_VALUE is generated if textures corresponding to <dest_id> have not
- been bound as GL_TEXTURE_2D, GL_TEXTURE_CUBE_MAP, or
- GL_TEXTURE_RECTANGLE_ARB objects.
+ INVALID_VALUE is generated if textures corresponding to <destId> have not
+ been bound as TEXTURE_2D, TEXTURE_CUBE_MAP, or
+ TEXTURE_RECTANGLE_ARB objects.
- INVALID_VALUE is generated if textures corresponding to <source_id> have not
- been bound as GL_TEXTURE_2D, GL_TEXTURE_RECTANGLE_ARB or
- GL_TEXTURE_EXTERNAL_OES objects.
+ INVALID_VALUE is generated if textures corresponding to <sourceId> have not
+ been bound as TEXTURE_2D, TEXTURE_RECTANGLE_ARB or
+ TEXTURE_EXTERNAL_OES objects.
- INVALID_VALUE is generated if <source_level> is not 0 for ES 2.0, or if
- <source_level> or <dest_level> is less than 0 for ES 3.0.
+ INVALID_VALUE is generated if <sourceLevel> is not 0 for ES 2.0, or if
+ <sourceLevel> or <destLevel> is less than 0 for ES 3.0.
- INVALID_VALUE is generated if <source_level> of the source texture is not
+ INVALID_VALUE is generated if <sourceLevel> of the source texture is not
defined.
The command
- void glCopySubTextureCHROMIUM (GLenum source_id,
- GLint source_level,
- GLenum dest_target,
- GLenum dest_id,
- GLint dest_level,
- GLint xoffset, GLint yoffset,
- GLint x, GLint y,
- GLsizei width, GLsizei height,
- GLboolean unpack_flip_y,
- GLboolean unpack_premultiply_alpha,
- GLboolean unpack_unmultiply_alpha)
-
- Copies the sub contents of texture referred to by <source_id> to <dest_id>
- texture without redefining <dest_id> texture.
-
- See CopyTextureCHROMIUM for the interpretation of the <dest_target>,
- <source_level>, <dest_level>, <flip_y>, <premultiply_alpha>, and
- <unmultiply_alpha> arguments.
+ CopySubTextureCHROMIUM
+
+ Copies the sub contents of texture referred to by <sourceId> to <destId>
+ texture without redefining <destId> texture.
+
+ See CopyTextureCHROMIUM for the interpretation of the <destTarget>,
+ <sourceLevel>, <destLevel>, <flipY>, <premultiplyAlpha>, and
+ <unmultiplyAlpha> arguments.
<xoffset> and <yoffset> specify a texel offset in the x and y direction
respectively within the destination texture.
@@ -158,25 +156,76 @@ New Procedures and Functions
<height> specifies the width of the texture subimage.
- INVALID_VALUE is generated if either <source_id> texture or <dest_id>
+ INVALID_VALUE is generated if either <sourceId> texture or <destId>
texture is not defined.
- INVALID_OPERATION is generated if source internal_format and destination
- internal_format are not one of the valid formats described above.
+ INVALID_OPERATION is generated if the internal format of <sourceId> or
+ <destId> is not one of formats in Table 1.1.
INVALID_OPERATION is generated if the destination texture array has not
been defined.
- INVALID_VALUE is generated if <dest_id> texture is not bound as
- GL_TEXTURE_2D or GL_TEXTURE_RECTANGLE_ARB.
+ INVALID_VALUE is generated if <destId> texture is not bound as
+ TEXTURE_2D or TEXTURE_RECTANGLE_ARB.
INVALID_VALUE is generated if level 0 of the source texture or
the destination texture is not defined.
- INVALID_VALUE is generated if <xoffset> < 0 , or <yoffset> < 0.
-
- INVALID_VALUE is generated if (<xoffset> + <width>) > dest_width,
- or (<yoffset> + <height>) > dest_height.
+ INVALID_VALUE is generated if (<xoffset> + <width>) > destWidth,
+ or (<yoffset> + <height>) > destHeight.
+
+ Table 1.0 Valid internal formats for CopyTextureCHROMIUM:
+
+ <internalFormat>
+ ---------------
+ RGB
+ RGBA
+ RGB8
+ RGBA8
+ BGRA_EXT
+ BGRA8_EXT,
+ SRGB_EXT
+ SRGB_ALPHA_EXT
+ R8
+ R8UI
+ RG8
+ RG8UI
+ SRGB8
+ RGB565
+ RGB8UI
+ SRGB8_ALPHA8
+ RGB5_A1
+ RGBA4
+ RGBA4
+ RGBA8UI
+ RGB9_E5
+ R16F
+ R32F
+ RG16F
+ RG32F
+ RGB16F
+ RGB32F
+ RGBA16F
+ RGBA32F
+ R11F_G11F_B10F
+
+ Table 1.1 Valid source texture internal formats for CopyTextureCHROMIUM and
+ source and destination formats for CopySubTextureCHROMIUM:
+
+ internal format
+ ---------------
+ RED
+ ALPHA
+ LUMINANCE
+ LUMINANCE_ALPHA
+ RGB
+ RGBA
+ RGB8
+ RGBA8
+ BGRA_EXT
+ BGRA8_EXT
+ RGB_YCBCR_420V_CHROMIUM
+ RGB_YCBCR_422_CHROMIUM.
Dependencies on ARB_texture_rg
@@ -204,11 +253,12 @@ Revision History
8/1/2011 Documented the extension
7/4/2013 Add a new parameter dest_type to glCopyTextureCHROMIUM()
- 16/7/2014 Add GL_TEXTURE_RECTANGLE_ARB as valid source_id target
+ 16/7/2014 Add TEXTURE_RECTANGLE_ARB as valid source_id target
19/6/2015 Add arguments unpack_flip_y, unpack_premultiply_alpha, and
unpack_unmultiply_alpha to both commands.
4/1/2016 Removed the argument target.
- 4/1/2016 Added GL_TEXTURE_RECTANGLE_ARB as valid dest_id target.
+ 4/1/2016 Added TEXTURE_RECTANGLE_ARB as valid dest_id target.
19/12/2016 Supported more ES 3.0 formats.
18/1/2017 Supported source_level and dest_level.
- 19/1/2017 Added GL_TEXTURE_CUBE_MAP as valid dest_id target.
+ 19/1/2017 Added TEXTURE_CUBE_MAP as valid dest_id target.
+ 24/3/2017 Clean up naming and move formats into tables.
diff --git a/chromium/gpu/GLES2/gl2chromium_autogen.h b/chromium/gpu/GLES2/gl2chromium_autogen.h
index f5bfb06b364..b5114bd53ab 100644
--- a/chromium/gpu/GLES2/gl2chromium_autogen.h
+++ b/chromium/gpu/GLES2/gl2chromium_autogen.h
@@ -334,6 +334,9 @@
#define glSwapInterval GLES2_GET_FUN(SwapInterval)
#define glFlushDriverCachesCHROMIUM GLES2_GET_FUN(FlushDriverCachesCHROMIUM)
#define glGetLastFlushIdCHROMIUM GLES2_GET_FUN(GetLastFlushIdCHROMIUM)
+#define glScheduleDCLayerSharedStateCHROMIUM \
+ GLES2_GET_FUN(ScheduleDCLayerSharedStateCHROMIUM)
+#define glScheduleDCLayerCHROMIUM GLES2_GET_FUN(ScheduleDCLayerCHROMIUM)
#define glMatrixLoadfCHROMIUM GLES2_GET_FUN(MatrixLoadfCHROMIUM)
#define glMatrixLoadIdentityCHROMIUM GLES2_GET_FUN(MatrixLoadIdentityCHROMIUM)
#define glGenPathsCHROMIUM GLES2_GET_FUN(GenPathsCHROMIUM)
@@ -385,5 +388,6 @@
#define glSwapBuffersWithBoundsCHROMIUM \
GLES2_GET_FUN(SwapBuffersWithBoundsCHROMIUM)
#define glSetDrawRectangleCHROMIUM GLES2_GET_FUN(SetDrawRectangleCHROMIUM)
+#define glSetEnableDCLayersCHROMIUM GLES2_GET_FUN(SetEnableDCLayersCHROMIUM)
#endif // GPU_GLES2_GL2CHROMIUM_AUTOGEN_H_
diff --git a/chromium/gpu/OWNERS b/chromium/gpu/OWNERS
index eccb5057950..95b9256bfd3 100644
--- a/chromium/gpu/OWNERS
+++ b/chromium/gpu/OWNERS
@@ -7,4 +7,4 @@ zmo@chromium.org
# GPU memory buffer implementations.
per-file *gpu_memory_buffer*=reveman@chromium.org
-# COMPONENT: Internals>GPU
+# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/PRESUBMIT.py b/chromium/gpu/PRESUBMIT.py
index 7300f07c6b2..b0c999a5dd8 100644
--- a/chromium/gpu/PRESUBMIT.py
+++ b/chromium/gpu/PRESUBMIT.py
@@ -23,5 +23,6 @@ def PostUploadHook(cl, change, output_api):
'master.tryserver.chromium.linux:linux_optional_gpu_tests_rel',
'master.tryserver.chromium.mac:mac_optional_gpu_tests_rel',
'master.tryserver.chromium.win:win_optional_gpu_tests_rel',
+ 'master.tryserver.chromium.android:android_optional_gpu_tests_rel',
],
'Automatically added optional GPU tests to run on CQ.')
diff --git a/chromium/gpu/command_buffer/OWNERS b/chromium/gpu/command_buffer/OWNERS
index a1c727dcbd4..49a178a25da 100644
--- a/chromium/gpu/command_buffer/OWNERS
+++ b/chromium/gpu/command_buffer/OWNERS
@@ -8,4 +8,4 @@ vmiura@chromium.org
per-file *gpu_memory_buffer*=reveman@chromium.org
-# COMPONENT: Internals>GPU
+# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
index 71e1647ecc9..479b2d6be18 100755
--- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -4005,6 +4005,7 @@ _FUNCTION_INFO = {
'Scissor': {
'type': 'StateSet',
'state': 'Scissor',
+ 'decoder_func': 'DoScissor',
},
'Viewport': {
'impl_func': False,
@@ -4386,6 +4387,23 @@ _FUNCTION_INFO = {
'extension': 'CHROMIUM_schedule_ca_layer',
'unit_test': False,
},
+ 'ScheduleDCLayerSharedStateCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': False,
+ 'client_test': False,
+ 'cmd_args': 'GLfloat opacity, GLboolean is_clipped, '
+ 'GLint z_order, GLuint shm_id, GLuint shm_offset',
+ 'extension': 'CHROMIUM_schedule_ca_layer',
+ },
+ 'ScheduleDCLayerCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': False,
+ 'client_test': False,
+ 'cmd_args': 'GLuint contents_texture_id, GLuint background_color, '
+ 'GLuint edge_aa_mask, GLuint filter, GLuint shm_id, '
+ 'GLuint shm_offset',
+ 'extension': 'CHROMIUM_schedule_ca_layer',
+ },
'CommitOverlayPlanesCHROMIUM': {
'impl_func': False,
'decoder_func': 'DoCommitOverlayPlanes',
@@ -4530,6 +4548,10 @@ _FUNCTION_INFO = {
'decoder_func': 'DoSetDrawRectangleCHROMIUM',
'extension': 'CHROMIUM_set_draw_rectangle',
},
+ 'SetEnableDCLayersCHROMIUM': {
+ 'decoder_func': 'DoSetEnableDCLayersCHROMIUM',
+ 'extension': 'CHROMIUM_dc_layers',
+ },
}
@@ -6681,7 +6703,7 @@ class GETnHandler(TypeHandler):
typedef cmds::%(func_name)s::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
c.%(last_arg_name)s_shm_id, c.%(last_arg_name)s_shm_offset,
- &buffer_size);
+ sizeof(Result), &buffer_size);
%(last_arg_type)s %(last_arg_name)s = result ? result->GetData() : NULL;
if (%(last_arg_name)s == NULL) {
return error::kOutOfBounds;
diff --git a/chromium/gpu/command_buffer/client/client_test_helper.h b/chromium/gpu/command_buffer/client/client_test_helper.h
index 31756c3526f..f806fbbb606 100644
--- a/chromium/gpu/command_buffer/client/client_test_helper.h
+++ b/chromium/gpu/command_buffer/client/client_test_helper.h
@@ -117,7 +117,8 @@ class MockClientGpuControl : public GpuControl {
MOCK_METHOD1(IsFenceSyncReleased, bool(uint64_t release));
MOCK_METHOD2(SignalSyncToken, void(const SyncToken& sync_token,
const base::Closure& callback));
- MOCK_METHOD1(CanWaitUnverifiedSyncToken, bool(const SyncToken*));
+ MOCK_METHOD1(WaitSyncTokenHint, void(const SyncToken&));
+ MOCK_METHOD1(CanWaitUnverifiedSyncToken, bool(const SyncToken&));
private:
DISALLOW_COPY_AND_ASSIGN(MockClientGpuControl);
diff --git a/chromium/gpu/command_buffer/client/context_support.h b/chromium/gpu/command_buffer/client/context_support.h
index ceba6896ce9..53b33c8fee6 100644
--- a/chromium/gpu/command_buffer/client/context_support.h
+++ b/chromium/gpu/command_buffer/client/context_support.h
@@ -27,9 +27,9 @@ class ContextSupport {
virtual void SignalSyncToken(const SyncToken& sync_token,
const base::Closure& callback) = 0;
- // Returns true if the given sync token has been signalled. The sync token
- // must belong to this context. This may be called from any thread.
- virtual bool IsSyncTokenSignalled(const SyncToken& sync_token) = 0;
+ // Returns true if the given sync token has been signaled. The sync token must
+ // belong to this context. This may be called from any thread.
+ virtual bool IsSyncTokenSignaled(const SyncToken& sync_token) = 0;
// Runs |callback| when a query created via glCreateQueryEXT() has cleared
// passed the glEndQueryEXT() point.
diff --git a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
index 12d4643efe7..b789a6be6d4 100644
--- a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -1329,10 +1329,10 @@ void GL_APIENTRY GLES2PostSubBufferCHROMIUM(GLint x,
GLint height) {
gles2::GetGLContext()->PostSubBufferCHROMIUM(x, y, width, height);
}
-void GL_APIENTRY GLES2CopyTextureCHROMIUM(GLenum source_id,
+void GL_APIENTRY GLES2CopyTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint internalformat,
GLenum dest_type,
@@ -1345,10 +1345,10 @@ void GL_APIENTRY GLES2CopyTextureCHROMIUM(GLenum source_id,
unpack_unmultiply_alpha);
}
void GL_APIENTRY
-GLES2CopySubTextureCHROMIUM(GLenum source_id,
+GLES2CopySubTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint xoffset,
GLint yoffset,
@@ -1364,8 +1364,8 @@ GLES2CopySubTextureCHROMIUM(GLenum source_id,
yoffset, x, y, width, height, unpack_flip_y, unpack_premultiply_alpha,
unpack_unmultiply_alpha);
}
-void GL_APIENTRY GLES2CompressedCopyTextureCHROMIUM(GLenum source_id,
- GLenum dest_id) {
+void GL_APIENTRY GLES2CompressedCopyTextureCHROMIUM(GLuint source_id,
+ GLuint dest_id) {
gles2::GetGLContext()->CompressedCopyTextureCHROMIUM(source_id, dest_id);
}
void GL_APIENTRY GLES2DrawArraysInstancedANGLE(GLenum mode,
@@ -1507,6 +1507,25 @@ void GL_APIENTRY GLES2FlushDriverCachesCHROMIUM() {
GLuint GL_APIENTRY GLES2GetLastFlushIdCHROMIUM() {
return gles2::GetGLContext()->GetLastFlushIdCHROMIUM();
}
+void GL_APIENTRY
+GLES2ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
+ GLboolean is_clipped,
+ const GLfloat* clip_rect,
+ GLint z_order,
+ const GLfloat* transform) {
+ gles2::GetGLContext()->ScheduleDCLayerSharedStateCHROMIUM(
+ opacity, is_clipped, clip_rect, z_order, transform);
+}
+void GL_APIENTRY GLES2ScheduleDCLayerCHROMIUM(GLuint contents_texture_id,
+ const GLfloat* contents_rect,
+ GLuint background_color,
+ GLuint edge_aa_mask,
+ const GLfloat* bounds_rect,
+ GLuint filter) {
+ gles2::GetGLContext()->ScheduleDCLayerCHROMIUM(
+ contents_texture_id, contents_rect, background_color, edge_aa_mask,
+ bounds_rect, filter);
+}
void GL_APIENTRY GLES2MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) {
gles2::GetGLContext()->MatrixLoadfCHROMIUM(matrixMode, m);
}
@@ -1726,6 +1745,9 @@ void GL_APIENTRY GLES2SetDrawRectangleCHROMIUM(GLint x,
GLint height) {
gles2::GetGLContext()->SetDrawRectangleCHROMIUM(x, y, width, height);
}
+void GL_APIENTRY GLES2SetEnableDCLayersCHROMIUM(GLboolean enabled) {
+ gles2::GetGLContext()->SetEnableDCLayersCHROMIUM(enabled);
+}
namespace gles2 {
@@ -2880,6 +2902,15 @@ extern const NameToFunc g_gles2_function_table[] = {
reinterpret_cast<GLES2FunctionPointer>(glGetLastFlushIdCHROMIUM),
},
{
+ "glScheduleDCLayerSharedStateCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glScheduleDCLayerSharedStateCHROMIUM),
+ },
+ {
+ "glScheduleDCLayerCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glScheduleDCLayerCHROMIUM),
+ },
+ {
"glMatrixLoadfCHROMIUM",
reinterpret_cast<GLES2FunctionPointer>(glMatrixLoadfCHROMIUM),
},
@@ -3033,6 +3064,10 @@ extern const NameToFunc g_gles2_function_table[] = {
reinterpret_cast<GLES2FunctionPointer>(glSetDrawRectangleCHROMIUM),
},
{
+ "glSetEnableDCLayersCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glSetEnableDCLayersCHROMIUM),
+ },
+ {
NULL, NULL,
},
};
diff --git a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index db32e559622..834b474f6e2 100644
--- a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -2504,10 +2504,10 @@ void PostSubBufferCHROMIUM(GLint x, GLint y, GLint width, GLint height) {
}
}
-void CopyTextureCHROMIUM(GLenum source_id,
+void CopyTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint internalformat,
GLenum dest_type,
@@ -2523,10 +2523,10 @@ void CopyTextureCHROMIUM(GLenum source_id,
}
}
-void CopySubTextureCHROMIUM(GLenum source_id,
+void CopySubTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint xoffset,
GLint yoffset,
@@ -2546,7 +2546,7 @@ void CopySubTextureCHROMIUM(GLenum source_id,
}
}
-void CompressedCopyTextureCHROMIUM(GLenum source_id, GLenum dest_id) {
+void CompressedCopyTextureCHROMIUM(GLuint source_id, GLuint dest_id) {
gles2::cmds::CompressedCopyTextureCHROMIUM* c =
GetCmdSpace<gles2::cmds::CompressedCopyTextureCHROMIUM>();
if (c) {
@@ -2814,6 +2814,32 @@ void FlushDriverCachesCHROMIUM() {
}
}
+void ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
+ GLboolean is_clipped,
+ GLint z_order,
+ GLuint shm_id,
+ GLuint shm_offset) {
+ gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM>();
+ if (c) {
+ c->Init(opacity, is_clipped, z_order, shm_id, shm_offset);
+ }
+}
+
+void ScheduleDCLayerCHROMIUM(GLuint contents_texture_id,
+ GLuint background_color,
+ GLuint edge_aa_mask,
+ GLuint filter,
+ GLuint shm_id,
+ GLuint shm_offset) {
+ gles2::cmds::ScheduleDCLayerCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::ScheduleDCLayerCHROMIUM>();
+ if (c) {
+ c->Init(contents_texture_id, background_color, edge_aa_mask, filter, shm_id,
+ shm_offset);
+ }
+}
+
void MatrixLoadfCHROMIUMImmediate(GLenum matrixMode, const GLfloat* m) {
const uint32_t size =
gles2::cmds::MatrixLoadfCHROMIUMImmediate::ComputeSize();
@@ -3204,4 +3230,12 @@ void SetDrawRectangleCHROMIUM(GLint x, GLint y, GLint width, GLint height) {
}
}
+void SetEnableDCLayersCHROMIUM(GLboolean enabled) {
+ gles2::cmds::SetEnableDCLayersCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::SetEnableDCLayersCHROMIUM>();
+ if (c) {
+ c->Init(enabled);
+ }
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index 8b272ded659..fde1339e7f5 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -376,21 +376,14 @@ void GLES2Implementation::RunIfContextNotLost(const base::Closure& callback) {
void GLES2Implementation::SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) {
+ SyncToken verified_sync_token;
if (sync_token.HasData() &&
- (sync_token.verified_flush() ||
- gpu_control_->CanWaitUnverifiedSyncToken(&sync_token))) {
-
- gpu::SyncToken intermediate_sync_token = sync_token;
-
- // Mark the intermediate sync token as verified if we can wait on
- // unverified sync tokens.
- intermediate_sync_token.SetVerifyFlush();
-
+ GetVerifiedSyncTokenForIPC(sync_token, &verified_sync_token)) {
+ // We can only send verified sync tokens across IPC.
gpu_control_->SignalSyncToken(
- intermediate_sync_token,
+ verified_sync_token,
base::Bind(&GLES2Implementation::RunIfContextNotLost,
- weak_ptr_factory_.GetWeakPtr(),
- callback));
+ weak_ptr_factory_.GetWeakPtr(), callback));
} else {
// Invalid sync token, just call the callback immediately.
callback.Run();
@@ -399,7 +392,7 @@ void GLES2Implementation::SignalSyncToken(const gpu::SyncToken& sync_token,
// This may be called from any thread. It's safe to access gpu_control_ without
// the lock because it is const.
-bool GLES2Implementation::IsSyncTokenSignalled(
+bool GLES2Implementation::IsSyncTokenSignaled(
const gpu::SyncToken& sync_token) {
// Check that the sync token belongs to this context.
DCHECK_EQ(gpu_control_->GetNamespaceID(), sync_token.namespace_id());
@@ -4985,6 +4978,47 @@ void GLES2Implementation::ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
buffer.offset());
}
+void GLES2Implementation::ScheduleDCLayerSharedStateCHROMIUM(
+ GLfloat opacity,
+ GLboolean is_clipped,
+ const GLfloat* clip_rect,
+ GLint z_order,
+ const GLfloat* transform) {
+ size_t shm_size = 20 * sizeof(GLfloat);
+ ScopedTransferBufferPtr buffer(shm_size, helper_, transfer_buffer_);
+ if (!buffer.valid() || buffer.size() < shm_size) {
+ SetGLError(GL_OUT_OF_MEMORY, "GLES2::ScheduleDCLayerSharedStateCHROMIUM",
+ "out of memory");
+ return;
+ }
+ GLfloat* mem = static_cast<GLfloat*>(buffer.address());
+ memcpy(mem + 0, clip_rect, 4 * sizeof(GLfloat));
+ memcpy(mem + 4, transform, 16 * sizeof(GLfloat));
+ helper_->ScheduleDCLayerSharedStateCHROMIUM(opacity, is_clipped, z_order,
+ buffer.shm_id(), buffer.offset());
+}
+
+void GLES2Implementation::ScheduleDCLayerCHROMIUM(GLuint contents_texture_id,
+ const GLfloat* contents_rect,
+ GLuint background_color,
+ GLuint edge_aa_mask,
+ const GLfloat* bounds_rect,
+ GLuint filter) {
+ size_t shm_size = 8 * sizeof(GLfloat);
+ ScopedTransferBufferPtr buffer(shm_size, helper_, transfer_buffer_);
+ if (!buffer.valid() || buffer.size() < shm_size) {
+ SetGLError(GL_OUT_OF_MEMORY, "GLES2::ScheduleDCLayerCHROMIUM",
+ "out of memory");
+ return;
+ }
+ GLfloat* mem = static_cast<GLfloat*>(buffer.address());
+ memcpy(mem + 0, contents_rect, 4 * sizeof(GLfloat));
+ memcpy(mem + 4, bounds_rect, 4 * sizeof(GLfloat));
+ helper_->ScheduleDCLayerCHROMIUM(contents_texture_id, background_color,
+ edge_aa_mask, filter, buffer.shm_id(),
+ buffer.offset());
+}
+
void GLES2Implementation::CommitOverlayPlanesCHROMIUM() {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] CommitOverlayPlanesCHROMIUM()");
@@ -6086,15 +6120,15 @@ void GLES2Implementation::GenSyncTokenCHROMIUM(GLuint64 fence_sync,
void GLES2Implementation::GenUnverifiedSyncTokenCHROMIUM(GLuint64 fence_sync,
GLbyte* sync_token) {
if (!sync_token) {
- SetGLError(GL_INVALID_VALUE, "glGenNonFlushedSyncTokenCHROMIUM",
+ SetGLError(GL_INVALID_VALUE, "glGenUnverifiedSyncTokenCHROMIUM",
"empty sync_token");
return;
} else if (!gpu_control_->IsFenceSyncRelease(fence_sync)) {
- SetGLError(GL_INVALID_VALUE, "glGenNonFlushedSyncTokenCHROMIUM",
+ SetGLError(GL_INVALID_VALUE, "glGenUnverifiedSyncTokenCHROMIUM",
"invalid fence sync");
return;
} else if (!gpu_control_->IsFenceSyncFlushed(fence_sync)) {
- SetGLError(GL_INVALID_OPERATION, "glGenSyncTokenCHROMIUM",
+ SetGLError(GL_INVALID_OPERATION, "glGenUnverifiedSyncTokenCHROMIUM",
"fence sync must be flushed before generating sync token");
return;
}
@@ -6115,12 +6149,14 @@ void GLES2Implementation::VerifySyncTokensCHROMIUM(GLbyte **sync_tokens,
memcpy(&sync_token, sync_tokens[i], sizeof(sync_token));
if (sync_token.HasData() && !sync_token.verified_flush()) {
- if (!gpu_control_->CanWaitUnverifiedSyncToken(&sync_token)) {
+ if (!GetVerifiedSyncTokenForIPC(sync_token, &sync_token)) {
SetGLError(GL_INVALID_VALUE, "glVerifySyncTokensCHROMIUM",
"Cannot verify sync token using this context.");
return;
}
requires_synchronization = true;
+ DCHECK(sync_token.verified_flush());
+ memcpy(sync_tokens[i], &sync_token, sizeof(sync_token));
}
}
}
@@ -6131,43 +6167,51 @@ void GLES2Implementation::VerifySyncTokensCHROMIUM(GLbyte **sync_tokens,
if (requires_synchronization) {
// Make sure we have no pending ordering barriers by flushing now.
FlushHelper();
-
// Ensure all the fence syncs are visible on GPU service.
gpu_control_->EnsureWorkVisible();
-
- // We can automatically mark everything as verified now.
- for (GLsizei i = 0; i < count; ++i) {
- if (sync_tokens[i]) {
- SyncToken sync_token;
- memcpy(&sync_token, sync_tokens[i], sizeof(sync_token));
- if (sync_token.HasData() && !sync_token.verified_flush()) {
- sync_token.SetVerifyFlush();
- memcpy(sync_tokens[i], &sync_token, sizeof(sync_token));
- }
- }
- }
}
}
-void GLES2Implementation::WaitSyncTokenCHROMIUM(const GLbyte* sync_token) {
- if (sync_token) {
- // Copy the data over before data access to ensure alignment.
- SyncToken sync_token_data;
- memcpy(&sync_token_data, sync_token, sizeof(SyncToken));
- if (sync_token_data.HasData()) {
- if (!sync_token_data.verified_flush() &&
- !gpu_control_->CanWaitUnverifiedSyncToken(&sync_token_data)) {
- SetGLError(GL_INVALID_VALUE, "glWaitSyncTokenCHROMIUM",
- "Cannot wait on sync_token which has not been verified");
- return;
- }
+void GLES2Implementation::WaitSyncTokenCHROMIUM(const GLbyte* sync_token_data) {
+ if (!sync_token_data)
+ return;
- helper_->WaitSyncTokenCHROMIUM(
- static_cast<GLint>(sync_token_data.namespace_id()),
- sync_token_data.command_buffer_id().GetUnsafeValue(),
- sync_token_data.release_count());
- }
+ // Copy the data over before data access to ensure alignment.
+ SyncToken sync_token, verified_sync_token;
+ memcpy(&sync_token, sync_token_data, sizeof(SyncToken));
+
+ if (!sync_token.HasData())
+ return;
+
+ if (!GetVerifiedSyncTokenForIPC(sync_token, &verified_sync_token)) {
+ SetGLError(GL_INVALID_VALUE, "glWaitSyncTokenCHROMIUM",
+ "Cannot wait on sync_token which has not been verified");
+ return;
}
+
+ helper_->WaitSyncTokenCHROMIUM(
+ static_cast<GLint>(sync_token.namespace_id()),
+ sync_token.command_buffer_id().GetUnsafeValue(),
+ sync_token.release_count());
+
+ // Enqueue sync token in flush after inserting command so that it's not
+ // included in an automatic flush.
+ gpu_control_->WaitSyncTokenHint(verified_sync_token);
+}
+
+bool GLES2Implementation::GetVerifiedSyncTokenForIPC(
+ const SyncToken& sync_token,
+ SyncToken* verified_sync_token) {
+ DCHECK(sync_token.HasData());
+ DCHECK(verified_sync_token);
+
+ if (!sync_token.verified_flush() &&
+ !gpu_control_->CanWaitUnverifiedSyncToken(sync_token))
+ return false;
+
+ *verified_sync_token = sync_token;
+ verified_sync_token->SetVerifyFlush();
+ return true;
}
namespace {
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h
index a0f2d12259a..25bbd9eb668 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.h
@@ -251,10 +251,14 @@ class GLES2_IMPL_EXPORT GLES2Implementation
// ContextSupport implementation.
void SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) override;
- bool IsSyncTokenSignalled(const gpu::SyncToken& sync_token) override;
+ bool IsSyncTokenSignaled(const gpu::SyncToken& sync_token) override;
void SignalQuery(uint32_t query, const base::Closure& callback) override;
void SetAggressivelyFreeResources(bool aggressively_free_resources) override;
+ // Helper to set verified bit on sync token if allowed by gpu control.
+ bool GetVerifiedSyncTokenForIPC(const gpu::SyncToken& sync_token,
+ gpu::SyncToken* verified_sync_token);
+
// base::trace_event::MemoryDumpProvider implementation.
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) override;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
index ae04ded1b8a..ee5230112de 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -936,10 +936,10 @@ void PostSubBufferCHROMIUM(GLint x,
GLint width,
GLint height) override;
-void CopyTextureCHROMIUM(GLenum source_id,
+void CopyTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint internalformat,
GLenum dest_type,
@@ -947,10 +947,10 @@ void CopyTextureCHROMIUM(GLenum source_id,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) override;
-void CopySubTextureCHROMIUM(GLenum source_id,
+void CopySubTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint xoffset,
GLint yoffset,
@@ -962,7 +962,7 @@ void CopySubTextureCHROMIUM(GLenum source_id,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) override;
-void CompressedCopyTextureCHROMIUM(GLenum source_id, GLenum dest_id) override;
+void CompressedCopyTextureCHROMIUM(GLuint source_id, GLuint dest_id) override;
void DrawArraysInstancedANGLE(GLenum mode,
GLint first,
@@ -1060,6 +1060,19 @@ void FlushDriverCachesCHROMIUM() override;
GLuint GetLastFlushIdCHROMIUM() override;
+void ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
+ GLboolean is_clipped,
+ const GLfloat* clip_rect,
+ GLint z_order,
+ const GLfloat* transform) override;
+
+void ScheduleDCLayerCHROMIUM(GLuint contents_texture_id,
+ const GLfloat* contents_rect,
+ GLuint background_color,
+ GLuint edge_aa_mask,
+ const GLfloat* bounds_rect,
+ GLuint filter) override;
+
void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) override;
void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) override;
@@ -1212,4 +1225,6 @@ void SetDrawRectangleCHROMIUM(GLint x,
GLint width,
GLint height) override;
+void SetEnableDCLayersCHROMIUM(GLboolean enabled) override;
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
index 73a9676e2ea..db5d667d606 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
@@ -3111,10 +3111,10 @@ void GLES2Implementation::GetTranslatedShaderSourceANGLE(GLuint shader,
CheckGLError();
}
void GLES2Implementation::CopyTextureCHROMIUM(
- GLenum source_id,
+ GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint internalformat,
GLenum dest_type,
@@ -3123,12 +3123,11 @@ void GLES2Implementation::CopyTextureCHROMIUM(
GLboolean unpack_unmultiply_alpha) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG(
- "[" << GetLogPrefix() << "] glCopyTextureCHROMIUM("
- << GLES2Util::GetStringEnum(source_id) << ", " << source_level << ", "
- << GLES2Util::GetStringEnum(dest_target) << ", "
- << GLES2Util::GetStringEnum(dest_id) << ", " << dest_level << ", "
- << internalformat << ", " << GLES2Util::GetStringPixelType(dest_type)
- << ", " << GLES2Util::GetStringBool(unpack_flip_y) << ", "
+ "[" << GetLogPrefix() << "] glCopyTextureCHROMIUM(" << source_id << ", "
+ << source_level << ", " << GLES2Util::GetStringEnum(dest_target)
+ << ", " << dest_id << ", " << dest_level << ", " << internalformat
+ << ", " << GLES2Util::GetStringPixelType(dest_type) << ", "
+ << GLES2Util::GetStringBool(unpack_flip_y) << ", "
<< GLES2Util::GetStringBool(unpack_premultiply_alpha) << ", "
<< GLES2Util::GetStringBool(unpack_unmultiply_alpha) << ")");
helper_->CopyTextureCHROMIUM(source_id, source_level, dest_target, dest_id,
@@ -3139,10 +3138,10 @@ void GLES2Implementation::CopyTextureCHROMIUM(
}
void GLES2Implementation::CopySubTextureCHROMIUM(
- GLenum source_id,
+ GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint xoffset,
GLint yoffset,
@@ -3155,12 +3154,11 @@ void GLES2Implementation::CopySubTextureCHROMIUM(
GLboolean unpack_unmultiply_alpha) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG(
- "[" << GetLogPrefix() << "] glCopySubTextureCHROMIUM("
- << GLES2Util::GetStringEnum(source_id) << ", " << source_level << ", "
- << GLES2Util::GetStringEnum(dest_target) << ", "
- << GLES2Util::GetStringEnum(dest_id) << ", " << dest_level << ", "
- << xoffset << ", " << yoffset << ", " << x << ", " << y << ", "
- << width << ", " << height << ", "
+ "[" << GetLogPrefix() << "] glCopySubTextureCHROMIUM(" << source_id
+ << ", " << source_level << ", "
+ << GLES2Util::GetStringEnum(dest_target) << ", " << dest_id << ", "
+ << dest_level << ", " << xoffset << ", " << yoffset << ", " << x
+ << ", " << y << ", " << width << ", " << height << ", "
<< GLES2Util::GetStringBool(unpack_flip_y) << ", "
<< GLES2Util::GetStringBool(unpack_premultiply_alpha) << ", "
<< GLES2Util::GetStringBool(unpack_unmultiply_alpha) << ")");
@@ -3179,12 +3177,11 @@ void GLES2Implementation::CopySubTextureCHROMIUM(
CheckGLError();
}
-void GLES2Implementation::CompressedCopyTextureCHROMIUM(GLenum source_id,
- GLenum dest_id) {
+void GLES2Implementation::CompressedCopyTextureCHROMIUM(GLuint source_id,
+ GLuint dest_id) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCompressedCopyTextureCHROMIUM("
- << GLES2Util::GetStringEnum(source_id) << ", "
- << GLES2Util::GetStringEnum(dest_id) << ")");
+ << source_id << ", " << dest_id << ")");
helper_->CompressedCopyTextureCHROMIUM(source_id, dest_id);
CheckGLError();
}
@@ -3524,4 +3521,12 @@ void GLES2Implementation::SetDrawRectangleCHROMIUM(GLint x,
CheckGLError();
}
+void GLES2Implementation::SetEnableDCLayersCHROMIUM(GLboolean enabled) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glSetEnableDCLayersCHROMIUM("
+ << GLES2Util::GetStringBool(enabled) << ")");
+ helper_->SetEnableDCLayersCHROMIUM(enabled);
+ CheckGLError();
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
index 48484912b0d..f193dc55445 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -4087,7 +4087,7 @@ TEST_F(GLES2ImplementationTest, VerifySyncTokensCHROMIUM) {
ASSERT_FALSE(sync_token.verified_flush());
ClearCommands();
- EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(_))
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token))
.WillOnce(Return(false));
gl_->VerifySyncTokensCHROMIUM(sync_token_datas, 1);
EXPECT_TRUE(NoCommandsWritten());
@@ -4095,7 +4095,7 @@ TEST_F(GLES2ImplementationTest, VerifySyncTokensCHROMIUM) {
EXPECT_FALSE(sync_token.verified_flush());
ClearCommands();
- EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(_))
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token))
.WillOnce(Return(true));
EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
gl_->VerifySyncTokensCHROMIUM(sync_token_datas, arraysize(sync_token_datas));
@@ -4154,10 +4154,10 @@ TEST_F(GLES2ImplementationTest, VerifySyncTokensCHROMIUM_Sequence) {
// Ensure proper sequence of checking and validating.
Sequence sequence;
- EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(Pointee(sync_token1)))
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token1))
.InSequence(sequence)
.WillOnce(Return(true));
- EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(Pointee(sync_token2)))
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token2))
.InSequence(sequence)
.WillOnce(Return(true));
EXPECT_CALL(*gpu_control_, EnsureWorkVisible())
@@ -4171,11 +4171,12 @@ TEST_F(GLES2ImplementationTest, VerifySyncTokensCHROMIUM_Sequence) {
}
TEST_F(GLES2ImplementationTest, WaitSyncTokenCHROMIUM) {
- const CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
- const CommandBufferId kCommandBufferId =
- CommandBufferId::FromUnsafeValue(234u);
- const GLuint64 kFenceSync = 456u;
- GLbyte sync_token[GL_SYNC_TOKEN_SIZE_CHROMIUM];
+ CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
+ CommandBufferId kCommandBufferId = CommandBufferId::FromUnsafeValue(234u);
+ GLuint64 kFenceSync = 456u;
+
+ gpu::SyncToken sync_token;
+ GLbyte* sync_token_data = sync_token.GetData();
EXPECT_CALL(*gpu_control_, IsFenceSyncRelease(kFenceSync))
.WillOnce(Return(true));
@@ -4185,7 +4186,7 @@ TEST_F(GLES2ImplementationTest, WaitSyncTokenCHROMIUM) {
EXPECT_CALL(*gpu_control_, GetCommandBufferID())
.WillOnce(Return(kCommandBufferId));
EXPECT_CALL(*gpu_control_, GetExtraCommandBufferData()).WillOnce(Return(0));
- gl_->GenSyncTokenCHROMIUM(kFenceSync, sync_token);
+ gl_->GenSyncTokenCHROMIUM(kFenceSync, sync_token_data);
struct Cmds {
cmds::WaitSyncTokenCHROMIUM wait_sync_token;
@@ -4194,7 +4195,8 @@ TEST_F(GLES2ImplementationTest, WaitSyncTokenCHROMIUM) {
expected.wait_sync_token.Init(kNamespaceId, kCommandBufferId.GetUnsafeValue(),
kFenceSync);
- gl_->WaitSyncTokenCHROMIUM(sync_token);
+ EXPECT_CALL(*gpu_control_, WaitSyncTokenHint(sync_token));
+ gl_->WaitSyncTokenCHROMIUM(sync_token_data);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
@@ -4222,7 +4224,7 @@ TEST_F(GLES2ImplementationTest, WaitSyncTokenCHROMIUMErrors) {
ClearCommands();
gpu::SyncToken unverified_sync_token(CommandBufferNamespace::GPU_IO, 0,
gpu::CommandBufferId(), 0);
- EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(_))
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(unverified_sync_token))
.WillOnce(Return(false));
gl_->WaitSyncTokenCHROMIUM(unverified_sync_token.GetConstData());
EXPECT_TRUE(NoCommandsWritten());
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
index 2e17662d7d2..cee92feeea8 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
@@ -3058,4 +3058,15 @@ TEST_F(GLES2ImplementationTest, SetDrawRectangleCHROMIUM) {
gl_->SetDrawRectangleCHROMIUM(1, 2, 3, 4);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
+
+TEST_F(GLES2ImplementationTest, SetEnableDCLayersCHROMIUM) {
+ struct Cmds {
+ cmds::SetEnableDCLayersCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(true);
+
+ gl_->SetEnableDCLayersCHROMIUM(true);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
index 0883e2f3965..65724af5823 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -684,20 +684,20 @@ virtual void PostSubBufferCHROMIUM(GLint x,
GLint y,
GLint width,
GLint height) = 0;
-virtual void CopyTextureCHROMIUM(GLenum source_id,
+virtual void CopyTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) = 0;
-virtual void CopySubTextureCHROMIUM(GLenum source_id,
+virtual void CopySubTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint xoffset,
GLint yoffset,
@@ -708,8 +708,8 @@ virtual void CopySubTextureCHROMIUM(GLenum source_id,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) = 0;
-virtual void CompressedCopyTextureCHROMIUM(GLenum source_id,
- GLenum dest_id) = 0;
+virtual void CompressedCopyTextureCHROMIUM(GLuint source_id,
+ GLuint dest_id) = 0;
virtual void DrawArraysInstancedANGLE(GLenum mode,
GLint first,
GLsizei count,
@@ -776,6 +776,17 @@ virtual void CommitOverlayPlanesCHROMIUM() = 0;
virtual void SwapInterval(GLint interval) = 0;
virtual void FlushDriverCachesCHROMIUM() = 0;
virtual GLuint GetLastFlushIdCHROMIUM() = 0;
+virtual void ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
+ GLboolean is_clipped,
+ const GLfloat* clip_rect,
+ GLint z_order,
+ const GLfloat* transform) = 0;
+virtual void ScheduleDCLayerCHROMIUM(GLuint contents_texture_id,
+ const GLfloat* contents_rect,
+ GLuint background_color,
+ GLuint edge_aa_mask,
+ const GLfloat* bounds_rect,
+ GLuint filter) = 0;
virtual void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) = 0;
virtual void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) = 0;
virtual GLuint GenPathsCHROMIUM(GLsizei range) = 0;
@@ -898,4 +909,5 @@ virtual void SetDrawRectangleCHROMIUM(GLint x,
GLint y,
GLint width,
GLint height) = 0;
+virtual void SetEnableDCLayersCHROMIUM(GLboolean enabled) = 0;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
index dbf6f11595d..54dabbc267b 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -663,20 +663,20 @@ void PostSubBufferCHROMIUM(GLint x,
GLint y,
GLint width,
GLint height) override;
-void CopyTextureCHROMIUM(GLenum source_id,
+void CopyTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) override;
-void CopySubTextureCHROMIUM(GLenum source_id,
+void CopySubTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint xoffset,
GLint yoffset,
@@ -687,7 +687,7 @@ void CopySubTextureCHROMIUM(GLenum source_id,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) override;
-void CompressedCopyTextureCHROMIUM(GLenum source_id, GLenum dest_id) override;
+void CompressedCopyTextureCHROMIUM(GLuint source_id, GLuint dest_id) override;
void DrawArraysInstancedANGLE(GLenum mode,
GLint first,
GLsizei count,
@@ -754,6 +754,17 @@ void CommitOverlayPlanesCHROMIUM() override;
void SwapInterval(GLint interval) override;
void FlushDriverCachesCHROMIUM() override;
GLuint GetLastFlushIdCHROMIUM() override;
+void ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
+ GLboolean is_clipped,
+ const GLfloat* clip_rect,
+ GLint z_order,
+ const GLfloat* transform) override;
+void ScheduleDCLayerCHROMIUM(GLuint contents_texture_id,
+ const GLfloat* contents_rect,
+ GLuint background_color,
+ GLuint edge_aa_mask,
+ const GLfloat* bounds_rect,
+ GLuint filter) override;
void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) override;
void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) override;
GLuint GenPathsCHROMIUM(GLsizei range) override;
@@ -871,4 +882,5 @@ void SetDrawRectangleCHROMIUM(GLint x,
GLint y,
GLint width,
GLint height) override;
+void SetEnableDCLayersCHROMIUM(GLboolean enabled) override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
index 8d3cc0e540f..5073353be75 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -908,10 +908,10 @@ void GLES2InterfaceStub::PostSubBufferCHROMIUM(GLint /* x */,
GLint /* width */,
GLint /* height */) {}
void GLES2InterfaceStub::CopyTextureCHROMIUM(
- GLenum /* source_id */,
+ GLuint /* source_id */,
GLint /* source_level */,
GLenum /* dest_target */,
- GLenum /* dest_id */,
+ GLuint /* dest_id */,
GLint /* dest_level */,
GLint /* internalformat */,
GLenum /* dest_type */,
@@ -919,10 +919,10 @@ void GLES2InterfaceStub::CopyTextureCHROMIUM(
GLboolean /* unpack_premultiply_alpha */,
GLboolean /* unpack_unmultiply_alpha */) {}
void GLES2InterfaceStub::CopySubTextureCHROMIUM(
- GLenum /* source_id */,
+ GLuint /* source_id */,
GLint /* source_level */,
GLenum /* dest_target */,
- GLenum /* dest_id */,
+ GLuint /* dest_id */,
GLint /* dest_level */,
GLint /* xoffset */,
GLint /* yoffset */,
@@ -933,8 +933,8 @@ void GLES2InterfaceStub::CopySubTextureCHROMIUM(
GLboolean /* unpack_flip_y */,
GLboolean /* unpack_premultiply_alpha */,
GLboolean /* unpack_unmultiply_alpha */) {}
-void GLES2InterfaceStub::CompressedCopyTextureCHROMIUM(GLenum /* source_id */,
- GLenum /* dest_id */) {}
+void GLES2InterfaceStub::CompressedCopyTextureCHROMIUM(GLuint /* source_id */,
+ GLuint /* dest_id */) {}
void GLES2InterfaceStub::DrawArraysInstancedANGLE(GLenum /* mode */,
GLint /* first */,
GLsizei /* count */,
@@ -1025,6 +1025,19 @@ void GLES2InterfaceStub::FlushDriverCachesCHROMIUM() {}
GLuint GLES2InterfaceStub::GetLastFlushIdCHROMIUM() {
return 0;
}
+void GLES2InterfaceStub::ScheduleDCLayerSharedStateCHROMIUM(
+ GLfloat /* opacity */,
+ GLboolean /* is_clipped */,
+ const GLfloat* /* clip_rect */,
+ GLint /* z_order */,
+ const GLfloat* /* transform */) {}
+void GLES2InterfaceStub::ScheduleDCLayerCHROMIUM(
+ GLuint /* contents_texture_id */,
+ const GLfloat* /* contents_rect */,
+ GLuint /* background_color */,
+ GLuint /* edge_aa_mask */,
+ const GLfloat* /* bounds_rect */,
+ GLuint /* filter */) {}
void GLES2InterfaceStub::MatrixLoadfCHROMIUM(GLenum /* matrixMode */,
const GLfloat* /* m */) {}
void GLES2InterfaceStub::MatrixLoadIdentityCHROMIUM(GLenum /* matrixMode */) {}
@@ -1174,4 +1187,5 @@ void GLES2InterfaceStub::SetDrawRectangleCHROMIUM(GLint /* x */,
GLint /* y */,
GLint /* width */,
GLint /* height */) {}
+void GLES2InterfaceStub::SetEnableDCLayersCHROMIUM(GLboolean /* enabled */) {}
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
index 6d09d946090..783d7cc7308 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -663,20 +663,20 @@ void PostSubBufferCHROMIUM(GLint x,
GLint y,
GLint width,
GLint height) override;
-void CopyTextureCHROMIUM(GLenum source_id,
+void CopyTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) override;
-void CopySubTextureCHROMIUM(GLenum source_id,
+void CopySubTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint xoffset,
GLint yoffset,
@@ -687,7 +687,7 @@ void CopySubTextureCHROMIUM(GLenum source_id,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) override;
-void CompressedCopyTextureCHROMIUM(GLenum source_id, GLenum dest_id) override;
+void CompressedCopyTextureCHROMIUM(GLuint source_id, GLuint dest_id) override;
void DrawArraysInstancedANGLE(GLenum mode,
GLint first,
GLsizei count,
@@ -754,6 +754,17 @@ void CommitOverlayPlanesCHROMIUM() override;
void SwapInterval(GLint interval) override;
void FlushDriverCachesCHROMIUM() override;
GLuint GetLastFlushIdCHROMIUM() override;
+void ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
+ GLboolean is_clipped,
+ const GLfloat* clip_rect,
+ GLint z_order,
+ const GLfloat* transform) override;
+void ScheduleDCLayerCHROMIUM(GLuint contents_texture_id,
+ const GLfloat* contents_rect,
+ GLuint background_color,
+ GLuint edge_aa_mask,
+ const GLfloat* bounds_rect,
+ GLuint filter) override;
void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) override;
void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) override;
GLuint GenPathsCHROMIUM(GLsizei range) override;
@@ -871,4 +882,5 @@ void SetDrawRectangleCHROMIUM(GLint x,
GLint y,
GLint width,
GLint height) override;
+void SetEnableDCLayersCHROMIUM(GLboolean enabled) override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
index 459910e9433..01db22e23c1 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -1937,10 +1937,10 @@ void GLES2TraceImplementation::PostSubBufferCHROMIUM(GLint x,
}
void GLES2TraceImplementation::CopyTextureCHROMIUM(
- GLenum source_id,
+ GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint internalformat,
GLenum dest_type,
@@ -1954,10 +1954,10 @@ void GLES2TraceImplementation::CopyTextureCHROMIUM(
}
void GLES2TraceImplementation::CopySubTextureCHROMIUM(
- GLenum source_id,
+ GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint xoffset,
GLint yoffset,
@@ -1975,8 +1975,8 @@ void GLES2TraceImplementation::CopySubTextureCHROMIUM(
unpack_unmultiply_alpha);
}
-void GLES2TraceImplementation::CompressedCopyTextureCHROMIUM(GLenum source_id,
- GLenum dest_id) {
+void GLES2TraceImplementation::CompressedCopyTextureCHROMIUM(GLuint source_id,
+ GLuint dest_id) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu",
"GLES2Trace::CompressedCopyTextureCHROMIUM");
gl_->CompressedCopyTextureCHROMIUM(source_id, dest_id);
@@ -2199,6 +2199,31 @@ GLuint GLES2TraceImplementation::GetLastFlushIdCHROMIUM() {
return gl_->GetLastFlushIdCHROMIUM();
}
+void GLES2TraceImplementation::ScheduleDCLayerSharedStateCHROMIUM(
+ GLfloat opacity,
+ GLboolean is_clipped,
+ const GLfloat* clip_rect,
+ GLint z_order,
+ const GLfloat* transform) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::ScheduleDCLayerSharedStateCHROMIUM");
+ gl_->ScheduleDCLayerSharedStateCHROMIUM(opacity, is_clipped, clip_rect,
+ z_order, transform);
+}
+
+void GLES2TraceImplementation::ScheduleDCLayerCHROMIUM(
+ GLuint contents_texture_id,
+ const GLfloat* contents_rect,
+ GLuint background_color,
+ GLuint edge_aa_mask,
+ const GLfloat* bounds_rect,
+ GLuint filter) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ScheduleDCLayerCHROMIUM");
+ gl_->ScheduleDCLayerCHROMIUM(contents_texture_id, contents_rect,
+ background_color, edge_aa_mask, bounds_rect,
+ filter);
+}
+
void GLES2TraceImplementation::MatrixLoadfCHROMIUM(GLenum matrixMode,
const GLfloat* m) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MatrixLoadfCHROMIUM");
@@ -2511,4 +2536,9 @@ void GLES2TraceImplementation::SetDrawRectangleCHROMIUM(GLint x,
gl_->SetDrawRectangleCHROMIUM(x, y, width, height);
}
+void GLES2TraceImplementation::SetEnableDCLayersCHROMIUM(GLboolean enabled) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::SetEnableDCLayersCHROMIUM");
+ gl_->SetEnableDCLayersCHROMIUM(enabled);
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gpu_control.h b/chromium/gpu/command_buffer/client/gpu_control.h
index 6b7bdd52b4b..519712edc4a 100644
--- a/chromium/gpu/command_buffer/client/gpu_control.h
+++ b/chromium/gpu/command_buffer/client/gpu_control.h
@@ -98,15 +98,22 @@ class GPU_EXPORT GpuControl {
// the lock provided by the client.
virtual bool IsFenceSyncReleased(uint64_t release) = 0;
- // Runs |callback| when sync token is signalled.
+ // Runs |callback| when sync token is signaled.
virtual void SignalSyncToken(const SyncToken& sync_token,
const base::Closure& callback) = 0;
+ // This allows the command buffer proxy to mark the next flush with sync token
+ // dependencies for the gpu scheduler. This is used in addition to the
+ // WaitSyncToken command in the command buffer which is still needed. For
+ // example, the WaitSyncToken command is used to pull texture updates when
+ // used in conjunction with MailboxManagerSync.
+ virtual void WaitSyncTokenHint(const SyncToken& sync_token) = 0;
+
// Under some circumstances a sync token may be used which has not been
- // verified to have been flushed. For example, fence syncs queued on the
- // same channel as the wait command guarantee that the fence sync will
- // be enqueued first so does not need to be flushed.
- virtual bool CanWaitUnverifiedSyncToken(const SyncToken* sync_token) = 0;
+ // verified to have been flushed. For example, fence syncs queued on the same
+ // channel as the wait command guarantee that the fence sync will be enqueued
+ // first so does not need to be flushed.
+ virtual bool CanWaitUnverifiedSyncToken(const SyncToken& sync_token) = 0;
private:
DISALLOW_COPY_AND_ASSIGN(GpuControl);
diff --git a/chromium/gpu/command_buffer/cmd_buffer_functions.txt b/chromium/gpu/command_buffer/cmd_buffer_functions.txt
index 6d30de93be6..a1055e89b3f 100644
--- a/chromium/gpu/command_buffer/cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/cmd_buffer_functions.txt
@@ -279,9 +279,9 @@ GL_APICALL void GL_APIENTRY glDestroyImageCHROMIUM (GLuint image_id);
GL_APICALL void GL_APIENTRY glDescheduleUntilFinishedCHROMIUM (void);
GL_APICALL void GL_APIENTRY glGetTranslatedShaderSourceANGLE (GLidShader shader, GLsizeiNotNegative bufsize, GLsizeiOptional* length, char* source);
GL_APICALL void GL_APIENTRY glPostSubBufferCHROMIUM (GLint x, GLint y, GLint width, GLint height);
-GL_APICALL void GL_APIENTRY glCopyTextureCHROMIUM (GLenum source_id, GLint source_level, GLenum dest_target, GLenum dest_id, GLint dest_level, GLintTextureInternalFormat internalformat, GLenumPixelType dest_type, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
-GL_APICALL void GL_APIENTRY glCopySubTextureCHROMIUM (GLenum source_id, GLint source_level, GLenum dest_target, GLenum dest_id, GLint dest_level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
-GL_APICALL void GL_APIENTRY glCompressedCopyTextureCHROMIUM (GLenum source_id, GLenum dest_id);
+GL_APICALL void GL_APIENTRY glCopyTextureCHROMIUM (GLuint source_id, GLint source_level, GLenum dest_target, GLuint dest_id, GLint dest_level, GLintTextureInternalFormat internalformat, GLenumPixelType dest_type, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
+GL_APICALL void GL_APIENTRY glCopySubTextureCHROMIUM (GLuint source_id, GLint source_level, GLenum dest_target, GLuint dest_id, GLint dest_level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
+GL_APICALL void GL_APIENTRY glCompressedCopyTextureCHROMIUM (GLuint source_id, GLuint dest_id);
GL_APICALL void GL_APIENTRY glDrawArraysInstancedANGLE (GLenumDrawMode mode, GLint first, GLsizei count, GLsizei primcount);
GL_APICALL void GL_APIENTRY glDrawElementsInstancedANGLE (GLenumDrawMode mode, GLsizei count, GLenumIndexType type, const void* indices, GLsizei primcount);
GL_APICALL void GL_APIENTRY glVertexAttribDivisorANGLE (GLuint index, GLuint divisor);
@@ -313,6 +313,8 @@ GL_APICALL void GL_APIENTRY glCommitOverlayPlanesCHROMIUM (void);
GL_APICALL void GL_APIENTRY glSwapInterval (GLint interval);
GL_APICALL void GL_APIENTRY glFlushDriverCachesCHROMIUM (void);
GL_APICALL GLuint GL_APIENTRY glGetLastFlushIdCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glScheduleDCLayerSharedStateCHROMIUM (GLfloat opacity, GLboolean is_clipped, const GLfloat* clip_rect, GLint z_order, const GLfloat* transform);
+GL_APICALL void GL_APIENTRY glScheduleDCLayerCHROMIUM (GLuint contents_texture_id, const GLfloat* contents_rect, GLuint background_color, GLuint edge_aa_mask, const GLfloat* bounds_rect, GLuint filter);
// Extension CHROMIUM_path_rendering.
GL_APICALL void GL_APIENTRY glMatrixLoadfCHROMIUM (GLenumMatrixMode matrixMode, const GLfloat* m);
@@ -366,3 +368,6 @@ GL_APICALL void GL_APIENTRY glSwapBuffersWithBoundsCHROMIUM (GLsizei cou
// Extension CHROMIUM_set_draw_rectangle
GL_APICALL void GL_APIENTRY glSetDrawRectangleCHROMIUM (GLint x, GLint y, GLint width, GLint height);
+
+// Extension CHROMIUM_dc_overlays
+GL_APICALL void GL_APIENTRY glSetEnableDCLayersCHROMIUM (GLboolean enabled);
diff --git a/chromium/gpu/command_buffer/common/BUILD.gn b/chromium/gpu/command_buffer/common/BUILD.gn
index 8c4df70103f..4b69e04e114 100644
--- a/chromium/gpu/command_buffer/common/BUILD.gn
+++ b/chromium/gpu/command_buffer/common/BUILD.gn
@@ -23,6 +23,8 @@ source_set("common_sources") {
visibility = [ "//gpu/*" ]
sources = [
+ "activity_flags.cc",
+ "activity_flags.h",
"bitfield_helpers.h",
"buffer.cc",
"buffer.h",
@@ -35,6 +37,8 @@ source_set("common_sources") {
"constants.h",
"debug_marker_manager.cc",
"debug_marker_manager.h",
+ "discardable_handle.cc",
+ "discardable_handle.h",
"gles2_cmd_format.cc",
"gles2_cmd_format.h",
"gles2_cmd_format_autogen.h",
@@ -59,6 +63,7 @@ source_set("common_sources") {
configs += [ "//gpu:gpu_implementation" ]
public_deps = [
+ "//mojo/public/cpp/system",
"//ui/gfx:memory_buffer",
"//ui/gfx/geometry",
]
diff --git a/chromium/gpu/command_buffer/common/DEPS b/chromium/gpu/command_buffer/common/DEPS
index d6a79315970..5945e36c80b 100644
--- a/chromium/gpu/command_buffer/common/DEPS
+++ b/chromium/gpu/command_buffer/common/DEPS
@@ -2,4 +2,7 @@ specific_include_rules = {
"unittest_main\.cc": [
"+mojo/edk/embedder/embedder.h",
],
+ "activity_flags.h": [
+ "+mojo/public/cpp/system/buffer.h",
+ ],
}
diff --git a/chromium/gpu/command_buffer/common/activity_flags.cc b/chromium/gpu/command_buffer/common/activity_flags.cc
new file mode 100644
index 00000000000..d70d99a1dc3
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/activity_flags.cc
@@ -0,0 +1,75 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/activity_flags.h"
+
+namespace gpu {
+
+ActivityFlagsBase::ActivityFlagsBase() = default;
+ActivityFlagsBase::ActivityFlagsBase(ActivityFlagsBase&& other) = default;
+ActivityFlagsBase::~ActivityFlagsBase() = default;
+
+void ActivityFlagsBase::Initialize(mojo::ScopedSharedBufferHandle handle) {
+ handle_ = std::move(handle);
+ mapping_ = handle_->Map(sizeof(Flag));
+}
+
+volatile base::subtle::Atomic32* ActivityFlagsBase::AsAtomic() {
+ return reinterpret_cast<volatile base::subtle::Atomic32*>(mapping_.get());
+}
+
+GpuProcessActivityFlags::GpuProcessActivityFlags() = default;
+GpuProcessActivityFlags::GpuProcessActivityFlags(
+ GpuProcessActivityFlags&& other) = default;
+
+GpuProcessActivityFlags::GpuProcessActivityFlags(
+ mojo::ScopedSharedBufferHandle handle) {
+ // In cases where we are running without a GpuProcessHost, we may not
+ // have a valid handle. In this case, just return.
+ if (!handle.is_valid())
+ return;
+
+ Initialize(std::move(handle));
+}
+
+void GpuProcessActivityFlags::SetFlag(Flag flag) {
+ // In cases where we are running without a GpuProcessHost, we may not
+ // initialize the GpuProcessActivityFlags. In this case, just return.
+ if (!is_initialized())
+ return;
+
+ base::subtle::Atomic32 old_value = base::subtle::NoBarrier_Load(AsAtomic());
+ base::subtle::Atomic32 new_value = old_value | flag;
+
+ // These flags are only written by a single process / single thread.
+ // We should never double-set them.
+ DCHECK(!(old_value & flag));
+ base::subtle::Acquire_Store(AsAtomic(), new_value);
+}
+
+void GpuProcessActivityFlags::UnsetFlag(Flag flag) {
+ // In cases where we are running without a GpuProcessHost, we may not
+ // initialize the GpuProcessActivityFlags. In this case, just return.
+ if (!is_initialized())
+ return;
+
+ base::subtle::Atomic32 old_value = base::subtle::NoBarrier_Load(AsAtomic());
+ base::subtle::Atomic32 new_value = old_value ^ flag;
+
+ // These flags are only written by a single process / single thread.
+ // We should never double-unset them.
+ DCHECK(!!(old_value & flag));
+ base::subtle::Release_Store(AsAtomic(), new_value);
+}
+
+GpuProcessHostActivityFlags::GpuProcessHostActivityFlags() {
+ Initialize(mojo::SharedBufferHandle::Create(sizeof(Flag)));
+}
+
+bool GpuProcessHostActivityFlags::IsFlagSet(Flag flag) {
+ DCHECK(is_initialized());
+ return !!(base::subtle::Acquire_Load(AsAtomic()) & flag);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/activity_flags.h b/chromium/gpu/command_buffer/common/activity_flags.h
new file mode 100644
index 00000000000..d80e1e3da83
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/activity_flags.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_ACTIVITY_FLAGS_H_
+#define GPU_COMMAND_BUFFER_COMMON_ACTIVITY_FLAGS_H_
+
+#include "base/atomicops.h"
+#include "gpu/gpu_export.h"
+#include "mojo/public/cpp/system/buffer.h"
+
+namespace gpu {
+
+// Base class for GpuProcessActivityFlags and GpuProcessHostActivityFlags,
+// can not be used directly.
+class GPU_EXPORT ActivityFlagsBase {
+ public:
+ enum Flag : uint32_t { FLAG_LOADING_PROGRAM_BINARY = 0x1 };
+
+ protected:
+ ActivityFlagsBase();
+ ActivityFlagsBase(ActivityFlagsBase&& other);
+ ~ActivityFlagsBase();
+
+ void Initialize(mojo::ScopedSharedBufferHandle handle);
+ const mojo::SharedBufferHandle& handle() const { return handle_.get(); }
+ bool is_initialized() const { return handle().is_valid(); }
+
+ protected:
+ volatile base::subtle::Atomic32* AsAtomic();
+
+ private:
+ mojo::ScopedSharedBufferHandle handle_;
+ mojo::ScopedSharedBufferMapping mapping_;
+};
+
+// Provides write-only access to activity flags for the gpu process. Each gpu
+// process has a singleton GpuProcessActivityFlags retreived via GetInstance().
+//
+// Note that we currently assume that the GPU process never sets/unsets flags
+// from multiple threads at the same time. This is true with our current
+// single-flag approach, but may need adjustment if additional flags are added.
+class GPU_EXPORT GpuProcessActivityFlags : public ActivityFlagsBase {
+ public:
+ class ScopedSetFlag {
+ public:
+ ScopedSetFlag(GpuProcessActivityFlags* activity_flags, Flag flag)
+ : activity_flags_(activity_flags), flag_(flag) {
+ activity_flags_->SetFlag(flag_);
+ }
+ ~ScopedSetFlag() { activity_flags_->UnsetFlag(flag_); }
+
+ private:
+ GpuProcessActivityFlags* activity_flags_;
+ Flag flag_;
+ };
+
+ GpuProcessActivityFlags();
+ GpuProcessActivityFlags(GpuProcessActivityFlags&& other);
+ GpuProcessActivityFlags(mojo::ScopedSharedBufferHandle handle);
+
+ private:
+ void SetFlag(Flag flag);
+ void UnsetFlag(Flag flag);
+};
+
+// Provides read-only access to activity flags. Creating a new
+// GpuProcessHostActivityFlags will initialize a new mojo shared buffer. The
+// handle to this buffer should be passed to the GPU process via CloneHandle.
+// The GPU process will then populate flags, which can be read via this class.
+class GPU_EXPORT GpuProcessHostActivityFlags : public ActivityFlagsBase {
+ public:
+ GpuProcessHostActivityFlags();
+
+ bool IsFlagSet(Flag flag);
+ mojo::ScopedSharedBufferHandle CloneHandle() { return handle().Clone(); }
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_ACTIVITY_FLAGS_H_
diff --git a/chromium/gpu/command_buffer/common/activity_flags_unittest.cc b/chromium/gpu/command_buffer/common/activity_flags_unittest.cc
new file mode 100644
index 00000000000..3fe930849c7
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/activity_flags_unittest.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/activity_flags.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+TEST(ActivityFlagsTest, BasicUsage) {
+ // Create the host activity flags.
+ GpuProcessHostActivityFlags host_flags;
+ EXPECT_FALSE(
+ host_flags.IsFlagSet(ActivityFlagsBase::FLAG_LOADING_PROGRAM_BINARY));
+
+ // Create the service activity flags from host memory.
+ GpuProcessActivityFlags service_flags(host_flags.CloneHandle());
+
+ // Ensure we can set and re-set flags.
+ {
+ GpuProcessActivityFlags::ScopedSetFlag scoped_set_flag(
+ &service_flags, ActivityFlagsBase::FLAG_LOADING_PROGRAM_BINARY);
+ EXPECT_TRUE(
+ host_flags.IsFlagSet(ActivityFlagsBase::FLAG_LOADING_PROGRAM_BINARY));
+ }
+ EXPECT_FALSE(
+ host_flags.IsFlagSet(ActivityFlagsBase::FLAG_LOADING_PROGRAM_BINARY));
+}
+
+TEST(ActivityFlagsTest, NotInitialized) {
+ // Get the service activity flags without providing host memory.
+ auto buffer = mojo::ScopedSharedBufferHandle();
+ GpuProcessActivityFlags service_flags(std::move(buffer));
+
+ // Set/Unset should not crash.
+ {
+ GpuProcessActivityFlags::ScopedSetFlag scoped_set_flag(
+ &service_flags, ActivityFlagsBase::FLAG_LOADING_PROGRAM_BINARY);
+ }
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/capabilities.h b/chromium/gpu/command_buffer/common/capabilities.h
index 826d0b4479e..9cfa984a671 100644
--- a/chromium/gpu/command_buffer/common/capabilities.h
+++ b/chromium/gpu/command_buffer/common/capabilities.h
@@ -154,7 +154,9 @@ struct GPU_EXPORT Capabilities {
bool disable_multisampling_color_mask_usage = false;
bool disable_webgl_rgb_multisampling_usage = false;
bool gpu_rasterization = false;
- bool set_draw_rectangle = false;
+ bool avoid_stencil_buffers = false;
+ // True if DirectComposition layers are enabled.
+ bool dc_layers = false;
// When this parameter is true, a CHROMIUM image created with RGB format will
// actually have RGBA format. The client is responsible for handling most of
@@ -167,6 +169,10 @@ struct GPU_EXPORT Capabilities {
// work around this. See https://crbug.com/449150 for an example.
bool emulate_rgb_buffer_with_rgba = false;
+ // When true, is safe to convert a canvas from software to accelerated.
+ // See https://crbug.com/710029.
+ bool software_to_accelerated_canvas_upgrade = true;
+
// When true, non-empty post sub buffer calls are unsupported.
bool disable_non_empty_post_sub_buffers = false;
diff --git a/chromium/gpu/command_buffer/common/discardable_handle.cc b/chromium/gpu/command_buffer/common/discardable_handle.cc
new file mode 100644
index 00000000000..987ad821f45
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/discardable_handle.cc
@@ -0,0 +1,117 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/discardable_handle.h"
+
+#include "base/atomicops.h"
+#include "gpu/command_buffer/common/buffer.h"
+
+namespace gpu {
+namespace {
+const int32_t kHandleDeleted = 0;
+const int32_t kHandleUnlocked = 1;
+const int32_t kHandleLockedStart = 2;
+
+} // namespace
+
+DiscardableHandleBase::DiscardableHandleBase(scoped_refptr<Buffer> buffer,
+ uint32_t byte_offset,
+ int32_t shm_id)
+ : buffer_(std::move(buffer)), byte_offset_(byte_offset), shm_id_(shm_id) {}
+
+DiscardableHandleBase::DiscardableHandleBase(
+ const DiscardableHandleBase& other) = default;
+DiscardableHandleBase::DiscardableHandleBase(DiscardableHandleBase&& other) =
+ default;
+DiscardableHandleBase::~DiscardableHandleBase() = default;
+DiscardableHandleBase& DiscardableHandleBase::operator=(
+ const DiscardableHandleBase& other) = default;
+DiscardableHandleBase& DiscardableHandleBase::operator=(
+ DiscardableHandleBase&& other) = default;
+
+bool DiscardableHandleBase::IsLockedForTesting() {
+ return kHandleLockedStart <= base::subtle::NoBarrier_Load(AsAtomic());
+}
+
+bool DiscardableHandleBase::IsDeletedForTesting() {
+ return kHandleDeleted == base::subtle::NoBarrier_Load(AsAtomic());
+}
+
+volatile base::subtle::Atomic32* DiscardableHandleBase::AsAtomic() const {
+ return reinterpret_cast<volatile base::subtle::Atomic32*>(
+ buffer_->GetDataAddress(byte_offset_, sizeof(base::subtle::Atomic32)));
+}
+
+ClientDiscardableHandle::ClientDiscardableHandle(scoped_refptr<Buffer> buffer,
+ uint32_t byte_offset,
+ int32_t shm_id)
+ : DiscardableHandleBase(std::move(buffer), byte_offset, shm_id) {
+ // Handle always starts locked.
+ base::subtle::NoBarrier_Store(AsAtomic(), kHandleLockedStart);
+}
+
+ClientDiscardableHandle::ClientDiscardableHandle(
+ const ClientDiscardableHandle& other) = default;
+ClientDiscardableHandle::ClientDiscardableHandle(
+ ClientDiscardableHandle&& other) = default;
+ClientDiscardableHandle& ClientDiscardableHandle::operator=(
+ const ClientDiscardableHandle& other) = default;
+ClientDiscardableHandle& ClientDiscardableHandle::operator=(
+ ClientDiscardableHandle&& other) = default;
+
+bool ClientDiscardableHandle::Lock() {
+ while (true) {
+ base::subtle::Atomic32 current_value =
+ base::subtle::NoBarrier_Load(AsAtomic());
+ if (current_value == kHandleDeleted) {
+ // Once a handle is deleted, it cannot be modified further.
+ return false;
+ }
+ base::subtle::Atomic32 new_value = current_value + 1;
+ // No barrier is needed, as any commands which depend on this operation
+ // will flow over the command buffer, which ensures a memory barrier
+ // between here and where these commands are executed on the GPU process.
+ base::subtle::Atomic32 previous_value =
+ base::subtle::NoBarrier_CompareAndSwap(AsAtomic(), current_value,
+ new_value);
+ if (current_value == previous_value) {
+ return true;
+ }
+ }
+}
+
+bool ClientDiscardableHandle::CanBeReUsed() const {
+ return kHandleDeleted == base::subtle::Acquire_Load(AsAtomic());
+}
+
+ServiceDiscardableHandle::ServiceDiscardableHandle(scoped_refptr<Buffer> buffer,
+ uint32_t byte_offset,
+ int32_t shm_id)
+ : DiscardableHandleBase(std::move(buffer), byte_offset, shm_id) {}
+
+ServiceDiscardableHandle::ServiceDiscardableHandle(
+ const ServiceDiscardableHandle& other) = default;
+ServiceDiscardableHandle::ServiceDiscardableHandle(
+ ServiceDiscardableHandle&& other) = default;
+ServiceDiscardableHandle& ServiceDiscardableHandle::operator=(
+ const ServiceDiscardableHandle& other) = default;
+ServiceDiscardableHandle& ServiceDiscardableHandle::operator=(
+ ServiceDiscardableHandle&& other) = default;
+
+void ServiceDiscardableHandle::Unlock() {
+ // No barrier is needed as all GPU process access happens on a single thread,
+ // and communication of dependent data between the GPU process and the
+ // renderer process happens across the command buffer and includes barriers.
+ base::subtle::NoBarrier_AtomicIncrement(AsAtomic(), -1);
+}
+
+bool ServiceDiscardableHandle::Delete() {
+ // No barrier is needed as all GPU process access happens on a single thread,
+ // and communication of dependent data between the GPU process and the
+ // renderer process happens across the command buffer and includes barriers.
+ return kHandleUnlocked == base::subtle::NoBarrier_CompareAndSwap(
+ AsAtomic(), kHandleUnlocked, kHandleDeleted);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/discardable_handle.h b/chromium/gpu/command_buffer/common/discardable_handle.h
new file mode 100644
index 00000000000..a3b17691da9
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/discardable_handle.h
@@ -0,0 +1,105 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_DISCARDABLE_HANDLE_H_
+#define GPU_COMMAND_BUFFER_COMMON_DISCARDABLE_HANDLE_H_
+
+#include "base/memory/ref_counted.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class Buffer;
+
+// DiscardableHandleBase is the base class for the discardable handle
+// implementation. In order to facilitate transfering handles across the
+// command buffer, DiscardableHandleBase is backed by a gpu::Buffer and an
+// offset into that buffer. It uses a single uint32_t of data at the given
+// offset.
+//
+// DiscardableHandleBase is never used directly, but is instead modified by the
+// Client/ServiceDiscardableHandle subclasses. These subclasses implement the
+// Lock/Unlock/Delete functionality, making it explicit which operations occur
+// in which process.
+//
+// Via these subclasses, a discardable handle can be transitioned between one
+// of three states:
+// ╔════════════╗ ╔════════════╗ ╔═══════════╗
+// ║ Locked ║ ──────> ║ Unlocked ║ ──────> ║ Deleted ║
+// ╚════════════╝ ╚════════════╝ ╚═══════════╝
+// └───────────<──────────┘
+//
+// Note that a handle can be locked multiple times, and stores a lock-count.
+class GPU_EXPORT DiscardableHandleBase {
+ public:
+ int32_t shm_id() const { return shm_id_; }
+ uint32_t byte_offset() const { return byte_offset_; }
+
+ // Test only functions.
+ bool IsLockedForTesting();
+ bool IsDeletedForTesting();
+
+ protected:
+ DiscardableHandleBase(scoped_refptr<Buffer> buffer,
+ uint32_t byte_offset,
+ int32_t shm_id);
+ DiscardableHandleBase(const DiscardableHandleBase& other);
+ DiscardableHandleBase(DiscardableHandleBase&& other);
+ DiscardableHandleBase& operator=(const DiscardableHandleBase& other);
+ DiscardableHandleBase& operator=(DiscardableHandleBase&& other);
+ ~DiscardableHandleBase();
+
+ volatile base::subtle::Atomic32* AsAtomic() const;
+
+ private:
+ scoped_refptr<Buffer> buffer_;
+ uint32_t byte_offset_ = 0;
+ uint32_t shm_id_ = 0;
+};
+
+// ClientDiscardableHandle enables the instantiation of a new discardable
+// handle (via the constructor), and can Lock an existing handle.
+class GPU_EXPORT ClientDiscardableHandle : public DiscardableHandleBase {
+ public:
+ ClientDiscardableHandle(scoped_refptr<Buffer> buffer,
+ uint32_t byte_offset,
+ int32_t shm_id);
+ ClientDiscardableHandle(const ClientDiscardableHandle& other);
+ ClientDiscardableHandle(ClientDiscardableHandle&& other);
+ ClientDiscardableHandle& operator=(const ClientDiscardableHandle& other);
+ ClientDiscardableHandle& operator=(ClientDiscardableHandle&& other);
+
+ // Tries to lock the handle. Returns true if successfully locked. Returns
+ // false if the handle has already been deleted on the service.
+ bool Lock();
+
+ // Returns true if the handle has been deleted on service side and can be
+ // re-used on the client.
+ bool CanBeReUsed() const;
+};
+
+// ServiceDiscardableHandle can wrap an existing handle (via the constructor),
+// and can unlock and delete this handle.
+class GPU_EXPORT ServiceDiscardableHandle : public DiscardableHandleBase {
+ public:
+ ServiceDiscardableHandle(scoped_refptr<Buffer> buffer,
+ uint32_t byte_offset,
+ int32_t shm_id);
+ ServiceDiscardableHandle(const ServiceDiscardableHandle& other);
+ ServiceDiscardableHandle(ServiceDiscardableHandle&& other);
+ ServiceDiscardableHandle& operator=(const ServiceDiscardableHandle& other);
+ ServiceDiscardableHandle& operator=(ServiceDiscardableHandle&& other);
+
+ // Unlocks the handle. This should always be paired with a client-side call
+ // to lock, or with a new handle, which starts locked.
+ void Unlock();
+
+ // Tries to delete the handle. Returns true if successfully deleted. Returns
+ // false if the handle is locked client-side and cannot be deleted.
+ bool Delete();
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_DISCARDABLE_HANDLE_H_
diff --git a/chromium/gpu/command_buffer/common/discardable_handle_unittest.cc b/chromium/gpu/command_buffer/common/discardable_handle_unittest.cc
new file mode 100644
index 00000000000..8bb01ae849a
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/discardable_handle_unittest.cc
@@ -0,0 +1,129 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/discardable_handle.h"
+#include "gpu/command_buffer/common/buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace {
+
+scoped_refptr<Buffer> MakeBufferForTesting(size_t num_handles) {
+ size_t size = sizeof(base::subtle::Atomic32) * num_handles;
+ std::unique_ptr<base::SharedMemory> shared_mem(new base::SharedMemory);
+ shared_mem->CreateAndMapAnonymous(size);
+ return MakeBufferFromSharedMemory(std::move(shared_mem), size);
+}
+
+} // namespace
+
+TEST(DiscardableHandleTest, BasicUsage) {
+ scoped_refptr<Buffer> buffer = MakeBufferForTesting(1);
+
+ uint32_t byte_offset = 0;
+ int32_t shm_id = 1;
+ ClientDiscardableHandle client_handle(buffer, byte_offset, shm_id);
+ EXPECT_EQ(client_handle.shm_id(), shm_id);
+ EXPECT_TRUE(client_handle.IsLockedForTesting());
+
+ ServiceDiscardableHandle service_handle(buffer, byte_offset, shm_id);
+ EXPECT_EQ(service_handle.shm_id(), shm_id);
+ EXPECT_TRUE(service_handle.IsLockedForTesting());
+
+ EXPECT_FALSE(service_handle.Delete());
+ EXPECT_FALSE(service_handle.IsDeletedForTesting());
+ EXPECT_FALSE(client_handle.CanBeReUsed());
+
+ service_handle.Unlock();
+ EXPECT_FALSE(service_handle.IsLockedForTesting());
+ EXPECT_FALSE(client_handle.IsLockedForTesting());
+
+ EXPECT_TRUE(client_handle.Lock());
+ EXPECT_TRUE(client_handle.IsLockedForTesting());
+ EXPECT_TRUE(service_handle.IsLockedForTesting());
+
+ service_handle.Unlock();
+ EXPECT_FALSE(service_handle.IsLockedForTesting());
+ EXPECT_FALSE(client_handle.IsLockedForTesting());
+
+ EXPECT_TRUE(service_handle.Delete());
+ EXPECT_TRUE(service_handle.IsDeletedForTesting());
+ EXPECT_TRUE(client_handle.CanBeReUsed());
+ EXPECT_FALSE(service_handle.IsLockedForTesting());
+ EXPECT_FALSE(client_handle.IsLockedForTesting());
+
+ EXPECT_FALSE(client_handle.Lock());
+ EXPECT_FALSE(service_handle.IsLockedForTesting());
+ EXPECT_FALSE(client_handle.IsLockedForTesting());
+ EXPECT_TRUE(service_handle.IsDeletedForTesting());
+ EXPECT_TRUE(client_handle.IsDeletedForTesting());
+}
+
+TEST(DiscardableHandleTest, MultiLock) {
+ scoped_refptr<Buffer> buffer = MakeBufferForTesting(1);
+
+ uint32_t byte_offset = 0;
+ int32_t shm_id = 1;
+ ClientDiscardableHandle client_handle(buffer, byte_offset, shm_id);
+ EXPECT_EQ(client_handle.shm_id(), shm_id);
+ EXPECT_TRUE(client_handle.IsLockedForTesting());
+
+ ServiceDiscardableHandle service_handle(buffer, byte_offset, shm_id);
+ EXPECT_EQ(service_handle.shm_id(), shm_id);
+ EXPECT_TRUE(service_handle.IsLockedForTesting());
+
+ for (int i = 1; i < 10; ++i) {
+ EXPECT_TRUE(client_handle.IsLockedForTesting());
+ EXPECT_TRUE(service_handle.IsLockedForTesting());
+ EXPECT_TRUE(client_handle.Lock());
+ }
+
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_TRUE(client_handle.IsLockedForTesting());
+ EXPECT_TRUE(service_handle.IsLockedForTesting());
+ service_handle.Unlock();
+ }
+
+ EXPECT_FALSE(client_handle.IsLockedForTesting());
+ EXPECT_FALSE(service_handle.IsLockedForTesting());
+}
+
+TEST(DiscardableHandleTest, Suballocations) {
+ static const int32_t num_elements = 10;
+ scoped_refptr<Buffer> buffer = MakeBufferForTesting(num_elements);
+
+ std::vector<ClientDiscardableHandle> client_handles;
+ std::vector<ServiceDiscardableHandle> service_handles;
+ for (int32_t i = 0; i < num_elements; ++i) {
+ client_handles.emplace_back(buffer, sizeof(base::subtle::Atomic32) * i,
+ i + 1);
+ EXPECT_EQ(client_handles[i].shm_id(), i + 1);
+ EXPECT_TRUE(client_handles[i].IsLockedForTesting());
+
+ service_handles.emplace_back(buffer, sizeof(base::subtle::Atomic32) * i,
+ i + 1);
+ EXPECT_EQ(service_handles[i].shm_id(), i + 1);
+ EXPECT_TRUE(service_handles[i].IsLockedForTesting());
+ }
+
+ for (int32_t i = 0; i < num_elements; i += 2) {
+ service_handles[i].Unlock();
+ }
+
+ for (int32_t i = 1; i < num_elements; i += 2) {
+ client_handles[i].Lock();
+ }
+
+ for (int32_t i = 0; i < num_elements; ++i) {
+ if (i % 2) {
+ EXPECT_TRUE(client_handles[i].IsLockedForTesting());
+ EXPECT_TRUE(service_handles[i].IsLockedForTesting());
+ } else {
+ EXPECT_FALSE(client_handles[i].IsLockedForTesting());
+ EXPECT_FALSE(service_handles[i].IsLockedForTesting());
+ }
+ }
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index 1bf837a5030..2f1e47f72dd 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -12444,10 +12444,10 @@ struct CopyTextureCHROMIUM {
void SetHeader() { header.SetCmd<ValueType>(); }
- void Init(GLenum _source_id,
+ void Init(GLuint _source_id,
GLint _source_level,
GLenum _dest_target,
- GLenum _dest_id,
+ GLuint _dest_id,
GLint _dest_level,
GLint _internalformat,
GLenum _dest_type,
@@ -12468,10 +12468,10 @@ struct CopyTextureCHROMIUM {
}
void* Set(void* cmd,
- GLenum _source_id,
+ GLuint _source_id,
GLint _source_level,
GLenum _dest_target,
- GLenum _dest_id,
+ GLuint _dest_id,
GLint _dest_level,
GLint _internalformat,
GLenum _dest_type,
@@ -12537,10 +12537,10 @@ struct CopySubTextureCHROMIUM {
void SetHeader() { header.SetCmd<ValueType>(); }
- void Init(GLenum _source_id,
+ void Init(GLuint _source_id,
GLint _source_level,
GLenum _dest_target,
- GLenum _dest_id,
+ GLuint _dest_id,
GLint _dest_level,
GLint _xoffset,
GLint _yoffset,
@@ -12569,10 +12569,10 @@ struct CopySubTextureCHROMIUM {
}
void* Set(void* cmd,
- GLenum _source_id,
+ GLuint _source_id,
GLint _source_level,
GLenum _dest_target,
- GLenum _dest_id,
+ GLuint _dest_id,
GLint _dest_level,
GLint _xoffset,
GLint _yoffset,
@@ -12654,13 +12654,13 @@ struct CompressedCopyTextureCHROMIUM {
void SetHeader() { header.SetCmd<ValueType>(); }
- void Init(GLenum _source_id, GLenum _dest_id) {
+ void Init(GLuint _source_id, GLuint _dest_id) {
SetHeader();
source_id = _source_id;
dest_id = _dest_id;
}
- void* Set(void* cmd, GLenum _source_id, GLenum _dest_id) {
+ void* Set(void* cmd, GLuint _source_id, GLuint _dest_id) {
static_cast<ValueType*>(cmd)->Init(_source_id, _dest_id);
return NextCmdAddress<ValueType>(cmd);
}
@@ -13814,6 +13814,138 @@ static_assert(sizeof(FlushDriverCachesCHROMIUM) == 4,
static_assert(offsetof(FlushDriverCachesCHROMIUM, header) == 0,
"offset of FlushDriverCachesCHROMIUM header should be 0");
+struct ScheduleDCLayerSharedStateCHROMIUM {
+ typedef ScheduleDCLayerSharedStateCHROMIUM ValueType;
+ static const CommandId kCmdId = kScheduleDCLayerSharedStateCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLfloat _opacity,
+ GLboolean _is_clipped,
+ GLint _z_order,
+ GLuint _shm_id,
+ GLuint _shm_offset) {
+ SetHeader();
+ opacity = _opacity;
+ is_clipped = _is_clipped;
+ z_order = _z_order;
+ shm_id = _shm_id;
+ shm_offset = _shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLfloat _opacity,
+ GLboolean _is_clipped,
+ GLint _z_order,
+ GLuint _shm_id,
+ GLuint _shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_opacity, _is_clipped, _z_order, _shm_id,
+ _shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ float opacity;
+ uint32_t is_clipped;
+ int32_t z_order;
+ uint32_t shm_id;
+ uint32_t shm_offset;
+};
+
+static_assert(sizeof(ScheduleDCLayerSharedStateCHROMIUM) == 24,
+ "size of ScheduleDCLayerSharedStateCHROMIUM should be 24");
+static_assert(
+ offsetof(ScheduleDCLayerSharedStateCHROMIUM, header) == 0,
+ "offset of ScheduleDCLayerSharedStateCHROMIUM header should be 0");
+static_assert(
+ offsetof(ScheduleDCLayerSharedStateCHROMIUM, opacity) == 4,
+ "offset of ScheduleDCLayerSharedStateCHROMIUM opacity should be 4");
+static_assert(
+ offsetof(ScheduleDCLayerSharedStateCHROMIUM, is_clipped) == 8,
+ "offset of ScheduleDCLayerSharedStateCHROMIUM is_clipped should be 8");
+static_assert(
+ offsetof(ScheduleDCLayerSharedStateCHROMIUM, z_order) == 12,
+ "offset of ScheduleDCLayerSharedStateCHROMIUM z_order should be 12");
+static_assert(
+ offsetof(ScheduleDCLayerSharedStateCHROMIUM, shm_id) == 16,
+ "offset of ScheduleDCLayerSharedStateCHROMIUM shm_id should be 16");
+static_assert(
+ offsetof(ScheduleDCLayerSharedStateCHROMIUM, shm_offset) == 20,
+ "offset of ScheduleDCLayerSharedStateCHROMIUM shm_offset should be 20");
+
+struct ScheduleDCLayerCHROMIUM {
+ typedef ScheduleDCLayerCHROMIUM ValueType;
+ static const CommandId kCmdId = kScheduleDCLayerCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _contents_texture_id,
+ GLuint _background_color,
+ GLuint _edge_aa_mask,
+ GLuint _filter,
+ GLuint _shm_id,
+ GLuint _shm_offset) {
+ SetHeader();
+ contents_texture_id = _contents_texture_id;
+ background_color = _background_color;
+ edge_aa_mask = _edge_aa_mask;
+ filter = _filter;
+ shm_id = _shm_id;
+ shm_offset = _shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _contents_texture_id,
+ GLuint _background_color,
+ GLuint _edge_aa_mask,
+ GLuint _filter,
+ GLuint _shm_id,
+ GLuint _shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_contents_texture_id, _background_color,
+ _edge_aa_mask, _filter, _shm_id,
+ _shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t contents_texture_id;
+ uint32_t background_color;
+ uint32_t edge_aa_mask;
+ uint32_t filter;
+ uint32_t shm_id;
+ uint32_t shm_offset;
+};
+
+static_assert(sizeof(ScheduleDCLayerCHROMIUM) == 28,
+ "size of ScheduleDCLayerCHROMIUM should be 28");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, header) == 0,
+ "offset of ScheduleDCLayerCHROMIUM header should be 0");
+static_assert(
+ offsetof(ScheduleDCLayerCHROMIUM, contents_texture_id) == 4,
+ "offset of ScheduleDCLayerCHROMIUM contents_texture_id should be 4");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, background_color) == 8,
+ "offset of ScheduleDCLayerCHROMIUM background_color should be 8");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, edge_aa_mask) == 12,
+ "offset of ScheduleDCLayerCHROMIUM edge_aa_mask should be 12");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, filter) == 16,
+ "offset of ScheduleDCLayerCHROMIUM filter should be 16");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, shm_id) == 20,
+ "offset of ScheduleDCLayerCHROMIUM shm_id should be 20");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, shm_offset) == 24,
+ "offset of ScheduleDCLayerCHROMIUM shm_offset should be 24");
+
struct MatrixLoadfCHROMIUMImmediate {
typedef MatrixLoadfCHROMIUMImmediate ValueType;
static const CommandId kCmdId = kMatrixLoadfCHROMIUMImmediate;
@@ -15734,4 +15866,37 @@ static_assert(offsetof(SetDrawRectangleCHROMIUM, width) == 12,
static_assert(offsetof(SetDrawRectangleCHROMIUM, height) == 16,
"offset of SetDrawRectangleCHROMIUM height should be 16");
+struct SetEnableDCLayersCHROMIUM {
+ typedef SetEnableDCLayersCHROMIUM ValueType;
+ static const CommandId kCmdId = kSetEnableDCLayersCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLboolean _enabled) {
+ SetHeader();
+ enabled = _enabled;
+ }
+
+ void* Set(void* cmd, GLboolean _enabled) {
+ static_cast<ValueType*>(cmd)->Init(_enabled);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t enabled;
+};
+
+static_assert(sizeof(SetEnableDCLayersCHROMIUM) == 8,
+ "size of SetEnableDCLayersCHROMIUM should be 8");
+static_assert(offsetof(SetEnableDCLayersCHROMIUM, header) == 0,
+ "offset of SetEnableDCLayersCHROMIUM header should be 0");
+static_assert(offsetof(SetEnableDCLayersCHROMIUM, enabled) == 4,
+ "offset of SetEnableDCLayersCHROMIUM enabled should be 4");
+
#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index ab231ac0c03..1bf9ced5675 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -4160,18 +4160,18 @@ TEST_F(GLES2FormatTest, PostSubBufferCHROMIUM) {
TEST_F(GLES2FormatTest, CopyTextureCHROMIUM) {
cmds::CopyTextureCHROMIUM& cmd = *GetBufferAs<cmds::CopyTextureCHROMIUM>();
void* next_cmd =
- cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLint>(12),
- static_cast<GLenum>(13), static_cast<GLenum>(14),
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLint>(12),
+ static_cast<GLenum>(13), static_cast<GLuint>(14),
static_cast<GLint>(15), static_cast<GLint>(16),
static_cast<GLenum>(17), static_cast<GLboolean>(18),
static_cast<GLboolean>(19), static_cast<GLboolean>(20));
EXPECT_EQ(static_cast<uint32_t>(cmds::CopyTextureCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLenum>(11), cmd.source_id);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.source_id);
EXPECT_EQ(static_cast<GLint>(12), cmd.source_level);
EXPECT_EQ(static_cast<GLenum>(13), cmd.dest_target);
- EXPECT_EQ(static_cast<GLenum>(14), cmd.dest_id);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.dest_id);
EXPECT_EQ(static_cast<GLint>(15), cmd.dest_level);
EXPECT_EQ(static_cast<GLint>(16), cmd.internalformat);
EXPECT_EQ(static_cast<GLenum>(17), cmd.dest_type);
@@ -4185,8 +4185,8 @@ TEST_F(GLES2FormatTest, CopySubTextureCHROMIUM) {
cmds::CopySubTextureCHROMIUM& cmd =
*GetBufferAs<cmds::CopySubTextureCHROMIUM>();
void* next_cmd = cmd.Set(
- &cmd, static_cast<GLenum>(11), static_cast<GLint>(12),
- static_cast<GLenum>(13), static_cast<GLenum>(14), static_cast<GLint>(15),
+ &cmd, static_cast<GLuint>(11), static_cast<GLint>(12),
+ static_cast<GLenum>(13), static_cast<GLuint>(14), static_cast<GLint>(15),
static_cast<GLint>(16), static_cast<GLint>(17), static_cast<GLint>(18),
static_cast<GLint>(19), static_cast<GLsizei>(20),
static_cast<GLsizei>(21), static_cast<GLboolean>(22),
@@ -4194,10 +4194,10 @@ TEST_F(GLES2FormatTest, CopySubTextureCHROMIUM) {
EXPECT_EQ(static_cast<uint32_t>(cmds::CopySubTextureCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLenum>(11), cmd.source_id);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.source_id);
EXPECT_EQ(static_cast<GLint>(12), cmd.source_level);
EXPECT_EQ(static_cast<GLenum>(13), cmd.dest_target);
- EXPECT_EQ(static_cast<GLenum>(14), cmd.dest_id);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.dest_id);
EXPECT_EQ(static_cast<GLint>(15), cmd.dest_level);
EXPECT_EQ(static_cast<GLint>(16), cmd.xoffset);
EXPECT_EQ(static_cast<GLint>(17), cmd.yoffset);
@@ -4215,12 +4215,12 @@ TEST_F(GLES2FormatTest, CompressedCopyTextureCHROMIUM) {
cmds::CompressedCopyTextureCHROMIUM& cmd =
*GetBufferAs<cmds::CompressedCopyTextureCHROMIUM>();
void* next_cmd =
- cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLenum>(12));
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12));
EXPECT_EQ(static_cast<uint32_t>(cmds::CompressedCopyTextureCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLenum>(11), cmd.source_id);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.dest_id);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.source_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.dest_id);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
@@ -4670,6 +4670,43 @@ TEST_F(GLES2FormatTest, FlushDriverCachesCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, ScheduleDCLayerSharedStateCHROMIUM) {
+ cmds::ScheduleDCLayerSharedStateCHROMIUM& cmd =
+ *GetBufferAs<cmds::ScheduleDCLayerSharedStateCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLfloat>(11),
+ static_cast<GLboolean>(12), static_cast<GLint>(13),
+ static_cast<GLuint>(14), static_cast<GLuint>(15));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::ScheduleDCLayerSharedStateCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLfloat>(11), cmd.opacity);
+ EXPECT_EQ(static_cast<GLboolean>(12), cmd.is_clipped);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.z_order);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.shm_id);
+ EXPECT_EQ(static_cast<GLuint>(15), cmd.shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ScheduleDCLayerCHROMIUM) {
+ cmds::ScheduleDCLayerCHROMIUM& cmd =
+ *GetBufferAs<cmds::ScheduleDCLayerCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
+ static_cast<GLuint>(13), static_cast<GLuint>(14),
+ static_cast<GLuint>(15), static_cast<GLuint>(16));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ScheduleDCLayerCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.contents_texture_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.background_color);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.edge_aa_mask);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.filter);
+ EXPECT_EQ(static_cast<GLuint>(15), cmd.shm_id);
+ EXPECT_EQ(static_cast<GLuint>(16), cmd.shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
TEST_F(GLES2FormatTest, MatrixLoadfCHROMIUMImmediate) {
const int kSomeBaseValueToTestWith = 51;
static GLfloat data[] = {
@@ -5276,4 +5313,15 @@ TEST_F(GLES2FormatTest, SetDrawRectangleCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, SetEnableDCLayersCHROMIUM) {
+ cmds::SetEnableDCLayersCHROMIUM& cmd =
+ *GetBufferAs<cmds::SetEnableDCLayersCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLboolean>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::SetEnableDCLayersCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLboolean>(11), cmd.enabled);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_TEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index 1dc54cf9433..ffef2d5e609 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -297,40 +297,43 @@
OP(CommitOverlayPlanesCHROMIUM) /* 538 */ \
OP(SwapInterval) /* 539 */ \
OP(FlushDriverCachesCHROMIUM) /* 540 */ \
- OP(MatrixLoadfCHROMIUMImmediate) /* 541 */ \
- OP(MatrixLoadIdentityCHROMIUM) /* 542 */ \
- OP(GenPathsCHROMIUM) /* 543 */ \
- OP(DeletePathsCHROMIUM) /* 544 */ \
- OP(IsPathCHROMIUM) /* 545 */ \
- OP(PathCommandsCHROMIUM) /* 546 */ \
- OP(PathParameterfCHROMIUM) /* 547 */ \
- OP(PathParameteriCHROMIUM) /* 548 */ \
- OP(PathStencilFuncCHROMIUM) /* 549 */ \
- OP(StencilFillPathCHROMIUM) /* 550 */ \
- OP(StencilStrokePathCHROMIUM) /* 551 */ \
- OP(CoverFillPathCHROMIUM) /* 552 */ \
- OP(CoverStrokePathCHROMIUM) /* 553 */ \
- OP(StencilThenCoverFillPathCHROMIUM) /* 554 */ \
- OP(StencilThenCoverStrokePathCHROMIUM) /* 555 */ \
- OP(StencilFillPathInstancedCHROMIUM) /* 556 */ \
- OP(StencilStrokePathInstancedCHROMIUM) /* 557 */ \
- OP(CoverFillPathInstancedCHROMIUM) /* 558 */ \
- OP(CoverStrokePathInstancedCHROMIUM) /* 559 */ \
- OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 560 */ \
- OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 561 */ \
- OP(BindFragmentInputLocationCHROMIUMBucket) /* 562 */ \
- OP(ProgramPathFragmentInputGenCHROMIUM) /* 563 */ \
- OP(GetBufferSubDataAsyncCHROMIUM) /* 564 */ \
- OP(CoverageModulationCHROMIUM) /* 565 */ \
- OP(BlendBarrierKHR) /* 566 */ \
- OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 567 */ \
- OP(BindFragDataLocationIndexedEXTBucket) /* 568 */ \
- OP(BindFragDataLocationEXTBucket) /* 569 */ \
- OP(GetFragDataIndexEXT) /* 570 */ \
- OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 571 */ \
- OP(OverlayPromotionHintCHROMIUM) /* 572 */ \
- OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 573 */ \
- OP(SetDrawRectangleCHROMIUM) /* 574 */
+ OP(ScheduleDCLayerSharedStateCHROMIUM) /* 541 */ \
+ OP(ScheduleDCLayerCHROMIUM) /* 542 */ \
+ OP(MatrixLoadfCHROMIUMImmediate) /* 543 */ \
+ OP(MatrixLoadIdentityCHROMIUM) /* 544 */ \
+ OP(GenPathsCHROMIUM) /* 545 */ \
+ OP(DeletePathsCHROMIUM) /* 546 */ \
+ OP(IsPathCHROMIUM) /* 547 */ \
+ OP(PathCommandsCHROMIUM) /* 548 */ \
+ OP(PathParameterfCHROMIUM) /* 549 */ \
+ OP(PathParameteriCHROMIUM) /* 550 */ \
+ OP(PathStencilFuncCHROMIUM) /* 551 */ \
+ OP(StencilFillPathCHROMIUM) /* 552 */ \
+ OP(StencilStrokePathCHROMIUM) /* 553 */ \
+ OP(CoverFillPathCHROMIUM) /* 554 */ \
+ OP(CoverStrokePathCHROMIUM) /* 555 */ \
+ OP(StencilThenCoverFillPathCHROMIUM) /* 556 */ \
+ OP(StencilThenCoverStrokePathCHROMIUM) /* 557 */ \
+ OP(StencilFillPathInstancedCHROMIUM) /* 558 */ \
+ OP(StencilStrokePathInstancedCHROMIUM) /* 559 */ \
+ OP(CoverFillPathInstancedCHROMIUM) /* 560 */ \
+ OP(CoverStrokePathInstancedCHROMIUM) /* 561 */ \
+ OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 562 */ \
+ OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 563 */ \
+ OP(BindFragmentInputLocationCHROMIUMBucket) /* 564 */ \
+ OP(ProgramPathFragmentInputGenCHROMIUM) /* 565 */ \
+ OP(GetBufferSubDataAsyncCHROMIUM) /* 566 */ \
+ OP(CoverageModulationCHROMIUM) /* 567 */ \
+ OP(BlendBarrierKHR) /* 568 */ \
+ OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 569 */ \
+ OP(BindFragDataLocationIndexedEXTBucket) /* 570 */ \
+ OP(BindFragDataLocationEXTBucket) /* 571 */ \
+ OP(GetFragDataIndexEXT) /* 572 */ \
+ OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 573 */ \
+ OP(OverlayPromotionHintCHROMIUM) /* 574 */ \
+ OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 575 */ \
+ OP(SetDrawRectangleCHROMIUM) /* 576 */ \
+ OP(SetEnableDCLayersCHROMIUM) /* 577 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
index 68237a08fd2..0159b48fde1 100644
--- a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
@@ -84,6 +84,7 @@ bool IsImageFormatCompatibleWithGpuMemoryBufferFormat(
case gfx::BufferFormat::RGBX_8888:
return internalformat == GL_RGB;
case gfx::BufferFormat::RGBA_4444:
+ case gfx::BufferFormat::RGBA_F16:
return internalformat == GL_RGBA;
}
@@ -91,8 +92,9 @@ bool IsImageFormatCompatibleWithGpuMemoryBufferFormat(
return false;
}
-bool IsGpuMemoryBufferFormatSupported(gfx::BufferFormat format,
- const gpu::Capabilities& capabilities) {
+bool IsImageFromGpuMemoryBufferFormatSupported(
+ gfx::BufferFormat format,
+ const gpu::Capabilities& capabilities) {
switch (format) {
case gfx::BufferFormat::ATC:
case gfx::BufferFormat::ATCIA:
@@ -117,6 +119,8 @@ bool IsGpuMemoryBufferFormatSupported(gfx::BufferFormat format,
case gfx::BufferFormat::RGBX_8888:
case gfx::BufferFormat::YVU_420:
return true;
+ case gfx::BufferFormat::RGBA_F16:
+ return capabilities.texture_half_float_linear;
case gfx::BufferFormat::YUV_420_BIPLANAR:
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
// TODO(dcastagna): Determine ycbcr_420v_image on CrOS at runtime
@@ -150,6 +154,7 @@ bool IsImageSizeValidForGpuMemoryBufferFormat(const gfx::Size& size,
case gfx::BufferFormat::RGBX_8888:
case gfx::BufferFormat::BGRA_8888:
case gfx::BufferFormat::BGRX_8888:
+ case gfx::BufferFormat::RGBA_F16:
return true;
case gfx::BufferFormat::YVU_420:
case gfx::BufferFormat::YUV_420_BIPLANAR:
diff --git a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
index ad87b1d7ad0..aa8c567dee0 100644
--- a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
+++ b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
@@ -24,8 +24,9 @@ GPU_EXPORT bool IsImageFormatCompatibleWithGpuMemoryBufferFormat(
unsigned internalformat,
gfx::BufferFormat format);
-// Returns true if |format| is supported by |capabilities|.
-GPU_EXPORT bool IsGpuMemoryBufferFormatSupported(
+// Returns true if creating an image for a GpuMemoryBuffer with |format| is
+// supported by |capabilities|.
+GPU_EXPORT bool IsImageFromGpuMemoryBufferFormatSupported(
gfx::BufferFormat format,
const Capabilities& capabilities);
diff --git a/chromium/gpu/command_buffer/common/sync_token.h b/chromium/gpu/command_buffer/common/sync_token.h
index 6a6bfad2a9a..990f0c9b4bd 100644
--- a/chromium/gpu/command_buffer/common/sync_token.h
+++ b/chromium/gpu/command_buffer/common/sync_token.h
@@ -8,6 +8,8 @@
#include <stdint.h>
#include <string.h>
+#include <tuple>
+
#include "gpu/command_buffer/common/command_buffer_id.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/gpu_export.h"
@@ -77,13 +79,9 @@ struct GPU_EXPORT SyncToken {
int32_t extra_data_field() const { return extra_data_field_; }
bool operator<(const SyncToken& other) const {
- // TODO(dyen): Once all our compilers support c++11, we can replace this
- // long list of comparisons with std::tie().
- return (namespace_id_ < other.namespace_id()) ||
- ((namespace_id_ == other.namespace_id()) &&
- ((command_buffer_id_ < other.command_buffer_id()) ||
- ((command_buffer_id_ == other.command_buffer_id()) &&
- (release_count_ < other.release_count()))));
+ return std::tie(namespace_id_, command_buffer_id_, release_count_) <
+ std::tie(other.namespace_id_, other.command_buffer_id_,
+ other.release_count_);
}
bool operator==(const SyncToken& other) const {
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index 10f6a8e819b..bf045d8f437 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -111,6 +111,7 @@ target(link_target_type, "service_sources") {
"memory_program_cache.h",
"path_manager.cc",
"path_manager.h",
+ "preemption_flag.h",
"program_cache.cc",
"program_cache.h",
"program_manager.cc",
@@ -121,6 +122,7 @@ target(link_target_type, "service_sources") {
"renderbuffer_manager.h",
"sampler_manager.cc",
"sampler_manager.h",
+ "sequence_id.h",
"service_utils.cc",
"service_utils.h",
"shader_manager.cc",
diff --git a/chromium/gpu/command_buffer/service/command_executor.cc b/chromium/gpu/command_buffer/service/command_executor.cc
index ff7a910bf86..e0bc504584b 100644
--- a/chromium/gpu/command_buffer/service/command_executor.cc
+++ b/chromium/gpu/command_buffer/service/command_executor.cc
@@ -24,11 +24,7 @@ namespace gpu {
CommandExecutor::CommandExecutor(CommandBufferServiceBase* command_buffer,
AsyncAPIInterface* handler,
gles2::GLES2Decoder* decoder)
- : command_buffer_(command_buffer),
- handler_(handler),
- decoder_(decoder),
- scheduled_(true),
- was_preempted_(false) {}
+ : command_buffer_(command_buffer), handler_(handler), decoder_(decoder) {}
CommandExecutor::~CommandExecutor() {}
@@ -53,7 +49,7 @@ void CommandExecutor::PutChanged() {
if (decoder_)
decoder_->BeginDecoding();
while (!parser_->IsEmpty()) {
- if (IsPreempted())
+ if (PauseExecution())
break;
DCHECK(scheduled());
@@ -152,19 +148,21 @@ void CommandExecutor::SetCommandProcessedCallback(
command_processed_callback_ = callback;
}
-bool CommandExecutor::IsPreempted() {
- if (!preemption_flag_.get())
+void CommandExecutor::SetPauseExecutionCallback(
+ const PauseExecutionCallback& callback) {
+ pause_execution_callback_ = callback;
+}
+
+bool CommandExecutor::PauseExecution() {
+ if (pause_execution_callback_.is_null())
return false;
- if (!was_preempted_ && preemption_flag_->IsSet()) {
- TRACE_COUNTER_ID1("gpu", "CommandExecutor::Preempted", this, 1);
- was_preempted_ = true;
- } else if (was_preempted_ && !preemption_flag_->IsSet()) {
- TRACE_COUNTER_ID1("gpu", "CommandExecutor::Preempted", this, 0);
- was_preempted_ = false;
+ bool pause = pause_execution_callback_.Run();
+ if (paused_ != pause) {
+ TRACE_COUNTER_ID1("gpu", "CommandExecutor::Paused", this, pause);
+ paused_ = pause;
}
-
- return preemption_flag_->IsSet();
+ return pause;
}
bool CommandExecutor::HasMoreIdleWork() const {
diff --git a/chromium/gpu/command_buffer/service/command_executor.h b/chromium/gpu/command_buffer/service/command_executor.h
index 7493d4f6244..6189c4b99a6 100644
--- a/chromium/gpu/command_buffer/service/command_executor.h
+++ b/chromium/gpu/command_buffer/service/command_executor.h
@@ -10,11 +10,8 @@
#include <memory>
#include <queue>
-#include "base/atomic_ref_count.h"
-#include "base/atomicops.h"
#include "base/callback.h"
#include "base/macros.h"
-#include "base/memory/ref_counted.h"
#include "base/memory/shared_memory.h"
#include "base/memory/weak_ptr.h"
#include "gpu/command_buffer/service/cmd_buffer_engine.h"
@@ -25,22 +22,6 @@
namespace gpu {
-class PreemptionFlag : public base::RefCountedThreadSafe<PreemptionFlag> {
- public:
- PreemptionFlag() : flag_(0) {}
-
- bool IsSet() { return !base::AtomicRefCountIsZero(&flag_); }
- void Set() { base::AtomicRefCountInc(&flag_); }
- void Reset() { base::subtle::NoBarrier_Store(&flag_, 0); }
-
- private:
- base::AtomicRefCount flag_;
-
- ~PreemptionFlag() {}
-
- friend class base::RefCountedThreadSafe<PreemptionFlag>;
-};
-
// This class schedules commands that have been flushed. They are received via
// a command buffer and forwarded to a command parser. TODO(apatrick): This
// class should not know about the decoder. Do not add additional dependencies
@@ -57,10 +38,6 @@ class GPU_EXPORT CommandExecutor
void PutChanged();
- void SetPreemptByFlag(scoped_refptr<PreemptionFlag> flag) {
- preemption_flag_ = flag;
- }
-
// Sets whether commands should be processed by this scheduler. Setting to
// false unschedules. Setting to true reschedules.
void SetScheduled(bool scheduled);
@@ -84,6 +61,9 @@ class GPU_EXPORT CommandExecutor
void SetCommandProcessedCallback(const base::Closure& callback);
+ using PauseExecutionCallback = base::Callback<bool(void)>;
+ void SetPauseExecutionCallback(const PauseExecutionCallback& callback);
+
// Returns whether the scheduler needs to be polled again in the future to
// process idle work.
bool HasMoreIdleWork() const;
@@ -99,7 +79,7 @@ class GPU_EXPORT CommandExecutor
CommandParser* parser() const { return parser_.get(); }
private:
- bool IsPreempted();
+ bool PauseExecution();
// The CommandExecutor holds a weak reference to the CommandBuffer. The
// CommandBuffer owns the CommandExecutor and holds a strong reference to it
@@ -120,13 +100,13 @@ class GPU_EXPORT CommandExecutor
std::unique_ptr<CommandParser> parser_;
// Whether the scheduler is currently able to process more commands.
- bool scheduled_;
+ bool scheduled_ = true;
base::Closure command_processed_callback_;
- // If non-NULL and |preemption_flag_->IsSet()|, exit PutChanged early.
- scoped_refptr<PreemptionFlag> preemption_flag_;
- bool was_preempted_;
+ // If this callback returns true, exit PutChanged early.
+ PauseExecutionCallback pause_execution_callback_;
+ bool paused_ = false;
DISALLOW_COPY_AND_ASSIGN(CommandExecutor);
};
diff --git a/chromium/gpu/command_buffer/service/command_executor_unittest.cc b/chromium/gpu/command_buffer/service/command_executor_unittest.cc
index c17703ad3d3..34610ed0439 100644
--- a/chromium/gpu/command_buffer/service/command_executor_unittest.cc
+++ b/chromium/gpu/command_buffer/service/command_executor_unittest.cc
@@ -10,6 +10,7 @@
#include <memory>
#include "base/run_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "gpu/command_buffer/common/command_buffer_mock.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
@@ -76,7 +77,7 @@ class CommandExecutorTest : public testing::Test {
int32_t* buffer_;
std::unique_ptr<gles2::MockGLES2Decoder> decoder_;
std::unique_ptr<CommandExecutor> executor_;
- base::MessageLoop message_loop_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
};
TEST_F(CommandExecutorTest, ExecutorDoesNothingIfRingBufferIsEmpty) {
diff --git a/chromium/gpu/command_buffer/service/common_decoder.cc b/chromium/gpu/command_buffer/service/common_decoder.cc
index b879f9572d3..a561056e354 100644
--- a/chromium/gpu/command_buffer/service/common_decoder.cc
+++ b/chromium/gpu/command_buffer/service/common_decoder.cc
@@ -143,10 +143,11 @@ void* CommonDecoder::GetAddressAndCheckSize(unsigned int shm_id,
void* CommonDecoder::GetAddressAndSize(unsigned int shm_id,
unsigned int data_offset,
+ unsigned int minimum_size,
unsigned int* data_size) {
CHECK(engine_);
scoped_refptr<gpu::Buffer> buffer = engine_->GetSharedMemoryBuffer(shm_id);
- if (!buffer.get())
+ if (!buffer.get() || buffer->GetRemainingSize(data_offset) < minimum_size)
return NULL;
return buffer->GetDataAddressAndSize(data_offset, data_size);
}
diff --git a/chromium/gpu/command_buffer/service/common_decoder.h b/chromium/gpu/command_buffer/service/common_decoder.h
index 5b41775b226..3c23d539b67 100644
--- a/chromium/gpu/command_buffer/service/common_decoder.h
+++ b/chromium/gpu/command_buffer/service/common_decoder.h
@@ -152,13 +152,16 @@ class GPU_EXPORT CommonDecoder : NON_EXPORTED_BASE(public AsyncAPIInterface) {
void* GetAddressAndSize(unsigned int shm_id,
unsigned int offset,
+ unsigned int minimum_size,
unsigned int* size);
template <typename T>
T GetSharedMemoryAndSizeAs(unsigned int shm_id,
unsigned int offset,
+ unsigned int minimum_size,
unsigned int* size) {
- return static_cast<T>(GetAddressAndSize(shm_id, offset, size));
+ return static_cast<T>(
+ GetAddressAndSize(shm_id, offset, minimum_size, size));
}
unsigned int GetSharedMemorySize(unsigned int shm_id, unsigned int offset);
diff --git a/chromium/gpu/command_buffer/service/context_group.cc b/chromium/gpu/command_buffer/service/context_group.cc
index 6c078a45c22..de793240f9b 100644
--- a/chromium/gpu/command_buffer/service/context_group.cc
+++ b/chromium/gpu/command_buffer/service/context_group.cc
@@ -38,6 +38,8 @@ void GetIntegerv(GLenum pname, uint32_t* var) {
*var = value;
}
+} // namespace anonymous
+
DisallowedFeatures AdjustDisallowedFeatures(
ContextType context_type, const DisallowedFeatures& disallowed_features) {
DisallowedFeatures adjusted_disallowed_features = disallowed_features;
@@ -56,8 +58,6 @@ DisallowedFeatures AdjustDisallowedFeatures(
return adjusted_disallowed_features;
}
-} // namespace anonymous
-
ContextGroup::ContextGroup(
const GpuPreferences& gpu_preferences,
const scoped_refptr<MailboxManager>& mailbox_manager,
diff --git a/chromium/gpu/command_buffer/service/context_group.h b/chromium/gpu/command_buffer/service/context_group.h
index 51a620168e5..c2ebbfb2900 100644
--- a/chromium/gpu/command_buffer/service/context_group.h
+++ b/chromium/gpu/command_buffer/service/context_group.h
@@ -48,6 +48,10 @@ class MemoryTracker;
struct DisallowedFeatures;
struct PassthroughResources;
+DisallowedFeatures AdjustDisallowedFeatures(
+ ContextType context_type,
+ const DisallowedFeatures& disallowed_features);
+
// A Context Group helps manage multiple GLES2Decoders that share
// resources.
class GPU_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
diff --git a/chromium/gpu/command_buffer/service/context_state.h b/chromium/gpu/command_buffer/service/context_state.h
index 21a2a2816e7..056dd4f6ce9 100644
--- a/chromium/gpu/command_buffer/service/context_state.h
+++ b/chromium/gpu/command_buffer/service/context_state.h
@@ -309,6 +309,13 @@ struct GPU_EXPORT ContextState {
PixelStoreParams GetPackParams();
PixelStoreParams GetUnpackParams(Dimension dimension);
+ // If a buffer object is bound to PIXEL_PACK_BUFFER, set all pack parameters
+ // user values; otherwise, set them to 0.
+ void UpdatePackParameters() const;
+ // If a buffer object is bound to PIXEL_UNPACK_BUFFER, set all unpack
+ // parameters user values; otherwise, set them to 0.
+ void UpdateUnpackParameters() const;
+
void EnableDisableFramebufferSRGB(bool enable);
#include "gpu/command_buffer/service/context_state_autogen.h"
@@ -369,13 +376,6 @@ struct GPU_EXPORT ContextState {
private:
void EnableDisable(GLenum pname, bool enable) const;
- // If a buffer object is bound to PIXEL_PACK_BUFFER, set all pack parameters
- // user values; otherwise, set them to 0.
- void UpdatePackParameters() const;
- // If a buffer object is bound to PIXEL_UNPACK_BUFFER, set all unpack
- // parameters user values; otherwise, set them to 0.
- void UpdateUnpackParameters() const;
-
void InitStateManual(const ContextState* prev_state) const;
// EnableDisableFramebufferSRGB is called at very high frequency. Cache the
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index 3eaf1adc7f8..32e72c69f32 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -865,7 +865,7 @@ void FeatureInfo::InitializeFeatures() {
extensions.Contains("GL_EXT_framebuffer_multisample") ||
gl_version_info_->is_es3 ||
gl_version_info_->is_desktop_core_profile;
- if (gl_version_info_->is_angle) {
+ if (gl_version_info_->is_angle || gl_version_info_->is_swiftshader) {
feature_flags_.angle_framebuffer_multisample =
extensions.Contains("GL_ANGLE_framebuffer_multisample");
ext_has_multisample |= feature_flags_.angle_framebuffer_multisample;
@@ -913,6 +913,7 @@ void FeatureInfo::InitializeFeatures() {
(gl_version_info_->IsAtLeastGLES(3, 1) ||
(gl_version_info_->IsAtLeastGL(3, 0) &&
extensions.Contains("GL_ARB_shading_language_420pack") &&
+ extensions.Contains("GL_ARB_texture_storage") &&
extensions.Contains("GL_ARB_texture_gather") &&
extensions.Contains("GL_ARB_explicit_uniform_location") &&
extensions.Contains("GL_ARB_explicit_attrib_location") &&
@@ -1357,6 +1358,9 @@ void FeatureInfo::InitializeFeatures() {
extensions.Contains("GL_CHROMIUM_copy_compressed_texture");
feature_flags_.angle_client_arrays =
extensions.Contains("GL_ANGLE_client_arrays");
+ feature_flags_.angle_request_extension =
+ extensions.Contains("GL_ANGLE_request_extension");
+ feature_flags_.ext_debug_marker = extensions.Contains("GL_EXT_debug_marker");
}
void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h
index a2a7f51926a..36e9f38585e 100644
--- a/chromium/gpu/command_buffer/service/feature_info.h
+++ b/chromium/gpu/command_buffer/service/feature_info.h
@@ -108,6 +108,8 @@ class GPU_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool angle_framebuffer_multisample = false;
bool ext_disjoint_timer_query = false;
bool angle_client_arrays = false;
+ bool angle_request_extension = false;
+ bool ext_debug_marker = false;
};
FeatureInfo();
diff --git a/chromium/gpu/command_buffer/service/gl_state_restorer_impl.cc b/chromium/gpu/command_buffer/service/gl_state_restorer_impl.cc
index 2eec5fb024c..fe761453af6 100644
--- a/chromium/gpu/command_buffer/service/gl_state_restorer_impl.cc
+++ b/chromium/gpu/command_buffer/service/gl_state_restorer_impl.cc
@@ -36,6 +36,11 @@ void GLStateRestorerImpl::RestoreAllTextureUnitBindings() {
decoder_->RestoreAllTextureUnitBindings(NULL);
}
+void GLStateRestorerImpl::RestoreActiveTexture() {
+ DCHECK(decoder_.get());
+ decoder_->RestoreActiveTexture();
+}
+
void GLStateRestorerImpl::RestoreActiveTextureUnitBinding(unsigned int target) {
DCHECK(decoder_.get());
decoder_->RestoreActiveTextureUnitBinding(target);
@@ -51,6 +56,21 @@ void GLStateRestorerImpl::RestoreFramebufferBindings() {
decoder_->RestoreFramebufferBindings();
}
+void GLStateRestorerImpl::RestoreProgramBindings() {
+ DCHECK(decoder_.get());
+ decoder_->RestoreProgramBindings();
+}
+
+void GLStateRestorerImpl::RestoreBufferBinding(unsigned int target) {
+ DCHECK(decoder_.get());
+ decoder_->RestoreBufferBinding(target);
+}
+
+void GLStateRestorerImpl::RestoreVertexAttribArray(unsigned int index) {
+ DCHECK(decoder_.get());
+ decoder_->RestoreVertexAttribArray(index);
+}
+
void GLStateRestorerImpl::PauseQueries() {
DCHECK(decoder_.get());
decoder_->GetQueryManager()->PauseQueries();
diff --git a/chromium/gpu/command_buffer/service/gl_state_restorer_impl.h b/chromium/gpu/command_buffer/service/gl_state_restorer_impl.h
index 187dcec188d..0827aa5032a 100644
--- a/chromium/gpu/command_buffer/service/gl_state_restorer_impl.h
+++ b/chromium/gpu/command_buffer/service/gl_state_restorer_impl.h
@@ -28,9 +28,13 @@ class GPU_EXPORT GLStateRestorerImpl : public gl::GLStateRestorer {
bool IsInitialized() override;
void RestoreState(const gl::GLStateRestorer* prev_state) override;
void RestoreAllTextureUnitBindings() override;
+ void RestoreActiveTexture() override;
void RestoreActiveTextureUnitBinding(unsigned int target) override;
void RestoreAllExternalTextureBindingsIfNeeded() override;
void RestoreFramebufferBindings() override;
+ void RestoreProgramBindings() override;
+ void RestoreBufferBinding(unsigned int target) override;
+ void RestoreVertexAttribArray(unsigned int index) override;
void PauseQueries() override;
void ResumeQueries() override;
diff --git a/chromium/gpu/command_buffer/service/gl_utils.cc b/chromium/gpu/command_buffer/service/gl_utils.cc
index 19683c3fbe6..cef929f0b6d 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.cc
+++ b/chromium/gpu/command_buffer/service/gl_utils.cc
@@ -262,24 +262,6 @@ const char* GetServiceShadingLanguageVersionString(
return "OpenGL ES GLSL ES 1.0 Chromium";
}
-const char* GetServiceRendererString(const FeatureInfo* feature_info) {
- // Return the unmasked RENDERER string for WebGL contexts.
- // It is used by WEBGL_debug_renderer_info.
- if (!feature_info->IsWebGLContext())
- return "Chromium";
- else
- return reinterpret_cast<const char*>(glGetString(GL_RENDERER));
-}
-
-const char* GetServiceVendorString(const FeatureInfo* feature_info) {
- // Return the unmasked VENDOR string for WebGL contexts.
- // It is used by WEBGL_debug_renderer_info.
- if (!feature_info->IsWebGLContext())
- return "Chromium";
- else
- return reinterpret_cast<const char*>(glGetString(GL_VENDOR));
-}
-
void APIENTRY LogGLDebugMessage(GLenum source,
GLenum type,
GLuint id,
diff --git a/chromium/gpu/command_buffer/service/gl_utils.h b/chromium/gpu/command_buffer/service/gl_utils.h
index c4905edcaf6..0b8634d09cf 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.h
+++ b/chromium/gpu/command_buffer/service/gl_utils.h
@@ -57,8 +57,6 @@ bool CheckUniqueAndNonNullIds(GLsizei n, const GLuint* client_ids);
const char* GetServiceVersionString(const FeatureInfo* feature_info);
const char* GetServiceShadingLanguageVersionString(
const FeatureInfo* feature_info);
-const char* GetServiceRendererString(const FeatureInfo* feature_info);
-const char* GetServiceVendorString(const FeatureInfo* feature_info);
void APIENTRY LogGLDebugMessage(GLenum source,
GLenum type,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
index 4b9d7e437bd..e4cdd30cfe4 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
@@ -25,7 +25,6 @@ ApplyFramebufferAttachmentCMAAINTELResourceManager::
is_in_gamma_correct_mode_(false),
supports_usampler_(true),
supports_r8_image_(true),
- supports_r8_read_format_(true),
is_gles31_compatible_(false),
frame_id_(0),
width_(0),
@@ -59,67 +58,55 @@ void ApplyFramebufferAttachmentCMAAINTELResourceManager::Initialize(
is_gles31_compatible_ =
decoder->GetGLContext()->GetVersionInfo()->IsAtLeastGLES(3, 1);
- // Check if RGBA8UI is supported as an FBO colour target with depth.
- // If not supported, GLSL needs to convert the data to/from float so there is
- // a small extra cost.
- {
- GLuint rgba8ui_texture = 0, depth_texture = 0;
- glGenTextures(1, &rgba8ui_texture);
- glBindTexture(GL_TEXTURE_2D, rgba8ui_texture);
- glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8UI, 4, 4);
-
- glGenTextures(1, &depth_texture);
- glBindTexture(GL_TEXTURE_2D, depth_texture);
- glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_DEPTH_COMPONENT16, 4, 4);
-
- // Create the FBO
- GLuint rgba8ui_framebuffer = 0;
- glGenFramebuffersEXT(1, &rgba8ui_framebuffer);
- glBindFramebufferEXT(GL_FRAMEBUFFER, rgba8ui_framebuffer);
-
- // Bind to the FBO to test support
- glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- GL_TEXTURE_2D, rgba8ui_texture, 0);
- glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
- GL_TEXTURE_2D, depth_texture, 0);
- GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
-
- supports_usampler_ = (status == GL_FRAMEBUFFER_COMPLETE);
-
- glDeleteFramebuffersEXT(1, &rgba8ui_framebuffer);
- glDeleteTextures(1, &rgba8ui_texture);
- glDeleteTextures(1, &depth_texture);
- }
-
- // Check to see if R8 images are supported
- // If not supported, images are bound as R32F for write targets, not R8.
- {
- GLuint r8_texture = 0;
- glGenTextures(1, &r8_texture);
- glBindTexture(GL_TEXTURE_2D, r8_texture);
- glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_R8, 4, 4);
-
- glGetError(); // reset all previous errors
- glBindImageTextureEXT(0, r8_texture, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_R8);
- if (glGetError() != GL_NO_ERROR)
- supports_r8_image_ = false;
-
- glDeleteTextures(1, &r8_texture);
- }
+ if (is_gles31_compatible_) {
+ supports_r8_image_ =
+ decoder->GetGLContext()->HasExtension("GL_NV_image_formats");
- // Check if R8 GLSL read formats are supported.
- // If not supported, r32f is used instead.
- {
- const char shader_source[] =
- SHADER(layout(r8) restrict writeonly uniform highp image2D g_r8Image;
- void main() {
- imageStore(g_r8Image, ivec2(0, 0), vec4(1.0, 0.0, 0.0, 0.0));
- });
-
- GLuint shader = CreateShader(GL_FRAGMENT_SHADER, "", shader_source);
- supports_r8_read_format_ = (shader != 0);
- if (shader != 0) {
- glDeleteShader(shader);
+ // ES 3.0 requires GL_RGBA8UI is color renderable.
+ supports_usampler_ = true;
+ } else {
+ // CMAA requires GL_ARB_shader_image_load_store for GL, and it requires r8
+ // image texture.
+ DCHECK(decoder->GetGLContext()->HasExtension(
+ "GL_ARB_shader_image_load_store"));
+ supports_r8_image_ = true;
+
+ // Check if RGBA8UI is supported as an FBO colour target with depth.
+ // If not supported, GLSL needs to convert the data to/from float so there
+ // is a small extra cost.
+ {
+ glActiveTexture(GL_TEXTURE0);
+
+ GLuint rgba8ui_texture = 0, depth_texture = 0;
+ glGenTextures(1, &rgba8ui_texture);
+ glBindTexture(GL_TEXTURE_2D, rgba8ui_texture);
+ glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8UI, 4, 4);
+
+ glGenTextures(1, &depth_texture);
+ glBindTexture(GL_TEXTURE_2D, depth_texture);
+ glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_DEPTH_COMPONENT16, 4, 4);
+
+ // Create the FBO
+ GLuint rgba8ui_framebuffer = 0;
+ glGenFramebuffersEXT(1, &rgba8ui_framebuffer);
+ glBindFramebufferEXT(GL_FRAMEBUFFER, rgba8ui_framebuffer);
+
+ // Bind to the FBO to test support
+ glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, rgba8ui_texture, 0);
+ glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
+ GL_TEXTURE_2D, depth_texture, 0);
+ GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
+
+ supports_usampler_ = (status == GL_FRAMEBUFFER_COMPLETE);
+
+ glDeleteFramebuffersEXT(1, &rgba8ui_framebuffer);
+ glDeleteTextures(1, &rgba8ui_texture);
+ glDeleteTextures(1, &depth_texture);
+
+ decoder->RestoreTextureUnitBindings(0);
+ decoder->RestoreActiveTexture();
+ decoder->RestoreFramebufferBindings();
}
}
@@ -128,9 +115,6 @@ void ApplyFramebufferAttachmentCMAAINTELResourceManager::Initialize(
VLOG(1) << "ApplyFramebufferAttachmentCMAAINTEL: "
<< "Supports R8 Images is "
<< (supports_r8_image_ ? "true" : "false");
- VLOG(1) << "ApplyFramebufferAttachmentCMAAINTEL: "
- << "Supports R8 Read Format is "
- << (supports_r8_read_format_ ? "true" : "false");
// Create the shaders
std::ostringstream defines, edge1, edge2, combineEdges, blur, displayEdges,
@@ -148,7 +132,7 @@ void ApplyFramebufferAttachmentCMAAINTELResourceManager::Initialize(
defines << "#define IN_GAMMA_CORRECT_MODE\n";
}
- if (supports_r8_read_format_) {
+ if (supports_r8_image_) {
defines << "#define EDGE_READ_FORMAT r8\n";
} else {
defines << "#define EDGE_READ_FORMAT r32f\n";
@@ -629,7 +613,7 @@ GLuint ApplyFramebufferAttachmentCMAAINTELResourceManager::CreateShader(
const char header_es31[] =
"#version 310 es \n";
- const char header_gl30[] =
+ const char header_gl130[] =
"#version 130 \n"
"#extension GL_ARB_shading_language_420pack : require \n"
"#extension GL_ARB_texture_gather : require \n"
@@ -637,14 +621,17 @@ GLuint ApplyFramebufferAttachmentCMAAINTELResourceManager::CreateShader(
"#extension GL_ARB_explicit_attrib_location : require \n"
"#extension GL_ARB_shader_image_load_store : require \n";
- const char* header = NULL;
+ std::ostringstream header;
if (is_gles31_compatible_) {
- header = header_es31;
+ header << header_es31;
+ if (supports_r8_image_)
+ header << "#extension GL_NV_image_formats : require\n";
} else {
- header = header_gl30;
+ header << header_gl130;
}
- const char* source_array[4] = {header, defines, "\n", source};
+ std::string header_str = header.str();
+ const char* source_array[4] = {header_str.c_str(), defines, "\n", source};
glShaderSource(shader, 4, source_array, NULL);
glCompileShader(shader);
@@ -821,42 +808,6 @@ const char ApplyFramebufferAttachmentCMAAINTELResourceManager::cmaa_frag_s1_[] =
return ret;
}
- uint PackZ(const uvec2 screenPos, const bool invertedZShape) {
- uint retVal = screenPos.x | (screenPos.y << 15u);
- if (invertedZShape)
- retVal |= (1u << 30u);
- return retVal;
- }
-
- void UnpackZ(uint packedZ, out uvec2 screenPos,
- out bool invertedZShape)
- {
- screenPos.x = packedZ & 0x7FFFu;
- screenPos.y = (packedZ >> 15u) & 0x7FFFu;
- invertedZShape = (packedZ >> 30u) == 1u;
- }
-
- uint PackZ(const uvec2 screenPos,
- const bool invertedZShape,
- const bool horizontal) {
- uint retVal = screenPos.x | (screenPos.y << 15u);
- if (invertedZShape)
- retVal |= (1u << 30u);
- if (horizontal)
- retVal |= (1u << 31u);
- return retVal;
- }
-
- void UnpackZ(uint packedZ,
- out uvec2 screenPos,
- out bool invertedZShape,
- out bool horizontal) {
- screenPos.x = packedZ & 0x7FFFu;
- screenPos.y = (packedZ >> 15u) & 0x7FFFu;
- invertedZShape = (packedZ & (1u << 30u)) != 0u;
- horizontal = (packedZ & (1u << 31u)) != 0u;
- }
-
vec4 PackBlurAAInfo(ivec2 pixelPos, uint shapeType) {
uint packedEdges = uint(
texelFetch(g_src0TextureFlt, pixelPos, 0).r * 255.5);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h
index f9bad673293..9f2bd9ae327 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h
@@ -57,7 +57,6 @@ class GPU_EXPORT ApplyFramebufferAttachmentCMAAINTELResourceManager {
bool is_in_gamma_correct_mode_;
bool supports_usampler_;
bool supports_r8_image_;
- bool supports_r8_read_format_;
bool is_gles31_compatible_;
int frame_id_;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
index d310095abde..c9e6bdbc538 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
@@ -281,7 +281,11 @@ ShaderId GetFragmentShaderId(bool premultiply_alpha,
const char* kShaderPrecisionPreamble =
"#ifdef GL_ES\n"
"precision mediump float;\n"
+ "#ifdef GL_FRAGMENT_PRECISION_HIGH\n"
+ "#define TexCoordPrecision highp\n"
+ "#else\n"
"#define TexCoordPrecision mediump\n"
+ "#endif\n"
"#else\n"
"#define TexCoordPrecision\n"
"#endif\n";
@@ -613,6 +617,196 @@ void DoCopyTexSubImage2D(const gpu::gles2::GLES2Decoder* decoder,
decoder->RestoreFramebufferBindings();
}
+// Convert RGBA/UNSIGNED_BYTE source to RGB/UNSIGNED_BYTE destination.
+void convertToRGB(const uint8_t* source,
+ uint8_t* destination,
+ unsigned length) {
+ for (unsigned i = 0; i < length; ++i) {
+ destination[0] = source[0];
+ destination[1] = source[1];
+ destination[2] = source[2];
+ source += 4;
+ destination += 3;
+ }
+}
+
+// Convert RGBA/UNSIGNED_BYTE source to RGB/FLOAT destination.
+void convertToRGBFloat(const uint8_t* source,
+ float* destination,
+ unsigned length) {
+ const float scaleFactor = 1.0f / 255.0f;
+ for (unsigned i = 0; i < length; ++i) {
+ destination[0] = source[0] * scaleFactor;
+ destination[1] = source[1] * scaleFactor;
+ destination[2] = source[2] * scaleFactor;
+ source += 4;
+ destination += 3;
+ }
+}
+
+// Prepare the image data to be uploaded to a texture in pixel unpack buffer.
+void prepareUnpackBuffer(GLuint buffer[2],
+ bool is_es,
+ GLenum format,
+ GLenum type,
+ GLsizei width,
+ GLsizei height) {
+ uint32_t pixel_num = width * height;
+
+ // Result of glReadPixels with format == GL_RGB and type == GL_UNSIGNED_BYTE
+ // from read framebuffer in RGBA fromat is not correct on desktop core
+ // profile on both Linux Mesa and Linux NVIDIA. This may be a driver bug.
+ bool is_rgb_unsigned_byte = format == GL_RGB && type == GL_UNSIGNED_BYTE;
+ if ((!is_es && !is_rgb_unsigned_byte) ||
+ (format == GL_RGBA && type == GL_UNSIGNED_BYTE)) {
+ uint32_t bytes_per_group =
+ gpu::gles2::GLES2Util::ComputeImageGroupSize(format, type);
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, buffer[0]);
+ glBufferData(GL_PIXEL_PACK_BUFFER, pixel_num * bytes_per_group, 0,
+ GL_STATIC_READ);
+ glReadPixels(0, 0, width, height, format, type, 0);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer[0]);
+ return;
+ }
+
+ uint32_t bytes_per_group =
+ gpu::gles2::GLES2Util::ComputeImageGroupSize(GL_RGBA, GL_UNSIGNED_BYTE);
+ uint32_t buf_size = pixel_num * bytes_per_group;
+
+ if (format == GL_RGB && type == GL_FLOAT) {
+#if defined(OS_ANDROID)
+ // Reading pixels to pbo with glReadPixels will cause random failures of
+ // GLCopyTextureCHROMIUMES3Test.FormatCombinations in gl_tests. This is seen
+ // on Nexus 5 but not Nexus 4. Read pixels to client memory, then upload to
+ // pixel unpack buffer with glBufferData.
+ std::unique_ptr<uint8_t[]> pixels(new uint8_t[width * height * 4]);
+ glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels.get());
+ std::unique_ptr<float[]> data(new float[width * height * 3]);
+ convertToRGBFloat(pixels.get(), data.get(), pixel_num);
+ bytes_per_group =
+ gpu::gles2::GLES2Util::ComputeImageGroupSize(format, type);
+ buf_size = pixel_num * bytes_per_group;
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer[1]);
+ glBufferData(GL_PIXEL_UNPACK_BUFFER, buf_size, data.get(), GL_STATIC_DRAW);
+#else
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, buffer[0]);
+ glBufferData(GL_PIXEL_PACK_BUFFER, buf_size, 0, GL_STATIC_READ);
+ glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
+ void* pixels =
+ glMapBufferRange(GL_PIXEL_PACK_BUFFER, 0, buf_size, GL_MAP_READ_BIT);
+
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer[1]);
+ bytes_per_group =
+ gpu::gles2::GLES2Util::ComputeImageGroupSize(format, type);
+ buf_size = pixel_num * bytes_per_group;
+ glBufferData(GL_PIXEL_UNPACK_BUFFER, buf_size, 0, GL_STATIC_DRAW);
+ void* data =
+ glMapBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, buf_size, GL_MAP_WRITE_BIT);
+ convertToRGBFloat(static_cast<uint8_t*>(pixels), static_cast<float*>(data),
+ pixel_num);
+ glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
+ glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
+#endif
+ return;
+ }
+
+ if (format == GL_RGB && type == GL_UNSIGNED_BYTE) {
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, buffer[0]);
+ glBufferData(GL_PIXEL_PACK_BUFFER, buf_size, 0, GL_DYNAMIC_DRAW);
+ glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
+ void* pixels = glMapBufferRange(GL_PIXEL_PACK_BUFFER, 0, buf_size,
+ GL_MAP_READ_BIT | GL_MAP_WRITE_BIT);
+ void* data = pixels;
+ convertToRGB((uint8_t*)pixels, (uint8_t*)data, pixel_num);
+ glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer[0]);
+ return;
+ }
+
+ NOTREACHED();
+}
+
+enum TexImageCommandType {
+ kTexImage,
+ kTexSubImage,
+};
+
+void DoReadbackAndTexImage(TexImageCommandType command_type,
+ const gpu::gles2::GLES2Decoder* decoder,
+ GLenum source_target,
+ GLuint source_id,
+ GLint source_level,
+ GLenum dest_target,
+ GLuint dest_id,
+ GLint dest_level,
+ GLenum dest_internal_format,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLuint framebuffer) {
+ DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), source_target);
+ GLenum dest_binding_target =
+ gpu::gles2::GLES2Util::GLFaceTargetToTextureTarget(dest_target);
+ DCHECK(dest_binding_target == GL_TEXTURE_2D ||
+ dest_binding_target == GL_TEXTURE_CUBE_MAP);
+ DCHECK(source_level == 0 || decoder->GetFeatureInfo()->IsES3Capable());
+ if (BindFramebufferTexture2D(source_target, source_id, source_level,
+ framebuffer)) {
+ glBindTexture(dest_binding_target, dest_id);
+ glTexParameterf(dest_binding_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(dest_binding_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(dest_binding_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(dest_binding_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+ GLenum format = GL_RGBA;
+ GLenum type = GL_UNSIGNED_BYTE;
+ switch (dest_internal_format) {
+ case GL_RGB9_E5:
+ format = GL_RGB;
+ type = GL_FLOAT;
+ break;
+ case GL_SRGB_EXT:
+ case GL_SRGB8:
+ format = GL_RGB;
+ break;
+ case GL_RGB5_A1:
+ case GL_SRGB_ALPHA_EXT:
+ case GL_SRGB8_ALPHA8:
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+
+ // TODO(qiankun.miao@intel.com): PIXEL_PACK_BUFFER and PIXEL_UNPACK_BUFFER
+ // are not supported in ES2.
+ bool is_es = decoder->GetFeatureInfo()->gl_version_info().is_es;
+ DCHECK(!decoder->GetFeatureInfo()->gl_version_info().is_es2);
+
+ uint32_t buffer_num = is_es && format == GL_RGB && type == GL_FLOAT ? 2 : 1;
+ GLuint buffer[2] = {0u};
+ glGenBuffersARB(buffer_num, buffer);
+ prepareUnpackBuffer(buffer, is_es, format, type, width, height);
+
+ if (command_type == kTexImage) {
+ glTexImage2D(dest_target, dest_level, dest_internal_format, width, height,
+ 0, format, type, 0);
+ } else {
+ glTexSubImage2D(dest_target, dest_level, xoffset, yoffset, width, height,
+ format, type, 0);
+ }
+ glDeleteBuffersARB(buffer_num, buffer);
+ }
+
+ decoder->RestoreTextureState(source_id);
+ decoder->RestoreTextureState(dest_id);
+ decoder->RestoreTextureUnitBindings(0);
+ decoder->RestoreActiveTexture();
+ decoder->RestoreFramebufferBindings();
+ decoder->RestoreBufferBindings();
+}
+
} // namespace
namespace gpu {
@@ -718,16 +912,7 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTexture(
bool premultiply_alpha,
bool unpremultiply_alpha,
CopyTextureMethod method) {
- bool premultiply_alpha_change = premultiply_alpha ^ unpremultiply_alpha;
- GLenum dest_binding_target =
- gpu::gles2::GLES2Util::GLFaceTargetToTextureTarget(dest_target);
-
- // GL_TEXTURE_RECTANGLE_ARB on FBO is supported by OpenGL, not GLES2,
- // so restrict this to GL_TEXTURE_2D and GL_TEXTURE_CUBE_MAP.
- if (source_target == GL_TEXTURE_2D &&
- (dest_binding_target == GL_TEXTURE_2D ||
- dest_binding_target == GL_TEXTURE_CUBE_MAP) &&
- !flip_y && !premultiply_alpha_change && method == DIRECT_COPY) {
+ if (method == DIRECT_COPY) {
DoCopyTexImage2D(decoder, source_target, source_id, source_level,
dest_target, dest_id, dest_level, dest_internal_format,
width, height, framebuffer_);
@@ -740,9 +925,11 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTexture(
GLint original_dest_level = dest_level;
GLenum original_dest_target = dest_target;
GLenum original_internal_format = dest_internal_format;
- if (method == DRAW_AND_COPY) {
+ if (method == DRAW_AND_COPY || method == DRAW_AND_READBACK) {
GLenum adjusted_internal_format =
- getIntermediateFormat(dest_internal_format);
+ method == DRAW_AND_READBACK
+ ? GL_RGBA
+ : getIntermediateFormat(dest_internal_format);
dest_target = GL_TEXTURE_2D;
glGenTextures(1, &intermediate_texture);
glBindTexture(dest_target, intermediate_texture);
@@ -763,11 +950,18 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTexture(
dest_target, dest_texture, dest_level, dest_internal_format, width,
height, flip_y, premultiply_alpha, unpremultiply_alpha, kIdentityMatrix);
- if (method == DRAW_AND_COPY) {
+ if (method == DRAW_AND_COPY || method == DRAW_AND_READBACK) {
source_level = 0;
- DoCopyTexImage2D(decoder, dest_target, intermediate_texture, source_level,
- original_dest_target, dest_id, original_dest_level,
- original_internal_format, width, height, framebuffer_);
+ if (method == DRAW_AND_COPY) {
+ DoCopyTexImage2D(decoder, dest_target, intermediate_texture, source_level,
+ original_dest_target, dest_id, original_dest_level,
+ original_internal_format, width, height, framebuffer_);
+ } else if (method == DRAW_AND_READBACK) {
+ DoReadbackAndTexImage(
+ kTexImage, decoder, dest_target, intermediate_texture, source_level,
+ original_dest_target, dest_id, original_dest_level,
+ original_internal_format, 0, 0, width, height, framebuffer_);
+ }
glDeleteTextures(1, &intermediate_texture);
}
}
@@ -796,16 +990,7 @@ void CopyTextureCHROMIUMResourceManager::DoCopySubTexture(
bool premultiply_alpha,
bool unpremultiply_alpha,
CopyTextureMethod method) {
- bool premultiply_alpha_change = premultiply_alpha ^ unpremultiply_alpha;
- GLenum dest_binding_target =
- gpu::gles2::GLES2Util::GLFaceTargetToTextureTarget(dest_target);
-
- // GL_TEXTURE_RECTANGLE_ARB on FBO is supported by OpenGL, not GLES2,
- // so restrict this to GL_TEXTURE_2D and GL_TEXTURE_CUBE_MAP.
- if (source_target == GL_TEXTURE_2D &&
- (dest_binding_target == GL_TEXTURE_2D ||
- dest_binding_target == GL_TEXTURE_CUBE_MAP) &&
- !flip_y && !premultiply_alpha_change && method == DIRECT_COPY) {
+ if (method == DIRECT_COPY) {
DoCopyTexSubImage2D(decoder, source_target, source_id, source_level,
dest_target, dest_id, dest_level, xoffset, yoffset, x,
y, width, height, framebuffer_);
@@ -819,9 +1004,12 @@ void CopyTextureCHROMIUMResourceManager::DoCopySubTexture(
GLint original_dest_level = dest_level;
GLenum original_dest_target = dest_target;
GLuint intermediate_texture = 0;
- if (method == DRAW_AND_COPY) {
+ GLenum original_internal_format = dest_internal_format;
+ if (method == DRAW_AND_COPY || method == DRAW_AND_READBACK) {
GLenum adjusted_internal_format =
- getIntermediateFormat(dest_internal_format);
+ method == DRAW_AND_READBACK
+ ? GL_RGBA
+ : getIntermediateFormat(dest_internal_format);
dest_target = GL_TEXTURE_2D;
glGenTextures(1, &intermediate_texture);
glBindTexture(dest_target, intermediate_texture);
@@ -848,12 +1036,20 @@ void CopyTextureCHROMIUMResourceManager::DoCopySubTexture(
source_height, flip_y, premultiply_alpha, unpremultiply_alpha,
kIdentityMatrix);
- if (method == DRAW_AND_COPY) {
+ if (method == DRAW_AND_COPY || method == DRAW_AND_READBACK) {
source_level = 0;
- DoCopyTexSubImage2D(decoder, dest_target, intermediate_texture,
- source_level, original_dest_target, dest_id,
- original_dest_level, xoffset, yoffset, 0, 0, width,
- height, framebuffer_);
+ if (method == DRAW_AND_COPY) {
+ DoCopyTexSubImage2D(decoder, dest_target, intermediate_texture,
+ source_level, original_dest_target, dest_id,
+ original_dest_level, xoffset, yoffset, 0, 0, width,
+ height, framebuffer_);
+ } else if (method == DRAW_AND_READBACK) {
+ DoReadbackAndTexImage(kTexSubImage, decoder, dest_target,
+ intermediate_texture, source_level,
+ original_dest_target, dest_id, original_dest_level,
+ original_internal_format, xoffset, yoffset, width,
+ height, framebuffer_);
+ }
glDeleteTextures(1, &intermediate_texture);
}
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
index 93cdc58c726..9cad28bbd3c 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
@@ -25,6 +25,10 @@ enum CopyTextureMethod {
DIRECT_DRAW,
// Draw to an intermediate texture, and then copy to the destination texture.
DRAW_AND_COPY,
+ // Draw to an intermediate texture in RGBA format, read back pixels in the
+ // intermediate texture from GPU to CPU, and then upload to the destination
+ // texture.
+ DRAW_AND_READBACK,
// CopyTexture isn't available.
NOT_COPYABLE
};
@@ -32,13 +36,8 @@ enum CopyTextureMethod {
// TODOs(qiankun.miao@intel.com):
// 1. Add readback path for RGB9_E5 and float formats (if extension isn't
// available and they are not color-renderable).
-// 2. Support faces of cube map texture as valid dest target. The cube map
-// texture may be incomplete currently.
-// 3. Add support for levels other than 0.
-// 4. Support ALPHA, LUMINANCE and LUMINANCE_ALPHA formats on core profile.
-// 5. Update the extension doc after the whole work is done
-// in gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt. We probably
-// will need a ES2 version and a ES3 version.
+// 2. Support GL_TEXTURE_3D as valid dest_target.
+// 3. Support ALPHA, LUMINANCE and LUMINANCE_ALPHA formats on core profile.
// This class encapsulates the resources required to implement the
// GL_CHROMIUM_copy_texture extension. The copy operation is performed
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 5b50a3125c7..0d0dbe6ca73 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -77,6 +77,7 @@
#include "ui/gfx/overlay_transform.h"
#include "ui/gfx/transform.h"
#include "ui/gl/ca_renderer_layer_params.h"
+#include "ui/gl/dc_renderer_layer_params.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_fence.h"
@@ -556,6 +557,10 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
void RestoreTextureUnitBindings(unsigned unit) const override {
state_.RestoreTextureUnitBindings(unit, NULL);
}
+ void RestoreVertexAttribArray(unsigned index) override {
+ RestoreStateForAttrib(index, true);
+ }
+ void RestoreBufferBinding(unsigned int target) override;
void RestoreFramebufferBindings() const override;
void RestoreRenderbufferBindings() override;
void RestoreTextureState(unsigned service_id) const override;
@@ -861,6 +866,12 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Return 0 if no stencil attachment.
GLenum GetBoundFramebufferStencilFormat(GLenum target);
+ gfx::Vector2d GetBoundFramebufferDrawOffset() const {
+ if (GetBoundDrawFramebuffer() || offscreen_target_frame_buffer_.get())
+ return gfx::Vector2d();
+ return surface_->GetDrawOffset();
+ }
+
void MarkDrawBufferAsCleared(GLenum buffer, GLint drawbuffer_i);
// Wrapper for CompressedTexImage{2|3}D commands.
@@ -919,6 +930,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
const volatile GLint* rects);
// Callback for async SwapBuffers.
+ void FinishAsyncSwapBuffers(gfx::SwapResult result);
void FinishSwapBuffers(gfx::SwapResult result);
void DoCommitOverlayPlanes();
@@ -1697,6 +1709,8 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Wrapper for glSetDrawRectangleCHROMIUM
void DoSetDrawRectangleCHROMIUM(GLint x, GLint y, GLint width, GLint height);
+ void DoSetEnableDCLayersCHROMIUM(GLboolean enable);
+
// Wrapper for glReadBuffer
void DoReadBuffer(GLenum src);
@@ -1862,6 +1876,9 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Wrapper for glViewport
void DoViewport(GLint x, GLint y, GLsizei width, GLsizei height);
+ // Wrapper for glScissor
+ void DoScissor(GLint x, GLint y, GLsizei width, GLsizei height);
+
// Wrapper for glUseProgram
void DoUseProgram(GLuint program);
@@ -2030,14 +2047,19 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
TextureRef* source_texture_ref,
TextureRef* dest_texture_ref);
bool CanUseCopyTextureCHROMIUMInternalFormat(GLenum dest_internal_format);
- CopyTextureMethod ValidateCopyTextureCHROMIUMInternalFormats(
- const char* function_name,
- GLint source_level,
- GLenum source_internal_format,
- GLenum source_type,
- GLenum dest_target,
- GLint dest_level,
- GLenum dest_internal_format);
+ bool ValidateCopyTextureCHROMIUMInternalFormats(const char* function_name,
+ GLenum source_internal_format,
+ GLenum dest_internal_format);
+ CopyTextureMethod getCopyTextureCHROMIUMMethod(GLenum source_target,
+ GLint source_level,
+ GLenum source_internal_format,
+ GLenum source_type,
+ GLenum dest_target,
+ GLint dest_level,
+ GLenum dest_internal_format,
+ bool flip_y,
+ bool premultiply_alpha,
+ bool unpremultiply_alpha);
bool ValidateCompressedCopyTextureCHROMIUM(const char* function_name,
TextureRef* source_texture_ref,
TextureRef* dest_texture_ref);
@@ -2156,6 +2178,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// The GL_CHROMIUM_schedule_ca_layer extension requires that SwapBuffers and
// equivalent functions reset shared state.
void ClearScheduleCALayerState();
+ void ClearScheduleDCLayerState();
// Helper method to call glClear workaround.
void ClearFramebufferForWorkaround(GLbitfield mask);
@@ -2353,7 +2376,9 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
bool supports_swap_buffers_with_bounds_;
bool supports_commit_overlay_planes_;
bool supports_async_swap_;
- bool supports_set_draw_rectangle_ = false;
+ uint32_t next_async_swap_id_ = 1;
+ uint32_t pending_swaps_ = 0;
+ bool supports_dc_layers_ = false;
// These flags are used to override the state of the shared feature_info_
// member. Because the same FeatureInfo instance may be shared among many
@@ -2453,6 +2478,16 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
std::unique_ptr<CALayerSharedState> ca_layer_shared_state_;
+ struct DCLayerSharedState {
+ float opacity;
+ bool is_clipped;
+ gfx::Rect clip_rect;
+ int z_order;
+ gfx::Transform transform;
+ };
+
+ std::unique_ptr<DCLayerSharedState> dc_layer_shared_state_;
+
DISALLOW_COPY_AND_ASSIGN(GLES2DecoderImpl);
};
@@ -3568,8 +3603,7 @@ bool GLES2DecoderImpl::Initialize(
supports_async_swap_ = surface->SupportsAsyncSwap();
- supports_set_draw_rectangle_ =
- !offscreen && surface->SupportsSetDrawRectangle();
+ supports_dc_layers_ = !offscreen && surface->SupportsDCLayers();
if (workarounds().reverse_point_sprite_coord_origin) {
glPointParameteri(GL_POINT_SPRITE_COORD_ORIGIN, GL_LOWER_LEFT);
@@ -3745,7 +3779,8 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
bool is_offscreen = !!offscreen_target_frame_buffer_.get();
caps.flips_vertically = !is_offscreen && surface_->FlipsVertically();
caps.msaa_is_slow = workarounds().msaa_is_slow;
- caps.set_draw_rectangle = supports_set_draw_rectangle_;
+ caps.avoid_stencil_buffers = workarounds().avoid_stencil_buffers;
+ caps.dc_layers = supports_dc_layers_;
caps.blend_equation_advanced =
feature_info_->feature_flags().blend_equation_advanced;
@@ -3778,6 +3813,8 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
kGpuFeatureStatusEnabled;
caps.disable_webgl_rgb_multisampling_usage =
workarounds().disable_webgl_rgb_multisampling_usage;
+ caps.software_to_accelerated_canvas_upgrade =
+ !workarounds().disable_software_to_accelerated_canvas_upgrade;
caps.emulate_rgb_buffer_with_rgba =
workarounds().disable_gl_rgb_format;
if (workarounds().disable_non_empty_post_sub_buffers_for_onscreen_surfaces &&
@@ -5147,11 +5184,6 @@ error::Error GLES2DecoderImpl::HandleResizeCHROMIUM(
width = std::max(1U, width);
height = std::max(1U, height);
-#if defined(OS_POSIX) && !defined(OS_MACOSX) && \
- !defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
- // Make sure that we are done drawing to the back buffer before resizing.
- glFinish();
-#endif
bool is_offscreen = !!offscreen_target_frame_buffer_.get();
if (is_offscreen) {
if (!ResizeOffscreenFramebuffer(gfx::Size(width, height))) {
@@ -5573,6 +5605,17 @@ void GLES2DecoderImpl::RestoreState(const ContextState* prev_state) {
state_.RestoreState(prev_state);
}
+void GLES2DecoderImpl::RestoreBufferBinding(unsigned int target) {
+ if (target == GL_PIXEL_PACK_BUFFER) {
+ state_.UpdatePackParameters();
+ } else if (target == GL_PIXEL_UNPACK_BUFFER) {
+ state_.UpdateUnpackParameters();
+ }
+ Buffer* bound_buffer =
+ buffer_manager()->GetBufferInfoForTarget(&state_, target);
+ glBindBuffer(target, bound_buffer ? bound_buffer->service_id() : 0);
+}
+
void GLES2DecoderImpl::RestoreFramebufferBindings() const {
GLuint service_id =
framebuffer_state_.bound_draw_framebuffer.get()
@@ -5653,13 +5696,22 @@ void GLES2DecoderImpl::OnUseFramebuffer() const {
return;
state_.fbo_binding_for_scissor_workaround_dirty = false;
- if (workarounds().restore_scissor_on_fbo_change) {
+ if (supports_dc_layers_) {
+ gfx::Vector2d draw_offset = GetBoundFramebufferDrawOffset();
+ glViewport(state_.viewport_x + draw_offset.x(),
+ state_.viewport_y + draw_offset.y(), state_.viewport_width,
+ state_.viewport_height);
+ }
+
+ if (workarounds().restore_scissor_on_fbo_change || supports_dc_layers_) {
// The driver forgets the correct scissor when modifying the FBO binding.
- glScissor(state_.scissor_x,
- state_.scissor_y,
- state_.scissor_width,
+ gfx::Vector2d scissor_offset = GetBoundFramebufferDrawOffset();
+ glScissor(state_.scissor_x + scissor_offset.x(),
+ state_.scissor_y + scissor_offset.y(), state_.scissor_width,
state_.scissor_height);
+ }
+ if (workarounds().restore_scissor_on_fbo_change) {
// crbug.com/222018 - Also on QualComm, the flush here avoids flicker,
// it's unclear how this bug works.
glFlush();
@@ -7662,7 +7714,9 @@ void GLES2DecoderImpl::RestoreClearState() {
glClearDepth(state_.depth_clear);
state_.SetDeviceCapabilityState(GL_SCISSOR_TEST,
state_.enable_flags.scissor_test);
- glScissor(state_.scissor_x, state_.scissor_y, state_.scissor_width,
+ gfx::Vector2d scissor_offset = GetBoundFramebufferDrawOffset();
+ glScissor(state_.scissor_x + scissor_offset.x(),
+ state_.scissor_y + scissor_offset.y(), state_.scissor_width,
state_.scissor_height);
}
@@ -8338,7 +8392,7 @@ void GLES2DecoderImpl::RenderbufferStorageMultisampleHelper(
if (feature_info->feature_flags().use_core_framebuffer_multisample) {
glRenderbufferStorageMultisample(
target, samples, internal_format, width, height);
- } else if (feature_info->gl_version_info().is_angle) {
+ } else if (feature_info->feature_flags().angle_framebuffer_multisample) {
// This is ES2 only.
glRenderbufferStorageMultisampleANGLE(
target, samples, internal_format, width, height);
@@ -8363,7 +8417,7 @@ void GLES2DecoderImpl::BlitFramebufferHelper(GLint srcX0,
if (feature_info_->feature_flags().use_core_framebuffer_multisample) {
glBlitFramebuffer(
srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
- } else if (gl_version_info().is_angle) {
+ } else if (feature_info_->feature_flags().angle_framebuffer_multisample) {
// This is ES2 only.
glBlitFramebufferANGLE(
srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
@@ -8715,7 +8769,7 @@ void GLES2DecoderImpl::DoSetDrawRectangleCHROMIUM(GLint x,
"framebuffer must not be bound");
return;
}
- if (!supports_set_draw_rectangle_) {
+ if (!supports_dc_layers_) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glSetDrawRectangleCHROMIUM",
"surface doesn't support SetDrawRectangle");
return;
@@ -8725,6 +8779,25 @@ void GLES2DecoderImpl::DoSetDrawRectangleCHROMIUM(GLint x,
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glSetDrawRectangleCHROMIUM",
"failed on surface");
}
+ OnFboChanged();
+}
+
+void GLES2DecoderImpl::DoSetEnableDCLayersCHROMIUM(GLboolean enable) {
+ Framebuffer* framebuffer = GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER);
+ if (framebuffer) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glSetEnableDCLayersCHROMIUM",
+ "framebuffer must not be bound");
+ return;
+ }
+ if (!supports_dc_layers_) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glSetEnableDCLayersCHROMIUM",
+ "surface doesn't support SetDrawRectangle");
+ return;
+ }
+ if (!surface_->SetEnableDCLayers(!!enable)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glSetEnableDCLayersCHROMIUM",
+ "failed on surface");
+ }
}
void GLES2DecoderImpl::DoReadBuffer(GLenum src) {
@@ -9749,15 +9822,24 @@ bool GLES2DecoderImpl::IsDrawValid(
return false;
}
- return state_.vertex_attrib_manager
- ->ValidateBindings(function_name,
- this,
- feature_info_.get(),
- buffer_manager(),
- state_.current_program.get(),
- max_vertex_accessed,
- instanced,
- primcount);
+ if (!state_.vertex_attrib_manager->ValidateBindings(
+ function_name, this, feature_info_.get(), buffer_manager(),
+ state_.current_program.get(), max_vertex_accessed, instanced,
+ primcount)) {
+ return false;
+ }
+
+ if (workarounds().disallow_large_instanced_draw) {
+ const GLsizei kMaxInstancedDrawPrimitiveCount = 0x4000000;
+ if (primcount > kMaxInstancedDrawPrimitiveCount) {
+ LOCAL_SET_GL_ERROR(
+ GL_OUT_OF_MEMORY, function_name,
+ "Instanced draw primcount too large for this platform");
+ return false;
+ }
+ }
+
+ return true;
}
bool GLES2DecoderImpl::SimulateAttrib0(
@@ -11179,7 +11261,16 @@ void GLES2DecoderImpl::DoViewport(GLint x, GLint y, GLsizei width,
state_.viewport_y = y;
state_.viewport_width = std::min(width, viewport_max_width_);
state_.viewport_height = std::min(height, viewport_max_height_);
- glViewport(x, y, width, height);
+ gfx::Vector2d viewport_offset = GetBoundFramebufferDrawOffset();
+ glViewport(x + viewport_offset.x(), y + viewport_offset.y(), width, height);
+}
+
+void GLES2DecoderImpl::DoScissor(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ gfx::Vector2d draw_offset = GetBoundFramebufferDrawOffset();
+ glScissor(x + draw_offset.x(), y + draw_offset.y(), width, height);
}
error::Error GLES2DecoderImpl::HandleVertexAttribDivisorANGLE(
@@ -11781,6 +11872,7 @@ void GLES2DecoderImpl::DoSwapBuffersWithBoundsCHROMIUM(
}
ClearScheduleCALayerState();
+ ClearScheduleDCLayerState();
std::vector<gfx::Rect> bounds(count);
for (GLsizei i = 0; i < count; ++i) {
@@ -11817,12 +11909,17 @@ error::Error GLES2DecoderImpl::HandlePostSubBufferCHROMIUM(
}
ClearScheduleCALayerState();
+ ClearScheduleDCLayerState();
if (supports_async_swap_) {
- TRACE_EVENT_ASYNC_BEGIN0("cc", "GLES2DecoderImpl::AsyncSwapBuffers", this);
+ DCHECK_LT(pending_swaps_, 2u);
+ uint32_t async_swap_id = next_async_swap_id_++;
+ ++pending_swaps_;
+ TRACE_EVENT_ASYNC_BEGIN0("gpu", "AsyncSwapBuffers", async_swap_id);
+
surface_->PostSubBufferAsync(
c.x, c.y, c.width, c.height,
- base::Bind(&GLES2DecoderImpl::FinishSwapBuffers,
+ base::Bind(&GLES2DecoderImpl::FinishAsyncSwapBuffers,
base::AsWeakPtr(this)));
} else {
FinishSwapBuffers(surface_->PostSubBuffer(c.x, c.y, c.width, c.height));
@@ -11960,6 +12057,91 @@ error::Error GLES2DecoderImpl::HandleScheduleCALayerCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleScheduleDCLayerSharedStateCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM& c =
+ *static_cast<
+ const volatile gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM*>(
+ cmd_data);
+
+ const GLfloat* mem = GetSharedMemoryAs<const GLfloat*>(c.shm_id, c.shm_offset,
+ 20 * sizeof(GLfloat));
+ if (!mem) {
+ return error::kOutOfBounds;
+ }
+ gfx::RectF clip_rect(mem[0], mem[1], mem[2], mem[3]);
+ gfx::Transform transform(mem[4], mem[8], mem[12], mem[16], mem[5], mem[9],
+ mem[13], mem[17], mem[6], mem[10], mem[14], mem[18],
+ mem[7], mem[11], mem[15], mem[19]);
+ dc_layer_shared_state_.reset(new DCLayerSharedState);
+ dc_layer_shared_state_->opacity = c.opacity;
+ dc_layer_shared_state_->is_clipped = c.is_clipped ? true : false;
+ dc_layer_shared_state_->clip_rect = gfx::ToEnclosingRect(clip_rect);
+ dc_layer_shared_state_->z_order = c.z_order;
+ dc_layer_shared_state_->transform = transform;
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleScheduleDCLayerCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::ScheduleDCLayerCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::ScheduleDCLayerCHROMIUM*>(
+ cmd_data);
+ GLuint filter = c.filter;
+ if (filter != GL_NEAREST && filter != GL_LINEAR) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
+ "invalid filter");
+ return error::kNoError;
+ }
+
+ if (!dc_layer_shared_state_) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, "glScheduleDCLayerCHROMIUM",
+ "glScheduleDCLayerSharedStateCHROMIUM has not been called");
+ return error::kNoError;
+ }
+
+ gl::GLImage* image = nullptr;
+ GLuint contents_texture_id = c.contents_texture_id;
+ if (contents_texture_id) {
+ TextureRef* ref = texture_manager()->GetTexture(contents_texture_id);
+ if (!ref) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
+ "unknown texture");
+ return error::kNoError;
+ }
+ Texture::ImageState image_state;
+ image = ref->texture()->GetLevelImage(ref->texture()->target(), 0,
+ &image_state);
+ if (!image) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
+ "unsupported texture format");
+ return error::kNoError;
+ }
+ }
+
+ const GLfloat* mem = GetSharedMemoryAs<const GLfloat*>(c.shm_id, c.shm_offset,
+ 8 * sizeof(GLfloat));
+ if (!mem) {
+ return error::kOutOfBounds;
+ }
+ gfx::RectF contents_rect(mem[0], mem[1], mem[2], mem[3]);
+ gfx::RectF bounds_rect(mem[4], mem[5], mem[6], mem[7]);
+
+ ui::DCRendererLayerParams params = ui::DCRendererLayerParams(
+ dc_layer_shared_state_->is_clipped, dc_layer_shared_state_->clip_rect,
+ dc_layer_shared_state_->z_order, dc_layer_shared_state_->transform, image,
+ contents_rect, gfx::ToEnclosingRect(bounds_rect), c.background_color,
+ c.edge_aa_mask, dc_layer_shared_state_->opacity, filter);
+ if (!surface_->ScheduleDCLayer(params)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glScheduleDCLayerCHROMIUM",
+ "failed to schedule DCLayer");
+ }
+ return error::kNoError;
+}
+
void GLES2DecoderImpl::DoScheduleCALayerInUseQueryCHROMIUM(
GLsizei count,
const volatile GLuint* textures) {
@@ -12342,13 +12524,6 @@ error::Error GLES2DecoderImpl::HandleGetString(uint32_t immediate_data_size,
case GL_SHADING_LANGUAGE_VERSION:
str = GetServiceShadingLanguageVersionString(feature_info_.get());
break;
- case GL_RENDERER:
- str = GetServiceRendererString(feature_info_.get());
- break;
- case GL_VENDOR:
- str = GetServiceVendorString(feature_info_.get());
- break;
- break;
case GL_EXTENSIONS:
{
// For WebGL contexts, strip out shader extensions if they have not
@@ -12468,7 +12643,9 @@ bool GLES2DecoderImpl::ClearLevel(Texture* texture,
glClearDepth(1.0f);
state_.SetDeviceDepthMask(GL_TRUE);
state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, true);
- glScissor(xoffset, yoffset, width, height);
+ gfx::Vector2d scissor_offset = GetBoundFramebufferDrawOffset();
+ glScissor(xoffset + scissor_offset.x(), yoffset + scissor_offset.y(), width,
+ height);
glClear(GL_DEPTH_BUFFER_BIT | (have_stencil ? GL_STENCIL_BUFFER_BIT : 0));
RestoreClearState();
@@ -14019,7 +14196,11 @@ bool GLES2DecoderImpl::ValidateCopyTexFormatHelper(
std::string("can not be used with depth or stencil textures");
return false;
}
- if (feature_info_->IsWebGL2OrES3Context()) {
+ if (feature_info_->IsWebGL2OrES3Context() ||
+ (feature_info_->feature_flags().chromium_color_buffer_float_rgb &&
+ internal_format == GL_RGB32F) ||
+ (feature_info_->feature_flags().chromium_color_buffer_float_rgba &&
+ internal_format == GL_RGBA32F)) {
if (GLES2Util::IsSizedColorFormat(internal_format)) {
int sr, sg, sb, sa;
GLES2Util::GetColorFormatComponentSizes(
@@ -15275,6 +15456,7 @@ void GLES2DecoderImpl::DoSwapBuffers() {
}
ClearScheduleCALayerState();
+ ClearScheduleDCLayerState();
// If offscreen then don't actually SwapBuffers to the display. Just copy
// the rendered frame to another frame buffer.
@@ -15355,9 +15537,13 @@ void GLES2DecoderImpl::DoSwapBuffers() {
glFlush();
}
} else if (supports_async_swap_) {
- TRACE_EVENT_ASYNC_BEGIN0("cc", "GLES2DecoderImpl::AsyncSwapBuffers", this);
- surface_->SwapBuffersAsync(base::Bind(&GLES2DecoderImpl::FinishSwapBuffers,
- base::AsWeakPtr(this)));
+ DCHECK_LT(pending_swaps_, 2u);
+ uint32_t async_swap_id = next_async_swap_id_++;
+ ++pending_swaps_;
+ TRACE_EVENT_ASYNC_BEGIN0("gpu", "AsyncSwapBuffers", async_swap_id);
+
+ surface_->SwapBuffersAsync(base::Bind(
+ &GLES2DecoderImpl::FinishAsyncSwapBuffers, base::AsWeakPtr(this)));
} else {
FinishSwapBuffers(surface_->SwapBuffers());
}
@@ -15367,6 +15553,15 @@ void GLES2DecoderImpl::DoSwapBuffers() {
ExitCommandProcessingEarly();
}
+void GLES2DecoderImpl::FinishAsyncSwapBuffers(gfx::SwapResult result) {
+ DCHECK_NE(0u, pending_swaps_);
+ uint32_t async_swap_id = next_async_swap_id_ - pending_swaps_;
+ --pending_swaps_;
+ TRACE_EVENT_ASYNC_END0("gpu", "AsyncSwapBuffers", async_swap_id);
+
+ FinishSwapBuffers(result);
+}
+
void GLES2DecoderImpl::FinishSwapBuffers(gfx::SwapResult result) {
if (result == gfx::SwapResult::SWAP_FAILED) {
LOG(ERROR) << "Context lost because SwapBuffers failed.";
@@ -15381,10 +15576,6 @@ void GLES2DecoderImpl::FinishSwapBuffers(gfx::SwapResult result) {
// known values.
backbuffer_needs_clear_bits_ |= GL_COLOR_BUFFER_BIT;
}
-
- if (supports_async_swap_) {
- TRACE_EVENT_ASYNC_END0("cc", "GLES2DecoderImpl::AsyncSwapBuffers", this);
- }
}
void GLES2DecoderImpl::DoCommitOverlayPlanes() {
@@ -15395,6 +15586,7 @@ void GLES2DecoderImpl::DoCommitOverlayPlanes() {
return;
}
ClearScheduleCALayerState();
+ ClearScheduleDCLayerState();
if (supports_async_swap_) {
surface_->CommitOverlayPlanesAsync(base::Bind(
&GLES2DecoderImpl::FinishSwapBuffers, base::AsWeakPtr(this)));
@@ -16312,13 +16504,9 @@ bool GLES2DecoderImpl::CanUseCopyTextureCHROMIUMInternalFormat(
}
}
-CopyTextureMethod GLES2DecoderImpl::ValidateCopyTextureCHROMIUMInternalFormats(
+bool GLES2DecoderImpl::ValidateCopyTextureCHROMIUMInternalFormats(
const char* function_name,
- GLint source_level,
GLenum source_internal_format,
- GLenum source_type,
- GLenum dest_target,
- GLint dest_level,
GLenum dest_internal_format) {
bool valid_dest_format = false;
// TODO(qiankun.miao@intel.com): ALPHA, LUMINANCE and LUMINANCE_ALPHA formats
@@ -16355,19 +16543,25 @@ CopyTextureMethod GLES2DecoderImpl::ValidateCopyTextureCHROMIUMInternalFormats(
valid_dest_format = feature_info_->IsWebGL2OrES3Context();
break;
case GL_RGB9_E5:
- valid_dest_format = !gl_version_info().is_es;
- break;
case GL_R16F:
case GL_R32F:
case GL_RG16F:
case GL_RG32F:
case GL_RGB16F:
- case GL_RGB32F:
case GL_RGBA16F:
- case GL_RGBA32F:
case GL_R11F_G11F_B10F:
valid_dest_format = feature_info_->ext_color_buffer_float_available();
break;
+ case GL_RGB32F:
+ valid_dest_format =
+ feature_info_->ext_color_buffer_float_available() ||
+ feature_info_->feature_flags().chromium_color_buffer_float_rgb;
+ break;
+ case GL_RGBA32F:
+ valid_dest_format =
+ feature_info_->ext_color_buffer_float_available() ||
+ feature_info_->feature_flags().chromium_color_buffer_float_rgba;
+ break;
default:
valid_dest_format = false;
break;
@@ -16388,22 +16582,67 @@ CopyTextureMethod GLES2DecoderImpl::ValidateCopyTextureCHROMIUMInternalFormats(
GLES2Util::GetStringEnum(source_internal_format);
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
msg.c_str());
- return NOT_COPYABLE;
+ return false;
}
if (!valid_dest_format) {
std::string msg = "invalid dest internal format " +
GLES2Util::GetStringEnum(dest_internal_format);
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
msg.c_str());
- return NOT_COPYABLE;
+ return false;
}
+ return true;
+}
+
+CopyTextureMethod GLES2DecoderImpl::getCopyTextureCHROMIUMMethod(
+ GLenum source_target,
+ GLint source_level,
+ GLenum source_internal_format,
+ GLenum source_type,
+ GLenum dest_target,
+ GLint dest_level,
+ GLenum dest_internal_format,
+ bool flip_y,
+ bool premultiply_alpha,
+ bool unpremultiply_alpha) {
+ bool premultiply_alpha_change = premultiply_alpha ^ unpremultiply_alpha;
bool source_format_color_renderable =
Texture::ColorRenderable(GetFeatureInfo(), source_internal_format, false);
bool dest_format_color_renderable =
Texture::ColorRenderable(GetFeatureInfo(), dest_internal_format, false);
std::string output_error_msg;
+ switch (dest_internal_format) {
+#if defined(OS_MACOSX)
+ // RGB5_A1 is not color-renderable on NVIDIA Mac, see crbug.com/676209.
+ case GL_RGB5_A1:
+ return DRAW_AND_READBACK;
+#endif
+ // RGB9_E5 isn't accepted by glCopyTexImage2D if underlying context is ES.
+ case GL_RGB9_E5:
+ if (gl_version_info().is_es)
+ return DRAW_AND_READBACK;
+ break;
+ // SRGB format has color-space conversion issue. WebGL spec doesn't define
+ // clearly if linear-to-srgb color space conversion is required or not when
+ // uploading DOM elements to SRGB textures. WebGL conformance test expects
+ // no linear-to-srgb conversion, while current GPU path for
+ // CopyTextureCHROMIUM does the conversion. Do a fallback path before the
+ // issue is resolved. see https://github.com/KhronosGroup/WebGL/issues/2165.
+ // TODO(qiankun.miao@intel.com): revisit this once the above issue is
+ // resolved.
+ case GL_SRGB_EXT:
+ case GL_SRGB_ALPHA_EXT:
+ case GL_SRGB8:
+ case GL_SRGB8_ALPHA8:
+ if (feature_info_->IsWebGLContext())
+ return DRAW_AND_READBACK;
+ break;
+ default:
+ break;
+ }
+
// CopyTexImage* should not allow internalformat of GL_BGRA_EXT and
// GL_BGRA8_EXT. crbug.com/663086.
bool copy_tex_image_format_valid =
@@ -16424,8 +16663,10 @@ CopyTextureMethod GLES2DecoderImpl::ValidateCopyTextureCHROMIUMInternalFormats(
// in ES2 context. DIRECT_DRAW path isn't available for cube map dest texture
// either due to it may be cube map incomplete. Go to DRAW_AND_COPY path in
// these cases.
- if (source_format_color_renderable && copy_tex_image_format_valid &&
- source_level == 0)
+ if (source_target == GL_TEXTURE_2D &&
+ (dest_target == GL_TEXTURE_2D || dest_target == GL_TEXTURE_CUBE_MAP) &&
+ source_format_color_renderable && copy_tex_image_format_valid &&
+ source_level == 0 && !flip_y && !premultiply_alpha_change)
return DIRECT_COPY;
if (dest_format_color_renderable && dest_level == 0 &&
dest_target != GL_TEXTURE_CUBE_MAP)
@@ -16529,12 +16770,8 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
return;
}
- CopyTextureMethod method = ValidateCopyTextureCHROMIUMInternalFormats(
- kFunctionName, source_level, source_internal_format, source_type,
- dest_binding_target, dest_level, internal_format);
- // INVALID_OPERATION is already generated by
- // ValidateCopyTextureCHROMIUMInternalFormats.
- if (method == NOT_COPYABLE) {
+ if (!ValidateCopyTextureCHROMIUMInternalFormats(
+ kFunctionName, source_internal_format, internal_format)) {
return;
}
@@ -16664,6 +16901,11 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
}
}
+ CopyTextureMethod method = getCopyTextureCHROMIUMMethod(
+ source_target, source_level, source_internal_format, source_type,
+ dest_binding_target, dest_level, internal_format,
+ unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
+ unpack_unmultiply_alpha == GL_TRUE);
copy_texture_CHROMIUM_->DoCopyTexture(
this, source_target, source_texture->service_id(), source_level,
source_internal_format, dest_target, dest_texture->service_id(),
@@ -16781,27 +17023,11 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
return;
}
- CopyTextureMethod method = ValidateCopyTextureCHROMIUMInternalFormats(
- kFunctionName, source_level, source_internal_format, source_type,
- dest_binding_target, dest_level, dest_internal_format);
- // INVALID_OPERATION is already generated by
- // ValidateCopyTextureCHROMIUMInternalFormats.
- if (method == NOT_COPYABLE) {
+ if (!ValidateCopyTextureCHROMIUMInternalFormats(
+ kFunctionName, source_internal_format, dest_internal_format)) {
return;
}
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
- // glDrawArrays is faster than glCopyTexSubImage2D on IA Mesa driver,
- // although opposite in Android.
- // TODO(dshwang): After Mesa fixes this issue, remove this hack.
- // https://bugs.freedesktop.org/show_bug.cgi?id=98478, crbug.com/535198.
- if (Texture::ColorRenderable(GetFeatureInfo(), dest_internal_format,
- dest_texture->IsImmutable()) &&
- method == DIRECT_COPY) {
- method = DIRECT_DRAW;
- }
-#endif
-
if (feature_info_->feature_flags().desktop_srgb_support) {
bool enable_framebuffer_srgb =
GLES2Util::GetColorEncodingFromInternalFormat(source_internal_format) ==
@@ -16887,6 +17113,24 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
return;
}
}
+
+ CopyTextureMethod method = getCopyTextureCHROMIUMMethod(
+ source_target, source_level, source_internal_format, source_type,
+ dest_binding_target, dest_level, dest_internal_format,
+ unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
+ unpack_unmultiply_alpha == GL_TRUE);
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+ // glDrawArrays is faster than glCopyTexSubImage2D on IA Mesa driver,
+ // although opposite in Android.
+ // TODO(dshwang): After Mesa fixes this issue, remove this hack.
+ // https://bugs.freedesktop.org/show_bug.cgi?id=98478, crbug.com/535198.
+ if (Texture::ColorRenderable(GetFeatureInfo(), dest_internal_format,
+ dest_texture->IsImmutable()) &&
+ method == DIRECT_COPY) {
+ method = DIRECT_DRAW;
+ }
+#endif
+
copy_texture_CHROMIUM_->DoCopySubTexture(
this, source_target, source_texture->service_id(), source_level,
source_internal_format, dest_target, dest_texture->service_id(),
@@ -17089,9 +17333,14 @@ void GLES2DecoderImpl::TexStorageImpl(GLenum target,
GL_INVALID_OPERATION, function_name, "target invalid for format");
return;
}
+ // The glTexStorage entry points require width, height, and depth to be
+ // at least 1, but the other texture entry points (those which use
+ // ValidForTarget) do not. So we have to add an extra check here.
+ bool is_invalid_texstorage_size = width < 1 || height < 1 || depth < 1;
if (!texture_manager()->ValidForTarget(target, 0, width, height, depth) ||
- TextureManager::ComputeMipMapCount(
- target, width, height, depth) < levels) {
+ is_invalid_texstorage_size ||
+ TextureManager::ComputeMipMapCount(target, width, height, depth) <
+ levels) {
LOCAL_SET_GL_ERROR(
GL_INVALID_VALUE, function_name, "dimensions out of range");
return;
@@ -17443,7 +17692,6 @@ void GLES2DecoderImpl::DoApplyScreenSpaceAntialiasingCHROMIUM() {
apply_framebuffer_attachment_cmaa_intel_.reset(
new ApplyFramebufferAttachmentCMAAINTELResourceManager());
apply_framebuffer_attachment_cmaa_intel_->Initialize(this);
- RestoreCurrentFramebufferBindings();
if (LOCAL_PEEK_GL_ERROR("glApplyFramebufferAttachmentCMAAINTEL") !=
GL_NO_ERROR)
return;
@@ -19163,6 +19411,10 @@ void GLES2DecoderImpl::ClearScheduleCALayerState() {
ca_layer_shared_state_.reset();
}
+void GLES2DecoderImpl::ClearScheduleDCLayerState() {
+ dc_layer_shared_state_.reset();
+}
+
void GLES2DecoderImpl::ClearFramebufferForWorkaround(GLbitfield mask) {
ScopedGLErrorSuppressor suppressor("GLES2DecoderImpl::ClearWorkaround",
GetErrorState());
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
index d7d8a232d4a..8de703fa1dc 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -181,6 +181,7 @@ class GPU_EXPORT GLES2Decoder : public base::SupportsWeakPtr<GLES2Decoder>,
virtual void RestoreAllTextureUnitBindings(
const ContextState* prev_state) const = 0;
virtual void RestoreActiveTextureUnitBinding(unsigned int target) const = 0;
+ virtual void RestoreBufferBinding(unsigned int target) = 0;
virtual void RestoreBufferBindings() const = 0;
virtual void RestoreFramebufferBindings() const = 0;
virtual void RestoreRenderbufferBindings() = 0;
@@ -188,6 +189,7 @@ class GPU_EXPORT GLES2Decoder : public base::SupportsWeakPtr<GLES2Decoder>,
virtual void RestoreProgramBindings() const = 0;
virtual void RestoreTextureState(unsigned service_id) const = 0;
virtual void RestoreTextureUnitBindings(unsigned unit) const = 0;
+ virtual void RestoreVertexAttribArray(unsigned index) = 0;
virtual void RestoreAllExternalTextureBindingsIfNeeded() = 0;
virtual void ClearAllAttributes() const = 0;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
index 9fbab4038bf..fa2f5799cae 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -2618,7 +2618,7 @@ error::Error GLES2DecoderImpl::HandleScissor(uint32_t immediate_data_size,
state_.scissor_y = y;
state_.scissor_width = width;
state_.scissor_height = height;
- glScissor(x, y, width, height);
+ DoScissor(x, y, width, height);
}
return error::kNoError;
}
@@ -4569,10 +4569,10 @@ error::Error GLES2DecoderImpl::HandleCopyTextureCHROMIUM(
const volatile void* cmd_data) {
const volatile gles2::cmds::CopyTextureCHROMIUM& c =
*static_cast<const volatile gles2::cmds::CopyTextureCHROMIUM*>(cmd_data);
- GLenum source_id = static_cast<GLenum>(c.source_id);
+ GLuint source_id = static_cast<GLuint>(c.source_id);
GLint source_level = static_cast<GLint>(c.source_level);
GLenum dest_target = static_cast<GLenum>(c.dest_target);
- GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
GLint dest_level = static_cast<GLint>(c.dest_level);
GLint internalformat = static_cast<GLint>(c.internalformat);
GLenum dest_type = static_cast<GLenum>(c.dest_type);
@@ -4603,10 +4603,10 @@ error::Error GLES2DecoderImpl::HandleCopySubTextureCHROMIUM(
const volatile gles2::cmds::CopySubTextureCHROMIUM& c =
*static_cast<const volatile gles2::cmds::CopySubTextureCHROMIUM*>(
cmd_data);
- GLenum source_id = static_cast<GLenum>(c.source_id);
+ GLuint source_id = static_cast<GLuint>(c.source_id);
GLint source_level = static_cast<GLint>(c.source_level);
GLenum dest_target = static_cast<GLenum>(c.dest_target);
- GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
GLint dest_level = static_cast<GLint>(c.dest_level);
GLint xoffset = static_cast<GLint>(c.xoffset);
GLint yoffset = static_cast<GLint>(c.yoffset);
@@ -4642,8 +4642,8 @@ error::Error GLES2DecoderImpl::HandleCompressedCopyTextureCHROMIUM(
const volatile gles2::cmds::CompressedCopyTextureCHROMIUM& c =
*static_cast<const volatile gles2::cmds::CompressedCopyTextureCHROMIUM*>(
cmd_data);
- GLenum source_id = static_cast<GLenum>(c.source_id);
- GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLuint source_id = static_cast<GLuint>(c.source_id);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
DoCompressedCopyTextureCHROMIUM(source_id, dest_id);
return error::kNoError;
}
@@ -5172,6 +5172,17 @@ error::Error GLES2DecoderImpl::HandleSetDrawRectangleCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleSetEnableDCLayersCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::SetEnableDCLayersCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::SetEnableDCLayersCHROMIUM*>(
+ cmd_data);
+ GLboolean enabled = static_cast<GLboolean>(c.enabled);
+ DoSetEnableDCLayersCHROMIUM(enabled);
+ return error::kNoError;
+}
+
bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
switch (cap) {
case GL_BLEND:
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
index 64ca1a8c4b4..c251ae5d710 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
@@ -82,6 +82,7 @@ class MockGLES2Decoder : public GLES2Decoder {
MOCK_CONST_METHOD1(
RestoreActiveTextureUnitBinding, void(unsigned int target));
MOCK_METHOD0(RestoreAllExternalTextureBindingsIfNeeded, void());
+ MOCK_METHOD1(RestoreBufferBinding, void(unsigned int target));
MOCK_CONST_METHOD0(RestoreBufferBindings, void());
MOCK_CONST_METHOD0(RestoreFramebufferBindings, void());
MOCK_CONST_METHOD0(RestoreGlobalState, void());
@@ -89,6 +90,7 @@ class MockGLES2Decoder : public GLES2Decoder {
MOCK_METHOD0(RestoreRenderbufferBindings, void());
MOCK_CONST_METHOD1(RestoreTextureState, void(unsigned service_id));
MOCK_CONST_METHOD1(RestoreTextureUnitBindings, void(unsigned unit));
+ MOCK_METHOD1(RestoreVertexAttribArray, void(unsigned index));
MOCK_CONST_METHOD0(ClearAllAttributes, void());
MOCK_CONST_METHOD0(RestoreAllAttributes, void());
MOCK_METHOD0(GetQueryManager, gpu::gles2::QueryManager*());
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index 86f13e5f7f1..eac5a98899c 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -78,7 +78,7 @@ GLES2DecoderPassthroughImpl::GLES2DecoderPassthroughImpl(ContextGroup* group)
context_(),
offscreen_(false),
group_(group),
- feature_info_(group->feature_info()) {
+ feature_info_(new FeatureInfo) {
DCHECK(group);
}
@@ -172,14 +172,25 @@ bool GLES2DecoderPassthroughImpl::Initialize(
return false;
}
+ // Each context initializes its own feature info because some extensions may
+ // be enabled dynamically
+ DisallowedFeatures adjusted_disallowed_features =
+ AdjustDisallowedFeatures(attrib_helper.context_type, disallowed_features);
+ if (!feature_info_->Initialize(attrib_helper.context_type,
+ adjusted_disallowed_features)) {
+ Destroy(true);
+ return false;
+ }
+
// Check for required extensions
if (!feature_info_->feature_flags().angle_robust_client_memory ||
!feature_info_->feature_flags().chromium_bind_generates_resource ||
!feature_info_->feature_flags().chromium_copy_texture ||
!feature_info_->feature_flags().angle_client_arrays ||
- glIsEnabled(GL_CLIENT_ARRAYS_ANGLE) != GL_FALSE) {
- // TODO(geofflang): Verify that ANGLE_webgl_compatibility is enabled if this
- // is a WebGL context (depends on crbug.com/671217).
+ glIsEnabled(GL_CLIENT_ARRAYS_ANGLE) != GL_FALSE ||
+ feature_info_->feature_flags().angle_webgl_compatibility !=
+ IsWebGLContextType(attrib_helper.context_type) ||
+ !feature_info_->feature_flags().angle_request_extension) {
Destroy(true);
return false;
}
@@ -384,6 +395,8 @@ void GLES2DecoderPassthroughImpl::RestoreAllTextureUnitBindings(
void GLES2DecoderPassthroughImpl::RestoreActiveTextureUnitBinding(
unsigned int target) const {}
+void GLES2DecoderPassthroughImpl::RestoreBufferBinding(unsigned int target) {}
+
void GLES2DecoderPassthroughImpl::RestoreBufferBindings() const {}
void GLES2DecoderPassthroughImpl::RestoreFramebufferBindings() const {}
@@ -400,6 +413,8 @@ void GLES2DecoderPassthroughImpl::RestoreTextureState(
void GLES2DecoderPassthroughImpl::RestoreTextureUnitBindings(
unsigned unit) const {}
+void GLES2DecoderPassthroughImpl::RestoreVertexAttribArray(unsigned index) {}
+
void GLES2DecoderPassthroughImpl::RestoreAllExternalTextureBindingsIfNeeded() {}
void GLES2DecoderPassthroughImpl::ClearAllAttributes() const {}
@@ -824,6 +839,25 @@ error::Error GLES2DecoderPassthroughImpl::ProcessQueries(bool did_finish) {
return error::kNoError;
}
+void GLES2DecoderPassthroughImpl::RemovePendingQuery(GLuint service_id) {
+ auto pending_iter =
+ std::find_if(pending_queries_.begin(), pending_queries_.end(),
+ [service_id](const PendingQuery& pending_query) {
+ return pending_query.service_id == service_id;
+ });
+ if (pending_iter != pending_queries_.end()) {
+ QuerySync* sync = GetSharedMemoryAs<QuerySync*>(
+ pending_iter->shm_id, pending_iter->shm_offset, sizeof(QuerySync));
+ if (sync != nullptr) {
+ sync->result = 0;
+ base::subtle::Release_Store(&sync->process_count,
+ pending_iter->submit_count);
+ }
+
+ pending_queries_.erase(pending_iter);
+ }
+}
+
void GLES2DecoderPassthroughImpl::UpdateTextureBinding(GLenum target,
GLuint client_id,
GLuint service_id) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index cd7ef379cdd..0985a2c3b9e 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -30,6 +30,14 @@ namespace gles2 {
class ContextGroup;
+struct MappedBuffer {
+ GLsizeiptr size;
+ GLbitfield access;
+ uint8_t* map_ptr;
+ int32_t data_shm_id;
+ uint32_t data_shm_offset;
+};
+
struct PassthroughResources {
PassthroughResources();
~PassthroughResources();
@@ -53,6 +61,10 @@ struct PassthroughResources {
// using the mailbox are deleted
std::unordered_map<GLuint, scoped_refptr<TexturePassthrough>>
texture_object_map;
+
+ // Mapping of client buffer IDs that are mapped to the shared memory used to
+ // back the mapping so that it can be flushed when the buffer is unmapped
+ std::unordered_map<GLuint, MappedBuffer> mapped_buffer_map;
};
class GLES2DecoderPassthroughImpl : public GLES2Decoder {
@@ -114,6 +126,7 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
void RestoreAllTextureUnitBindings(
const ContextState* prev_state) const override;
void RestoreActiveTextureUnitBinding(unsigned int target) const override;
+ void RestoreBufferBinding(unsigned int target) override;
void RestoreBufferBindings() const override;
void RestoreFramebufferBindings() const override;
void RestoreRenderbufferBindings() override;
@@ -121,6 +134,7 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
void RestoreProgramBindings() const override;
void RestoreTextureState(unsigned service_id) const override;
void RestoreTextureUnitBindings(unsigned unit) const override;
+ void RestoreVertexAttribArray(unsigned index) override;
void RestoreAllExternalTextureBindingsIfNeeded() override;
void ClearAllAttributes() const override;
@@ -285,6 +299,7 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
bool IsEmulatedQueryTarget(GLenum target) const;
error::Error ProcessQueries(bool did_finish);
+ void RemovePendingQuery(GLuint service_id);
void UpdateTextureBinding(GLenum target, GLuint client_id, GLuint service_id);
@@ -351,6 +366,9 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
size_t active_texture_unit_;
std::unordered_map<GLenum, std::vector<GLuint>> bound_textures_;
+ // State tracking of currently bound buffers
+ std::unordered_map<GLenum, GLuint> bound_buffers_;
+
// Track the service-id to type of all queries for validation
struct QueryInfo {
GLenum type = GL_NONE;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
index fb4b4090287..60282e148d8 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
@@ -74,7 +74,8 @@ error::Error DoCompressedTexImage2D(GLenum target,
GLsizei width,
GLsizei height,
GLint border,
- GLsizei imageSize,
+ GLsizei image_size,
+ GLsizei data_size,
const void* data);
error::Error DoCompressedTexSubImage2D(GLenum target,
GLint level,
@@ -83,7 +84,8 @@ error::Error DoCompressedTexSubImage2D(GLenum target,
GLsizei width,
GLsizei height,
GLenum format,
- GLsizei imageSize,
+ GLsizei image_size,
+ GLsizei data_size,
const void* data);
error::Error DoCompressedTexImage3D(GLenum target,
GLint level,
@@ -92,7 +94,8 @@ error::Error DoCompressedTexImage3D(GLenum target,
GLsizei height,
GLsizei depth,
GLint border,
- GLsizei imageSize,
+ GLsizei image_size,
+ GLsizei data_size,
const void* data);
error::Error DoCompressedTexSubImage3D(GLenum target,
GLint level,
@@ -103,7 +106,8 @@ error::Error DoCompressedTexSubImage3D(GLenum target,
GLsizei height,
GLsizei depth,
GLenum format,
- GLsizei imageSize,
+ GLsizei image_size,
+ GLsizei data_size,
const void* data);
error::Error DoCopyBufferSubData(GLenum readtarget,
GLenum writetarget,
@@ -215,8 +219,6 @@ error::Error DoGetActiveUniformsiv(GLuint program,
GLsizei count,
const GLuint* indices,
GLenum pname,
- GLsizei bufSize,
- GLsizei* length,
GLint* params);
error::Error DoGetAttachedShaders(GLuint program,
GLsizei maxcount,
@@ -225,6 +227,10 @@ error::Error DoGetAttachedShaders(GLuint program,
error::Error DoGetAttribLocation(GLuint program,
const char* name,
GLint* result);
+error::Error DoGetBufferSubDataAsyncCHROMIUM(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ uint8_t* mem);
error::Error DoGetBooleanv(GLenum pname,
GLsizei bufsize,
GLsizei* length,
@@ -354,7 +360,6 @@ error::Error DoGetUniformIndices(GLuint program,
GLsizei count,
const char* const* names,
GLsizei bufSize,
- GLsizei* length,
GLuint* indices);
error::Error DoGetUniformLocation(GLuint program,
const char* name,
@@ -419,6 +424,8 @@ error::Error DoReadPixels(GLint x,
GLenum type,
GLsizei bufsize,
GLsizei* length,
+ GLsizei* columns,
+ GLsizei* rows,
void* pixels,
int32_t* success);
error::Error DoReleaseShaderCompiler();
@@ -713,7 +720,10 @@ error::Error DoMapBufferRange(GLenum target,
GLintptr offset,
GLsizeiptr size,
GLbitfield access,
- void** ptr);
+ void* ptr,
+ int32_t data_shm_id,
+ uint32_t data_shm_offset,
+ uint32_t* result);
error::Error DoUnmapBuffer(GLenum target);
error::Error DoResizeCHROMIUM(GLuint width,
GLuint height,
@@ -737,20 +747,20 @@ error::Error DoPostSubBufferCHROMIUM(GLint x,
GLint y,
GLint width,
GLint height);
-error::Error DoCopyTextureCHROMIUM(GLenum source_id,
+error::Error DoCopyTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha);
-error::Error DoCopySubTextureCHROMIUM(GLenum source_id,
+error::Error DoCopySubTextureCHROMIUM(GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint xoffset,
GLint yoffset,
@@ -761,7 +771,7 @@ error::Error DoCopySubTextureCHROMIUM(GLenum source_id,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha);
-error::Error DoCompressedCopyTextureCHROMIUM(GLenum source_id, GLenum dest_id);
+error::Error DoCompressedCopyTextureCHROMIUM(GLuint source_id, GLuint dest_id);
error::Error DoDrawArraysInstancedANGLE(GLenum mode,
GLint first,
GLsizei count,
@@ -825,6 +835,16 @@ error::Error DoScheduleCALayerCHROMIUM(GLuint contents_texture_id,
error::Error DoScheduleCALayerInUseQueryCHROMIUM(
GLuint n,
const volatile GLuint* textures);
+error::Error DoScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
+ GLboolean is_clipped,
+ const GLfloat* clip_rect,
+ GLint z_order,
+ const GLfloat* transform);
+error::Error DoScheduleDCLayerCHROMIUM(GLuint contents_texture_id,
+ const GLfloat* contents_rect,
+ GLuint background_color,
+ GLuint edge_aa_mask,
+ const GLfloat* bounds_rect);
error::Error DoCommitOverlayPlanesCHROMIUM();
error::Error DoSwapInterval(GLint interval);
error::Error DoFlushDriverCachesCHROMIUM();
@@ -958,3 +978,4 @@ error::Error DoSetDrawRectangleCHROMIUM(GLint x,
GLint y,
GLint width,
GLint height);
+error::Error DoSetEnableDCLayersCHROMIUM(GLboolean enable);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index 74c7a4a6d75..43ec395c203 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -180,17 +180,17 @@ void InsertValueIntoBuffer(std::vector<uint8_t>* data,
template <typename T>
void AppendValueToBuffer(std::vector<uint8_t>* data, const T& value) {
- size_t old_size = data->size();
- data->resize(old_size + sizeof(T));
- memcpy(data->data() + old_size, &value, sizeof(T));
+ const base::CheckedNumeric<size_t> old_size = data->size();
+ data->resize((old_size + sizeof(T)).ValueOrDie());
+ memcpy(data->data() + old_size.ValueOrDie(), &value, sizeof(T));
}
void AppendStringToBuffer(std::vector<uint8_t>* data,
const char* str,
size_t len) {
- size_t old_size = data->size();
- data->resize(old_size + len);
- memcpy(data->data() + old_size, str, len);
+ const base::CheckedNumeric<size_t> old_size = data->size();
+ data->resize((old_size + len).ValueOrDie());
+ memcpy(data->data() + old_size.ValueOrDie(), str, len);
}
} // anonymous namespace
@@ -224,8 +224,15 @@ error::Error GLES2DecoderPassthroughImpl::DoBindAttribLocation(
error::Error GLES2DecoderPassthroughImpl::DoBindBuffer(GLenum target,
GLuint buffer) {
+ FlushErrors();
glBindBuffer(
target, GetBufferServiceID(buffer, resources_, bind_generates_resource_));
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+
+ bound_buffers_[target] = buffer;
+
return error::kNoError;
}
@@ -432,7 +439,10 @@ error::Error GLES2DecoderPassthroughImpl::DoClientWaitSync(GLuint sync,
GLbitfield flags,
GLuint64 timeout,
GLenum* result) {
- NOTIMPLEMENTED();
+ // Force GL_SYNC_FLUSH_COMMANDS_BIT to avoid infinite wait.
+ GLbitfield modified_flags = flags | GL_SYNC_FLUSH_COMMANDS_BIT;
+ *result = glClientWaitSync(GetSyncServiceID(sync, resources_), modified_flags,
+ timeout);
return error::kNoError;
}
@@ -456,10 +466,13 @@ error::Error GLES2DecoderPassthroughImpl::DoCompressedTexImage2D(
GLsizei width,
GLsizei height,
GLint border,
- GLsizei imageSize,
+ GLsizei image_size,
+ GLsizei data_size,
const void* data) {
+ // TODO(cwallez@chromium.org): Use data_size with the robust version of the
+ // entry point
glCompressedTexImage2D(target, level, internalformat, width, height, border,
- imageSize, data);
+ image_size, data);
return error::kNoError;
}
@@ -471,10 +484,13 @@ error::Error GLES2DecoderPassthroughImpl::DoCompressedTexSubImage2D(
GLsizei width,
GLsizei height,
GLenum format,
- GLsizei imageSize,
+ GLsizei image_size,
+ GLsizei data_size,
const void* data) {
+ // TODO(cwallez@chromium.org): Use data_size with the robust version of the
+ // entry point
glCompressedTexSubImage2D(target, level, xoffset, yoffset, width, height,
- format, imageSize, data);
+ format, image_size, data);
return error::kNoError;
}
@@ -486,10 +502,13 @@ error::Error GLES2DecoderPassthroughImpl::DoCompressedTexImage3D(
GLsizei height,
GLsizei depth,
GLint border,
- GLsizei imageSize,
+ GLsizei image_size,
+ GLsizei data_size,
const void* data) {
+ // TODO(cwallez@chromium.org): Use data_size with the robust version of the
+ // entry point
glCompressedTexImage3D(target, level, internalformat, width, height, depth,
- border, imageSize, data);
+ border, image_size, data);
return error::kNoError;
}
@@ -503,10 +522,13 @@ error::Error GLES2DecoderPassthroughImpl::DoCompressedTexSubImage3D(
GLsizei height,
GLsizei depth,
GLenum format,
- GLsizei imageSize,
+ GLsizei image_size,
+ GLsizei data_size,
const void* data) {
+ // TODO(cwallez@chromium.org): Use data_size with the robust version of the
+ // entry point
glCompressedTexSubImage3D(target, level, xoffset, yoffset, zoffset, width,
- height, depth, format, imageSize, data);
+ height, depth, format, image_size, data);
return error::kNoError;
}
@@ -584,9 +606,16 @@ error::Error GLES2DecoderPassthroughImpl::DoDeleteBuffers(
InsertError(GL_INVALID_VALUE, "n cannot be negative.");
return error::kNoError;
}
- return DeleteHelper(
- n, buffers, &resources_->buffer_id_map,
- [](GLsizei n, GLuint* buffers) { glDeleteBuffersARB(n, buffers); });
+ return DeleteHelper(n, buffers, &resources_->buffer_id_map,
+ [this](GLsizei n, GLuint* buffers) {
+ glDeleteBuffersARB(n, buffers);
+ for (GLsizei i = 0; i < n; i++)
+ for (auto buffer_binding : bound_buffers_) {
+ if (buffer_binding.second == buffers[i]) {
+ buffer_binding.second = 0;
+ }
+ }
+ });
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteFramebuffers(
@@ -759,7 +788,19 @@ error::Error GLES2DecoderPassthroughImpl::DoEnableVertexAttribArray(
error::Error GLES2DecoderPassthroughImpl::DoFenceSync(GLenum condition,
GLbitfield flags,
GLuint client_id) {
- NOTIMPLEMENTED();
+ if (resources_->sync_id_map.GetServiceID(client_id, nullptr)) {
+ return error::kInvalidArguments;
+ }
+
+ FlushErrors();
+ GLsync service_id = glFenceSync(condition, flags);
+ if (FlushErrors()) {
+ return error::kInvalidArguments;
+ }
+
+ resources_->sync_id_map.SetIDMapping(client_id,
+ reinterpret_cast<uintptr_t>(service_id));
+
return error::kNoError;
}
@@ -777,6 +818,50 @@ error::Error GLES2DecoderPassthroughImpl::DoFlushMappedBufferRange(
GLenum target,
GLintptr offset,
GLsizeiptr size) {
+ auto bound_buffers_iter = bound_buffers_.find(target);
+ if (bound_buffers_iter == bound_buffers_.end() ||
+ bound_buffers_iter->second == 0) {
+ InsertError(GL_INVALID_OPERATION, "No buffer bound to this target.");
+ return error::kNoError;
+ }
+
+ GLuint client_buffer = bound_buffers_iter->second;
+ auto mapped_buffer_info_iter =
+ resources_->mapped_buffer_map.find(client_buffer);
+ if (mapped_buffer_info_iter == resources_->mapped_buffer_map.end()) {
+ InsertError(GL_INVALID_OPERATION, "Buffer is not mapped.");
+ return error::kNoError;
+ }
+
+ const MappedBuffer& map_info = mapped_buffer_info_iter->second;
+
+ if (offset < 0) {
+ InsertError(GL_INVALID_VALUE, "Offset cannot be negative.");
+ return error::kNoError;
+ }
+
+ if (size < 0) {
+ InsertError(GL_INVALID_VALUE, "Size cannot be negative.");
+ return error::kNoError;
+ }
+
+ base::CheckedNumeric<size_t> range_start(offset);
+ base::CheckedNumeric<size_t> range_end = offset + size;
+ if (!range_end.IsValid() && range_end.ValueOrDefault(0) > map_info.size) {
+ InsertError(GL_INVALID_OPERATION,
+ "Flush range is not within the original mapping size.");
+ return error::kNoError;
+ }
+
+ uint8_t* mem = GetSharedMemoryAs<uint8_t*>(
+ map_info.data_shm_id, map_info.data_shm_offset, map_info.size);
+ if (!mem) {
+ return error::kOutOfBounds;
+ }
+
+ memcpy(map_info.map_ptr + offset, mem + offset, size);
+ glFlushMappedBufferRange(target, offset, size);
+
return error::kNoError;
}
@@ -942,7 +1027,24 @@ error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniformBlockName(
GLuint program,
GLuint index,
std::string* name) {
- NOTIMPLEMENTED();
+ FlushErrors();
+
+ GLuint program_service_id = GetProgramServiceID(program, resources_);
+ GLint max_name_length = 0;
+ glGetProgramiv(program_service_id, GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH,
+ &max_name_length);
+
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+
+ std::vector<GLchar> buffer(max_name_length, 0);
+ GLsizei length = 0;
+ glGetActiveUniformBlockName(program_service_id, index, max_name_length,
+ &length, buffer.data());
+ DCHECK(length <= max_name_length);
+ *name = length > 0 ? std::string(buffer.data(), length) : std::string();
+
return error::kNoError;
}
@@ -951,10 +1053,9 @@ error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniformsiv(
GLsizei count,
const GLuint* indices,
GLenum pname,
- GLsizei bufSize,
- GLsizei* length,
GLint* params) {
- NOTIMPLEMENTED();
+ glGetActiveUniformsiv(GetProgramServiceID(program, resources_), count,
+ indices, pname, params);
return error::kNoError;
}
@@ -963,7 +1064,8 @@ error::Error GLES2DecoderPassthroughImpl::DoGetAttachedShaders(
GLsizei maxcount,
GLsizei* count,
GLuint* shaders) {
- NOTIMPLEMENTED();
+ glGetAttachedShaders(GetProgramServiceID(program, resources_), maxcount,
+ count, shaders);
return error::kNoError;
}
@@ -974,6 +1076,25 @@ error::Error GLES2DecoderPassthroughImpl::DoGetAttribLocation(GLuint program,
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoGetBufferSubDataAsyncCHROMIUM(
+ GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ uint8_t* mem) {
+ FlushErrors();
+
+ void* mapped_ptr = glMapBufferRange(target, offset, size, GL_MAP_READ_BIT);
+ if (FlushErrors() || mapped_ptr == nullptr) {
+ // Had an error while mapping, don't copy any data
+ return error::kNoError;
+ }
+
+ memcpy(mem, mapped_ptr, size);
+ glUnmapBuffer(target);
+
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoGetBooleanv(GLenum pname,
GLsizei bufsize,
GLsizei* length,
@@ -1122,14 +1243,21 @@ error::Error GLES2DecoderPassthroughImpl::DoGetProgramiv(GLuint program,
error::Error GLES2DecoderPassthroughImpl::DoGetProgramInfoLog(
GLuint program,
std::string* infolog) {
+ FlushErrors();
GLint info_log_len = 0;
glGetProgramiv(GetProgramServiceID(program, resources_), GL_INFO_LOG_LENGTH,
&info_log_len);
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+
std::vector<char> buffer(info_log_len, 0);
+ GLsizei length = 0;
glGetProgramInfoLog(GetProgramServiceID(program, resources_), info_log_len,
- nullptr, buffer.data());
- *infolog = info_log_len > 0 ? std::string(buffer.data()) : std::string();
+ &length, buffer.data());
+ DCHECK(length <= info_log_len);
+ *infolog = length > 0 ? std::string(buffer.data(), length) : std::string();
return error::kNoError;
}
@@ -1161,7 +1289,8 @@ error::Error GLES2DecoderPassthroughImpl::DoGetSamplerParameteriv(
GLsizei bufsize,
GLsizei* length,
GLint* params) {
- glGetSamplerParameterivRobustANGLE(sampler, pname, bufsize, length, params);
+ glGetSamplerParameterivRobustANGLE(GetSamplerServiceID(sampler, resources_),
+ pname, bufsize, length, params);
return error::kNoError;
}
@@ -1178,12 +1307,20 @@ error::Error GLES2DecoderPassthroughImpl::DoGetShaderiv(GLuint shader,
error::Error GLES2DecoderPassthroughImpl::DoGetShaderInfoLog(
GLuint shader,
std::string* infolog) {
+ FlushErrors();
+
GLuint service_id = GetShaderServiceID(shader, resources_);
GLint info_log_len = 0;
glGetShaderiv(service_id, GL_INFO_LOG_LENGTH, &info_log_len);
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+
std::vector<char> buffer(info_log_len, 0);
- glGetShaderInfoLog(service_id, info_log_len, nullptr, buffer.data());
- *infolog = info_log_len > 0 ? std::string(buffer.data()) : std::string();
+ GLsizei length = 0;
+ glGetShaderInfoLog(service_id, info_log_len, &length, buffer.data());
+ DCHECK(length <= info_log_len);
+ *infolog = length > 0 ? std::string(buffer.data(), length) : std::string();
return error::kNoError;
}
@@ -1202,7 +1339,23 @@ error::Error GLES2DecoderPassthroughImpl::DoGetShaderPrecisionFormat(
error::Error GLES2DecoderPassthroughImpl::DoGetShaderSource(
GLuint shader,
std::string* source) {
- NOTIMPLEMENTED();
+ FlushErrors();
+
+ GLuint shader_service_id = GetShaderServiceID(shader, resources_);
+ GLint shader_source_length = 0;
+ glGetShaderiv(shader_service_id, GL_SHADER_SOURCE_LENGTH,
+ &shader_source_length);
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+
+ std::vector<char> buffer(shader_source_length, 0);
+ GLsizei length = 0;
+ glGetShaderSource(shader_service_id, shader_source_length, &length,
+ buffer.data());
+ DCHECK(length <= shader_source_length);
+ *source = shader_source_length > 0 ? std::string(buffer.data(), length)
+ : std::string();
return error::kNoError;
}
@@ -1215,12 +1368,6 @@ error::Error GLES2DecoderPassthroughImpl::DoGetString(GLenum name,
case GL_SHADING_LANGUAGE_VERSION:
*result = GetServiceShadingLanguageVersionString(feature_info_.get());
break;
- case GL_RENDERER:
- *result = GetServiceRendererString(feature_info_.get());
- break;
- case GL_VENDOR:
- *result = GetServiceVendorString(feature_info_.get());
- break;
case GL_EXTENSIONS:
*result = feature_info_->extensions().c_str();
break;
@@ -1290,7 +1437,8 @@ error::Error GLES2DecoderPassthroughImpl::DoGetUniformBlockIndex(
GLuint program,
const char* name,
GLint* index) {
- NOTIMPLEMENTED();
+ *index =
+ glGetUniformBlockIndex(GetProgramServiceID(program, resources_), name);
return error::kNoError;
}
@@ -1335,9 +1483,9 @@ error::Error GLES2DecoderPassthroughImpl::DoGetUniformIndices(
GLsizei count,
const char* const* names,
GLsizei bufSize,
- GLsizei* length,
GLuint* indices) {
- NOTIMPLEMENTED();
+ glGetUniformIndices(GetProgramServiceID(program, resources_), count, names,
+ indices);
return error::kNoError;
}
@@ -1449,7 +1597,6 @@ error::Error GLES2DecoderPassthroughImpl::DoInvalidateSubFramebuffer(
error::Error GLES2DecoderPassthroughImpl::DoIsBuffer(GLuint buffer,
uint32_t* result) {
- NOTIMPLEMENTED();
*result = glIsBuffer(GetBufferServiceID(buffer, resources_, false));
return error::kNoError;
}
@@ -1475,7 +1622,6 @@ error::Error GLES2DecoderPassthroughImpl::DoIsProgram(GLuint program,
error::Error GLES2DecoderPassthroughImpl::DoIsRenderbuffer(GLuint renderbuffer,
uint32_t* result) {
- NOTIMPLEMENTED();
*result = glIsRenderbufferEXT(
GetRenderbufferServiceID(renderbuffer, resources_, false));
return error::kNoError;
@@ -1553,11 +1699,13 @@ error::Error GLES2DecoderPassthroughImpl::DoReadPixels(GLint x,
GLenum type,
GLsizei bufsize,
GLsizei* length,
+ GLsizei* columns,
+ GLsizei* rows,
void* pixels,
int32_t* success) {
FlushErrors();
glReadPixelsRobustANGLE(x, y, width, height, format, type, bufsize, length,
- pixels);
+ columns, rows, pixels);
*success = FlushErrors() ? 0 : 1;
return error::kNoError;
}
@@ -1636,7 +1784,11 @@ error::Error GLES2DecoderPassthroughImpl::DoShaderBinary(GLsizei n,
GLenum binaryformat,
const void* binary,
GLsizei length) {
- NOTIMPLEMENTED();
+ std::vector<GLuint> service_shaders(n, 0);
+ for (GLsizei i = 0; i < n; i++) {
+ service_shaders[i] = GetShaderServiceID(shaders[i], resources_);
+ }
+ glShaderBinary(n, service_shaders.data(), binaryformat, binary, length);
return error::kNoError;
}
@@ -2224,7 +2376,7 @@ error::Error GLES2DecoderPassthroughImpl::DoViewport(GLint x,
error::Error GLES2DecoderPassthroughImpl::DoWaitSync(GLuint sync,
GLbitfield flags,
GLuint64 timeout) {
- NOTIMPLEMENTED();
+ glWaitSync(GetSyncServiceID(sync, resources_), flags, timeout);
return error::kNoError;
}
@@ -2294,7 +2446,9 @@ error::Error GLES2DecoderPassthroughImpl::DoFramebufferTexture2DMultisampleEXT(
GLuint texture,
GLint level,
GLsizei samples) {
- NOTIMPLEMENTED();
+ glFramebufferTexture2DMultisampleEXT(
+ target, attachment, textarget,
+ GetTextureServiceID(texture, resources_, false), level, samples);
return error::kNoError;
}
@@ -2347,14 +2501,7 @@ error::Error GLES2DecoderPassthroughImpl::DoDeleteQueriesEXT(
active_queries_.erase(active_queries_iter);
}
- auto pending_iter =
- std::find_if(pending_queries_.begin(), pending_queries_.end(),
- [query_service_id](const PendingQuery& pending_query) {
- return pending_query.service_id == query_service_id;
- });
- if (pending_iter != pending_queries_.end()) {
- pending_queries_.erase(pending_iter);
- }
+ RemovePendingQuery(query_service_id);
}
return DeleteHelper(
queries_copy.size(), queries_copy.data(), &query_id_map_,
@@ -2386,6 +2533,10 @@ error::Error GLES2DecoderPassthroughImpl::DoQueryCounterEXT(
QueryInfo* query_info = &query_info_map_[service_id];
query_info->type = target;
+ // Make sure to stop tracking this query if it was still pending a result from
+ // a previous glEndQuery
+ RemovePendingQuery(service_id);
+
PendingQuery pending_query;
pending_query.target = target;
pending_query.service_id = service_id;
@@ -2435,6 +2586,10 @@ error::Error GLES2DecoderPassthroughImpl::DoBeginQueryEXT(
query_info->type = target;
+ // Make sure to stop tracking this query if it was still pending a result from
+ // a previous glEndQuery
+ RemovePendingQuery(service_id);
+
ActiveQuery query;
query.service_id = service_id;
query.shm_id = sync_shm_id;
@@ -2498,19 +2653,31 @@ error::Error GLES2DecoderPassthroughImpl::DoSetDisjointValueSyncCHROMIUM(
error::Error GLES2DecoderPassthroughImpl::DoInsertEventMarkerEXT(
GLsizei length,
const char* marker) {
- NOTIMPLEMENTED();
+ if (!feature_info_->feature_flags().ext_debug_marker) {
+ return error::kUnknownCommand;
+ }
+
+ glInsertEventMarkerEXT(length, marker);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoPushGroupMarkerEXT(
GLsizei length,
const char* marker) {
- NOTIMPLEMENTED();
+ if (!feature_info_->feature_flags().ext_debug_marker) {
+ return error::kUnknownCommand;
+ }
+
+ glPushGroupMarkerEXT(length, marker);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoPopGroupMarkerEXT() {
- NOTIMPLEMENTED();
+ if (!feature_info_->feature_flags().ext_debug_marker) {
+ return error::kUnknownCommand;
+ }
+
+ glPopGroupMarkerEXT();
return error::kNoError;
}
@@ -2572,17 +2739,100 @@ error::Error GLES2DecoderPassthroughImpl::DoEnableFeatureCHROMIUM(
return error::kNoError;
}
-error::Error GLES2DecoderPassthroughImpl::DoMapBufferRange(GLenum target,
- GLintptr offset,
- GLsizeiptr size,
- GLbitfield access,
- void** ptr) {
- NOTIMPLEMENTED();
+error::Error GLES2DecoderPassthroughImpl::DoMapBufferRange(
+ GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLbitfield access,
+ void* ptr,
+ int32_t data_shm_id,
+ uint32_t data_shm_offset,
+ uint32_t* result) {
+ FlushErrors();
+
+ GLbitfield filtered_access = access;
+
+ // Always filter out GL_MAP_UNSYNCHRONIZED_BIT to get rid of undefined
+ // behaviors.
+ filtered_access = (filtered_access & ~GL_MAP_UNSYNCHRONIZED_BIT);
+
+ if ((filtered_access & GL_MAP_INVALIDATE_BUFFER_BIT) != 0) {
+ // To be on the safe side, always map GL_MAP_INVALIDATE_BUFFER_BIT to
+ // GL_MAP_INVALIDATE_RANGE_BIT.
+ filtered_access = (filtered_access & ~GL_MAP_INVALIDATE_BUFFER_BIT);
+ filtered_access = (filtered_access | GL_MAP_INVALIDATE_RANGE_BIT);
+ }
+ if ((filtered_access & GL_MAP_INVALIDATE_RANGE_BIT) == 0) {
+ // If this user intends to use this buffer without invalidating the data, we
+ // need to also add GL_MAP_READ_BIT to preserve the original data when
+ // copying it to shared memory.
+ filtered_access = (filtered_access | GL_MAP_READ_BIT);
+ }
+
+ void* mapped_ptr = glMapBufferRange(target, offset, size, filtered_access);
+ if (FlushErrors() || mapped_ptr == nullptr) {
+ // Had an error while mapping, don't copy any data
+ *result = 0;
+ return error::kNoError;
+ }
+
+ if ((filtered_access & GL_MAP_INVALIDATE_RANGE_BIT) == 0) {
+ memcpy(ptr, mapped_ptr, size);
+ }
+
+ // Track the mapping of this buffer so that data can be synchronized when it
+ // is unmapped
+ DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
+ GLuint client_buffer = bound_buffers_.at(target);
+
+ MappedBuffer mapped_buffer_info;
+ mapped_buffer_info.size = size;
+ mapped_buffer_info.access = filtered_access;
+ mapped_buffer_info.map_ptr = static_cast<uint8_t*>(mapped_ptr);
+ mapped_buffer_info.data_shm_id = data_shm_id;
+ mapped_buffer_info.data_shm_offset = data_shm_offset;
+
+ DCHECK(resources_->mapped_buffer_map.find(client_buffer) ==
+ resources_->mapped_buffer_map.end());
+ resources_->mapped_buffer_map.insert(
+ std::make_pair(client_buffer, mapped_buffer_info));
+
+ *result = 1;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoUnmapBuffer(GLenum target) {
- NOTIMPLEMENTED();
+ auto bound_buffers_iter = bound_buffers_.find(target);
+ if (bound_buffers_iter == bound_buffers_.end() ||
+ bound_buffers_iter->second == 0) {
+ InsertError(GL_INVALID_OPERATION, "No buffer bound to this target.");
+ return error::kNoError;
+ }
+
+ GLuint client_buffer = bound_buffers_iter->second;
+ auto mapped_buffer_info_iter =
+ resources_->mapped_buffer_map.find(client_buffer);
+ if (mapped_buffer_info_iter == resources_->mapped_buffer_map.end()) {
+ InsertError(GL_INVALID_OPERATION, "Buffer is not mapped.");
+ return error::kNoError;
+ }
+
+ const MappedBuffer& map_info = mapped_buffer_info_iter->second;
+ if ((map_info.access & GL_MAP_WRITE_BIT) != 0 &&
+ (map_info.access & GL_MAP_FLUSH_EXPLICIT_BIT) == 0) {
+ uint8_t* mem = GetSharedMemoryAs<uint8_t*>(
+ map_info.data_shm_id, map_info.data_shm_offset, map_info.size);
+ if (!mem) {
+ return error::kOutOfBounds;
+ }
+
+ memcpy(map_info.map_ptr, mem, map_info.size);
+ }
+
+ glUnmapBuffer(target);
+
+ resources_->mapped_buffer_map.erase(mapped_buffer_info_iter);
+
return error::kNoError;
}
@@ -2590,20 +2840,46 @@ error::Error GLES2DecoderPassthroughImpl::DoResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
GLboolean alpha) {
- NOTIMPLEMENTED();
+ if (offscreen_) {
+ // TODO: crbug.com/665521
+ NOTIMPLEMENTED();
+ } else {
+ if (!surface_->Resize(gfx::Size(width, height), scale_factor, !!alpha)) {
+ LOG(ERROR) << "GLES2DecoderImpl: Context lost because resize failed.";
+ return error::kLostContext;
+ }
+ DCHECK(context_->IsCurrent(surface_.get()));
+ if (!context_->IsCurrent(surface_.get())) {
+ LOG(ERROR) << "GLES2DecoderImpl: Context lost because context no longer "
+ << "current after resize callback.";
+ return error::kLostContext;
+ }
+ }
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetRequestableExtensionsCHROMIUM(
const char** extensions) {
- *extensions = "";
- NOTIMPLEMENTED();
+ *extensions = reinterpret_cast<const char*>(
+ glGetString(GL_REQUESTABLE_EXTENSIONS_ANGLE));
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoRequestExtensionCHROMIUM(
const char* extension) {
- NOTIMPLEMENTED();
+ glRequestExtensionANGLE(extension);
+
+ // Make sure there are no pending GL errors before re-initializing feature
+ // info
+ FlushErrors();
+
+ // Make sure newly enabled extensions are exposed and usable.
+ context_->ReinitializeDynamicBindings();
+ if (!feature_info_->Initialize(feature_info_->context_type(),
+ feature_info_->disallowed_features())) {
+ return error::kLostContext;
+ }
+
return error::kNoError;
}
@@ -2621,8 +2897,16 @@ error::Error GLES2DecoderPassthroughImpl::DoGetProgramInfoCHROMIUM(
GLint num_uniforms = 0;
glGetProgramiv(service_program, GL_ACTIVE_UNIFORMS, &num_uniforms);
- data->resize(sizeof(ProgramInfoHeader) +
- ((num_attributes + num_uniforms) * sizeof(ProgramInput)),
+ const base::CheckedNumeric<size_t> buffer_header_size(
+ sizeof(ProgramInfoHeader));
+ const base::CheckedNumeric<size_t> buffer_block_size(
+ sizeof(ProgramInput));
+ const base::CheckedNumeric<size_t> attribute_block_size =
+ buffer_block_size * num_attributes;
+ const base::CheckedNumeric<size_t> uniform_block_size =
+ buffer_block_size * num_uniforms;
+ data->resize((buffer_header_size + attribute_block_size + uniform_block_size)
+ .ValueOrDie(),
0);
GLint link_status = 0;
@@ -2661,7 +2945,7 @@ error::Error GLES2DecoderPassthroughImpl::DoGetProgramInfoCHROMIUM(
InsertValueIntoBuffer(
data, input,
- sizeof(ProgramInfoHeader) + (attrib_index * sizeof(ProgramInput)));
+ (buffer_header_size + (buffer_block_size * attrib_index)).ValueOrDie());
}
GLint active_uniform_max_length = 0;
@@ -2701,9 +2985,10 @@ error::Error GLES2DecoderPassthroughImpl::DoGetProgramInfoCHROMIUM(
input.name_length = length;
AppendStringToBuffer(data, uniform_name_buf.data(), length);
- InsertValueIntoBuffer(data, input, sizeof(ProgramInfoHeader) +
- ((num_attributes + uniform_index) *
- sizeof(ProgramInput)));
+ InsertValueIntoBuffer(data, input,
+ (buffer_header_size + attribute_block_size +
+ (buffer_block_size * uniform_index))
+ .ValueOrDie());
}
return error::kNoError;
@@ -2712,7 +2997,101 @@ error::Error GLES2DecoderPassthroughImpl::DoGetProgramInfoCHROMIUM(
error::Error GLES2DecoderPassthroughImpl::DoGetUniformBlocksCHROMIUM(
GLuint program,
std::vector<uint8_t>* data) {
- NOTIMPLEMENTED();
+ GLuint service_program = 0;
+ if (!resources_->program_id_map.GetServiceID(program, &service_program)) {
+ return error::kNoError;
+ }
+
+ GLint num_uniform_blocks = 0;
+ glGetProgramiv(service_program, GL_ACTIVE_UNIFORM_BLOCKS,
+ &num_uniform_blocks);
+
+ // Resize the data to fit the headers and info objects so that strings can be
+ // appended.
+ const base::CheckedNumeric<size_t> buffer_header_size(
+ sizeof(UniformBlocksHeader));
+ const base::CheckedNumeric<size_t> buffer_block_size(
+ sizeof(UniformBlockInfo));
+ data->resize((buffer_header_size + (num_uniform_blocks * buffer_block_size))
+ .ValueOrDie(),
+ 0);
+
+ UniformBlocksHeader header;
+ header.num_uniform_blocks = num_uniform_blocks;
+ InsertValueIntoBuffer(data, header, 0);
+
+ GLint active_uniform_block_max_length = 0;
+ glGetProgramiv(service_program, GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH,
+ &active_uniform_block_max_length);
+
+ std::vector<char> uniform_block_name_buf(active_uniform_block_max_length, 0);
+ for (GLint uniform_block_index = 0; uniform_block_index < num_uniform_blocks;
+ uniform_block_index++) {
+ UniformBlockInfo block_info;
+
+ GLint uniform_block_binding = 0;
+ glGetActiveUniformBlockiv(service_program, uniform_block_index,
+ GL_UNIFORM_BLOCK_BINDING, &uniform_block_binding);
+ block_info.binding = uniform_block_binding;
+
+ GLint uniform_block_data_size = 0;
+ glGetActiveUniformBlockiv(service_program, uniform_block_index,
+ GL_UNIFORM_BLOCK_DATA_SIZE,
+ &uniform_block_data_size);
+ block_info.data_size = uniform_block_data_size;
+
+ GLint uniform_block_name_length = 0;
+ glGetActiveUniformBlockName(
+ service_program, uniform_block_index, active_uniform_block_max_length,
+ &uniform_block_name_length, uniform_block_name_buf.data());
+
+ DCHECK(uniform_block_name_length + 1 <= active_uniform_block_max_length);
+ block_info.name_offset = data->size();
+ block_info.name_length = uniform_block_name_length + 1;
+ AppendStringToBuffer(data, uniform_block_name_buf.data(),
+ uniform_block_name_length + 1);
+
+ GLint uniform_block_active_uniforms = 0;
+ glGetActiveUniformBlockiv(service_program, uniform_block_index,
+ GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS,
+ &uniform_block_active_uniforms);
+ block_info.active_uniforms = uniform_block_active_uniforms;
+
+ std::vector<GLint> uniform_block_indices_buf(uniform_block_active_uniforms,
+ 0);
+ glGetActiveUniformBlockiv(service_program, uniform_block_index,
+ GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES,
+ uniform_block_indices_buf.data());
+ block_info.active_uniform_offset = data->size();
+ for (GLint uniform_block_uniform_index_index = 0;
+ uniform_block_uniform_index_index < uniform_block_active_uniforms;
+ uniform_block_uniform_index_index++) {
+ AppendValueToBuffer(
+ data,
+ static_cast<uint32_t>(
+ uniform_block_indices_buf[uniform_block_uniform_index_index]));
+ }
+
+ GLint uniform_block_referenced_by_vertex_shader = 0;
+ glGetActiveUniformBlockiv(service_program, uniform_block_index,
+ GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER,
+ &uniform_block_referenced_by_vertex_shader);
+ block_info.referenced_by_vertex_shader =
+ uniform_block_referenced_by_vertex_shader;
+
+ GLint uniform_block_referenced_by_fragment_shader = 0;
+ glGetActiveUniformBlockiv(service_program, uniform_block_index,
+ GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER,
+ &uniform_block_referenced_by_fragment_shader);
+ block_info.referenced_by_fragment_shader =
+ uniform_block_referenced_by_fragment_shader;
+
+ InsertValueIntoBuffer(
+ data, block_info,
+ (buffer_header_size + (buffer_block_size * uniform_block_index))
+ .ValueOrDie());
+ }
+
return error::kNoError;
}
@@ -2720,14 +3099,119 @@ error::Error
GLES2DecoderPassthroughImpl::DoGetTransformFeedbackVaryingsCHROMIUM(
GLuint program,
std::vector<uint8_t>* data) {
- NOTIMPLEMENTED();
+ GLuint service_program = 0;
+ if (!resources_->program_id_map.GetServiceID(program, &service_program)) {
+ return error::kNoError;
+ }
+
+ GLint transform_feedback_buffer_mode = 0;
+ glGetProgramiv(service_program, GL_TRANSFORM_FEEDBACK_BUFFER_MODE,
+ &transform_feedback_buffer_mode);
+
+ GLint num_transform_feedback_varyings = 0;
+ glGetProgramiv(service_program, GL_TRANSFORM_FEEDBACK_VARYINGS,
+ &num_transform_feedback_varyings);
+
+ // Resize the data to fit the headers and info objects so that strings can be
+ // appended.
+ const base::CheckedNumeric<size_t> buffer_header_size(
+ sizeof(TransformFeedbackVaryingsHeader));
+ const base::CheckedNumeric<size_t> buffer_block_size(
+ sizeof(TransformFeedbackVaryingInfo));
+ data->resize((buffer_header_size +
+ (num_transform_feedback_varyings * buffer_block_size))
+ .ValueOrDie(),
+ 0);
+
+ TransformFeedbackVaryingsHeader header;
+ header.transform_feedback_buffer_mode = transform_feedback_buffer_mode;
+ header.num_transform_feedback_varyings = num_transform_feedback_varyings;
+ InsertValueIntoBuffer(data, header, 0);
+
+ GLint max_transform_feedback_varying_length = 0;
+ glGetProgramiv(service_program, GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH,
+ &max_transform_feedback_varying_length);
+
+ std::vector<char> transform_feedback_varying_name_buf(
+ max_transform_feedback_varying_length, 0);
+ for (GLint transform_feedback_varying_index = 0;
+ transform_feedback_varying_index < num_transform_feedback_varyings;
+ transform_feedback_varying_index++) {
+ GLsizei length = 0;
+ GLint size = 0;
+ GLenum type = GL_NONE;
+ glGetTransformFeedbackVarying(
+ service_program, transform_feedback_varying_index,
+ max_transform_feedback_varying_length, &length, &size, &type,
+ transform_feedback_varying_name_buf.data());
+
+ TransformFeedbackVaryingInfo varying_info;
+ varying_info.size = size;
+ varying_info.type = type;
+
+ DCHECK(length + 1 <= max_transform_feedback_varying_length);
+ varying_info.name_length = data->size();
+ varying_info.name_length = length + 1;
+ AppendStringToBuffer(data, transform_feedback_varying_name_buf.data(),
+ length + 1);
+
+ InsertValueIntoBuffer(
+ data, varying_info,
+ (buffer_header_size +
+ (buffer_block_size * transform_feedback_varying_index))
+ .ValueOrDie());
+ }
+
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetUniformsES3CHROMIUM(
GLuint program,
std::vector<uint8_t>* data) {
- NOTIMPLEMENTED();
+ GLuint service_program = 0;
+ if (!resources_->program_id_map.GetServiceID(program, &service_program)) {
+ return error::kNoError;
+ }
+
+ GLint num_uniforms = 0;
+ glGetProgramiv(service_program, GL_ACTIVE_UNIFORMS, &num_uniforms);
+
+ UniformsES3Header header;
+ header.num_uniforms = num_uniforms;
+ AppendValueToBuffer(data, header);
+
+ for (GLuint uniform_index = 0;
+ uniform_index < static_cast<GLuint>(num_uniforms); uniform_index++) {
+ UniformES3Info uniform_info;
+
+ GLint uniform_block_index = 0;
+ glGetActiveUniformsiv(service_program, 1, &uniform_index,
+ GL_UNIFORM_BLOCK_INDEX, &uniform_block_index);
+ uniform_info.block_index = uniform_block_index;
+
+ GLint uniform_offset = 0;
+ glGetActiveUniformsiv(service_program, 1, &uniform_index, GL_UNIFORM_OFFSET,
+ &uniform_offset);
+ uniform_info.offset = uniform_offset;
+
+ GLint uniform_array_stride = 0;
+ glGetActiveUniformsiv(service_program, 1, &uniform_index,
+ GL_UNIFORM_ARRAY_STRIDE, &uniform_array_stride);
+ uniform_info.array_stride = uniform_array_stride;
+
+ GLint uniform_matrix_stride = 0;
+ glGetActiveUniformsiv(service_program, 1, &uniform_index,
+ GL_UNIFORM_MATRIX_STRIDE, &uniform_matrix_stride);
+ uniform_info.matrix_stride = uniform_matrix_stride;
+
+ GLint uniform_is_row_major = 0;
+ glGetActiveUniformsiv(service_program, 1, &uniform_index,
+ GL_UNIFORM_IS_ROW_MAJOR, &uniform_is_row_major);
+ uniform_info.is_row_major = uniform_is_row_major;
+
+ AppendValueToBuffer(data, uniform_info);
+ }
+
return error::kNoError;
}
@@ -2782,10 +3266,10 @@ error::Error GLES2DecoderPassthroughImpl::DoPostSubBufferCHROMIUM(
}
error::Error GLES2DecoderPassthroughImpl::DoCopyTextureCHROMIUM(
- GLenum source_id,
+ GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint internalformat,
GLenum dest_type,
@@ -2797,17 +3281,18 @@ error::Error GLES2DecoderPassthroughImpl::DoCopyTextureCHROMIUM(
}
glCopyTextureCHROMIUM(GetTextureServiceID(source_id, resources_, false),
+ source_level, dest_target,
GetTextureServiceID(dest_id, resources_, false),
- internalformat, dest_type, unpack_flip_y,
+ dest_level, internalformat, dest_type, unpack_flip_y,
unpack_premultiply_alpha, unpack_unmultiply_alpha);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCopySubTextureCHROMIUM(
- GLenum source_id,
+ GLuint source_id,
GLint source_level,
GLenum dest_target,
- GLenum dest_id,
+ GLuint dest_id,
GLint dest_level,
GLint xoffset,
GLint yoffset,
@@ -2822,16 +3307,17 @@ error::Error GLES2DecoderPassthroughImpl::DoCopySubTextureCHROMIUM(
return error::kUnknownCommand;
}
- glCopySubTextureCHROMIUM(GetTextureServiceID(source_id, resources_, false),
- GetTextureServiceID(dest_id, resources_, false),
- xoffset, yoffset, x, y, width, height, unpack_flip_y,
- unpack_premultiply_alpha, unpack_unmultiply_alpha);
+ glCopySubTextureCHROMIUM(
+ GetTextureServiceID(source_id, resources_, false), source_level,
+ dest_target, GetTextureServiceID(dest_id, resources_, false), dest_level,
+ xoffset, yoffset, x, y, width, height, unpack_flip_y,
+ unpack_premultiply_alpha, unpack_unmultiply_alpha);
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoCompressedCopyTextureCHROMIUM(
- GLenum source_id,
- GLenum dest_id) {
+ GLuint source_id,
+ GLuint dest_id) {
if (!feature_info_->feature_flags().chromium_copy_compressed_texture) {
return error::kUnknownCommand;
}
@@ -3167,6 +3653,26 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleCALayerInUseQueryCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoScheduleDCLayerSharedStateCHROMIUM(
+ GLfloat opacity,
+ GLboolean is_clipped,
+ const GLfloat* clip_rect,
+ GLint z_order,
+ const GLfloat* transform) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::DoScheduleDCLayerCHROMIUM(
+ GLuint contents_texture_id,
+ const GLfloat* contents_rect,
+ GLuint background_color,
+ GLuint edge_aa_mask,
+ const GLfloat* bounds_rect) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoCommitOverlayPlanesCHROMIUM() {
NOTIMPLEMENTED();
return error::kNoError;
@@ -3477,5 +3983,11 @@ error::Error GLES2DecoderPassthroughImpl::DoSetDrawRectangleCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoSetEnableDCLayersCHROMIUM(
+ GLboolean enable) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
index f6e26a305c1..50e2cbfde3a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
@@ -226,7 +226,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetActiveUniformBlockiv(
unsigned int buffer_size = 0;
typedef cmds::GetActiveUniformBlockiv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -303,15 +303,13 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetActiveUniformsiv(
return error::kInvalidArguments;
}
- GLsizei bufsize = uniformCount;
- GLsizei length = 0;
- error::Error error = DoGetActiveUniformsiv(program, uniformCount, indices,
- pname, bufsize, &length, params);
+ error::Error error =
+ DoGetActiveUniformsiv(program, uniformCount, indices, pname, params);
if (error != error::kNoError) {
return error;
}
- result->SetNumResults(length);
+ result->SetNumResults(uniformCount);
return error::kNoError;
}
@@ -383,20 +381,14 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetBufferSubDataAsyncCHROMIUM(
GLsizeiptr size = static_cast<GLsizeiptr>(c.size);
uint32_t data_shm_id = static_cast<uint32_t>(c.data_shm_id);
- int8_t* mem =
- GetSharedMemoryAs<int8_t*>(data_shm_id, c.data_shm_offset, size);
+ uint8_t* mem =
+ GetSharedMemoryAs<uint8_t*>(data_shm_id, c.data_shm_offset, size);
if (!mem) {
return error::kOutOfBounds;
}
- void* ptr = nullptr;
error::Error error =
- DoMapBufferRange(target, offset, size, GL_MAP_READ_BIT, &ptr);
- if (error != error::kNoError) {
- return error;
- }
- memcpy(mem, ptr, size);
- error = DoUnmapBuffer(target);
+ DoGetBufferSubDataAsyncCHROMIUM(target, offset, size, mem);
if (error != error::kNoError) {
return error;
}
@@ -446,7 +438,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetInternalformativ(
unsigned int buffer_size = 0;
typedef cmds::GetInternalformativ::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -655,7 +647,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetUniformfv(
unsigned int buffer_size = 0;
typedef cmds::GetUniformfv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLfloat* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -684,7 +676,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetUniformiv(
unsigned int buffer_size = 0;
typedef cmds::GetUniformiv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -713,7 +705,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetUniformuiv(
unsigned int buffer_size = 0;
typedef cmds::GetUniformuiv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLuint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -760,16 +752,12 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetUniformIndices(
if (result->size != 0) {
return error::kInvalidArguments;
}
- GLsizei length = 0;
error::Error error =
- DoGetUniformIndices(program, count, &names[0], count, &length, indices);
+ DoGetUniformIndices(program, count, &names[0], count, indices);
if (error != error::kNoError) {
return error;
}
- if (length != count) {
- return error::kOutOfBounds;
- }
- result->SetNumResults(length);
+ result->SetNumResults(count);
return error::kNoError;
}
@@ -814,7 +802,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetVertexAttribPointerv(
unsigned int buffer_size = 0;
typedef cmds::GetVertexAttribPointerv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.pointer_shm_id, c.pointer_shm_offset, &buffer_size);
+ c.pointer_shm_id, c.pointer_shm_offset, sizeof(Result), &buffer_size);
GLuint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -858,22 +846,29 @@ error::Error GLES2DecoderPassthroughImpl::HandleReadPixels(
GLsizei height = static_cast<GLsizei>(c.height);
GLenum format = static_cast<GLenum>(c.format);
GLenum type = static_cast<GLenum>(c.type);
+ uint32_t pixels_shm_id = c.pixels_shm_id;
+ uint32_t pixels_shm_offset = c.pixels_shm_offset;
uint8_t* pixels = nullptr;
unsigned int buffer_size = 0;
if (c.pixels_shm_id != 0) {
pixels = GetSharedMemoryAndSizeAs<uint8_t*>(
- c.pixels_shm_id, c.pixels_shm_offset, &buffer_size);
+ pixels_shm_id, pixels_shm_offset, 0, &buffer_size);
if (!pixels) {
return error::kOutOfBounds;
}
+ } else {
+ pixels =
+ reinterpret_cast<uint8_t*>(static_cast<intptr_t>(pixels_shm_offset));
}
GLsizei bufsize = buffer_size;
GLsizei length = 0;
+ GLsizei columns = 0;
+ GLsizei rows = 0;
int32_t success = 0;
error::Error error = DoReadPixels(x, y, width, height, format, type, bufsize,
- &length, pixels, &success);
+ &length, &columns, &rows, pixels, &success);
if (error != error::kNoError) {
return error;
}
@@ -896,8 +891,8 @@ error::Error GLES2DecoderPassthroughImpl::HandleReadPixels(
if (result) {
result->success = success;
- result->row_length = static_cast<uint32_t>(width);
- result->num_rows = static_cast<uint32_t>(height);
+ result->row_length = static_cast<uint32_t>(columns);
+ result->num_rows = static_cast<uint32_t>(rows);
}
return error::kNoError;
@@ -950,22 +945,19 @@ error::Error GLES2DecoderPassthroughImpl::HandleTexImage2D(
unsigned int buffer_size = 0;
const void* pixels = nullptr;
- if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
+ if (pixels_shm_id != 0) {
pixels = GetSharedMemoryAndSizeAs<uint8_t*>(
- pixels_shm_id, pixels_shm_offset, &buffer_size);
+ pixels_shm_id, pixels_shm_offset, 0, &buffer_size);
if (!pixels) {
return error::kOutOfBounds;
}
+ } else {
+ pixels =
+ reinterpret_cast<const void*>(static_cast<intptr_t>(pixels_shm_offset));
}
- error::Error error =
- DoTexImage2D(target, level, internal_format, width, height, border,
- format, type, buffer_size, pixels);
- if (error != error::kNoError) {
- return error;
- }
-
- return error::kNoError;
+ return DoTexImage2D(target, level, internal_format, width, height, border,
+ format, type, buffer_size, pixels);
}
error::Error GLES2DecoderPassthroughImpl::HandleTexImage3D(
@@ -988,22 +980,19 @@ error::Error GLES2DecoderPassthroughImpl::HandleTexImage3D(
unsigned int buffer_size = 0;
const void* pixels = nullptr;
- if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
+ if (pixels_shm_id != 0) {
pixels = GetSharedMemoryAndSizeAs<uint8_t*>(
- pixels_shm_id, pixels_shm_offset, &buffer_size);
+ pixels_shm_id, pixels_shm_offset, 0, &buffer_size);
if (!pixels) {
return error::kOutOfBounds;
}
+ } else {
+ pixels =
+ reinterpret_cast<const void*>(static_cast<intptr_t>(pixels_shm_offset));
}
- error::Error error =
- DoTexImage3D(target, level, internal_format, width, height, depth, border,
- format, type, buffer_size, pixels);
- if (error != error::kNoError) {
- return error;
- }
-
- return error::kNoError;
+ return DoTexImage3D(target, level, internal_format, width, height, depth,
+ border, format, type, buffer_size, pixels);
}
error::Error GLES2DecoderPassthroughImpl::HandleTexSubImage2D(
@@ -1025,22 +1014,19 @@ error::Error GLES2DecoderPassthroughImpl::HandleTexSubImage2D(
unsigned int buffer_size = 0;
const void* pixels = nullptr;
- if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
+ if (pixels_shm_id != 0) {
pixels = GetSharedMemoryAndSizeAs<uint8_t*>(
- pixels_shm_id, pixels_shm_offset, &buffer_size);
+ pixels_shm_id, pixels_shm_offset, 0, &buffer_size);
if (!pixels) {
return error::kOutOfBounds;
}
+ } else {
+ pixels =
+ reinterpret_cast<const void*>(static_cast<intptr_t>(pixels_shm_offset));
}
- error::Error error =
- DoTexSubImage2D(target, level, xoffset, yoffset, width, height, format,
- type, buffer_size, pixels);
- if (error != error::kNoError) {
- return error;
- }
-
- return error::kNoError;
+ return DoTexSubImage2D(target, level, xoffset, yoffset, width, height, format,
+ type, buffer_size, pixels);
}
error::Error GLES2DecoderPassthroughImpl::HandleTexSubImage3D(
@@ -1064,22 +1050,19 @@ error::Error GLES2DecoderPassthroughImpl::HandleTexSubImage3D(
unsigned int buffer_size = 0;
const void* pixels = nullptr;
- if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
+ if (pixels_shm_id != 0) {
pixels = GetSharedMemoryAndSizeAs<uint8_t*>(
- pixels_shm_id, pixels_shm_offset, &buffer_size);
+ pixels_shm_id, pixels_shm_offset, 0, &buffer_size);
if (!pixels) {
return error::kOutOfBounds;
}
+ } else {
+ pixels =
+ reinterpret_cast<const void*>(static_cast<intptr_t>(pixels_shm_offset));
}
- error::Error error =
- DoTexSubImage3D(target, level, xoffset, yoffset, zoffset, width, height,
- depth, format, type, buffer_size, pixels);
- if (error != error::kNoError) {
- return error;
- }
-
- return error::kNoError;
+ return DoTexSubImage3D(target, level, xoffset, yoffset, zoffset, width,
+ height, depth, format, type, buffer_size, pixels);
}
error::Error GLES2DecoderPassthroughImpl::HandleUniformBlockBinding(
@@ -1332,13 +1315,14 @@ error::Error GLES2DecoderPassthroughImpl::HandleMapBufferRange(
return error::kOutOfBounds;
}
- void* ptr = nullptr;
- error::Error error = DoMapBufferRange(target, offset, size, access, &ptr);
+ error::Error error =
+ DoMapBufferRange(target, offset, size, access, mem, c.data_shm_id,
+ c.data_shm_offset, result);
if (error != error::kNoError) {
+ DCHECK(*result == 0);
return error;
}
- *result = 1;
return error::kNoError;
}
@@ -1795,6 +1779,57 @@ error::Error GLES2DecoderPassthroughImpl::HandleScheduleCALayerCHROMIUM(
return error::kNoError;
}
+error::Error
+GLES2DecoderPassthroughImpl::HandleScheduleDCLayerSharedStateCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM& c =
+ *static_cast<
+ const volatile gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM*>(
+ cmd_data);
+ const GLfloat* mem = GetSharedMemoryAs<const GLfloat*>(c.shm_id, c.shm_offset,
+ 20 * sizeof(GLfloat));
+ if (!mem) {
+ return error::kOutOfBounds;
+ }
+ GLfloat opacity = static_cast<GLfloat>(c.opacity);
+ GLboolean is_clipped = static_cast<GLboolean>(c.is_clipped);
+ const GLfloat* clip_rect = mem + 0;
+ GLint z_order = static_cast<GLint>(c.z_order);
+ const GLfloat* transform = mem + 4;
+ error::Error error = DoScheduleDCLayerSharedStateCHROMIUM(
+ opacity, is_clipped, clip_rect, z_order, transform);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleScheduleDCLayerCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::ScheduleDCLayerCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::ScheduleDCLayerCHROMIUM*>(
+ cmd_data);
+ const GLfloat* mem = GetSharedMemoryAs<const GLfloat*>(c.shm_id, c.shm_offset,
+ 8 * sizeof(GLfloat));
+ if (!mem) {
+ return error::kOutOfBounds;
+ }
+ GLuint contents_texture_id = static_cast<GLint>(c.contents_texture_id);
+ const GLfloat* contents_rect = mem;
+ GLuint background_color = static_cast<GLuint>(c.background_color);
+ GLuint edge_aa_mask = static_cast<GLuint>(c.edge_aa_mask);
+ const GLfloat* bounds_rect = mem + 4;
+ error::Error error =
+ DoScheduleDCLayerCHROMIUM(contents_texture_id, contents_rect,
+ background_color, edge_aa_mask, bounds_rect);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::HandleGenPathsCHROMIUM(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -1852,7 +1887,7 @@ error::Error GLES2DecoderPassthroughImpl::HandlePathCommandsCHROMIUM(
if (coords_shm_id != 0 || coords_shm_offset != 0) {
unsigned int memory_size = 0;
coords = GetSharedMemoryAndSizeAs<const GLvoid*>(
- coords_shm_id, coords_shm_offset, &memory_size);
+ coords_shm_id, coords_shm_offset, 0, &memory_size);
coords_bufsize = static_cast<GLsizei>(memory_size);
}
@@ -2023,7 +2058,7 @@ GLES2DecoderPassthroughImpl::HandleStencilFillPathInstancedCHROMIUM(
if (paths_shm_id != 0 || paths_shm_offset != 0) {
unsigned int memory_size = 0;
paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, &memory_size);
+ paths_shm_id, paths_shm_offset, 0, &memory_size);
paths_bufsize = static_cast<GLsizei>(memory_size);
}
@@ -2040,7 +2075,8 @@ GLES2DecoderPassthroughImpl::HandleStencilFillPathInstancedCHROMIUM(
if (c.transformValues_shm_id != 0 || c.transformValues_shm_offset != 0) {
unsigned int memory_size = 0;
transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- c.transformValues_shm_id, c.transformValues_shm_offset, &memory_size);
+ c.transformValues_shm_id, c.transformValues_shm_offset, 0,
+ &memory_size);
transform_values_bufsize = static_cast<GLsizei>(memory_size);
}
if (!transform_values) {
@@ -2075,7 +2111,7 @@ GLES2DecoderPassthroughImpl::HandleStencilStrokePathInstancedCHROMIUM(
if (paths_shm_id != 0 || paths_shm_offset != 0) {
unsigned int memory_size = 0;
paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, &memory_size);
+ paths_shm_id, paths_shm_offset, 0, &memory_size);
paths_bufsize = static_cast<GLsizei>(memory_size);
}
@@ -2092,7 +2128,8 @@ GLES2DecoderPassthroughImpl::HandleStencilStrokePathInstancedCHROMIUM(
if (c.transformValues_shm_id != 0 || c.transformValues_shm_offset != 0) {
unsigned int memory_size = 0;
transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- c.transformValues_shm_id, c.transformValues_shm_offset, &memory_size);
+ c.transformValues_shm_id, c.transformValues_shm_offset, 0,
+ &memory_size);
transform_values_bufsize = static_cast<GLsizei>(memory_size);
}
if (!transform_values) {
@@ -2125,7 +2162,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleCoverFillPathInstancedCHROMIUM(
if (paths_shm_id != 0 || paths_shm_offset != 0) {
unsigned int memory_size = 0;
paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, &memory_size);
+ paths_shm_id, paths_shm_offset, 0, &memory_size);
paths_bufsize = static_cast<GLsizei>(memory_size);
}
@@ -2141,7 +2178,8 @@ error::Error GLES2DecoderPassthroughImpl::HandleCoverFillPathInstancedCHROMIUM(
if (c.transformValues_shm_id != 0 || c.transformValues_shm_offset != 0) {
unsigned int memory_size = 0;
transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- c.transformValues_shm_id, c.transformValues_shm_offset, &memory_size);
+ c.transformValues_shm_id, c.transformValues_shm_offset, 0,
+ &memory_size);
transform_values_bufsize = static_cast<GLsizei>(memory_size);
}
if (!transform_values) {
@@ -2176,7 +2214,7 @@ GLES2DecoderPassthroughImpl::HandleCoverStrokePathInstancedCHROMIUM(
if (paths_shm_id != 0 || paths_shm_offset != 0) {
unsigned int memory_size = 0;
paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, &memory_size);
+ paths_shm_id, paths_shm_offset, 0, &memory_size);
paths_bufsize = static_cast<GLsizei>(memory_size);
}
@@ -2192,7 +2230,8 @@ GLES2DecoderPassthroughImpl::HandleCoverStrokePathInstancedCHROMIUM(
if (c.transformValues_shm_id != 0 || c.transformValues_shm_offset != 0) {
unsigned int memory_size = 0;
transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- c.transformValues_shm_id, c.transformValues_shm_offset, &memory_size);
+ c.transformValues_shm_id, c.transformValues_shm_offset, 0,
+ &memory_size);
transform_values_bufsize = static_cast<GLsizei>(memory_size);
}
if (!transform_values) {
@@ -2226,7 +2265,7 @@ GLES2DecoderPassthroughImpl::HandleStencilThenCoverFillPathInstancedCHROMIUM(
if (paths_shm_id != 0 || paths_shm_offset != 0) {
unsigned int memory_size = 0;
paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, &memory_size);
+ paths_shm_id, paths_shm_offset, 0, &memory_size);
paths_bufsize = static_cast<GLsizei>(memory_size);
}
@@ -2244,7 +2283,8 @@ GLES2DecoderPassthroughImpl::HandleStencilThenCoverFillPathInstancedCHROMIUM(
if (c.transformValues_shm_id != 0 || c.transformValues_shm_offset != 0) {
unsigned int memory_size = 0;
transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- c.transformValues_shm_id, c.transformValues_shm_offset, &memory_size);
+ c.transformValues_shm_id, c.transformValues_shm_offset, 0,
+ &memory_size);
transform_values_bufsize = static_cast<GLsizei>(memory_size);
}
if (!transform_values) {
@@ -2279,7 +2319,7 @@ GLES2DecoderPassthroughImpl::HandleStencilThenCoverStrokePathInstancedCHROMIUM(
if (paths_shm_id != 0 || paths_shm_offset != 0) {
unsigned int memory_size = 0;
paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, &memory_size);
+ paths_shm_id, paths_shm_offset, 0, &memory_size);
paths_bufsize = static_cast<GLsizei>(memory_size);
}
@@ -2297,7 +2337,8 @@ GLES2DecoderPassthroughImpl::HandleStencilThenCoverStrokePathInstancedCHROMIUM(
if (c.transformValues_shm_id != 0 || c.transformValues_shm_offset != 0) {
unsigned int memory_size = 0;
transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- c.transformValues_shm_id, c.transformValues_shm_offset, &memory_size);
+ c.transformValues_shm_id, c.transformValues_shm_offset, 0,
+ &memory_size);
transform_values_bufsize = static_cast<GLsizei>(memory_size);
}
if (!transform_values) {
@@ -2358,7 +2399,7 @@ GLES2DecoderPassthroughImpl::HandleProgramPathFragmentInputGenCHROMIUM(
if (c.coeffs_shm_id != 0 || c.coeffs_shm_offset != 0) {
unsigned int memory_size = 0;
coeffs = GetSharedMemoryAndSizeAs<const GLfloat*>(
- c.coeffs_shm_id, c.coeffs_shm_offset, &memory_size);
+ c.coeffs_shm_id, c.coeffs_shm_offset, 0, &memory_size);
coeffs_bufsize = static_cast<GLsizei>(memory_size);
}
if (!coeffs) {
@@ -2471,8 +2512,8 @@ error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexImage2DBucket(
uint32_t image_size = bucket->size();
const void* data = bucket->GetData(0, image_size);
DCHECK(data || !image_size);
- return DoCompressedTexImage2D(
- target, level, internal_format, width, height, border, image_size, data);
+ return DoCompressedTexImage2D(target, level, internal_format, width, height,
+ border, image_size, image_size, data);
}
error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexImage2D(
@@ -2486,11 +2527,24 @@ error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexImage2D(
GLsizei height = static_cast<GLsizei>(c.height);
GLint border = static_cast<GLint>(c.border);
GLsizei image_size = static_cast<GLsizei>(c.imageSize);
- // TODO(geofflang): Handle PIXEL_UNPACK_BUFFER case.
- const void* data = GetSharedMemoryAs<const void*>(
- c.data_shm_id, c.data_shm_offset, image_size);
- return DoCompressedTexImage2D(
- target, level, internal_format, width, height, border, image_size, data);
+ uint32_t data_shm_id = c.data_shm_id;
+ uint32_t data_shm_offset = c.data_shm_offset;
+
+ unsigned int data_size = 0;
+ const void* data = nullptr;
+ if (data_shm_id != 0) {
+ data = GetSharedMemoryAndSizeAs<const void*>(data_shm_id, data_shm_offset,
+ image_size, &data_size);
+ if (data == nullptr) {
+ return error::kOutOfBounds;
+ }
+ } else {
+ data =
+ reinterpret_cast<const void*>(static_cast<intptr_t>(data_shm_offset));
+ }
+
+ return DoCompressedTexImage2D(target, level, internal_format, width, height,
+ border, image_size, data_size, data);
}
error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexSubImage2DBucket(
@@ -2512,8 +2566,9 @@ error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexSubImage2DBucket(
uint32_t image_size = bucket->size();
const void* data = bucket->GetData(0, image_size);
DCHECK(data || !image_size);
- return DoCompressedTexSubImage2D(
- target, level, xoffset, yoffset, width, height, format, image_size, data);
+ return DoCompressedTexSubImage2D(target, level, xoffset, yoffset, width,
+ height, format, image_size, image_size,
+ data);
}
error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexSubImage2D(
@@ -2529,11 +2584,24 @@ error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexSubImage2D(
GLsizei height = static_cast<GLsizei>(c.height);
GLenum format = static_cast<GLenum>(c.format);
GLsizei image_size = static_cast<GLsizei>(c.imageSize);
- // TODO(geofflang): Handle PIXEL_UNPACK_BUFFER case.
- const void* data = GetSharedMemoryAs<const void*>(
- c.data_shm_id, c.data_shm_offset, image_size);
- return DoCompressedTexSubImage2D(
- target, level, xoffset, yoffset, width, height, format, image_size, data);
+ uint32_t data_shm_id = c.data_shm_id;
+ uint32_t data_shm_offset = c.data_shm_offset;
+
+ unsigned int data_size = 0;
+ const void* data = nullptr;
+ if (data_shm_id != 0) {
+ data = GetSharedMemoryAndSizeAs<const void*>(data_shm_id, data_shm_offset,
+ image_size, &data_size);
+ if (data == nullptr) {
+ return error::kOutOfBounds;
+ }
+ } else {
+ data =
+ reinterpret_cast<const void*>(static_cast<intptr_t>(data_shm_offset));
+ }
+
+ return DoCompressedTexSubImage2D(target, level, xoffset, yoffset, width,
+ height, format, image_size, data_size, data);
}
error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexImage3DBucket(
@@ -2556,7 +2624,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexImage3DBucket(
const void* data = bucket->GetData(0, image_size);
DCHECK(data || !image_size);
return DoCompressedTexImage3D(target, level, internal_format, width, height,
- depth, border, image_size, data);
+ depth, border, image_size, image_size, data);
}
error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexImage3D(
@@ -2571,11 +2639,24 @@ error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexImage3D(
GLsizei depth = static_cast<GLsizei>(c.depth);
GLint border = static_cast<GLint>(c.border);
GLsizei image_size = static_cast<GLsizei>(c.imageSize);
- // TODO(geofflang): Handle PIXEL_UNPACK_BUFFER case.
- const void* data = GetSharedMemoryAs<const void*>(
- c.data_shm_id, c.data_shm_offset, image_size);
+ uint32_t data_shm_id = c.data_shm_id;
+ uint32_t data_shm_offset = c.data_shm_offset;
+
+ unsigned int data_size = 0;
+ const void* data = nullptr;
+ if (data_shm_id != 0) {
+ data = GetSharedMemoryAndSizeAs<const void*>(data_shm_id, data_shm_offset,
+ image_size, &data_size);
+ if (data == nullptr) {
+ return error::kOutOfBounds;
+ }
+ } else {
+ data =
+ reinterpret_cast<const void*>(static_cast<intptr_t>(data_shm_offset));
+ }
+
return DoCompressedTexImage3D(target, level, internal_format, width, height,
- depth, border, image_size, data);
+ depth, border, image_size, data_size, data);
}
error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexSubImage3DBucket(
@@ -2599,9 +2680,9 @@ error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexSubImage3DBucket(
uint32_t image_size = bucket->size();
const void* data = bucket->GetData(0, image_size);
DCHECK(data || !image_size);
- return DoCompressedTexSubImage3D(
- target, level, xoffset, yoffset, zoffset, width, height, depth,
- format, image_size, data);
+ return DoCompressedTexSubImage3D(target, level, xoffset, yoffset, zoffset,
+ width, height, depth, format, image_size,
+ image_size, data);
}
error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexSubImage3D(
@@ -2619,12 +2700,25 @@ error::Error GLES2DecoderPassthroughImpl::HandleCompressedTexSubImage3D(
GLsizei depth = static_cast<GLsizei>(c.depth);
GLenum format = static_cast<GLenum>(c.format);
GLsizei image_size = static_cast<GLsizei>(c.imageSize);
- // TODO(geofflang): Handle PIXEL_UNPACK_BUFFER case.
- const void* data = GetSharedMemoryAs<const void*>(
- c.data_shm_id, c.data_shm_offset, image_size);
- return DoCompressedTexSubImage3D(
- target, level, xoffset, yoffset, zoffset, width, height, depth,
- format, image_size, data);
+ uint32_t data_shm_id = c.data_shm_id;
+ uint32_t data_shm_offset = c.data_shm_offset;
+
+ unsigned int data_size = 0;
+ const void* data = nullptr;
+ if (data_shm_id != 0) {
+ data = GetSharedMemoryAndSizeAs<const void*>(data_shm_id, data_shm_offset,
+ image_size, &data_size);
+ if (data == nullptr) {
+ return error::kOutOfBounds;
+ }
+ } else {
+ data =
+ reinterpret_cast<const void*>(static_cast<intptr_t>(data_shm_offset));
+ }
+
+ return DoCompressedTexSubImage3D(target, level, xoffset, yoffset, zoffset,
+ width, height, depth, format, image_size,
+ data_size, data);
}
} // namespace gles2
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
index 376392062da..9c6f95f3e27 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
@@ -1079,7 +1079,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetBooleanv(
unsigned int buffer_size = 0;
typedef cmds::GetBooleanv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLboolean* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1109,7 +1109,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetBufferParameteri64v(
unsigned int buffer_size = 0;
typedef cmds::GetBufferParameteri64v::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint64* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1139,7 +1139,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetBufferParameteriv(
unsigned int buffer_size = 0;
typedef cmds::GetBufferParameteriv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1186,7 +1186,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetFloatv(
unsigned int buffer_size = 0;
typedef cmds::GetFloatv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLfloat* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1219,7 +1219,7 @@ GLES2DecoderPassthroughImpl::HandleGetFramebufferAttachmentParameteriv(
unsigned int buffer_size = 0;
typedef cmds::GetFramebufferAttachmentParameteriv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1248,7 +1248,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetInteger64v(
unsigned int buffer_size = 0;
typedef cmds::GetInteger64v::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint64* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1277,7 +1277,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetIntegeri_v(
unsigned int buffer_size = 0;
typedef cmds::GetIntegeri_v::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.data_shm_id, c.data_shm_offset, &buffer_size);
+ c.data_shm_id, c.data_shm_offset, sizeof(Result), &buffer_size);
GLint* data = result ? result->GetData() : NULL;
if (data == NULL) {
return error::kOutOfBounds;
@@ -1306,7 +1306,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetInteger64i_v(
unsigned int buffer_size = 0;
typedef cmds::GetInteger64i_v::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.data_shm_id, c.data_shm_offset, &buffer_size);
+ c.data_shm_id, c.data_shm_offset, sizeof(Result), &buffer_size);
GLint64* data = result ? result->GetData() : NULL;
if (data == NULL) {
return error::kOutOfBounds;
@@ -1334,7 +1334,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetIntegerv(
unsigned int buffer_size = 0;
typedef cmds::GetIntegerv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1363,7 +1363,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetProgramiv(
unsigned int buffer_size = 0;
typedef cmds::GetProgramiv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1393,7 +1393,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetRenderbufferParameteriv(
unsigned int buffer_size = 0;
typedef cmds::GetRenderbufferParameteriv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1424,7 +1424,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetSamplerParameterfv(
unsigned int buffer_size = 0;
typedef cmds::GetSamplerParameterfv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLfloat* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1455,7 +1455,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetSamplerParameteriv(
unsigned int buffer_size = 0;
typedef cmds::GetSamplerParameteriv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1485,7 +1485,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetShaderiv(
unsigned int buffer_size = 0;
typedef cmds::GetShaderiv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1514,7 +1514,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetSynciv(
unsigned int buffer_size = 0;
typedef cmds::GetSynciv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.values_shm_id, c.values_shm_offset, &buffer_size);
+ c.values_shm_id, c.values_shm_offset, sizeof(Result), &buffer_size);
GLint* values = result ? result->GetData() : NULL;
if (values == NULL) {
return error::kOutOfBounds;
@@ -1543,7 +1543,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetTexParameterfv(
unsigned int buffer_size = 0;
typedef cmds::GetTexParameterfv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLfloat* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1573,7 +1573,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetTexParameteriv(
unsigned int buffer_size = 0;
typedef cmds::GetTexParameteriv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1603,7 +1603,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetVertexAttribfv(
unsigned int buffer_size = 0;
typedef cmds::GetVertexAttribfv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLfloat* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1633,7 +1633,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetVertexAttribiv(
unsigned int buffer_size = 0;
typedef cmds::GetVertexAttribiv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1663,7 +1663,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetVertexAttribIiv(
unsigned int buffer_size = 0;
typedef cmds::GetVertexAttribIiv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -1693,7 +1693,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetVertexAttribIuiv(
unsigned int buffer_size = 0;
typedef cmds::GetVertexAttribIuiv::Result Result;
Result* result = GetSharedMemoryAndSizeAs<Result*>(
- c.params_shm_id, c.params_shm_offset, &buffer_size);
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
GLuint* params = result ? result->GetData() : NULL;
if (params == NULL) {
return error::kOutOfBounds;
@@ -3832,10 +3832,10 @@ error::Error GLES2DecoderPassthroughImpl::HandleCopyTextureCHROMIUM(
const volatile void* cmd_data) {
const volatile gles2::cmds::CopyTextureCHROMIUM& c =
*static_cast<const volatile gles2::cmds::CopyTextureCHROMIUM*>(cmd_data);
- GLenum source_id = static_cast<GLenum>(c.source_id);
+ GLuint source_id = static_cast<GLuint>(c.source_id);
GLint source_level = static_cast<GLint>(c.source_level);
GLenum dest_target = static_cast<GLenum>(c.dest_target);
- GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
GLint dest_level = static_cast<GLint>(c.dest_level);
GLint internalformat = static_cast<GLint>(c.internalformat);
GLenum dest_type = static_cast<GLenum>(c.dest_type);
@@ -3860,10 +3860,10 @@ error::Error GLES2DecoderPassthroughImpl::HandleCopySubTextureCHROMIUM(
const volatile gles2::cmds::CopySubTextureCHROMIUM& c =
*static_cast<const volatile gles2::cmds::CopySubTextureCHROMIUM*>(
cmd_data);
- GLenum source_id = static_cast<GLenum>(c.source_id);
+ GLuint source_id = static_cast<GLuint>(c.source_id);
GLint source_level = static_cast<GLint>(c.source_level);
GLenum dest_target = static_cast<GLenum>(c.dest_target);
- GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
GLint dest_level = static_cast<GLint>(c.dest_level);
GLint xoffset = static_cast<GLint>(c.xoffset);
GLint yoffset = static_cast<GLint>(c.yoffset);
@@ -3892,8 +3892,8 @@ error::Error GLES2DecoderPassthroughImpl::HandleCompressedCopyTextureCHROMIUM(
const volatile gles2::cmds::CompressedCopyTextureCHROMIUM& c =
*static_cast<const volatile gles2::cmds::CompressedCopyTextureCHROMIUM*>(
cmd_data);
- GLenum source_id = static_cast<GLenum>(c.source_id);
- GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLuint source_id = static_cast<GLuint>(c.source_id);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
error::Error error = DoCompressedCopyTextureCHROMIUM(source_id, dest_id);
if (error != error::kNoError) {
return error;
@@ -4385,5 +4385,19 @@ error::Error GLES2DecoderPassthroughImpl::HandleSetDrawRectangleCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleSetEnableDCLayersCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::SetEnableDCLayersCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::SetEnableDCLayersCHROMIUM*>(
+ cmd_data);
+ GLboolean enabled = static_cast<GLboolean>(c.enabled);
+ error::Error error = DoSetEnableDCLayersCHROMIUM(enabled);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h
index 264c6678bc2..671309d4951 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h
@@ -33,4 +33,13 @@ TEST_P(GLES2DecoderTest4, SetDrawRectangleCHROMIUMValidArgs) {
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
+
+TEST_P(GLES2DecoderTest4, SetEnableDCLayersCHROMIUMValidArgs) {
+ EXPECT_CALL(*gl_, SetEnableDCLayersCHROMIUM(true));
+ SpecializedSetup<cmds::SetEnableDCLayersCHROMIUM, 0>(true);
+ cmds::SetEnableDCLayersCHROMIUM cmd;
+ cmd.Init(true);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_4_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
index 8cdeb34e926..2187f4961f8 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
@@ -23,7 +23,6 @@
#include "gpu/command_buffer/service/program_manager.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "gpu/command_buffer/service/vertex_attrib_manager.h"
-#include "gpu/test_message_loop_type.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_mock.h"
#include "ui/gl/init/gl_factory.h"
@@ -124,8 +123,7 @@ GLES2DecoderTestBase::GLES2DecoderTestBase()
cached_depth_mask_(true),
cached_stencil_front_mask_(static_cast<GLuint>(-1)),
cached_stencil_back_mask_(static_cast<GLuint>(-1)),
- shader_language_version_(100),
- message_loop_(test::GetMessageLoopTypeForGpu()) {
+ shader_language_version_(100) {
memset(immediate_buffer_, 0xEE, sizeof(immediate_buffer_));
}
@@ -207,6 +205,7 @@ void GLES2DecoderTestBase::InitDecoderWithCommandLine(
surface_ = new gl::GLSurfaceStub;
surface_->SetSize(gfx::Size(kBackBufferWidth, kBackBufferHeight));
+ surface_->set_supports_draw_rectangle(surface_supports_draw_rectangle_);
// Context needs to be created before initializing ContextGroup, which will
// in turn initialize FeatureInfo, which needs a context to determine
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
index d7f71fa8f8e..45789ca0e45 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
@@ -637,6 +637,8 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool> {
std::unique_ptr<GLES2Decoder> decoder_;
MemoryTracker* memory_tracker_;
+ bool surface_supports_draw_rectangle_ = false;
+
GLuint client_buffer_id_;
GLuint client_framebuffer_id_;
GLuint client_program_id_;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
index fbad893fcf5..0fc0769ee50 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
@@ -4038,6 +4038,106 @@ TEST_P(GLES3DecoderTest, BlitFramebufferMissingDepthOrStencil) {
}
}
+class GLES2DecoderTestWithDrawRectangle : public GLES2DecoderTest {
+ void SetUp() override {
+ surface_supports_draw_rectangle_ = true;
+ GLES2DecoderTest::SetUp();
+ }
+};
+
+// Test that the draw offset is correctly honored when SetDrawRectangle is
+// supported.
+TEST_P(GLES2DecoderTestWithDrawRectangle, FramebufferDrawRectangleClear) {
+ EXPECT_CALL(*gl_, Scissor(101, 202, 3, 4)).Times(1).RetiresOnSaturation();
+ cmds::Scissor scissor_cmd;
+ scissor_cmd.Init(1, 2, 3, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(scissor_cmd));
+
+ // Scissor and Viewport should be restored to (0,0) offset on when clearing
+ // a framebuffer.
+ {
+ const GLuint kFBOClientTextureId = 4100;
+ const GLuint kFBOServiceTextureId = 4101;
+
+ // Register a texture id.
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgPointee<1>(kFBOServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
+
+ // Setup "render to" texture.
+ DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ 0, 0);
+ DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
+ kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+ kFBOClientTextureId, kFBOServiceTextureId, 0,
+ GL_NO_ERROR);
+ // Set scissor rect and enable GL_SCISSOR_TEST to make sure we re-enable it
+ // and restore the rect again after the clear.
+ DoEnableDisable(GL_SCISSOR_TEST, true);
+ EXPECT_CALL(*gl_, Viewport(0, 0, 128, 64)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl_, Scissor(1, 2, 3, 4)).Times(1).RetiresOnSaturation();
+
+ // Setup "render from" texture.
+ SetupTexture();
+
+ SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER, // target
+ GL_COLOR_BUFFER_BIT, // clear bits
+ 0, 0, 0,
+ 0, // color
+ 0, // stencil
+ 1.0f, // depth
+ true, // scissor test
+ 1, 2, 3, 4);
+ SetupExpectationsForApplyingDirtyState(false, // Framebuffer is RGB
+ false, // Framebuffer has depth
+ false, // Framebuffer has stencil
+ 0x1111, // color bits
+ false, // depth mask
+ false, // depth enabled
+ 0, // front stencil mask
+ 0, // back stencil mask
+ false); // stencil enabled
+
+ EXPECT_CALL(*gl_, Clear(GL_COLOR_BUFFER_BIT))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ Clear cmd;
+ cmd.Init(GL_COLOR_BUFFER_BIT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ // Check that the draw offset is used when switching to the default
+ // framebuffer and clearing it.
+ {
+ DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
+ EXPECT_CALL(*gl_, Clear(GL_COLOR_BUFFER_BIT))
+ .Times(1)
+ .RetiresOnSaturation();
+ SetupExpectationsForColorMask(true, true, true, true);
+ SetupExpectationsForDepthMask(true);
+ SetupExpectationsForStencilMask(0, 0);
+ SetupExpectationsForEnableDisable(GL_DEPTH_TEST, false);
+ SetupExpectationsForEnableDisable(GL_STENCIL_TEST, false);
+ EXPECT_CALL(*gl_, Viewport(100, 200, 128, 64))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, Scissor(101, 202, 3, 4)).Times(1).RetiresOnSaturation();
+ Clear cmd;
+ cmd.Init(GL_COLOR_BUFFER_BIT);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderTestWithDrawRectangle,
+ ::testing::Bool());
+
// TODO(gman): PixelStorei
// TODO(gman): SwapBuffers
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
index f05c197c03f..f4067970449 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -4363,6 +4363,33 @@ TEST_P(GLES2DecoderManualInitTest, TexStorageInvalidLevels) {
EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
}
+TEST_P(GLES2DecoderManualInitTest, TexStorageInvalidSize) {
+ InitState init;
+ init.gl_version = "OpenGL 4.2";
+ init.extensions = "GL_ARB_texture_storage";
+ init.bind_generates_resource = true;
+ InitDecoder(init);
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ {
+ TexStorage2DEXT cmd;
+ cmd.Init(GL_TEXTURE_2D, 1, GL_RGBA8, 0, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+ {
+ TexStorage2DEXT cmd;
+ cmd.Init(GL_TEXTURE_2D, 1, GL_RGBA8, 4, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+ {
+ TexStorage2DEXT cmd;
+ cmd.Init(GL_TEXTURE_2D, 1, GL_RGBA8, 0, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+}
+
class GLES2DecoderTexStorageFormatAndTypeTest
: public GLES2DecoderManualInitTest {
public:
diff --git a/chromium/gpu/command_buffer/service/gpu_service_test.cc b/chromium/gpu/command_buffer/service/gpu_service_test.cc
index f4e38dcfdfd..8e2d198585b 100644
--- a/chromium/gpu/command_buffer/service/gpu_service_test.cc
+++ b/chromium/gpu/command_buffer/service/gpu_service_test.cc
@@ -5,7 +5,6 @@
#include "gpu/command_buffer/service/gpu_service_test.h"
#include "gpu/command_buffer/service/test_helper.h"
-#include "gpu/test_message_loop_type.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_context_stub.h"
#include "ui/gl/gl_implementation.h"
@@ -17,10 +16,7 @@
namespace gpu {
namespace gles2 {
-GpuServiceTest::GpuServiceTest()
- : ran_setup_(false),
- ran_teardown_(false),
- message_loop_(test::GetMessageLoopTypeForGpu()) {}
+GpuServiceTest::GpuServiceTest() : ran_setup_(false), ran_teardown_(false) {}
GpuServiceTest::~GpuServiceTest() {
DCHECK(ran_teardown_);
diff --git a/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc b/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc
index dd9e6738818..d92d29f93ca 100644
--- a/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc
+++ b/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc
@@ -25,14 +25,15 @@ namespace gles2 {
namespace {
-base::LazyInstance<base::Lock> g_lock = LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<base::Lock>::DestructorAtExit g_lock =
+ LAZY_INSTANCE_INITIALIZER;
#if !defined(OS_MACOSX)
typedef std::map<SyncToken, std::unique_ptr<gl::GLFence>> SyncTokenToFenceMap;
-base::LazyInstance<SyncTokenToFenceMap> g_sync_point_to_fence =
- LAZY_INSTANCE_INITIALIZER;
-base::LazyInstance<std::queue<SyncTokenToFenceMap::iterator>> g_sync_points =
- LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<SyncTokenToFenceMap>::DestructorAtExit
+ g_sync_point_to_fence = LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<std::queue<SyncTokenToFenceMap::iterator>>::DestructorAtExit
+ g_sync_points = LAZY_INSTANCE_INITIALIZER;
#endif
void CreateFenceLocked(const SyncToken& sync_token) {
@@ -77,8 +78,8 @@ static const unsigned kNewTextureVersion = 1;
} // anonymous namespace
-base::LazyInstance<MailboxManagerSync::TextureGroup::MailboxToGroupMap>
- MailboxManagerSync::TextureGroup::mailbox_to_group_ =
+base::LazyInstance<MailboxManagerSync::TextureGroup::MailboxToGroupMap>::
+ DestructorAtExit MailboxManagerSync::TextureGroup::mailbox_to_group_ =
LAZY_INSTANCE_INITIALIZER;
// static
@@ -194,6 +195,10 @@ bool MailboxManagerSync::UsesSync() {
Texture* MailboxManagerSync::ConsumeTexture(const Mailbox& mailbox) {
base::AutoLock lock(g_lock.Get());
+ // Relax the cross-thread access restriction to non-thread-safe RefCount.
+ // The lock above protects non-thread-safe RefCount in TextureGroup.
+ base::ScopedAllowCrossThreadRefCountAccess
+ scoped_allow_cross_thread_ref_count_access;
TextureGroup* group = TextureGroup::FromName(mailbox);
if (!group)
return NULL;
@@ -221,6 +226,10 @@ Texture* MailboxManagerSync::ConsumeTexture(const Mailbox& mailbox) {
void MailboxManagerSync::ProduceTexture(const Mailbox& mailbox,
TextureBase* texture_base) {
base::AutoLock lock(g_lock.Get());
+ // Relax the cross-thread access restriction to non-thread-safe RefCount.
+ // The lock above protects non-thread-safe RefCount in TextureGroup.
+ base::ScopedAllowCrossThreadRefCountAccess
+ scoped_allow_cross_thread_ref_count_access;
Texture* texture = static_cast<Texture*>(texture_base);
DCHECK(texture != nullptr);
@@ -268,6 +277,10 @@ void MailboxManagerSync::ProduceTexture(const Mailbox& mailbox,
void MailboxManagerSync::TextureDeleted(TextureBase* texture_base) {
base::AutoLock lock(g_lock.Get());
+ // Relax the cross-thread access restriction to non-thread-safe RefCount.
+ // The lock above protects non-thread-safe RefCount in TextureGroup.
+ base::ScopedAllowCrossThreadRefCountAccess
+ scoped_allow_cross_thread_ref_count_access;
Texture* texture = static_cast<Texture*>(texture_base);
DCHECK(texture != nullptr);
@@ -315,6 +328,10 @@ void MailboxManagerSync::UpdateDefinitionLocked(TextureBase* texture_base,
void MailboxManagerSync::PushTextureUpdates(const SyncToken& token) {
base::AutoLock lock(g_lock.Get());
+ // Relax the cross-thread access restriction to non-thread-safe RefCount.
+ // The lock above protects non-thread-safe RefCount in TextureGroup.
+ base::ScopedAllowCrossThreadRefCountAccess
+ scoped_allow_cross_thread_ref_count_access;
for (TextureToGroupMap::iterator it = texture_to_group_.begin();
it != texture_to_group_.end(); it++) {
@@ -328,6 +345,10 @@ void MailboxManagerSync::PullTextureUpdates(const SyncToken& token) {
std::vector<TextureUpdatePair> needs_update;
{
base::AutoLock lock(g_lock.Get());
+ // Relax the cross-thread access restriction to non-thread-safe RefCount.
+ // The lock above protects non-thread-safe RefCount in TextureGroup.
+ base::ScopedAllowCrossThreadRefCountAccess
+ scoped_allow_cross_thread_ref_count_access;
AcquireFenceLocked(token);
for (TextureToGroupMap::iterator it = texture_to_group_.begin();
diff --git a/chromium/gpu/command_buffer/service/mailbox_manager_sync.h b/chromium/gpu/command_buffer/service/mailbox_manager_sync.h
index f7e6290b1c7..ba0c05e879f 100644
--- a/chromium/gpu/command_buffer/service/mailbox_manager_sync.h
+++ b/chromium/gpu/command_buffer/service/mailbox_manager_sync.h
@@ -72,7 +72,8 @@ class GPU_EXPORT MailboxManagerSync : public MailboxManager {
typedef std::map<Mailbox, scoped_refptr<TextureGroup>>
MailboxToGroupMap;
- static base::LazyInstance<MailboxToGroupMap> mailbox_to_group_;
+ static base::LazyInstance<MailboxToGroupMap>::DestructorAtExit
+ mailbox_to_group_;
};
struct TextureGroupRef {
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache.cc b/chromium/gpu/command_buffer/service/memory_program_cache.cc
index b24cbd7e42f..3b4fe79dd51 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache.cc
+++ b/chromium/gpu/command_buffer/service/memory_program_cache.cc
@@ -12,6 +12,7 @@
#include "base/metrics/histogram_macros.h"
#include "base/sha1.h"
#include "base/strings/string_number_conversions.h"
+#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/service/disk_cache_proto.pb.h"
#include "gpu/command_buffer/service/gl_utils.h"
@@ -211,14 +212,15 @@ bool ProgramBinaryExtensionsAvailable() {
MemoryProgramCache::MemoryProgramCache(
size_t max_cache_size_bytes,
bool disable_gpu_shader_disk_cache,
- bool disable_program_caching_for_transform_feedback)
+ bool disable_program_caching_for_transform_feedback,
+ GpuProcessActivityFlags* activity_flags)
: max_size_bytes_(max_cache_size_bytes),
disable_gpu_shader_disk_cache_(disable_gpu_shader_disk_cache),
disable_program_caching_for_transform_feedback_(
disable_program_caching_for_transform_feedback),
curr_size_bytes_(0),
- store_(ProgramMRUCache::NO_AUTO_EVICT) {
-}
+ store_(ProgramMRUCache::NO_AUTO_EVICT),
+ activity_flags_(activity_flags) {}
MemoryProgramCache::~MemoryProgramCache() {}
@@ -263,10 +265,14 @@ ProgramCache::ProgramLoadResult MemoryProgramCache::LoadLinkedProgram(
return PROGRAM_LOAD_FAILURE;
}
const scoped_refptr<ProgramCacheValue> value = found->second;
- glProgramBinary(program,
- value->format(),
- static_cast<const GLvoid*>(value->data()),
- value->length());
+
+ {
+ GpuProcessActivityFlags::ScopedSetFlag scoped_set_flag(
+ activity_flags_, ActivityFlagsBase::FLAG_LOADING_PROGRAM_BINARY);
+ glProgramBinary(program, value->format(),
+ static_cast<const GLvoid*>(value->data()), value->length());
+ }
+
GLint success = 0;
glGetProgramiv(program, GL_LINK_STATUS, &success);
if (success == GL_FALSE) {
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache.h b/chromium/gpu/command_buffer/service/memory_program_cache.h
index f4cc2ec4921..4662e911175 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache.h
+++ b/chromium/gpu/command_buffer/service/memory_program_cache.h
@@ -20,6 +20,8 @@
namespace gpu {
+class GpuProcessActivityFlags;
+
namespace gles2 {
// Program cache that stores binaries completely in-memory
@@ -27,7 +29,8 @@ class GPU_EXPORT MemoryProgramCache : public ProgramCache {
public:
MemoryProgramCache(size_t max_cache_size_bytes,
bool disable_gpu_shader_disk_cache,
- bool disable_program_caching_for_transform_feedback);
+ bool disable_program_caching_for_transform_feedback,
+ GpuProcessActivityFlags* activity_flags);
~MemoryProgramCache() override;
ProgramLoadResult LoadLinkedProgram(
@@ -168,6 +171,7 @@ class GPU_EXPORT MemoryProgramCache : public ProgramCache {
const bool disable_program_caching_for_transform_feedback_;
size_t curr_size_bytes_;
ProgramMRUCache store_;
+ GpuProcessActivityFlags* activity_flags_;
DISALLOW_COPY_AND_ASSIGN(MemoryProgramCache);
};
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc b/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
index b6e157730c3..61726ed969b 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
@@ -10,6 +10,7 @@
#include <memory>
#include "base/bind.h"
+#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/service/gl_utils.h"
#include "gpu/command_buffer/service/gpu_service_test.h"
@@ -77,8 +78,10 @@ class MemoryProgramCacheTest : public GpuServiceTest {
static const GLuint kFragmentShaderServiceId = 100;
MemoryProgramCacheTest()
- : cache_(new MemoryProgramCache(kCacheSizeBytes, kDisableGpuDiskCache,
- kDisableCachingForTransformFeedback)),
+ : cache_(new MemoryProgramCache(kCacheSizeBytes,
+ kDisableGpuDiskCache,
+ kDisableCachingForTransformFeedback,
+ &activity_flags_)),
shader_manager_(nullptr),
vertex_shader_(nullptr),
fragment_shader_(nullptr),
@@ -186,6 +189,7 @@ class MemoryProgramCacheTest : public GpuServiceTest {
.WillOnce(SetArgPointee<2>(GL_FALSE));
}
+ GpuProcessActivityFlags activity_flags_;
std::unique_ptr<MemoryProgramCache> cache_;
ShaderManager shader_manager_;
Shader* vertex_shader_;
@@ -551,8 +555,8 @@ TEST_F(MemoryProgramCacheTest, LoadFailIfTransformFeedbackCachingDisabled) {
// Forcibly reset the program cache so we can disable caching of
// programs which include transform feedback varyings.
- cache_.reset(new MemoryProgramCache(
- kCacheSizeBytes, kDisableGpuDiskCache, true));
+ cache_.reset(new MemoryProgramCache(kCacheSizeBytes, kDisableGpuDiskCache,
+ true, &activity_flags_));
varyings_.push_back("test");
cache_->SaveLinkedProgram(kProgramId,
vertex_shader_,
diff --git a/chromium/gpu/command_buffer/service/preemption_flag.h b/chromium/gpu/command_buffer/service/preemption_flag.h
new file mode 100644
index 00000000000..e4e0dc7f6f9
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/preemption_flag.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_PREEMPTION_FLAG_H_
+#define GPU_COMMAND_BUFFER_SERVICE_PREEMPTION_FLAG_H_
+
+#include "base/atomic_ref_count.h"
+#include "base/atomicops.h"
+#include "base/memory/ref_counted.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class PreemptionFlag : public base::RefCountedThreadSafe<PreemptionFlag> {
+ public:
+ PreemptionFlag() : flag_(0) {}
+
+ bool IsSet() { return !base::AtomicRefCountIsZero(&flag_); }
+ void Set() { base::AtomicRefCountInc(&flag_); }
+ void Reset() { base::subtle::NoBarrier_Store(&flag_, 0); }
+
+ private:
+ base::AtomicRefCount flag_;
+
+ ~PreemptionFlag() {}
+
+ friend class base::RefCountedThreadSafe<PreemptionFlag>;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_PREEMPTION_FLAG_H_
diff --git a/chromium/gpu/command_buffer/service/query_manager.cc b/chromium/gpu/command_buffer/service/query_manager.cc
index 8c2fe286e67..56f3594b458 100644
--- a/chromium/gpu/command_buffer/service/query_manager.cc
+++ b/chromium/gpu/command_buffer/service/query_manager.cc
@@ -1118,6 +1118,9 @@ bool QueryManager::EndQuery(Query* query, base::subtle::Atomic32 submit_count) {
bool QueryManager::QueryCounter(
Query* query, base::subtle::Atomic32 submit_count) {
DCHECK(query);
+ if (!RemovePendingQuery(query)) {
+ return false;
+ }
return query->QueryCounter(submit_count);
}
diff --git a/chromium/gpu/command_buffer/service/query_manager_unittest.cc b/chromium/gpu/command_buffer/service/query_manager_unittest.cc
index d6d14478563..aa6be4f35cb 100644
--- a/chromium/gpu/command_buffer/service/query_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/query_manager_unittest.cc
@@ -829,6 +829,36 @@ TEST_F(QueryManagerTest, TimeStampQuery) {
manager_->Destroy(false);
}
+TEST_F(QueryManagerTest, TimeStampQueryPending) {
+ const GLuint kClient1Id = 1;
+ const GLenum kTarget = GL_TIMESTAMP_EXT;
+ const base::subtle::Atomic32 kSubmitCount = 123;
+ gl::GPUTimingFake fake_timing_queries;
+
+ decoder_->GetGLContext()->CreateGPUTimingClient()->SetCpuTimeForTesting(
+ base::Bind(&gl::GPUTimingFake::GetFakeCPUTime));
+
+ QueryManager::Query* query = manager_->CreateQuery(
+ kTarget, kClient1Id, kSharedMemoryId, kSharedMemoryOffset);
+ ASSERT_TRUE(query != NULL);
+
+ const uint64_t expected_result =
+ 100u * base::Time::kNanosecondsPerMicrosecond;
+ fake_timing_queries.SetCurrentGLTime(expected_result);
+ fake_timing_queries.ExpectGPUTimeStampQuery(*gl_, false);
+ EXPECT_TRUE(manager_->QueryCounter(query, kSubmitCount));
+ EXPECT_TRUE(query->IsPending());
+ fake_timing_queries.ExpectGPUTimeStampQuery(*gl_, false);
+ EXPECT_TRUE(manager_->QueryCounter(query, kSubmitCount));
+ EXPECT_TRUE(manager_->ProcessPendingQueries(false));
+
+ QuerySync* sync = decoder_->GetSharedMemoryAs<QuerySync*>(
+ kSharedMemoryId, kSharedMemoryOffset, sizeof(*sync));
+ EXPECT_EQ(expected_result, sync->result);
+
+ manager_->Destroy(false);
+}
+
TEST_F(QueryManagerManualSetupTest, TimeStampDisjoint) {
GpuServiceTest::SetUpWithGLVersion("OpenGL ES 3.0",
"GL_EXT_disjoint_timer_query");
diff --git a/chromium/gpu/command_buffer/service/sequence_id.h b/chromium/gpu/command_buffer/service/sequence_id.h
new file mode 100644
index 00000000000..a2302c2ae9e
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/sequence_id.h
@@ -0,0 +1,17 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SEQUENCE_ID_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SEQUENCE_ID_H_
+
+#include "gpu/command_buffer/common/id_type.h"
+
+namespace gpu {
+
+class SyncPointOrderData;
+using SequenceId = gpu::IdTypeU32<SyncPointOrderData>;
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SEQUENCE_ID_H_
diff --git a/chromium/gpu/command_buffer/service/shader_translator.cc b/chromium/gpu/command_buffer/service/shader_translator.cc
index d5067b58c8d..3d3012de0fc 100644
--- a/chromium/gpu/command_buffer/service/shader_translator.cc
+++ b/chromium/gpu/command_buffer/service/shader_translator.cc
@@ -36,8 +36,8 @@ class ShaderTranslatorInitializer {
}
};
-base::LazyInstance<ShaderTranslatorInitializer> g_translator_initializer =
- LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<ShaderTranslatorInitializer>::DestructorAtExit
+ g_translator_initializer = LAZY_INSTANCE_INITIALIZER;
void GetAttributes(ShHandle compiler, AttributeMap* var_map) {
if (!var_map)
diff --git a/chromium/gpu/command_buffer/service/sync_point_manager.cc b/chromium/gpu/command_buffer/service/sync_point_manager.cc
index 85210e88161..60dce0568ef 100644
--- a/chromium/gpu/command_buffer/service/sync_point_manager.cc
+++ b/chromium/gpu/command_buffer/service/sync_point_manager.cc
@@ -11,6 +11,7 @@
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
@@ -29,8 +30,26 @@ void RunOnThread(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
} // namespace
-scoped_refptr<SyncPointOrderData> SyncPointOrderData::Create() {
- return new SyncPointOrderData;
+SyncPointOrderData::OrderFence::OrderFence(
+ uint32_t order,
+ uint64_t release,
+ const base::Closure& callback,
+ scoped_refptr<SyncPointClientState> state)
+ : order_num(order),
+ fence_release(release),
+ release_callback(callback),
+ client_state(std::move(state)) {}
+
+SyncPointOrderData::OrderFence::OrderFence(const OrderFence& other) = default;
+
+SyncPointOrderData::OrderFence::~OrderFence() {}
+
+SyncPointOrderData::SyncPointOrderData(SyncPointManager* sync_point_manager,
+ SequenceId sequence_id)
+ : sync_point_manager_(sync_point_manager), sequence_id_(sequence_id) {}
+
+SyncPointOrderData::~SyncPointOrderData() {
+ DCHECK(destroyed_);
}
void SyncPointOrderData::Destroy() {
@@ -38,20 +57,23 @@ void SyncPointOrderData::Destroy() {
// SyncPointClientState, we must remove the references on destroy. Releasing
// the fence syncs in the order fence queue would be redundant at this point
// because they are assumed to be released on the destruction of the
- // SyncPointClient.
- base::AutoLock auto_lock(lock_);
- destroyed_ = true;
- while (!order_fence_queue_.empty()) {
- order_fence_queue_.pop();
+ // SyncPointClientState.
+ {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(!destroyed_);
+ destroyed_ = true;
+ while (!order_fence_queue_.empty())
+ order_fence_queue_.pop();
}
+ // Call DestroyedSyncPointOrderData outside the lock to prevent deadlock.
+ sync_point_manager_->DestroyedSyncPointOrderData(sequence_id_);
}
-uint32_t SyncPointOrderData::GenerateUnprocessedOrderNumber(
- SyncPointManager* sync_point_manager) {
- const uint32_t order_num = sync_point_manager->GenerateOrderNumber();
+uint32_t SyncPointOrderData::GenerateUnprocessedOrderNumber() {
base::AutoLock auto_lock(lock_);
- unprocessed_order_num_ = order_num;
- return order_num;
+ DCHECK(!destroyed_);
+ unprocessed_order_num_ = sync_point_manager_->GenerateOrderNumber();
+ return unprocessed_order_num_;
}
void SyncPointOrderData::BeginProcessingOrderNumber(uint32_t order_num) {
@@ -127,24 +149,6 @@ void SyncPointOrderData::FinishProcessingOrderNumber(uint32_t order_num) {
}
}
-SyncPointOrderData::OrderFence::OrderFence(
- uint32_t order,
- uint64_t release,
- const base::Closure& callback,
- scoped_refptr<SyncPointClientState> state)
- : order_num(order),
- fence_release(release),
- release_callback(callback),
- client_state(state) {}
-
-SyncPointOrderData::OrderFence::OrderFence(const OrderFence& other) = default;
-
-SyncPointOrderData::OrderFence::~OrderFence() {}
-
-SyncPointOrderData::SyncPointOrderData() {}
-
-SyncPointOrderData::~SyncPointOrderData() {}
-
bool SyncPointOrderData::ValidateReleaseOrderNumber(
scoped_refptr<SyncPointClientState> client_state,
uint32_t wait_order_num,
@@ -168,7 +172,8 @@ bool SyncPointOrderData::ValidateReleaseOrderNumber(
uint32_t expected_order_num =
std::min(unprocessed_order_num_, wait_order_num);
order_fence_queue_.push(OrderFence(expected_order_num, fence_release,
- release_callback, client_state));
+ release_callback,
+ std::move(client_state)));
return true;
}
@@ -183,10 +188,48 @@ SyncPointClientState::ReleaseCallback::ReleaseCallback(
SyncPointClientState::ReleaseCallback::~ReleaseCallback() {}
SyncPointClientState::SyncPointClientState(
- scoped_refptr<SyncPointOrderData> order_data)
- : order_data_(order_data) {}
+ SyncPointManager* sync_point_manager,
+ scoped_refptr<SyncPointOrderData> order_data,
+ CommandBufferNamespace namespace_id,
+ CommandBufferId command_buffer_id)
+ : sync_point_manager_(sync_point_manager),
+ order_data_(std::move(order_data)),
+ namespace_id_(namespace_id),
+ command_buffer_id_(command_buffer_id) {}
-SyncPointClientState::~SyncPointClientState() {}
+SyncPointClientState::~SyncPointClientState() {
+ DCHECK_EQ(UINT64_MAX, fence_sync_release_);
+}
+
+void SyncPointClientState::Destroy() {
+ // Release all fences on destruction.
+ ReleaseFenceSyncHelper(UINT64_MAX);
+ DCHECK(sync_point_manager_); // not destroyed
+ sync_point_manager_->DestroyedSyncPointClientState(namespace_id_,
+ command_buffer_id_);
+ sync_point_manager_ = nullptr;
+}
+
+bool SyncPointClientState::Wait(const SyncToken& sync_token,
+ const base::Closure& callback) {
+ DCHECK(sync_point_manager_); // not destroyed
+ // Validate that this Wait call is between BeginProcessingOrderNumber() and
+ // FinishProcessingOrderNumber(), or else we may deadlock.
+ DCHECK(order_data_->IsProcessingOrderNumber());
+ if (sync_token.namespace_id() == namespace_id_ &&
+ sync_token.command_buffer_id() == command_buffer_id_) {
+ return false;
+ }
+ uint32_t wait_order_number = order_data_->current_order_num();
+ return sync_point_manager_->Wait(sync_token, wait_order_number, callback);
+}
+
+bool SyncPointClientState::WaitNonThreadSafe(
+ const SyncToken& sync_token,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ const base::Closure& callback) {
+ return Wait(sync_token, base::Bind(&RunOnThread, task_runner, callback));
+}
bool SyncPointClientState::IsFenceSyncReleased(uint64_t release) {
base::AutoLock lock(fence_sync_lock_);
@@ -213,6 +256,13 @@ bool SyncPointClientState::WaitForRelease(uint64_t release,
}
void SyncPointClientState::ReleaseFenceSync(uint64_t release) {
+ // Validate that this Release call is between BeginProcessingOrderNumber() and
+ // FinishProcessingOrderNumber(), or else we may deadlock.
+ DCHECK(order_data_->IsProcessingOrderNumber());
+ ReleaseFenceSyncHelper(release);
+}
+
+void SyncPointClientState::ReleaseFenceSyncHelper(uint64_t release) {
// Call callbacks without the lock to avoid possible deadlocks.
std::vector<base::Closure> callback_list;
{
@@ -229,9 +279,8 @@ void SyncPointClientState::ReleaseFenceSync(uint64_t release) {
}
}
- for (const base::Closure& closure : callback_list) {
+ for (const base::Closure& closure : callback_list)
closure.Run();
- }
}
void SyncPointClientState::EnsureWaitReleased(uint64_t release,
@@ -272,60 +321,63 @@ void SyncPointClientState::EnsureWaitReleased(uint64_t release,
}
}
-SyncPointClient::SyncPointClient(SyncPointManager* sync_point_manager,
- scoped_refptr<SyncPointOrderData> order_data,
- CommandBufferNamespace namespace_id,
- CommandBufferId command_buffer_id)
- : sync_point_manager_(sync_point_manager),
- order_data_(order_data),
- client_state_(new SyncPointClientState(order_data)),
- namespace_id_(namespace_id),
- command_buffer_id_(command_buffer_id) {
- sync_point_manager_->RegisterSyncPointClient(client_state_, namespace_id,
- command_buffer_id);
+SyncPointManager::SyncPointManager() {
+ order_num_generator_.GetNext();
}
-SyncPointClient::~SyncPointClient() {
- // Release all fences on destruction.
- client_state_->ReleaseFenceSync(UINT64_MAX);
- sync_point_manager_->DeregisterSyncPointClient(namespace_id_,
- command_buffer_id_);
+SyncPointManager::~SyncPointManager() {
+ DCHECK(order_data_map_.empty());
+ for (const ClientStateMap& client_state_map : client_state_maps_)
+ DCHECK(client_state_map.empty());
}
-bool SyncPointClient::Wait(const SyncToken& sync_token,
- const base::Closure& callback) {
- // Validate that this Wait call is between BeginProcessingOrderNumber() and
- // FinishProcessingOrderNumber(), or else we may deadlock.
- DCHECK(order_data_->IsProcessingOrderNumber());
- if (sync_token.namespace_id() == namespace_id_ &&
- sync_token.command_buffer_id() == command_buffer_id_) {
- return false;
- }
- uint32_t wait_order_number = order_data_->current_order_num();
- return sync_point_manager_->Wait(sync_token, wait_order_number, callback);
+scoped_refptr<SyncPointOrderData> SyncPointManager::CreateSyncPointOrderData() {
+ base::AutoLock auto_lock(lock_);
+ SequenceId sequence_id = SequenceId::FromUnsafeValue(next_sequence_id_++);
+ scoped_refptr<SyncPointOrderData> order_data =
+ new SyncPointOrderData(this, sequence_id);
+ DCHECK(!order_data_map_.count(sequence_id));
+ order_data_map_.insert(std::make_pair(sequence_id, order_data));
+ return order_data;
}
-bool SyncPointClient::WaitNonThreadSafe(
- const SyncToken& sync_token,
- scoped_refptr<base::SingleThreadTaskRunner> task_runner,
- const base::Closure& callback) {
- return Wait(sync_token, base::Bind(&RunOnThread, task_runner, callback));
+void SyncPointManager::DestroyedSyncPointOrderData(SequenceId sequence_id) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(order_data_map_.count(sequence_id));
+ order_data_map_.erase(sequence_id);
}
-void SyncPointClient::ReleaseFenceSync(uint64_t release) {
- // Validate that this Release call is between BeginProcessingOrderNumber() and
- // FinishProcessingOrderNumber(), or else we may deadlock.
- DCHECK(order_data_->IsProcessingOrderNumber());
- client_state_->ReleaseFenceSync(release);
-}
+scoped_refptr<SyncPointClientState>
+SyncPointManager::CreateSyncPointClientState(
+ CommandBufferNamespace namespace_id,
+ CommandBufferId command_buffer_id,
+ SequenceId sequence_id) {
+ scoped_refptr<SyncPointOrderData> order_data =
+ GetSyncPointOrderData(sequence_id);
-SyncPointManager::SyncPointManager() {
- global_order_num_.GetNext();
+ scoped_refptr<SyncPointClientState> client_state = new SyncPointClientState(
+ this, order_data, namespace_id, command_buffer_id);
+
+ {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_GE(namespace_id, 0);
+ DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_state_maps_));
+ DCHECK(!client_state_maps_[namespace_id].count(command_buffer_id));
+ client_state_maps_[namespace_id].insert(
+ std::make_pair(command_buffer_id, client_state));
+ }
+
+ return client_state;
}
-SyncPointManager::~SyncPointManager() {
- for (const ClientStateMap& client_state_map : client_state_maps_)
- DCHECK(client_state_map.empty());
+void SyncPointManager::DestroyedSyncPointClientState(
+ CommandBufferNamespace namespace_id,
+ CommandBufferId command_buffer_id) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_GE(namespace_id, 0);
+ DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_state_maps_));
+ DCHECK(client_state_maps_[namespace_id].count(command_buffer_id));
+ client_state_maps_[namespace_id].erase(command_buffer_id);
}
bool SyncPointManager::IsSyncTokenReleased(const SyncToken& sync_token) {
@@ -336,6 +388,35 @@ bool SyncPointManager::IsSyncTokenReleased(const SyncToken& sync_token) {
return true;
}
+SequenceId SyncPointManager::GetSyncTokenReleaseSequenceId(
+ const SyncToken& sync_token) {
+ scoped_refptr<SyncPointClientState> client_state = GetSyncPointClientState(
+ sync_token.namespace_id(), sync_token.command_buffer_id());
+ if (client_state)
+ return client_state->sequence_id();
+ return SequenceId();
+}
+
+uint32_t SyncPointManager::GetProcessedOrderNum() const {
+ base::AutoLock auto_lock(lock_);
+ uint32_t processed_order_num = 0;
+ for (const auto& kv : order_data_map_) {
+ processed_order_num =
+ std::max(processed_order_num, kv.second->processed_order_num());
+ }
+ return processed_order_num;
+}
+
+uint32_t SyncPointManager::GetUnprocessedOrderNum() const {
+ base::AutoLock auto_lock(lock_);
+ uint32_t unprocessed_order_num = 0;
+ for (const auto& kv : order_data_map_) {
+ unprocessed_order_num =
+ std::max(unprocessed_order_num, kv.second->unprocessed_order_num());
+ }
+ return unprocessed_order_num;
+}
+
bool SyncPointManager::Wait(const SyncToken& sync_token,
uint32_t wait_order_num,
const base::Closure& callback) {
@@ -375,32 +456,8 @@ bool SyncPointManager::WaitOutOfOrderNonThreadSafe(
base::Bind(&RunOnThread, task_runner, callback));
}
-void SyncPointManager::RegisterSyncPointClient(
- scoped_refptr<SyncPointClientState> client_state,
- CommandBufferNamespace namespace_id,
- CommandBufferId command_buffer_id) {
- DCHECK_GE(namespace_id, 0);
- DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_state_maps_));
-
- base::AutoLock auto_lock(client_state_maps_lock_);
- DCHECK(!client_state_maps_[namespace_id].count(command_buffer_id));
- client_state_maps_[namespace_id].insert(
- std::make_pair(command_buffer_id, client_state));
-}
-
-void SyncPointManager::DeregisterSyncPointClient(
- CommandBufferNamespace namespace_id,
- CommandBufferId command_buffer_id) {
- DCHECK_GE(namespace_id, 0);
- DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_state_maps_));
-
- base::AutoLock auto_lock(client_state_maps_lock_);
- DCHECK(client_state_maps_[namespace_id].count(command_buffer_id));
- client_state_maps_[namespace_id].erase(command_buffer_id);
-}
-
uint32_t SyncPointManager::GenerateOrderNumber() {
- return global_order_num_.GetNext();
+ return order_num_generator_.GetNext();
}
scoped_refptr<SyncPointClientState> SyncPointManager::GetSyncPointClientState(
@@ -408,7 +465,7 @@ scoped_refptr<SyncPointClientState> SyncPointManager::GetSyncPointClientState(
CommandBufferId command_buffer_id) {
if (namespace_id >= 0) {
DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_state_maps_));
- base::AutoLock auto_lock(client_state_maps_lock_);
+ base::AutoLock auto_lock(lock_);
ClientStateMap& client_state_map = client_state_maps_[namespace_id];
auto it = client_state_map.find(command_buffer_id);
if (it != client_state_map.end())
@@ -417,4 +474,13 @@ scoped_refptr<SyncPointClientState> SyncPointManager::GetSyncPointClientState(
return nullptr;
}
+scoped_refptr<SyncPointOrderData> SyncPointManager::GetSyncPointOrderData(
+ SequenceId sequence_id) {
+ base::AutoLock auto_lock(lock_);
+ auto it = order_data_map_.find(sequence_id);
+ if (it != order_data_map_.end())
+ return it->second;
+ return nullptr;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/sync_point_manager.h b/chromium/gpu/command_buffer/service/sync_point_manager.h
index a34a17cede4..ad5587d5f2b 100644
--- a/chromium/gpu/command_buffer/service/sync_point_manager.h
+++ b/chromium/gpu/command_buffer/service/sync_point_manager.h
@@ -24,6 +24,7 @@
#include "gpu/command_buffer/common/command_buffer_id.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/sync_token.h"
+#include "gpu/command_buffer/service/sequence_id.h"
#include "gpu/gpu_export.h"
namespace base {
@@ -39,13 +40,9 @@ class SyncPointManager;
class GPU_EXPORT SyncPointOrderData
: public base::RefCountedThreadSafe<SyncPointOrderData> {
public:
- static scoped_refptr<SyncPointOrderData> Create();
void Destroy();
- uint32_t GenerateUnprocessedOrderNumber(SyncPointManager* sync_point_manager);
- void BeginProcessingOrderNumber(uint32_t order_num);
- void PauseProcessingOrderNumber(uint32_t order_num);
- void FinishProcessingOrderNumber(uint32_t order_num);
+ SequenceId sequence_id() { return sequence_id_; }
uint32_t processed_order_num() const {
base::AutoLock auto_lock(lock_);
@@ -67,14 +64,15 @@ class GPU_EXPORT SyncPointOrderData
return !paused_ && current_order_num_ > processed_order_num();
}
- bool ValidateReleaseOrderNumber(
- scoped_refptr<SyncPointClientState> client_state,
- uint32_t wait_order_num,
- uint64_t fence_release,
- const base::Closure& release_callback);
+ uint32_t GenerateUnprocessedOrderNumber();
+ void BeginProcessingOrderNumber(uint32_t order_num);
+ void PauseProcessingOrderNumber(uint32_t order_num);
+ void FinishProcessingOrderNumber(uint32_t order_num);
private:
friend class base::RefCountedThreadSafe<SyncPointOrderData>;
+ friend class SyncPointManager;
+ friend class SyncPointClientState;
struct OrderFence {
uint32_t order_num;
@@ -99,9 +97,21 @@ class GPU_EXPORT SyncPointOrderData
std::greater<OrderFence>>
OrderFenceQueue;
- SyncPointOrderData();
+ SyncPointOrderData(SyncPointManager* sync_point_manager,
+ SequenceId sequence_id);
+
~SyncPointOrderData();
+ bool ValidateReleaseOrderNumber(
+ scoped_refptr<SyncPointClientState> client_state,
+ uint32_t wait_order_num,
+ uint64_t fence_release,
+ const base::Closure& release_callback);
+
+ SyncPointManager* const sync_point_manager_;
+
+ const SequenceId sequence_id_;
+
// Non thread-safe functions need to be called from a single thread.
base::ThreadChecker processing_thread_checker_;
@@ -137,30 +147,35 @@ class GPU_EXPORT SyncPointOrderData
DISALLOW_COPY_AND_ASSIGN(SyncPointOrderData);
};
-// Internal state for sync point clients.
class GPU_EXPORT SyncPointClientState
: public base::RefCountedThreadSafe<SyncPointClientState> {
public:
- explicit SyncPointClientState(scoped_refptr<SyncPointOrderData> order_data);
+ void Destroy();
- bool IsFenceSyncReleased(uint64_t release);
+ CommandBufferNamespace namespace_id() const { return namespace_id_; }
+ CommandBufferId command_buffer_id() const { return command_buffer_id_; }
+ SequenceId sequence_id() const { return order_data_->sequence_id(); }
- // Queues the callback to be called if the release is valid. If the release
- // is invalid this function will return False and the callback will never
- // be called.
- bool WaitForRelease(uint64_t release,
- uint32_t wait_order_num,
- const base::Closure& callback);
+ // This behaves similarly to SyncPointManager::Wait but uses the order data
+ // to guarantee no deadlocks with other clients. Must be called on order
+ // number processing thread.
+ bool Wait(const SyncToken& sync_token, const base::Closure& callback);
- // Releases a fence sync and all fence syncs below.
- void ReleaseFenceSync(uint64_t release);
+ // Like Wait but runs the callback on the given task runner's thread. Must be
+ // called on order number processing thread.
+ bool WaitNonThreadSafe(
+ const SyncToken& sync_token,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ const base::Closure& callback);
- // Does not release the fence sync, but releases callbacks waiting on that
- // fence sync.
- void EnsureWaitReleased(uint64_t release, const base::Closure& callback);
+ // Release fence sync and run queued callbacks. Must be called on order number
+ // processing thread.
+ void ReleaseFenceSync(uint64_t release);
private:
friend class base::RefCountedThreadSafe<SyncPointClientState>;
+ friend class SyncPointManager;
+ friend class SyncPointOrderData;
struct ReleaseCallback {
uint64_t release_count;
@@ -179,11 +194,39 @@ class GPU_EXPORT SyncPointClientState
std::greater<ReleaseCallback>>
ReleaseCallbackQueue;
+ SyncPointClientState(SyncPointManager* sync_point_manager,
+ scoped_refptr<SyncPointOrderData> order_data,
+ CommandBufferNamespace namespace_id,
+ CommandBufferId command_buffer_id);
+
~SyncPointClientState();
+ // Returns true if fence sync has been released.
+ bool IsFenceSyncReleased(uint64_t release);
+
+ // Queues the callback to be called if the release is valid. If the release
+ // is invalid this function will return False and the callback will never
+ // be called.
+ bool WaitForRelease(uint64_t release,
+ uint32_t wait_order_num,
+ const base::Closure& callback);
+
+ // Does not release the fence sync, but releases callbacks waiting on that
+ // fence sync.
+ void EnsureWaitReleased(uint64_t release, const base::Closure& callback);
+
+ void ReleaseFenceSyncHelper(uint64_t release);
+
+ // Sync point manager is guaranteed to exist in the lifetime of the client.
+ SyncPointManager* sync_point_manager_ = nullptr;
+
// Global order data where releases will originate from.
scoped_refptr<SyncPointOrderData> order_data_;
+ // Unique namespace/client id pair for this sync point client.
+ const CommandBufferNamespace namespace_id_;
+ const CommandBufferId command_buffer_id_;
+
// Protects fence_sync_release_, fence_callback_queue_.
base::Lock fence_sync_lock_;
@@ -197,42 +240,6 @@ class GPU_EXPORT SyncPointClientState
DISALLOW_COPY_AND_ASSIGN(SyncPointClientState);
};
-class GPU_EXPORT SyncPointClient {
- public:
- SyncPointClient(SyncPointManager* sync_point_manager,
- scoped_refptr<SyncPointOrderData> order_data,
- CommandBufferNamespace namespace_id,
- CommandBufferId command_buffer_id);
- ~SyncPointClient();
-
- // This behaves similarly to SyncPointManager::Wait but uses the order data
- // to guarantee no deadlocks with other clients.
- bool Wait(const SyncToken& sync_token, const base::Closure& callback);
-
- // Like Wait but runs the callback on the given task runner's thread.
- bool WaitNonThreadSafe(
- const SyncToken& sync_token,
- scoped_refptr<base::SingleThreadTaskRunner> task_runner,
- const base::Closure& callback);
-
- // Release fence sync and run queued callbacks.
- void ReleaseFenceSync(uint64_t release);
-
- private:
- // Sync point manager is guaranteed to exist in the lifetime of the client.
- SyncPointManager* const sync_point_manager_;
-
- scoped_refptr<SyncPointOrderData> order_data_;
-
- scoped_refptr<SyncPointClientState> client_state_;
-
- // Unique namespace/client id pair for this sync point client.
- const CommandBufferNamespace namespace_id_;
- const CommandBufferId command_buffer_id_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncPointClient);
-};
-
// This class manages the sync points, which allow cross-channel
// synchronization.
class GPU_EXPORT SyncPointManager {
@@ -240,10 +247,26 @@ class GPU_EXPORT SyncPointManager {
SyncPointManager();
~SyncPointManager();
- // Returns true if the sync token has been released or if the command buffer
- // does not exist.
+ scoped_refptr<SyncPointOrderData> CreateSyncPointOrderData();
+
+ scoped_refptr<SyncPointClientState> CreateSyncPointClientState(
+ CommandBufferNamespace namespace_id,
+ CommandBufferId command_buffer_id,
+ SequenceId sequence_id);
+
+ // Returns true if the sync token has been released or if the command
+ // buffer does not exist.
bool IsSyncTokenReleased(const SyncToken& sync_token);
+ // Returns the sequence ID that will release this sync token.
+ SequenceId GetSyncTokenReleaseSequenceId(const SyncToken& sync_token);
+
+ // Returns the global last processed order number.
+ uint32_t GetProcessedOrderNum() const;
+
+ // Returns the global last unprocessed order number.
+ uint32_t GetUnprocessedOrderNum() const;
+
// If the wait is valid (sync token hasn't been processed or command buffer
// does not exist), the callback is queued to run when the sync point is
// released. If the wait is invalid, the callback is NOT run. The callback
@@ -272,33 +295,44 @@ class GPU_EXPORT SyncPointManager {
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
const base::Closure& callback);
- // Used by SyncPointClient.
- void RegisterSyncPointClient(scoped_refptr<SyncPointClientState> client_state,
- CommandBufferNamespace namespace_id,
- CommandBufferId command_buffer_id);
-
- void DeregisterSyncPointClient(CommandBufferNamespace namespace_id,
- CommandBufferId command_buffer_id);
-
// Used by SyncPointOrderData.
uint32_t GenerateOrderNumber();
+ void DestroyedSyncPointOrderData(SequenceId sequence_id);
+
+ void DestroyedSyncPointClientState(CommandBufferNamespace namespace_id,
+ CommandBufferId command_buffer_id);
+
private:
using ClientStateMap = std::unordered_map<CommandBufferId,
scoped_refptr<SyncPointClientState>,
CommandBufferId::Hasher>;
+ using OrderDataMap = std::unordered_map<SequenceId,
+ scoped_refptr<SyncPointOrderData>,
+ SequenceId::Hasher>;
+
+ scoped_refptr<SyncPointOrderData> GetSyncPointOrderData(
+ SequenceId sequence_id);
+
scoped_refptr<SyncPointClientState> GetSyncPointClientState(
CommandBufferNamespace namespace_id,
CommandBufferId command_buffer_id);
// Order number is global for all clients.
- base::AtomicSequenceNumber global_order_num_;
+ base::AtomicSequenceNumber order_num_generator_;
- // Client map holds a map of clients id to client for each namespace.
- base::Lock client_state_maps_lock_;
+ // The following are protected by |lock_|.
+ // Map of command buffer id to client state for each namespace.
ClientStateMap client_state_maps_[NUM_COMMAND_BUFFER_NAMESPACES];
+ // Map of sequence id to order data.
+ OrderDataMap order_data_map_;
+
+ uint32_t next_sequence_id_ = 1;
+
+ mutable base::Lock lock_;
+
DISALLOW_COPY_AND_ASSIGN(SyncPointManager);
};
diff --git a/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc b/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc
index 96b80623bb6..b3501a5ec10 100644
--- a/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc
@@ -28,26 +28,27 @@ class SyncPointManagerTest : public testing::Test {
struct SyncPointStream {
scoped_refptr<SyncPointOrderData> order_data;
- std::unique_ptr<SyncPointClient> client;
+ scoped_refptr<SyncPointClientState> client_state;
std::queue<uint32_t> order_numbers;
SyncPointStream(SyncPointManager* sync_point_manager,
CommandBufferNamespace namespace_id,
CommandBufferId command_buffer_id)
- : order_data(SyncPointOrderData::Create()),
- client(base::MakeUnique<SyncPointClient>(sync_point_manager,
- order_data,
- namespace_id,
- command_buffer_id)) {}
+ : order_data(sync_point_manager->CreateSyncPointOrderData()),
+ client_state(sync_point_manager->CreateSyncPointClientState(
+ namespace_id,
+ command_buffer_id,
+ order_data->sequence_id())) {}
~SyncPointStream() {
- order_data->Destroy();
- order_data = nullptr;
+ if (order_data)
+ order_data->Destroy();
+ if (client_state)
+ client_state->Destroy();
}
- void AllocateOrderNum(SyncPointManager* sync_point_manager) {
- order_numbers.push(
- order_data->GenerateUnprocessedOrderNumber(sync_point_manager));
+ void AllocateOrderNum() {
+ order_numbers.push(order_data->GenerateUnprocessedOrderNumber());
}
void BeginProcessing() {
@@ -63,14 +64,14 @@ struct SyncPointStream {
};
TEST_F(SyncPointManagerTest, BasicSyncPointOrderDataTest) {
- scoped_refptr<SyncPointOrderData> order_data = SyncPointOrderData::Create();
+ scoped_refptr<SyncPointOrderData> order_data =
+ sync_point_manager_->CreateSyncPointOrderData();
EXPECT_EQ(0u, order_data->current_order_num());
EXPECT_EQ(0u, order_data->processed_order_num());
EXPECT_EQ(0u, order_data->unprocessed_order_num());
- uint32_t order_num =
- order_data->GenerateUnprocessedOrderNumber(sync_point_manager_.get());
+ uint32_t order_num = order_data->GenerateUnprocessedOrderNumber();
EXPECT_EQ(1u, order_num);
EXPECT_EQ(0u, order_data->current_order_num());
@@ -94,6 +95,8 @@ TEST_F(SyncPointManagerTest, BasicSyncPointOrderDataTest) {
EXPECT_EQ(order_num, order_data->processed_order_num());
EXPECT_EQ(order_num, order_data->unprocessed_order_num());
EXPECT_FALSE(order_data->IsProcessingOrderNumber());
+
+ order_data->Destroy();
}
TEST_F(SyncPointManagerTest, BasicFenceSyncRelease) {
@@ -108,12 +111,12 @@ TEST_F(SyncPointManagerTest, BasicFenceSyncRelease) {
SyncPointStream stream(sync_point_manager_.get(), kNamespaceId, kBufferId);
- stream.AllocateOrderNum(sync_point_manager_.get());
+ stream.AllocateOrderNum();
EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
stream.order_data->BeginProcessingOrderNumber(1);
- stream.client->ReleaseFenceSync(release_count);
+ stream.client_state->ReleaseFenceSync(release_count);
stream.order_data->FinishProcessingOrderNumber(1);
EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
@@ -131,16 +134,16 @@ TEST_F(SyncPointManagerTest, MultipleClientsPerOrderData) {
uint64_t release_count = 1;
SyncToken sync_token1(kNamespaceId, 0, kCmdBufferId1, release_count);
- stream1.AllocateOrderNum(sync_point_manager_.get());
+ stream1.AllocateOrderNum();
SyncToken sync_token2(kNamespaceId, 0, kCmdBufferId2, release_count);
- stream2.AllocateOrderNum(sync_point_manager_.get());
+ stream2.AllocateOrderNum();
EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token1));
EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token2));
stream1.order_data->BeginProcessingOrderNumber(1);
- stream1.client->ReleaseFenceSync(release_count);
+ stream1.client_state->ReleaseFenceSync(release_count);
stream1.order_data->FinishProcessingOrderNumber(1);
EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token1));
@@ -157,15 +160,15 @@ TEST_F(SyncPointManagerTest, BasicFenceSyncWaitRelease) {
SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
kWaitCmdBufferId);
- release_stream.AllocateOrderNum(sync_point_manager_.get());
- wait_stream.AllocateOrderNum(sync_point_manager_.get());
+ release_stream.AllocateOrderNum();
+ wait_stream.AllocateOrderNum();
uint64_t release_count = 1;
SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
wait_stream.BeginProcessing();
int test_num = 10;
- bool valid_wait = wait_stream.client->Wait(
+ bool valid_wait = wait_stream.client_state->Wait(
sync_token,
base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
EXPECT_TRUE(valid_wait);
@@ -173,7 +176,7 @@ TEST_F(SyncPointManagerTest, BasicFenceSyncWaitRelease) {
EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
release_stream.BeginProcessing();
- release_stream.client->ReleaseFenceSync(release_count);
+ release_stream.client_state->ReleaseFenceSync(release_count);
EXPECT_EQ(123, test_num);
EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
}
@@ -188,15 +191,15 @@ TEST_F(SyncPointManagerTest, WaitOnSelfFails) {
SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
kWaitCmdBufferId);
- release_stream.AllocateOrderNum(sync_point_manager_.get());
- wait_stream.AllocateOrderNum(sync_point_manager_.get());
+ release_stream.AllocateOrderNum();
+ wait_stream.AllocateOrderNum();
uint64_t release_count = 1;
SyncToken sync_token(kNamespaceId, 0, kWaitCmdBufferId, release_count);
wait_stream.BeginProcessing();
int test_num = 10;
- bool valid_wait = wait_stream.client->Wait(
+ bool valid_wait = wait_stream.client_state->Wait(
sync_token,
base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
EXPECT_FALSE(valid_wait);
@@ -215,15 +218,15 @@ TEST_F(SyncPointManagerTest, OutOfOrderRelease) {
kWaitCmdBufferId);
// Generate wait order number first.
- wait_stream.AllocateOrderNum(sync_point_manager_.get());
- release_stream.AllocateOrderNum(sync_point_manager_.get());
+ wait_stream.AllocateOrderNum();
+ release_stream.AllocateOrderNum();
uint64_t release_count = 1;
SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
wait_stream.BeginProcessing();
int test_num = 10;
- bool valid_wait = wait_stream.client->Wait(
+ bool valid_wait = wait_stream.client_state->Wait(
sync_token,
base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
EXPECT_FALSE(valid_wait);
@@ -242,21 +245,21 @@ TEST_F(SyncPointManagerTest, HigherOrderNumberRelease) {
kWaitCmdBufferId);
// Generate wait order number first.
- wait_stream.AllocateOrderNum(sync_point_manager_.get());
- release_stream.AllocateOrderNum(sync_point_manager_.get());
+ wait_stream.AllocateOrderNum();
+ release_stream.AllocateOrderNum();
uint64_t release_count = 1;
SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
// Order number was higher but it was actually released.
release_stream.BeginProcessing();
- release_stream.client->ReleaseFenceSync(release_count);
+ release_stream.client_state->ReleaseFenceSync(release_count);
release_stream.EndProcessing();
// Release stream has already released so there's no need to wait.
wait_stream.BeginProcessing();
int test_num = 10;
- bool valid_wait = wait_stream.client->Wait(
+ bool valid_wait = wait_stream.client_state->Wait(
sync_token,
base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
EXPECT_FALSE(valid_wait);
@@ -274,8 +277,8 @@ TEST_F(SyncPointManagerTest, DestroyedClientRelease) {
SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId,
kWaitCmdBufferId);
- release_stream.AllocateOrderNum(sync_point_manager_.get());
- wait_stream.AllocateOrderNum(sync_point_manager_.get());
+ release_stream.AllocateOrderNum();
+ wait_stream.AllocateOrderNum();
uint64_t release_count = 1;
SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
@@ -283,14 +286,16 @@ TEST_F(SyncPointManagerTest, DestroyedClientRelease) {
wait_stream.BeginProcessing();
int test_num = 10;
- bool valid_wait = wait_stream.client->Wait(
+ bool valid_wait = wait_stream.client_state->Wait(
sync_token,
base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
EXPECT_TRUE(valid_wait);
EXPECT_EQ(10, test_num);
// Destroying the client should release the wait.
- release_stream.client.reset();
+ release_stream.client_state->Destroy();
+ release_stream.client_state = nullptr;
+
EXPECT_EQ(123, test_num);
EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
}
@@ -309,15 +314,15 @@ TEST_F(SyncPointManagerTest, NonExistentRelease) {
// This test simply tests that a wait stream of order [2] waiting on
// release stream of order [1] will still release the fence sync even
// though nothing was released.
- release_stream.AllocateOrderNum(sync_point_manager_.get());
- wait_stream.AllocateOrderNum(sync_point_manager_.get());
+ release_stream.AllocateOrderNum();
+ wait_stream.AllocateOrderNum();
uint64_t release_count = 1;
SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
wait_stream.BeginProcessing();
int test_num = 10;
- bool valid_wait = wait_stream.client->Wait(
+ bool valid_wait = wait_stream.client_state->Wait(
sync_token,
base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
EXPECT_TRUE(valid_wait);
@@ -348,9 +353,9 @@ TEST_F(SyncPointManagerTest, NonExistentRelease2) {
// The wait stream [3] is waiting on release stream [1] even though
// order [2] was also generated. Although order [2] only exists on the
// wait stream so the release stream should only know about order [1].
- release_stream.AllocateOrderNum(sync_point_manager_.get());
- wait_stream.AllocateOrderNum(sync_point_manager_.get());
- wait_stream.AllocateOrderNum(sync_point_manager_.get());
+ release_stream.AllocateOrderNum();
+ wait_stream.AllocateOrderNum();
+ wait_stream.AllocateOrderNum();
uint64_t release_count = 1;
SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
@@ -363,7 +368,7 @@ TEST_F(SyncPointManagerTest, NonExistentRelease2) {
wait_stream.BeginProcessing();
EXPECT_EQ(3u, wait_stream.order_data->current_order_num());
int test_num = 10;
- bool valid_wait = wait_stream.client->Wait(
+ bool valid_wait = wait_stream.client_state->Wait(
sync_token,
base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
EXPECT_TRUE(valid_wait);
@@ -381,9 +386,9 @@ TEST_F(SyncPointManagerTest, NonExistentRelease2) {
// Ensure that the wait callback does not get triggered again when it is
// actually released.
test_num = 1;
- release_stream.AllocateOrderNum(sync_point_manager_.get());
+ release_stream.AllocateOrderNum();
release_stream.BeginProcessing();
- release_stream.client->ReleaseFenceSync(release_count);
+ release_stream.client_state->ReleaseFenceSync(release_count);
release_stream.EndProcessing();
EXPECT_EQ(1, test_num);
EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
@@ -405,10 +410,10 @@ TEST_F(SyncPointManagerTest, NonExistentOrderNumRelease) {
// to the release stream so it is essentially non-existent to the release
// stream's point of view. Once the release stream begins processing the next
// order [3], it should realize order [2] didn't exist and release the fence.
- release_stream.AllocateOrderNum(sync_point_manager_.get());
- wait_stream.AllocateOrderNum(sync_point_manager_.get());
- wait_stream.AllocateOrderNum(sync_point_manager_.get());
- release_stream.AllocateOrderNum(sync_point_manager_.get());
+ release_stream.AllocateOrderNum();
+ wait_stream.AllocateOrderNum();
+ wait_stream.AllocateOrderNum();
+ release_stream.AllocateOrderNum();
uint64_t release_count = 1;
SyncToken sync_token(kNamespaceId, 0, kReleaseCmdBufferId, release_count);
@@ -420,7 +425,7 @@ TEST_F(SyncPointManagerTest, NonExistentOrderNumRelease) {
wait_stream.BeginProcessing();
EXPECT_EQ(3u, wait_stream.order_data->current_order_num());
int test_num = 10;
- bool valid_wait = wait_stream.client->Wait(
+ bool valid_wait = wait_stream.client_state->Wait(
sync_token,
base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
EXPECT_TRUE(valid_wait);
@@ -444,7 +449,7 @@ TEST_F(SyncPointManagerTest, NonExistentOrderNumRelease) {
// Ensure that the wait callback does not get triggered again when it is
// actually released.
test_num = 1;
- release_stream.client->ReleaseFenceSync(1);
+ release_stream.client_state->ReleaseFenceSync(1);
EXPECT_EQ(1, test_num);
EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
}
diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc
index e9307a24f28..68cd6bf3e18 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager.cc
@@ -1384,10 +1384,13 @@ GLenum Texture::SetParameterf(
case GL_TEXTURE_BASE_LEVEL:
case GL_TEXTURE_MAX_LEVEL:
case GL_TEXTURE_USAGE_ANGLE:
- {
- GLint iparam = static_cast<GLint>(std::round(param));
- return SetParameteri(feature_info, pname, iparam);
- }
+ case GL_TEXTURE_SWIZZLE_R:
+ case GL_TEXTURE_SWIZZLE_G:
+ case GL_TEXTURE_SWIZZLE_B:
+ case GL_TEXTURE_SWIZZLE_A: {
+ GLint iparam = static_cast<GLint>(std::round(param));
+ return SetParameteri(feature_info, pname, iparam);
+ }
case GL_TEXTURE_MIN_LOD:
sampler_state_.min_lod = param;
break;
@@ -1753,17 +1756,15 @@ void Texture::DumpLevelMemory(base::trace_event::ProcessMemoryDump* pmd,
continue;
// If a level has a GLImage, ask the GLImage to dump itself.
+ // If a level does not have a GLImage bound to it, then dump the
+ // texture allocation also as the storage is not provided by the
+ // GLImage in that case.
if (level_infos[level_index].image) {
level_infos[level_index].image->OnMemoryDump(
pmd, client_tracing_id,
base::StringPrintf("%s/face_%d/level_%d", dump_name.c_str(),
face_index, level_index));
- }
-
- // If a level does not have a GLImage bound to it, then dump the
- // texture allocation also as the storage is not provided by the
- // GLImage in that case.
- if (level_infos[level_index].image_state != BOUND) {
+ } else {
MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(base::StringPrintf(
"%s/face_%d/level_%d", dump_name.c_str(), face_index, level_index));
dump->AddScalar(
diff --git a/chromium/gpu/config/BUILD.gn b/chromium/gpu/config/BUILD.gn
index b579142188d..f8d7af04399 100644
--- a/chromium/gpu/config/BUILD.gn
+++ b/chromium/gpu/config/BUILD.gn
@@ -6,12 +6,6 @@ import("//build/config/chrome_build.gni")
import("//build/config/chromecast_build.gni")
import("//build/config/ui.gni")
-declare_args() {
- # Use the PCI lib to collect GPU information on Linux.
- use_libpci = is_linux && (!is_chromecast || is_cast_desktop_build) &&
- (use_x11 || use_ozone)
-}
-
group("config") {
if (is_component_build) {
public_deps = [
@@ -24,6 +18,46 @@ group("config") {
}
}
+# This needs to be a small target, because it links into both chrome.exe and
+# chrome.dll targets. If this target grows, that will lead to a size regression.
+# See https://crbug.com/703622
+source_set("crash_keys") {
+ sources = [
+ "gpu_crash_keys.cc",
+ "gpu_crash_keys.h",
+ ]
+}
+
+process_json_outputs = [
+ "$target_gen_dir/gpu_driver_bug_list_arrays_and_structs_autogen.h",
+ "$target_gen_dir/gpu_driver_bug_list_autogen.cc",
+ "$target_gen_dir/gpu_driver_bug_list_autogen.h",
+ "$target_gen_dir/gpu_driver_bug_list_exceptions_autogen.h",
+ "$target_gen_dir/software_rendering_list_arrays_and_structs_autogen.h",
+ "$target_gen_dir/software_rendering_list_autogen.cc",
+ "$target_gen_dir/software_rendering_list_autogen.h",
+ "$target_gen_dir/software_rendering_list_exceptions_autogen.h",
+]
+
+action("process_json") {
+ script = "process_json.py"
+
+ inputs = [
+ "gpu_driver_bug_list.json",
+ "gpu_driver_bug_workaround_type.h",
+ "gpu_feature_type.h",
+ "software_rendering_list.json",
+ ]
+
+ outputs = process_json_outputs
+
+ args = [
+ "--output-dir",
+ rebase_path("$target_gen_dir", root_build_dir),
+ "--skip-testing-data",
+ ]
+}
+
source_set("config_sources") {
# External code should depend on this via //gpu/config above rather than
# depending on this directly or the component build will break.
@@ -36,10 +70,8 @@ source_set("config_sources") {
"gpu_blacklist.h",
"gpu_control_list.cc",
"gpu_control_list.h",
- "gpu_control_list_jsons.h",
"gpu_driver_bug_list.cc",
"gpu_driver_bug_list.h",
- "gpu_driver_bug_list_json.cc",
"gpu_driver_bug_workaround_type.h",
"gpu_driver_bug_workarounds.cc",
"gpu_driver_bug_workarounds.h",
@@ -55,11 +87,8 @@ source_set("config_sources") {
"gpu_info_collector.h",
"gpu_info_collector_android.cc",
"gpu_info_collector_linux.cc",
- "gpu_info_collector_linux.h",
"gpu_info_collector_mac.mm",
- "gpu_info_collector_ozone.cc",
"gpu_info_collector_win.cc",
- "gpu_info_collector_x11.cc",
"gpu_switches.cc",
"gpu_switches.h",
"gpu_test_config.cc",
@@ -68,9 +97,10 @@ source_set("config_sources") {
"gpu_test_expectations_parser.h",
"gpu_util.cc",
"gpu_util.h",
- "software_rendering_list_json.cc",
]
+ sources += process_json_outputs
+
configs += [
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
"//build/config/compiler:no_size_t_to_int_warning",
@@ -78,6 +108,8 @@ source_set("config_sources") {
]
deps = [
+ ":crash_keys",
+ ":process_json",
"//base",
"//third_party/re2",
"//ui/gl",
@@ -88,7 +120,6 @@ source_set("config_sources") {
include_dirs = [ "//third_party/mesa/src/include" ]
if (is_win) {
- deps += [ "//third_party/libxml" ]
libs = [
"dxguid.lib",
"setupapi.lib",
@@ -101,29 +132,7 @@ source_set("config_sources") {
]
}
}
- if (is_mac) {
- libs = [
- "IOKit.framework",
- "CoreFoundation.framework",
- ]
- }
- if (use_libpci) {
- defines = [ "USE_LIBPCI=1" ]
- deps += [ "//build/linux/libpci" ]
- }
- if (is_linux && use_x11) {
- configs += [
- "//build/config/linux:x11",
- "//build/config/linux:xext",
- ]
- deps += [
- "//third_party/libXNVCtrl",
- "//ui/gfx/x",
- ]
- } else {
- sources -= [ "gpu_info_collector_x11.cc" ]
- }
- if (!use_ozone) {
- sources -= [ "gpu_info_collector_ozone.cc" ]
+ if (is_linux || is_mac) {
+ deps += [ "//third_party/angle:angle_gpu_info_util" ]
}
}
diff --git a/chromium/gpu/config/DEPS b/chromium/gpu/config/DEPS
deleted file mode 100644
index 39b325a88df..00000000000
--- a/chromium/gpu/config/DEPS
+++ /dev/null
@@ -1,4 +0,0 @@
-include_rules = [
- "+third_party/libxml", # For parsing WinSAT results files.
- "+third_party/libXNVCtrl", # For NV driver version query.
-]
diff --git a/chromium/gpu/config/OWNERS b/chromium/gpu/config/OWNERS
index 4b8dfc90e4f..1a6650fcdd5 100644
--- a/chromium/gpu/config/OWNERS
+++ b/chromium/gpu/config/OWNERS
@@ -3,4 +3,4 @@ kbr@chromium.org
piman@chromium.org
zmo@chromium.org
-# COMPONENT: Internals>GPU
+# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/config/gpu_blacklist.cc b/chromium/gpu/config/gpu_blacklist.cc
index 883cdd1d806..7d5d87da5bc 100644
--- a/chromium/gpu/config/gpu_blacklist.cc
+++ b/chromium/gpu/config/gpu_blacklist.cc
@@ -5,44 +5,46 @@
#include "gpu/config/gpu_blacklist.h"
#include "gpu/config/gpu_feature_type.h"
+#include "gpu/config/software_rendering_list_autogen.h"
namespace gpu {
-GpuBlacklist::GpuBlacklist()
- : GpuControlList() {
-}
+GpuBlacklist::GpuBlacklist(const GpuControlListData& data)
+ : GpuControlList(data) {}
GpuBlacklist::~GpuBlacklist() {
}
// static
-GpuBlacklist* GpuBlacklist::Create() {
- GpuBlacklist* list = new GpuBlacklist();
+std::unique_ptr<GpuBlacklist> GpuBlacklist::Create() {
+ GpuControlListData data(kSoftwareRenderingListVersion,
+ kSoftwareRenderingListEntryCount,
+ kSoftwareRenderingListEntries);
+ return Create(data);
+}
+
+// static
+std::unique_ptr<GpuBlacklist> GpuBlacklist::Create(
+ const GpuControlListData& data) {
+ std::unique_ptr<GpuBlacklist> list(new GpuBlacklist(data));
list->AddSupportedFeature("accelerated_2d_canvas",
GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS);
list->AddSupportedFeature("gpu_compositing",
GPU_FEATURE_TYPE_GPU_COMPOSITING);
- list->AddSupportedFeature("webgl",
- GPU_FEATURE_TYPE_WEBGL);
- list->AddSupportedFeature("flash_3d",
- GPU_FEATURE_TYPE_FLASH3D);
- list->AddSupportedFeature("flash_stage3d",
- GPU_FEATURE_TYPE_FLASH_STAGE3D);
+ list->AddSupportedFeature("accelerated_webgl",
+ GPU_FEATURE_TYPE_ACCELERATED_WEBGL);
+ list->AddSupportedFeature("flash3d", GPU_FEATURE_TYPE_FLASH3D);
+ list->AddSupportedFeature("flash_stage3d", GPU_FEATURE_TYPE_FLASH_STAGE3D);
list->AddSupportedFeature("flash_stage3d_baseline",
GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE);
list->AddSupportedFeature("accelerated_video_decode",
GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE);
list->AddSupportedFeature("accelerated_video_encode",
GPU_FEATURE_TYPE_ACCELERATED_VIDEO_ENCODE);
- list->AddSupportedFeature("panel_fitting",
- GPU_FEATURE_TYPE_PANEL_FITTING);
+ list->AddSupportedFeature("panel_fitting", GPU_FEATURE_TYPE_PANEL_FITTING);
list->AddSupportedFeature("gpu_rasterization",
GPU_FEATURE_TYPE_GPU_RASTERIZATION);
- list->AddSupportedFeature("accelerated_vpx_decode",
- GPU_FEATURE_TYPE_ACCELERATED_VPX_DECODE);
- list->AddSupportedFeature("webgl2",
- GPU_FEATURE_TYPE_WEBGL2);
- list->set_supports_feature_type_all(true);
+ list->AddSupportedFeature("webgl2", GPU_FEATURE_TYPE_WEBGL2);
return list;
}
diff --git a/chromium/gpu/config/gpu_blacklist.h b/chromium/gpu/config/gpu_blacklist.h
index 79535074992..a04e1bdbbbb 100644
--- a/chromium/gpu/config/gpu_blacklist.h
+++ b/chromium/gpu/config/gpu_blacklist.h
@@ -5,7 +5,7 @@
#ifndef GPU_CONFIG_GPU_BLACKLIST_H_
#define GPU_CONFIG_GPU_BLACKLIST_H_
-#include <string>
+#include <memory>
#include "base/macros.h"
#include "gpu/config/gpu_control_list.h"
@@ -16,10 +16,11 @@ class GPU_EXPORT GpuBlacklist : public GpuControlList {
public:
~GpuBlacklist() override;
- static GpuBlacklist* Create();
+ static std::unique_ptr<GpuBlacklist> Create();
+ static std::unique_ptr<GpuBlacklist> Create(const GpuControlListData& data);
private:
- GpuBlacklist();
+ explicit GpuBlacklist(const GpuControlListData& data);
DISALLOW_COPY_AND_ASSIGN(GpuBlacklist);
};
diff --git a/chromium/gpu/config/gpu_blacklist_unittest.cc b/chromium/gpu/config/gpu_blacklist_unittest.cc
index f156b6cece4..aeb70a8560b 100644
--- a/chromium/gpu/config/gpu_blacklist_unittest.cc
+++ b/chromium/gpu/config/gpu_blacklist_unittest.cc
@@ -3,55 +3,54 @@
// found in the LICENSE file.
#include "gpu/config/gpu_blacklist.h"
-
-#include <memory>
-
-#include "gpu/config/gpu_control_list_jsons.h"
#include "gpu/config/gpu_feature_type.h"
#include "gpu/config/gpu_info.h"
#include "testing/gtest/include/gtest/gtest.h"
-const char kOsVersion[] = "10.6.4";
-
namespace gpu {
class GpuBlacklistTest : public testing::Test {
public:
GpuBlacklistTest() { }
-
~GpuBlacklistTest() override {}
const GPUInfo& gpu_info() const {
return gpu_info_;
}
- void RunFeatureTest(const std::string& feature_name,
- GpuFeatureType feature_type) {
- const std::string json =
- "{\n"
- " \"name\": \"gpu blacklist\",\n"
- " \"version\": \"0.1\",\n"
- " \"entries\": [\n"
- " {\n"
- " \"id\": 1,\n"
- " \"os\": {\n"
- " \"type\": \"macosx\"\n"
- " },\n"
- " \"vendor_id\": \"0x10de\",\n"
- " \"device_id\": [\"0x0640\"],\n"
- " \"features\": [\n"
- " \"" +
- feature_name +
- "\"\n"
- " ]\n"
- " }\n"
- " ]\n"
- "}";
-
- std::unique_ptr<GpuBlacklist> blacklist(GpuBlacklist::Create());
- EXPECT_TRUE(blacklist->LoadList(json, GpuBlacklist::kAllOs));
- std::set<int> type = blacklist->MakeDecision(
- GpuBlacklist::kOsMacosx, kOsVersion, gpu_info());
+ void RunFeatureTest(GpuFeatureType feature_type) {
+ const int kFeatureListForEntry1[1] = {feature_type};
+ const uint32_t kDeviceIDsForEntry1[1] = {0x0640};
+ const GpuControlList::Entry kTestEntries[1] = {{
+ 1, // id
+ "Test entry", // description
+ 1, // features size
+ kFeatureListForEntry1, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x10de, // vendor_id
+ 1, // DeviceIDs size
+ kDeviceIDsForEntry1, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ }};
+ GpuControlListData data("1.0", 1, kTestEntries);
+ std::unique_ptr<GpuBlacklist> blacklist = GpuBlacklist::Create(data);
+ std::set<int> type =
+ blacklist->MakeDecision(GpuBlacklist::kOsMacosx, "10.12.3", gpu_info());
EXPECT_EQ(1u, type.size());
EXPECT_EQ(1u, type.count(feature_type));
}
@@ -69,76 +68,43 @@ class GpuBlacklistTest : public testing::Test {
gpu_info_.gl_renderer = "NVIDIA GeForce GT 120 OpenGL Engine";
}
- void TearDown() override {}
-
private:
GPUInfo gpu_info_;
};
-TEST_F(GpuBlacklistTest, CurrentBlacklistValidation) {
- std::unique_ptr<GpuBlacklist> blacklist(GpuBlacklist::Create());
- EXPECT_TRUE(blacklist->LoadList(
- kSoftwareRenderingListJson, GpuBlacklist::kAllOs));
-}
-
-TEST_F(GpuBlacklistTest, DuplicatedIDValidation) {
- std::unique_ptr<GpuBlacklist> blacklist(GpuBlacklist::Create());
- EXPECT_TRUE(blacklist->LoadList(
- kSoftwareRenderingListJson, GpuBlacklist::kAllOs));
- EXPECT_FALSE(blacklist->has_duplicated_entry_id());
-}
-
-#define GPU_BLACKLIST_FEATURE_TEST(test_name, feature_name, feature_type) \
-TEST_F(GpuBlacklistTest, test_name) { \
- RunFeatureTest(feature_name, feature_type); \
-}
+#define GPU_BLACKLIST_FEATURE_TEST(test_name, feature_type) \
+ TEST_F(GpuBlacklistTest, test_name) { RunFeatureTest(feature_type); }
GPU_BLACKLIST_FEATURE_TEST(Accelerated2DCanvas,
- "accelerated_2d_canvas",
GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS)
GPU_BLACKLIST_FEATURE_TEST(GpuCompositing,
- "gpu_compositing",
GPU_FEATURE_TYPE_GPU_COMPOSITING)
-GPU_BLACKLIST_FEATURE_TEST(WebGL,
- "webgl",
- GPU_FEATURE_TYPE_WEBGL)
+GPU_BLACKLIST_FEATURE_TEST(AcceleratedWebGL, GPU_FEATURE_TYPE_ACCELERATED_WEBGL)
GPU_BLACKLIST_FEATURE_TEST(Flash3D,
- "flash_3d",
GPU_FEATURE_TYPE_FLASH3D)
GPU_BLACKLIST_FEATURE_TEST(FlashStage3D,
- "flash_stage3d",
GPU_FEATURE_TYPE_FLASH_STAGE3D)
GPU_BLACKLIST_FEATURE_TEST(FlashStage3DBaseline,
- "flash_stage3d_baseline",
GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE)
GPU_BLACKLIST_FEATURE_TEST(AcceleratedVideoDecode,
- "accelerated_video_decode",
GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE)
GPU_BLACKLIST_FEATURE_TEST(AcceleratedVideoEncode,
- "accelerated_video_encode",
GPU_FEATURE_TYPE_ACCELERATED_VIDEO_ENCODE)
GPU_BLACKLIST_FEATURE_TEST(PanelFitting,
- "panel_fitting",
GPU_FEATURE_TYPE_PANEL_FITTING)
GPU_BLACKLIST_FEATURE_TEST(GpuRasterization,
- "gpu_rasterization",
GPU_FEATURE_TYPE_GPU_RASTERIZATION)
-GPU_BLACKLIST_FEATURE_TEST(AcceleratedVpxDecode,
- "accelerated_vpx_decode",
- GPU_FEATURE_TYPE_ACCELERATED_VPX_DECODE)
-
GPU_BLACKLIST_FEATURE_TEST(WebGL2,
- "webgl2",
GPU_FEATURE_TYPE_WEBGL2)
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list.cc b/chromium/gpu/config/gpu_control_list.cc
index a7850406f25..334ea2a1390 100644
--- a/chromium/gpu/config/gpu_control_list.cc
+++ b/chromium/gpu/config/gpu_control_list.cc
@@ -4,13 +4,6 @@
#include "gpu/config/gpu_control_list.h"
-#include <stddef.h>
-#include <stdint.h>
-
-#include <utility>
-
-#include "base/cpu.h"
-#include "base/json/json_reader.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
@@ -18,7 +11,6 @@
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "gpu/config/gpu_info.h"
-#include "gpu/config/gpu_util.h"
#include "third_party/re2/src/re2/re2.h"
namespace gpu {
@@ -104,93 +96,53 @@ bool StringMismatch(const std::string& input, const std::string& pattern) {
return !RE2::FullMatch(input, pattern);
}
-const char kMultiGpuStyleStringAMDSwitchable[] = "amd_switchable";
-const char kMultiGpuStyleStringAMDSwitchableDiscrete[] =
- "amd_switchable_discrete";
-const char kMultiGpuStyleStringAMDSwitchableIntegrated[] =
- "amd_switchable_integrated";
-const char kMultiGpuStyleStringOptimus[] = "optimus";
-
-const char kMultiGpuCategoryStringPrimary[] = "primary";
-const char kMultiGpuCategoryStringSecondary[] = "secondary";
-const char kMultiGpuCategoryStringActive[] = "active";
-const char kMultiGpuCategoryStringAny[] = "any";
-
-const char kGLTypeStringGL[] = "gl";
-const char kGLTypeStringGLES[] = "gles";
-const char kGLTypeStringANGLE[] = "angle";
-
-const char kVersionStyleStringNumerical[] = "numerical";
-const char kVersionStyleStringLexical[] = "lexical";
-
-const char kOp[] = "op";
-
-} // namespace
-
-GpuControlList::VersionInfo::VersionInfo(
- const std::string& version_op,
- const std::string& version_style,
- const std::string& version_string,
- const std::string& version_string2)
- : version_style_(kVersionStyleNumerical) {
- op_ = StringToNumericOp(version_op);
- if (op_ == kUnknown || op_ == kAny)
- return;
- version_style_ = StringToVersionStyle(version_style);
- if (!ProcessVersionString(version_string, '.', &version_)) {
- op_ = kUnknown;
- return;
- }
- if (op_ == kBetween) {
- if (!ProcessVersionString(version_string2, '.', &version2_))
- op_ = kUnknown;
- }
-}
-
-GpuControlList::VersionInfo::~VersionInfo() {
+bool StringMismatch(const std::string& input, const char* pattern) {
+ if (!pattern)
+ return false;
+ std::string pattern_string(pattern);
+ return StringMismatch(input, pattern_string);
}
-bool GpuControlList::VersionInfo::Contains(
- const std::string& version_string) const {
- return Contains(version_string, '.');
-}
+} // namespace
-bool GpuControlList::VersionInfo::Contains(
- const std::string& version_string, char splitter) const {
- if (op_ == kUnknown)
+bool GpuControlList::Version::Contains(const std::string& version_string,
+ char splitter) const {
+ if (op == kUnknown)
return false;
- if (op_ == kAny)
+ if (op == kAny)
return true;
std::vector<std::string> version;
if (!ProcessVersionString(version_string, splitter, &version))
return false;
- int relation = Compare(version, version_, version_style_);
- if (op_ == kEQ)
- return (relation == 0);
- else if (op_ == kLT)
- return (relation < 0);
- else if (op_ == kLE)
- return (relation <= 0);
- else if (op_ == kGT)
- return (relation > 0);
- else if (op_ == kGE)
- return (relation >= 0);
- // op_ == kBetween
+ std::vector<std::string> ref_version;
+ bool valid = ProcessVersionString(value1, '.', &ref_version);
+ DCHECK(valid);
+ int relation = Version::Compare(version, ref_version, style);
+ switch (op) {
+ case kEQ:
+ return (relation == 0);
+ case kLT:
+ return (relation < 0);
+ case kLE:
+ return (relation <= 0);
+ case kGT:
+ return (relation > 0);
+ case kGE:
+ return (relation >= 0);
+ default:
+ break;
+ }
+ DCHECK_EQ(kBetween, op);
if (relation < 0)
return false;
- return Compare(version, version2_, version_style_) <= 0;
-}
-
-bool GpuControlList::VersionInfo::IsValid() const {
- return (op_ != kUnknown && version_style_ != kVersionStyleUnknown);
-}
-
-bool GpuControlList::VersionInfo::IsLexical() const {
- return version_style_ == kVersionStyleLexical;
+ ref_version.clear();
+ valid = ProcessVersionString(value2, '.', &ref_version);
+ DCHECK(valid);
+ return Compare(version, ref_version, style) <= 0;
}
// static
-int GpuControlList::VersionInfo::Compare(
+int GpuControlList::Version::Compare(
const std::vector<std::string>& version,
const std::vector<std::string>& version_ref,
VersionStyle version_style) {
@@ -211,976 +163,47 @@ int GpuControlList::VersionInfo::Compare(
return 0;
}
-// static
-GpuControlList::VersionInfo::VersionStyle
-GpuControlList::VersionInfo::StringToVersionStyle(
- const std::string& version_style) {
- if (version_style.empty() || version_style == kVersionStyleStringNumerical)
- return kVersionStyleNumerical;
- if (version_style == kVersionStyleStringLexical)
- return kVersionStyleLexical;
- return kVersionStyleUnknown;
-}
-
-GpuControlList::OsInfo::OsInfo(const std::string& os,
- const std::string& version_op,
- const std::string& version_string,
- const std::string& version_string2) {
- type_ = StringToOsType(os);
- if (type_ != kOsUnknown) {
- version_info_.reset(new VersionInfo(
- version_op, std::string(), version_string, version_string2));
- }
-}
-
-GpuControlList::OsInfo::~OsInfo() {}
-
-bool GpuControlList::OsInfo::Contains(
- OsType type, const std::string& version) const {
- if (!IsValid())
+bool GpuControlList::More::GLVersionInfoMismatch(
+ const std::string& gl_version_string) const {
+ if (gl_version_string.empty())
return false;
- if (type_ != type && type_ != kOsAny)
+ if (!gl_version.IsSpecified() && gl_type == kGLTypeNone)
return false;
- std::string processed_version;
- size_t pos = version.find_first_not_of("0123456789.");
- if (pos != std::string::npos)
- processed_version = version.substr(0, pos);
- else
- processed_version = version;
-
- return version_info_->Contains(processed_version);
-}
-
-bool GpuControlList::OsInfo::IsValid() const {
- return type_ != kOsUnknown && version_info_->IsValid();
-}
-
-GpuControlList::OsType GpuControlList::OsInfo::type() const {
- return type_;
-}
-
-GpuControlList::OsType GpuControlList::OsInfo::StringToOsType(
- const std::string& os) {
- if (os == "win")
- return kOsWin;
- else if (os == "macosx")
- return kOsMacosx;
- else if (os == "android")
- return kOsAndroid;
- else if (os == "linux")
- return kOsLinux;
- else if (os == "chromeos")
- return kOsChromeOS;
- else if (os == "any")
- return kOsAny;
- return kOsUnknown;
-}
-
-GpuControlList::FloatInfo::FloatInfo(const std::string& float_op,
- const std::string& float_value,
- const std::string& float_value2)
- : op_(kUnknown),
- value_(0.f),
- value2_(0.f) {
- op_ = StringToNumericOp(float_op);
- if (op_ == kAny)
- return;
- double dvalue = 0;
- if (!base::StringToDouble(float_value, &dvalue)) {
- op_ = kUnknown;
- return;
- }
- value_ = static_cast<float>(dvalue);
- if (op_ == kBetween) {
- if (!base::StringToDouble(float_value2, &dvalue)) {
- op_ = kUnknown;
- return;
- }
- value2_ = static_cast<float>(dvalue);
- }
-}
-
-bool GpuControlList::FloatInfo::Contains(float value) const {
- if (op_ == kUnknown)
- return false;
- if (op_ == kAny)
- return true;
- if (op_ == kEQ)
- return (value == value_);
- if (op_ == kLT)
- return (value < value_);
- if (op_ == kLE)
- return (value <= value_);
- if (op_ == kGT)
- return (value > value_);
- if (op_ == kGE)
- return (value >= value_);
- DCHECK(op_ == kBetween);
- return ((value_ <= value && value <= value2_) ||
- (value2_ <= value && value <= value_));
-}
-
-bool GpuControlList::FloatInfo::IsValid() const {
- return op_ != kUnknown;
-}
-
-GpuControlList::IntInfo::IntInfo(const std::string& int_op,
- const std::string& int_value,
- const std::string& int_value2)
- : op_(kUnknown),
- value_(0),
- value2_(0) {
- op_ = StringToNumericOp(int_op);
- if (op_ == kAny)
- return;
- if (!base::StringToInt(int_value, &value_)) {
- op_ = kUnknown;
- return;
- }
- if (op_ == kBetween &&
- !base::StringToInt(int_value2, &value2_))
- op_ = kUnknown;
-}
-
-bool GpuControlList::IntInfo::Contains(int value) const {
- if (op_ == kUnknown)
- return false;
- if (op_ == kAny)
- return true;
- if (op_ == kEQ)
- return (value == value_);
- if (op_ == kLT)
- return (value < value_);
- if (op_ == kLE)
- return (value <= value_);
- if (op_ == kGT)
- return (value > value_);
- if (op_ == kGE)
- return (value >= value_);
- DCHECK(op_ == kBetween);
- return ((value_ <= value && value <= value2_) ||
- (value2_ <= value && value <= value_));
-}
-
-bool GpuControlList::IntInfo::IsValid() const {
- return op_ != kUnknown;
-}
-
-GpuControlList::BoolInfo::BoolInfo(bool value) : value_(value) {}
-
-bool GpuControlList::BoolInfo::Contains(bool value) const {
- return value_ == value;
-}
-
-// static
-GpuControlList::ScopedGpuControlListEntry
-GpuControlList::GpuControlListEntry::GetEntryFromValue(
- const base::DictionaryValue* value, bool top_level,
- const FeatureMap& feature_map,
- bool supports_feature_type_all) {
- DCHECK(value);
- ScopedGpuControlListEntry entry(new GpuControlListEntry());
-
- size_t dictionary_entry_count = 0;
-
- if (top_level) {
- uint32_t id;
- if (!value->GetInteger("id", reinterpret_cast<int*>(&id)) ||
- !entry->SetId(id)) {
- LOG(WARNING) << "Malformed id entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
-
- bool disabled;
- if (value->GetBoolean("disabled", &disabled)) {
- entry->SetDisabled(disabled);
- dictionary_entry_count++;
- }
- }
-
- std::string description;
- if (value->GetString("description", &description)) {
- entry->description_ = description;
- dictionary_entry_count++;
- } else {
- entry->description_ = "The GPU is unavailable for an unexplained reason.";
- }
-
- const base::ListValue* cr_bugs;
- if (value->GetList("cr_bugs", &cr_bugs)) {
- for (size_t i = 0; i < cr_bugs->GetSize(); ++i) {
- int bug_id;
- if (cr_bugs->GetInteger(i, &bug_id)) {
- entry->cr_bugs_.push_back(bug_id);
- } else {
- LOG(WARNING) << "Malformed cr_bugs entry " << entry->id();
- return NULL;
- }
- }
- dictionary_entry_count++;
- }
-
- const base::ListValue* webkit_bugs;
- if (value->GetList("webkit_bugs", &webkit_bugs)) {
- for (size_t i = 0; i < webkit_bugs->GetSize(); ++i) {
- int bug_id;
- if (webkit_bugs->GetInteger(i, &bug_id)) {
- entry->webkit_bugs_.push_back(bug_id);
- } else {
- LOG(WARNING) << "Malformed webkit_bugs entry " << entry->id();
- return NULL;
- }
- }
- dictionary_entry_count++;
- }
-
- const base::ListValue* disabled_extensions;
- if (value->GetList("disabled_extensions", &disabled_extensions)) {
- for (size_t i = 0; i < disabled_extensions->GetSize(); ++i) {
- std::string disabled_extension;
- if (disabled_extensions->GetString(i, &disabled_extension)) {
- entry->disabled_extensions_.push_back(disabled_extension);
- } else {
- LOG(WARNING) << "Malformed disabled_extensions entry " << entry->id();
- return NULL;
- }
- }
- dictionary_entry_count++;
- }
-
- const base::DictionaryValue* os_value = NULL;
- if (value->GetDictionary("os", &os_value)) {
- std::string os_type;
- std::string os_version_op = "any";
- std::string os_version_string;
- std::string os_version_string2;
- os_value->GetString("type", &os_type);
- const base::DictionaryValue* os_version_value = NULL;
- if (os_value->GetDictionary("version", &os_version_value)) {
- os_version_value->GetString(kOp, &os_version_op);
- os_version_value->GetString("value", &os_version_string);
- os_version_value->GetString("value2", &os_version_string2);
- }
- if (!entry->SetOsInfo(os_type, os_version_op, os_version_string,
- os_version_string2)) {
- LOG(WARNING) << "Malformed os entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- std::string vendor_id;
- if (value->GetString("vendor_id", &vendor_id)) {
- if (!entry->SetVendorId(vendor_id)) {
- LOG(WARNING) << "Malformed vendor_id entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- const base::ListValue* device_id_list;
- if (value->GetList("device_id", &device_id_list)) {
- for (size_t i = 0; i < device_id_list->GetSize(); ++i) {
- std::string device_id;
- if (!device_id_list->GetString(i, &device_id) ||
- !entry->AddDeviceId(device_id)) {
- LOG(WARNING) << "Malformed device_id entry " << entry->id();
- return NULL;
- }
- }
- dictionary_entry_count++;
- }
-
- std::string multi_gpu_style;
- if (value->GetString("multi_gpu_style", &multi_gpu_style)) {
- if (!entry->SetMultiGpuStyle(multi_gpu_style)) {
- LOG(WARNING) << "Malformed multi_gpu_style entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- std::string multi_gpu_category;
- if (value->GetString("multi_gpu_category", &multi_gpu_category)) {
- if (!entry->SetMultiGpuCategory(multi_gpu_category)) {
- LOG(WARNING) << "Malformed multi_gpu_category entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- std::string driver_vendor_value;
- if (value->GetString("driver_vendor", &driver_vendor_value)) {
- if (!entry->SetDriverVendorInfo(driver_vendor_value)) {
- LOG(WARNING) << "Malformed driver_vendor entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- const base::DictionaryValue* driver_version_value = NULL;
- if (value->GetDictionary("driver_version", &driver_version_value)) {
- std::string driver_version_op = "any";
- std::string driver_version_style;
- std::string driver_version_string;
- std::string driver_version_string2;
- driver_version_value->GetString(kOp, &driver_version_op);
- driver_version_value->GetString("style", &driver_version_style);
- driver_version_value->GetString("value", &driver_version_string);
- driver_version_value->GetString("value2", &driver_version_string2);
- if (!entry->SetDriverVersionInfo(driver_version_op,
- driver_version_style,
- driver_version_string,
- driver_version_string2)) {
- LOG(WARNING) << "Malformed driver_version entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- const base::DictionaryValue* driver_date_value = NULL;
- if (value->GetDictionary("driver_date", &driver_date_value)) {
- std::string driver_date_op = "any";
- std::string driver_date_string;
- std::string driver_date_string2;
- driver_date_value->GetString(kOp, &driver_date_op);
- driver_date_value->GetString("value", &driver_date_string);
- driver_date_value->GetString("value2", &driver_date_string2);
- if (!entry->SetDriverDateInfo(driver_date_op, driver_date_string,
- driver_date_string2)) {
- LOG(WARNING) << "Malformed driver_date entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- std::string gl_type;
- if (value->GetString("gl_type", &gl_type)) {
- if (!entry->SetGLType(gl_type)) {
- LOG(WARNING) << "Malformed gl_type entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- const base::DictionaryValue* gl_version_value = NULL;
- if (value->GetDictionary("gl_version", &gl_version_value)) {
- std::string version_op = "any";
- std::string version_string;
- std::string version_string2;
- gl_version_value->GetString(kOp, &version_op);
- gl_version_value->GetString("value", &version_string);
- gl_version_value->GetString("value2", &version_string2);
- if (!entry->SetGLVersionInfo(
- version_op, version_string, version_string2)) {
- LOG(WARNING) << "Malformed gl_version entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- std::string gl_version_string_value;
- if (value->GetString("gl_version_string", &gl_version_string_value)) {
- if (!entry->SetGLVersionStringInfo(gl_version_string_value)) {
- LOG(WARNING) << "Malformed gl_version_string entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- std::string gl_vendor_value;
- if (value->GetString("gl_vendor", &gl_vendor_value)) {
- if (!entry->SetGLVendorInfo(gl_vendor_value)) {
- LOG(WARNING) << "Malformed gl_vendor entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- std::string gl_renderer_value;
- if (value->GetString("gl_renderer", &gl_renderer_value)) {
- if (!entry->SetGLRendererInfo(gl_renderer_value)) {
- LOG(WARNING) << "Malformed gl_renderer entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- std::string gl_extensions_value;
- if (value->GetString("gl_extensions", &gl_extensions_value)) {
- if (!entry->SetGLExtensionsInfo(gl_extensions_value)) {
- LOG(WARNING) << "Malformed gl_extensions entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- const base::DictionaryValue* gl_reset_notification_strategy_value = NULL;
- if (value->GetDictionary("gl_reset_notification_strategy",
- &gl_reset_notification_strategy_value)) {
- std::string op;
- std::string int_value;
- std::string int_value2;
- gl_reset_notification_strategy_value->GetString(kOp, &op);
- gl_reset_notification_strategy_value->GetString("value", &int_value);
- gl_reset_notification_strategy_value->GetString("value2", &int_value2);
- if (!entry->SetGLResetNotificationStrategyInfo(
- op, int_value, int_value2)) {
- LOG(WARNING) << "Malformed gl_reset_notification_strategy entry "
- << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- std::string cpu_brand_value;
- if (value->GetString("cpu_info", &cpu_brand_value)) {
- if (!entry->SetCpuBrand(cpu_brand_value)) {
- LOG(WARNING) << "Malformed cpu_brand entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- const base::DictionaryValue* perf_graphics_value = NULL;
- if (value->GetDictionary("perf_graphics", &perf_graphics_value)) {
- std::string op;
- std::string float_value;
- std::string float_value2;
- perf_graphics_value->GetString(kOp, &op);
- perf_graphics_value->GetString("value", &float_value);
- perf_graphics_value->GetString("value2", &float_value2);
- if (!entry->SetPerfGraphicsInfo(op, float_value, float_value2)) {
- LOG(WARNING) << "Malformed perf_graphics entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- const base::DictionaryValue* perf_gaming_value = NULL;
- if (value->GetDictionary("perf_gaming", &perf_gaming_value)) {
- std::string op;
- std::string float_value;
- std::string float_value2;
- perf_gaming_value->GetString(kOp, &op);
- perf_gaming_value->GetString("value", &float_value);
- perf_gaming_value->GetString("value2", &float_value2);
- if (!entry->SetPerfGamingInfo(op, float_value, float_value2)) {
- LOG(WARNING) << "Malformed perf_gaming entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- const base::DictionaryValue* perf_overall_value = NULL;
- if (value->GetDictionary("perf_overall", &perf_overall_value)) {
- std::string op;
- std::string float_value;
- std::string float_value2;
- perf_overall_value->GetString(kOp, &op);
- perf_overall_value->GetString("value", &float_value);
- perf_overall_value->GetString("value2", &float_value2);
- if (!entry->SetPerfOverallInfo(op, float_value, float_value2)) {
- LOG(WARNING) << "Malformed perf_overall entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- const base::ListValue* machine_model_name_list;
- if (value->GetList("machine_model_name", &machine_model_name_list)) {
- for (size_t i = 0; i < machine_model_name_list->GetSize(); ++i) {
- std::string model_name;
- if (!machine_model_name_list->GetString(i, &model_name) ||
- !entry->AddMachineModelName(model_name)) {
- LOG(WARNING) << "Malformed machine_model_name entry " << entry->id();
- return NULL;
- }
- }
- dictionary_entry_count++;
- }
-
- const base::DictionaryValue* machine_model_version_value = NULL;
- if (value->GetDictionary(
- "machine_model_version", &machine_model_version_value)) {
- std::string version_op = "any";
- std::string version_string;
- std::string version_string2;
- machine_model_version_value->GetString(kOp, &version_op);
- machine_model_version_value->GetString("value", &version_string);
- machine_model_version_value->GetString("value2", &version_string2);
- if (!entry->SetMachineModelVersionInfo(
- version_op, version_string, version_string2)) {
- LOG(WARNING) << "Malformed machine_model_version entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- const base::DictionaryValue* gpu_count_value = NULL;
- if (value->GetDictionary("gpu_count", &gpu_count_value)) {
- std::string op;
- std::string int_value;
- std::string int_value2;
- gpu_count_value->GetString(kOp, &op);
- gpu_count_value->GetString("value", &int_value);
- gpu_count_value->GetString("value2", &int_value2);
- if (!entry->SetGpuCountInfo(op, int_value, int_value2)) {
- LOG(WARNING) << "Malformed gpu_count entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- bool direct_rendering;
- if (value->GetBoolean("direct_rendering", &direct_rendering)) {
- entry->SetDirectRenderingInfo(direct_rendering);
- dictionary_entry_count++;
- }
-
- bool in_process_gpu;
- if (value->GetBoolean("in_process_gpu", &in_process_gpu)) {
- entry->SetInProcessGPUInfo(in_process_gpu);
- dictionary_entry_count++;
- }
-
- const base::DictionaryValue* pixel_shader_version_value = NULL;
- if (value->GetDictionary("pixel_shader_version",
- &pixel_shader_version_value)) {
- std::string pixel_shader_version_op = "any";
- std::string pixel_shader_version_string;
- std::string pixel_shader_version_string2;
- pixel_shader_version_value->GetString(kOp, &pixel_shader_version_op);
- pixel_shader_version_value->GetString("value",
- &pixel_shader_version_string);
- pixel_shader_version_value->GetString("value2",
- &pixel_shader_version_string2);
- if (!entry->SetPixelShaderVersionInfo(pixel_shader_version_op,
- pixel_shader_version_string,
- pixel_shader_version_string2)) {
- LOG(WARNING) << "Malformed pixel shader version entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
-
- if (top_level) {
- const base::ListValue* feature_value = NULL;
- if (value->GetList("features", &feature_value)) {
- std::vector<std::string> feature_list;
- std::vector<std::string> feature_exception_list;
- for (size_t i = 0; i < feature_value->GetSize(); ++i) {
- std::string feature;
- const base::DictionaryValue* features_info_value = NULL;
- if (feature_value->GetString(i, &feature)) {
- feature_list.push_back(feature);
- } else if (feature_value->GetDictionary(i, &features_info_value)) {
- const base::ListValue* exception_list_value = NULL;
- if (features_info_value->size() > 1) {
- LOG(WARNING) << "Malformed feature entry " << entry->id();
- return NULL;
- }
- if (features_info_value->GetList("exceptions",
- &exception_list_value)) {
- for (size_t i = 0; i < exception_list_value->GetSize(); ++i) {
- std::string exception_feature;
- if (exception_list_value->GetString(i, &exception_feature)) {
- feature_exception_list.push_back(exception_feature);
- } else {
- LOG(WARNING) << "Malformed feature entry " << entry->id();
- return NULL;
- }
- }
- } else {
- LOG(WARNING) << "Malformed feature entry " << entry->id();
- return NULL;
- }
- } else {
- LOG(WARNING) << "Malformed feature entry " << entry->id();
- return NULL;
- }
- }
- if (!entry->SetFeatures(feature_list, feature_exception_list, feature_map,
- supports_feature_type_all)) {
- LOG(WARNING) << "Malformed feature entry " << entry->id();
- return NULL;
- }
- dictionary_entry_count++;
- }
- }
-
- if (top_level) {
- const base::ListValue* exception_list_value = NULL;
- if (value->GetList("exceptions", &exception_list_value)) {
- for (size_t i = 0; i < exception_list_value->GetSize(); ++i) {
- const base::DictionaryValue* exception_value = NULL;
- if (!exception_list_value->GetDictionary(i, &exception_value)) {
- LOG(WARNING) << "Malformed exceptions entry " << entry->id();
- return NULL;
- }
- ScopedGpuControlListEntry exception(GetEntryFromValue(
- exception_value, false, feature_map, supports_feature_type_all));
- if (exception.get() == NULL) {
- LOG(WARNING) << "Malformed exceptions entry " << entry->id();
- return NULL;
- }
- // Exception should inherit vendor_id from parent, otherwise if only
- // device_ids are specified in Exception, the info will be incomplete.
- if (exception->vendor_id_ == 0 && entry->vendor_id_ != 0)
- exception->vendor_id_ = entry->vendor_id_;
- entry->AddException(exception);
- }
- dictionary_entry_count++;
- }
- }
-
- if (value->size() != dictionary_entry_count) {
- LOG(WARNING) << "Entry with unknown fields " << entry->id();
- return NULL;
- }
-
- // If GL_VERSION is specified, but no info about whether it's GL or GLES,
- // we use the default for the platform. See GLType enum declaration.
- if (entry->gl_version_info_.get() != NULL && entry->gl_type_ == kGLTypeNone)
- entry->gl_type_ = GetDefaultGLType();
-
- return entry;
-}
-
-GpuControlList::GpuControlListEntry::GpuControlListEntry()
- : id_(0),
- disabled_(false),
- vendor_id_(0),
- multi_gpu_style_(kMultiGpuStyleNone),
- multi_gpu_category_(kMultiGpuCategoryActive),
- gl_type_(kGLTypeNone) {
-}
-
-GpuControlList::GpuControlListEntry::~GpuControlListEntry() { }
-
-bool GpuControlList::GpuControlListEntry::SetId(uint32_t id) {
- if (id != 0) {
- id_ = id;
- return true;
- }
- return false;
-}
-
-void GpuControlList::GpuControlListEntry::SetDisabled(bool disabled) {
- disabled_ = disabled;
-}
-
-bool GpuControlList::GpuControlListEntry::SetOsInfo(
- const std::string& os,
- const std::string& version_op,
- const std::string& version_string,
- const std::string& version_string2) {
- os_info_.reset(new OsInfo(os, version_op, version_string, version_string2));
- return os_info_->IsValid();
-}
-
-bool GpuControlList::GpuControlListEntry::SetVendorId(
- const std::string& vendor_id_string) {
- vendor_id_ = 0;
- return base::HexStringToUInt(vendor_id_string, &vendor_id_) &&
- vendor_id_ != 0;
-}
-
-bool GpuControlList::GpuControlListEntry::AddDeviceId(
- const std::string& device_id_string) {
- uint32_t device_id = 0;
- if (base::HexStringToUInt(device_id_string, &device_id) && device_id != 0) {
- device_id_list_.push_back(device_id);
- return true;
- }
- return false;
-}
-
-bool GpuControlList::GpuControlListEntry::SetMultiGpuStyle(
- const std::string& multi_gpu_style_string) {
- MultiGpuStyle style = StringToMultiGpuStyle(multi_gpu_style_string);
- if (style == kMultiGpuStyleNone)
- return false;
- multi_gpu_style_ = style;
- return true;
-}
-
-bool GpuControlList::GpuControlListEntry::SetMultiGpuCategory(
- const std::string& multi_gpu_category_string) {
- MultiGpuCategory category =
- StringToMultiGpuCategory(multi_gpu_category_string);
- if (category == kMultiGpuCategoryNone)
- return false;
- multi_gpu_category_ = category;
- return true;
-}
-
-bool GpuControlList::GpuControlListEntry::SetGLType(
- const std::string& gl_type_string) {
- GLType gl_type = StringToGLType(gl_type_string);
- if (gl_type == kGLTypeNone)
- return false;
- gl_type_ = gl_type;
- return true;
-}
-
-bool GpuControlList::GpuControlListEntry::SetDriverVendorInfo(
- const std::string& vendor_value) {
- driver_vendor_info_ = vendor_value;
- return !driver_vendor_info_.empty();
-}
-
-bool GpuControlList::GpuControlListEntry::SetDriverVersionInfo(
- const std::string& version_op,
- const std::string& version_style,
- const std::string& version_string,
- const std::string& version_string2) {
- driver_version_info_.reset(new VersionInfo(
- version_op, version_style, version_string, version_string2));
- return driver_version_info_->IsValid();
-}
-
-bool GpuControlList::GpuControlListEntry::SetDriverDateInfo(
- const std::string& date_op,
- const std::string& date_string,
- const std::string& date_string2) {
- driver_date_info_.reset(
- new VersionInfo(date_op, std::string(), date_string, date_string2));
- return driver_date_info_->IsValid();
-}
-
-bool GpuControlList::GpuControlListEntry::SetGLVersionInfo(
- const std::string& version_op,
- const std::string& version_string,
- const std::string& version_string2) {
- gl_version_info_.reset(new VersionInfo(
- version_op, std::string(), version_string, version_string2));
- return gl_version_info_->IsValid();
-}
-
-bool GpuControlList::GpuControlListEntry::SetGLVersionStringInfo(
- const std::string& version_string_value) {
- gl_version_string_info_ = version_string_value;
- return !gl_version_string_info_.empty();
-}
-
-bool GpuControlList::GpuControlListEntry::SetGLVendorInfo(
- const std::string& vendor_value) {
- gl_vendor_info_ = vendor_value;
- return !gl_vendor_info_.empty();
-}
-
-bool GpuControlList::GpuControlListEntry::SetGLRendererInfo(
- const std::string& renderer_value) {
- gl_renderer_info_ = renderer_value;
- return !gl_renderer_info_.empty();
-}
-
-bool GpuControlList::GpuControlListEntry::SetGLExtensionsInfo(
- const std::string& extensions_value) {
- gl_extensions_info_ = extensions_value;
- return !gl_extensions_info_.empty();
-}
-
-bool GpuControlList::GpuControlListEntry::SetGLResetNotificationStrategyInfo(
- const std::string& op,
- const std::string& int_string,
- const std::string& int_string2) {
- gl_reset_notification_strategy_info_.reset(
- new IntInfo(op, int_string, int_string2));
- return gl_reset_notification_strategy_info_->IsValid();
-}
-
-bool GpuControlList::GpuControlListEntry::SetCpuBrand(
- const std::string& cpu_value) {
- cpu_brand_ = cpu_value;
- return !cpu_brand_.empty();
-}
-
-bool GpuControlList::GpuControlListEntry::SetPerfGraphicsInfo(
- const std::string& op,
- const std::string& float_string,
- const std::string& float_string2) {
- perf_graphics_info_.reset(new FloatInfo(op, float_string, float_string2));
- return perf_graphics_info_->IsValid();
-}
-
-bool GpuControlList::GpuControlListEntry::SetPerfGamingInfo(
- const std::string& op,
- const std::string& float_string,
- const std::string& float_string2) {
- perf_gaming_info_.reset(new FloatInfo(op, float_string, float_string2));
- return perf_gaming_info_->IsValid();
-}
-
-bool GpuControlList::GpuControlListEntry::SetPerfOverallInfo(
- const std::string& op,
- const std::string& float_string,
- const std::string& float_string2) {
- perf_overall_info_.reset(new FloatInfo(op, float_string, float_string2));
- return perf_overall_info_->IsValid();
-}
-
-bool GpuControlList::GpuControlListEntry::AddMachineModelName(
- const std::string& model_name) {
- if (model_name.empty())
- return false;
- machine_model_name_list_.push_back(model_name);
- return true;
-}
-
-bool GpuControlList::GpuControlListEntry::SetMachineModelVersionInfo(
- const std::string& version_op,
- const std::string& version_string,
- const std::string& version_string2) {
- machine_model_version_info_.reset(new VersionInfo(
- version_op, std::string(), version_string, version_string2));
- return machine_model_version_info_->IsValid();
-}
-
-bool GpuControlList::GpuControlListEntry::SetGpuCountInfo(
- const std::string& op,
- const std::string& int_string,
- const std::string& int_string2) {
- gpu_count_info_.reset(new IntInfo(op, int_string, int_string2));
- return gpu_count_info_->IsValid();
-}
-
-void GpuControlList::GpuControlListEntry::SetDirectRenderingInfo(bool value) {
- direct_rendering_info_.reset(new BoolInfo(value));
-}
-
-void GpuControlList::GpuControlListEntry::SetInProcessGPUInfo(bool value) {
- in_process_gpu_info_.reset(new BoolInfo(value));
-}
-
-bool GpuControlList::GpuControlListEntry::SetFeatures(
- const std::vector<std::string>& feature_strings,
- const std::vector<std::string>& exception_strings,
- const FeatureMap& feature_map,
- bool supports_feature_type_all) {
- size_t size = feature_strings.size();
- if (size == 0)
- return false;
- features_.clear();
- for (size_t i = 0; i < size; ++i) {
- int feature = 0;
- if (supports_feature_type_all && feature_strings[i] == "all") {
- for (FeatureMap::const_iterator iter = feature_map.begin();
- iter != feature_map.end(); ++iter) {
- if (std::find(exception_strings.begin(), exception_strings.end(),
- iter->first) == exception_strings.end())
- features_.insert(iter->second);
- }
- continue;
- }
- if (!StringToFeature(feature_strings[i], &feature, feature_map)) {
- features_.clear();
- return false;
- }
- if (std::find(exception_strings.begin(), exception_strings.end(),
- feature_strings[i]) == exception_strings.end())
- features_.insert(feature);
- }
- return true;
-}
-
-bool GpuControlList::GpuControlListEntry::SetPixelShaderVersionInfo(
- const std::string& version_op,
- const std::string& version_string,
- const std::string& version_string2) {
- pixel_shader_version_info_.reset(new VersionInfo(
- version_op, std::string(), version_string, version_string2));
- return pixel_shader_version_info_->IsValid();
-}
-
-void GpuControlList::GpuControlListEntry::AddException(
- ScopedGpuControlListEntry exception) {
- exceptions_.push_back(exception);
-}
-
-bool GpuControlList::GpuControlListEntry::GLVersionInfoMismatch(
- const std::string& gl_version) const {
- if (gl_version.empty())
- return false;
-
- if (gl_version_info_.get() == NULL && gl_type_ == kGLTypeNone)
- return false;
-
std::vector<std::string> segments = base::SplitString(
- gl_version, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+ gl_version_string, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
std::string number;
- GLType gl_type = kGLTypeNone;
+ GLType target_gl_type = kGLTypeNone;
if (segments.size() > 2 &&
segments[0] == "OpenGL" && segments[1] == "ES") {
bool full_match = RE2::FullMatch(segments[2], "([\\d.]+).*", &number);
DCHECK(full_match);
- gl_type = kGLTypeGLES;
+ target_gl_type = kGLTypeGLES;
if (segments.size() > 3 &&
base::StartsWith(segments[3], "(ANGLE",
base::CompareCase::INSENSITIVE_ASCII)) {
- gl_type = kGLTypeANGLE;
+ target_gl_type = kGLTypeANGLE;
}
} else {
number = segments[0];
- gl_type = kGLTypeGL;
+ target_gl_type = kGLTypeGL;
}
- if (gl_type_ != kGLTypeNone && gl_type_ != gl_type)
+ GLType entry_gl_type = gl_type;
+ if (entry_gl_type == kGLTypeNone && gl_version.IsSpecified()) {
+ entry_gl_type = GetDefaultGLType();
+ }
+ if (entry_gl_type != kGLTypeNone && entry_gl_type != target_gl_type) {
return true;
- if (gl_version_info_.get() != NULL && !gl_version_info_->Contains(number))
+ }
+ if (gl_version.IsSpecified() && !gl_version.Contains(number)) {
return true;
+ }
return false;
}
// static
-GpuControlList::GpuControlListEntry::MultiGpuStyle
-GpuControlList::GpuControlListEntry::StringToMultiGpuStyle(
- const std::string& style) {
- if (style == kMultiGpuStyleStringOptimus)
- return kMultiGpuStyleOptimus;
- if (style == kMultiGpuStyleStringAMDSwitchable)
- return kMultiGpuStyleAMDSwitchable;
- if (style == kMultiGpuStyleStringAMDSwitchableIntegrated)
- return kMultiGpuStyleAMDSwitchableIntegrated;
- if (style == kMultiGpuStyleStringAMDSwitchableDiscrete)
- return kMultiGpuStyleAMDSwitchableDiscrete;
- return kMultiGpuStyleNone;
-}
-
-// static
-GpuControlList::GpuControlListEntry::MultiGpuCategory
-GpuControlList::GpuControlListEntry::StringToMultiGpuCategory(
- const std::string& category) {
- if (category == kMultiGpuCategoryStringPrimary)
- return kMultiGpuCategoryPrimary;
- if (category == kMultiGpuCategoryStringSecondary)
- return kMultiGpuCategorySecondary;
- if (category == kMultiGpuCategoryStringActive)
- return kMultiGpuCategoryActive;
- if (category == kMultiGpuCategoryStringAny)
- return kMultiGpuCategoryAny;
- return kMultiGpuCategoryNone;
-}
-
-// static
-GpuControlList::GpuControlListEntry::GLType
-GpuControlList::GpuControlListEntry::StringToGLType(
- const std::string& gl_type) {
- if (gl_type == kGLTypeStringGL)
- return kGLTypeGL;
- if (gl_type == kGLTypeStringGLES)
- return kGLTypeGLES;
- if (gl_type == kGLTypeStringANGLE)
- return kGLTypeANGLE;
- return kGLTypeNone;
-}
-
-// static
-GpuControlList::GpuControlListEntry::GLType
-GpuControlList::GpuControlListEntry::GetDefaultGLType() {
+GpuControlList::GLType GpuControlList::More::GetDefaultGLType() {
#if defined(OS_CHROMEOS)
return kGLTypeGL;
#elif defined(OS_LINUX) || defined(OS_OPENBSD)
@@ -1196,23 +219,105 @@ GpuControlList::GpuControlListEntry::GetDefaultGLType() {
#endif
}
-void GpuControlList::GpuControlListEntry::LogControlListMatch(
+void GpuControlList::Entry::LogControlListMatch(
const std::string& control_list_logging_name) const {
static const char kControlListMatchMessage[] =
"Control list match for rule #%u in %s.";
- VLOG(1) << base::StringPrintf(kControlListMatchMessage, id_,
+ VLOG(1) << base::StringPrintf(kControlListMatchMessage, id,
control_list_logging_name.c_str());
}
-bool GpuControlList::GpuControlListEntry::Contains(
- OsType os_type, const std::string& os_version,
- const GPUInfo& gpu_info) const {
- DCHECK(os_type != kOsAny);
- if (os_info_.get() != NULL && !os_info_->Contains(os_type, os_version))
+bool GpuControlList::DriverInfo::Contains(const GPUInfo& gpu_info) const {
+ if (StringMismatch(gpu_info.driver_vendor, driver_vendor)) {
+ return false;
+ }
+ if (driver_version.IsSpecified() && !gpu_info.driver_version.empty() &&
+ !driver_version.Contains(gpu_info.driver_version)) {
+ return false;
+ }
+ if (driver_date.IsSpecified() && !gpu_info.driver_date.empty() &&
+ !driver_date.Contains(gpu_info.driver_date, '-')) {
+ return false;
+ }
+ return true;
+}
+
+bool GpuControlList::GLStrings::Contains(const GPUInfo& gpu_info) const {
+ if (StringMismatch(gpu_info.gl_version, gl_version))
+ return false;
+ if (StringMismatch(gpu_info.gl_vendor, gl_vendor))
+ return false;
+ if (StringMismatch(gpu_info.gl_renderer, gl_renderer))
+ return false;
+ if (StringMismatch(gpu_info.gl_extensions, gl_extensions))
+ return false;
+ return true;
+}
+
+bool GpuControlList::MachineModelInfo::Contains(const GPUInfo& gpu_info) const {
+ if (machine_model_name_size > 0) {
+ if (gpu_info.machine_model_name.empty())
+ return false;
+ bool found_match = false;
+ for (size_t ii = 0; ii < machine_model_name_size; ++ii) {
+ if (RE2::FullMatch(gpu_info.machine_model_name,
+ machine_model_names[ii])) {
+ found_match = true;
+ break;
+ }
+ }
+ if (!found_match)
+ return false;
+ }
+ if (machine_model_version.IsSpecified() &&
+ (gpu_info.machine_model_version.empty() ||
+ !machine_model_version.Contains(gpu_info.machine_model_version))) {
+ return false;
+ }
+ return true;
+}
+
+bool GpuControlList::More::Contains(const GPUInfo& gpu_info) const {
+ if (GLVersionInfoMismatch(gpu_info.gl_version)) {
+ return false;
+ }
+ if (gl_reset_notification_strategy != 0 &&
+ gl_reset_notification_strategy !=
+ gpu_info.gl_reset_notification_strategy) {
+ return false;
+ }
+ if (gpu_count.IsSpecified()) {
+ size_t count = gpu_info.secondary_gpus.size() + 1;
+ if (!gpu_count.Contains(std::to_string(count))) {
+ return false;
+ }
+ }
+ if (!direct_rendering && gpu_info.direct_rendering) {
+ return false;
+ }
+ if (in_process_gpu && !gpu_info.in_process_gpu) {
+ return false;
+ }
+ if (pixel_shader_version.IsSpecified() &&
+ !pixel_shader_version.Contains(gpu_info.pixel_shader_version)) {
return false;
- if (vendor_id_ != 0) {
+ }
+ return true;
+}
+
+bool GpuControlList::Conditions::Contains(OsType target_os_type,
+ const std::string& target_os_version,
+ const GPUInfo& gpu_info) const {
+ DCHECK(target_os_type != kOsAny);
+ if (os_type != kOsAny) {
+ if (os_type != target_os_type)
+ return false;
+ if (os_version.IsSpecified() && !os_version.Contains(target_os_version))
+ return false;
+ }
+ if (vendor_id != 0) {
std::vector<GPUInfo::GPUDevice> candidates;
- switch (multi_gpu_category_) {
+ switch (multi_gpu_category) {
case kMultiGpuCategoryPrimary:
candidates.push_back(gpu_info.gpu);
break;
@@ -1224,6 +329,8 @@ bool GpuControlList::GpuControlListEntry::Contains(
candidates.push_back(gpu_info.gpu);
break;
case kMultiGpuCategoryActive:
+ case kMultiGpuCategoryNone:
+ // If gpu category is not specified, default to the active gpu.
if (gpu_info.gpu.active || gpu_info.secondary_gpus.empty())
candidates.push_back(gpu_info.gpu);
for (size_t ii = 0; ii < gpu_info.secondary_gpus.size(); ++ii) {
@@ -1232,14 +339,12 @@ bool GpuControlList::GpuControlListEntry::Contains(
}
if (candidates.empty())
candidates.push_back(gpu_info.gpu);
- default:
- break;
}
GPUInfo::GPUDevice gpu;
- gpu.vendor_id = vendor_id_;
+ gpu.vendor_id = vendor_id;
bool found = false;
- if (device_id_list_.empty()) {
+ if (device_id_size == 0) {
for (size_t ii = 0; ii < candidates.size(); ++ii) {
if (gpu.vendor_id == candidates[ii].vendor_id) {
found = true;
@@ -1247,8 +352,8 @@ bool GpuControlList::GpuControlListEntry::Contains(
}
}
} else {
- for (size_t ii = 0; ii < device_id_list_.size(); ++ii) {
- gpu.device_id = device_id_list_[ii];
+ for (size_t ii = 0; ii < device_id_size; ++ii) {
+ gpu.device_id = device_ids[ii];
for (size_t jj = 0; jj < candidates.size(); ++jj) {
if (gpu.vendor_id == candidates[jj].vendor_id &&
gpu.device_id == candidates[jj].device_id) {
@@ -1261,7 +366,7 @@ bool GpuControlList::GpuControlListEntry::Contains(
if (!found)
return false;
}
- switch (multi_gpu_style_) {
+ switch (multi_gpu_style) {
case kMultiGpuStyleOptimus:
if (!gpu_info.optimus)
return false;
@@ -1289,219 +394,108 @@ bool GpuControlList::GpuControlListEntry::Contains(
case kMultiGpuStyleNone:
break;
}
- if (StringMismatch(gpu_info.driver_vendor, driver_vendor_info_))
+ if (driver_info && !driver_info->Contains(gpu_info)) {
return false;
- if (driver_version_info_.get() != NULL && !gpu_info.driver_version.empty()) {
- if (!driver_version_info_->Contains(gpu_info.driver_version))
- return false;
}
- if (driver_date_info_.get() != NULL && !gpu_info.driver_date.empty()) {
- if (!driver_date_info_->Contains(gpu_info.driver_date, '-'))
- return false;
- }
- if (GLVersionInfoMismatch(gpu_info.gl_version))
- return false;
- if (StringMismatch(gpu_info.gl_version, gl_version_string_info_))
- return false;
- if (StringMismatch(gpu_info.gl_vendor, gl_vendor_info_))
+ if (gl_strings && !gl_strings->Contains(gpu_info)) {
return false;
- if (StringMismatch(gpu_info.gl_renderer, gl_renderer_info_))
- return false;
- if (StringMismatch(gpu_info.gl_extensions, gl_extensions_info_))
- return false;
- if (gl_reset_notification_strategy_info_.get() != NULL &&
- !gl_reset_notification_strategy_info_->Contains(
- gpu_info.gl_reset_notification_strategy))
- return false;
- if (!machine_model_name_list_.empty()) {
- if (gpu_info.machine_model_name.empty())
- return false;
- bool found_match = false;
- for (size_t ii = 0; ii < machine_model_name_list_.size(); ++ii) {
- if (RE2::FullMatch(gpu_info.machine_model_name,
- machine_model_name_list_[ii])) {
- found_match = true;
- break;
- }
- }
- if (!found_match)
- return false;
}
- if (machine_model_version_info_.get() != NULL &&
- (gpu_info.machine_model_version.empty() ||
- !machine_model_version_info_->Contains(gpu_info.machine_model_version)))
- return false;
- if (gpu_count_info_.get() != NULL &&
- !gpu_count_info_->Contains(gpu_info.secondary_gpus.size() + 1))
- return false;
- if (direct_rendering_info_.get() != NULL &&
- !direct_rendering_info_->Contains(gpu_info.direct_rendering))
- return false;
- if (in_process_gpu_info_.get() != NULL &&
- !in_process_gpu_info_->Contains(gpu_info.in_process_gpu))
+ if (machine_model_info && !machine_model_info->Contains(gpu_info)) {
return false;
- if (!cpu_brand_.empty()) {
- base::CPU cpu_info;
- if (StringMismatch(cpu_info.cpu_brand(), cpu_brand_))
- return false;
}
- if (pixel_shader_version_info_.get() != NULL) {
- if (!pixel_shader_version_info_->Contains(gpu_info.pixel_shader_version))
- return false;
+ if (more && !more->Contains(gpu_info)) {
+ return false;
}
+ return true;
+}
- for (size_t i = 0; i < exceptions_.size(); ++i) {
- if (exceptions_[i]->Contains(os_type, os_version, gpu_info) &&
- !exceptions_[i]->NeedsMoreInfo(gpu_info, true))
+bool GpuControlList::Entry::Contains(OsType target_os_type,
+ const std::string& target_os_version,
+ const GPUInfo& gpu_info) const {
+ if (!conditions.Contains(target_os_type, target_os_version, gpu_info)) {
+ return false;
+ }
+ for (size_t ii = 0; ii < exception_size; ++ii) {
+ if (exceptions[ii].Contains(target_os_type, target_os_version, gpu_info) &&
+ !exceptions[ii].NeedsMoreInfo(gpu_info)) {
return false;
+ }
}
return true;
}
-bool GpuControlList::GpuControlListEntry::NeedsMoreInfo(
- const GPUInfo& gpu_info,
- bool consider_exceptions) const {
+bool GpuControlList::Conditions::NeedsMoreInfo(const GPUInfo& gpu_info) const {
// We only check for missing info that might be collected with a gl context.
// If certain info is missing due to some error, say, we fail to collect
// vendor_id/device_id, then even if we launch GPU process and create a gl
// context, we won't gather such missing info, so we still return false.
- if (!driver_vendor_info_.empty() && gpu_info.driver_vendor.empty())
- return true;
- if (driver_version_info_.get() && gpu_info.driver_version.empty())
- return true;
- if ((gl_version_info_.get() || !gl_version_string_info_.empty()) &&
+ if (driver_info) {
+ if (driver_info->driver_vendor && gpu_info.driver_vendor.empty()) {
+ return true;
+ }
+ if (driver_info->driver_version.IsSpecified() &&
+ gpu_info.driver_version.empty()) {
+ return true;
+ }
+ }
+ if (((more && more->gl_version.IsSpecified()) ||
+ (gl_strings && gl_strings->gl_version)) &&
gpu_info.gl_version.empty()) {
return true;
}
- if (!gl_vendor_info_.empty() && gpu_info.gl_vendor.empty())
+ if (gl_strings && gl_strings->gl_vendor && gpu_info.gl_vendor.empty())
return true;
- if (!gl_renderer_info_.empty() && gpu_info.gl_renderer.empty())
+ if (gl_strings && gl_strings->gl_renderer && gpu_info.gl_renderer.empty())
return true;
- if (pixel_shader_version_info_.get() != NULL &&
- gpu_info.pixel_shader_version.empty())
+ if (more && more->pixel_shader_version.IsSpecified() &&
+ gpu_info.pixel_shader_version.empty()) {
return true;
+ }
+ return false;
+}
+bool GpuControlList::Entry::NeedsMoreInfo(const GPUInfo& gpu_info,
+ bool consider_exceptions) const {
+ if (conditions.NeedsMoreInfo(gpu_info))
+ return true;
if (consider_exceptions) {
- for (size_t i = 0; i < exceptions_.size(); ++i) {
- if (exceptions_[i]->NeedsMoreInfo(gpu_info, consider_exceptions))
+ for (size_t ii = 0; ii < exception_size; ++ii) {
+ if (exceptions[ii].NeedsMoreInfo(gpu_info))
return true;
}
}
-
return false;
}
-GpuControlList::OsType GpuControlList::GpuControlListEntry::GetOsType() const {
- if (os_info_.get() == NULL)
- return kOsAny;
- return os_info_->type();
-}
-
-uint32_t GpuControlList::GpuControlListEntry::id() const {
- return id_;
-}
-
-bool GpuControlList::GpuControlListEntry::disabled() const {
- return disabled_;
-}
-
-const std::set<int>& GpuControlList::GpuControlListEntry::features() const {
- return features_;
-}
-
-void GpuControlList::GpuControlListEntry::GetFeatureNames(
+void GpuControlList::Entry::GetFeatureNames(
base::ListValue* feature_names,
- const FeatureMap& feature_map,
- bool supports_feature_type_all) const {
+ const FeatureMap& feature_map) const {
DCHECK(feature_names);
- if (supports_feature_type_all && features_.size() == feature_map.size()) {
- feature_names->AppendString("all");
- return;
- }
- for (FeatureMap::const_iterator iter = feature_map.begin();
- iter != feature_map.end(); ++iter) {
- if (features_.count(iter->second) > 0)
- feature_names->AppendString(iter->first);
- }
-}
-
-// static
-bool GpuControlList::GpuControlListEntry::StringToFeature(
- const std::string& feature_name, int* feature_id,
- const FeatureMap& feature_map) {
- FeatureMap::const_iterator iter = feature_map.find(feature_name);
- if (iter != feature_map.end()) {
- *feature_id = iter->second;
- return true;
+ for (size_t ii = 0; ii < feature_size; ++ii) {
+ auto iter = feature_map.find(features[ii]);
+ DCHECK(iter != feature_map.end());
+ feature_names->AppendString(iter->second);
}
- return false;
}
-GpuControlList::GpuControlList()
- : max_entry_id_(0),
+GpuControlList::GpuControlList(const GpuControlListData& data)
+ : version_(data.version),
+ entry_count_(data.entry_count),
+ entries_(data.entries),
+ max_entry_id_(0),
needs_more_info_(false),
- supports_feature_type_all_(false),
control_list_logging_enabled_(false) {
+ DCHECK_LT(0u, entry_count_);
+ // Assume the newly last added entry has the largest ID.
+ max_entry_id_ = entries_[entry_count_ - 1].id;
}
GpuControlList::~GpuControlList() {
- Clear();
-}
-
-bool GpuControlList::LoadList(
- const std::string& json_context,
- GpuControlList::OsFilter os_filter) {
- std::unique_ptr<base::DictionaryValue> root =
- base::DictionaryValue::From(base::JSONReader::Read(json_context));
- if (!root)
- return false;
- return LoadList(*root, os_filter);
-}
-
-bool GpuControlList::LoadList(const base::DictionaryValue& parsed_json,
- GpuControlList::OsFilter os_filter) {
- std::vector<ScopedGpuControlListEntry> entries;
-
- parsed_json.GetString("version", &version_);
- std::vector<std::string> pieces;
- if (!ProcessVersionString(version_, '.', &pieces))
- return false;
-
- const base::ListValue* list = NULL;
- if (!parsed_json.GetList("entries", &list))
- return false;
-
- uint32_t max_entry_id = 0;
- for (size_t i = 0; i < list->GetSize(); ++i) {
- const base::DictionaryValue* list_item = NULL;
- bool valid = list->GetDictionary(i, &list_item);
- if (!valid || list_item == NULL)
- return false;
- ScopedGpuControlListEntry entry(GpuControlListEntry::GetEntryFromValue(
- list_item, true, feature_map_, supports_feature_type_all_));
- if (entry.get() == NULL)
- return false;
- if (entry->id() > max_entry_id)
- max_entry_id = entry->id();
- entries.push_back(entry);
- }
-
- Clear();
- OsType my_os = GetOsType();
- for (size_t i = 0; i < entries.size(); ++i) {
- OsType entry_os = entries[i]->GetOsType();
- if (os_filter == GpuControlList::kAllOs ||
- entry_os == kOsAny || entry_os == my_os)
- entries_.push_back(entries[i]);
- }
- max_entry_id_ = max_entry_id;
- return true;
}
-std::set<int> GpuControlList::MakeDecision(
- GpuControlList::OsType os,
- std::string os_version,
- const GPUInfo& gpu_info) {
+std::set<int> GpuControlList::MakeDecision(GpuControlList::OsType os,
+ const std::string& os_version,
+ const GPUInfo& gpu_info) {
active_entries_.clear();
std::set<int> features;
@@ -1515,37 +509,42 @@ std::set<int> GpuControlList::MakeDecision(
if (os == kOsAny)
os = GetOsType();
- if (os_version.empty())
- os_version = base::SysInfo::OperatingSystemVersion();
-
- for (size_t i = 0; i < entries_.size(); ++i) {
- ScopedGpuControlListEntry entry = entries_[i];
- if (entry->Contains(os, os_version, gpu_info)) {
- bool needs_more_info_main = entry->NeedsMoreInfo(gpu_info, false);
- bool needs_more_info_exception = entry->NeedsMoreInfo(gpu_info, true);
-
- if (!entry->disabled()) {
- if (control_list_logging_enabled_)
- entry->LogControlListMatch(control_list_logging_name_);
- // Only look at main entry info when deciding what to add to "features"
- // set. If we don't have enough info for an exception, it's safer if we
- // just ignore the exception and assume the exception doesn't apply.
- for (std::set<int>::const_iterator iter = entry->features().begin();
- iter != entry->features().end(); ++iter) {
- if (needs_more_info_main) {
- if (!features.count(*iter))
- potential_features.insert(*iter);
- } else {
- features.insert(*iter);
- potential_features.erase(*iter);
- if (!needs_more_info_exception)
- permanent_features.insert(*iter);
- }
+ std::string processed_os_version = os_version;
+ if (processed_os_version.empty())
+ processed_os_version = base::SysInfo::OperatingSystemVersion();
+ // Get rid of the non numbers because later processing expects a valid
+ // version string in the format of "a.b.c".
+ size_t pos = processed_os_version.find_first_not_of("0123456789.");
+ if (pos != std::string::npos)
+ processed_os_version = processed_os_version.substr(0, pos);
+
+ for (size_t ii = 0; ii < entry_count_; ++ii) {
+ const Entry& entry = entries_[ii];
+ DCHECK_NE(0u, entry.id);
+ if (entry.Contains(os, processed_os_version, gpu_info)) {
+ bool needs_more_info_main = entry.NeedsMoreInfo(gpu_info, false);
+ bool needs_more_info_exception = entry.NeedsMoreInfo(gpu_info, true);
+
+ if (control_list_logging_enabled_)
+ entry.LogControlListMatch(control_list_logging_name_);
+ // Only look at main entry info when deciding what to add to "features"
+ // set. If we don't have enough info for an exception, it's safer if we
+ // just ignore the exception and assume the exception doesn't apply.
+ for (size_t jj = 0; jj < entry.feature_size; ++jj) {
+ int feature = entry.features[jj];
+ if (needs_more_info_main) {
+ if (!features.count(feature))
+ potential_features.insert(feature);
+ } else {
+ features.insert(feature);
+ potential_features.erase(feature);
+ if (!needs_more_info_exception)
+ permanent_features.insert(feature);
}
}
if (!needs_more_info_main)
- active_entries_.push_back(entry);
+ active_entries_.push_back(ii);
}
}
@@ -1554,26 +553,24 @@ std::set<int> GpuControlList::MakeDecision(
return features;
}
-void GpuControlList::GetDecisionEntries(std::vector<uint32_t>* entry_ids,
- bool disabled) const {
+void GpuControlList::GetDecisionEntries(
+ std::vector<uint32_t>* entry_ids) const {
DCHECK(entry_ids);
entry_ids->clear();
- for (size_t i = 0; i < active_entries_.size(); ++i) {
- if (disabled == active_entries_[i]->disabled())
- entry_ids->push_back(active_entries_[i]->id());
+ for (auto index : active_entries_) {
+ DCHECK_LT(index, entry_count_);
+ entry_ids->push_back(entries_[index].id);
}
}
std::vector<std::string> GpuControlList::GetDisabledExtensions() {
std::set<std::string> disabled_extensions;
- for (size_t i = 0; i < active_entries_.size(); ++i) {
- GpuControlListEntry* entry = active_entries_[i].get();
-
- if (entry->disabled())
- continue;
-
- disabled_extensions.insert(entry->disabled_extensions().begin(),
- entry->disabled_extensions().end());
+ for (auto index : active_entries_) {
+ DCHECK_LT(index, entry_count_);
+ const Entry& entry = entries_[index];
+ for (size_t ii = 0; ii < entry.disabled_extension_size; ++ii) {
+ disabled_extensions.insert(entry.disabled_extensions[ii]);
+ }
}
return std::vector<std::string>(disabled_extensions.begin(),
disabled_extensions.end());
@@ -1582,27 +579,19 @@ std::vector<std::string> GpuControlList::GetDisabledExtensions() {
void GpuControlList::GetReasons(base::ListValue* problem_list,
const std::string& tag) const {
DCHECK(problem_list);
- for (size_t i = 0; i < active_entries_.size(); ++i) {
- GpuControlListEntry* entry = active_entries_[i].get();
- if (entry->disabled())
- continue;
+ for (auto index : active_entries_) {
+ const Entry& entry = entries_[index];
std::unique_ptr<base::DictionaryValue> problem(new base::DictionaryValue());
- problem->SetString("description", entry->description());
+ problem->SetString("description", entry.description);
base::ListValue* cr_bugs = new base::ListValue();
- for (size_t j = 0; j < entry->cr_bugs().size(); ++j)
- cr_bugs->AppendInteger(entry->cr_bugs()[j]);
+ for (size_t jj = 0; jj < entry.cr_bug_size; ++jj)
+ cr_bugs->AppendInteger(entry.cr_bugs[jj]);
problem->Set("crBugs", cr_bugs);
- base::ListValue* webkit_bugs = new base::ListValue();
- for (size_t j = 0; j < entry->webkit_bugs().size(); ++j) {
- webkit_bugs->AppendInteger(entry->webkit_bugs()[j]);
- }
- problem->Set("webkitBugs", webkit_bugs);
-
base::ListValue* features = new base::ListValue();
- entry->GetFeatureNames(features, feature_map_, supports_feature_type_all_);
+ entry.GetFeatureNames(features, feature_map_);
problem->Set("affectedGpuSettings", features);
DCHECK(tag == "workarounds" || tag == "disabledFeatures");
@@ -1613,18 +602,7 @@ void GpuControlList::GetReasons(base::ListValue* problem_list,
}
size_t GpuControlList::num_entries() const {
- return entries_.size();
-}
-
-bool GpuControlList::has_duplicated_entry_id() const {
- std::set<int> ids;
- for (size_t i = 0; i < entries_.size(); ++i) {
- if (ids.count(entries_[i]->id()) == 0)
- ids.insert(entries_[i]->id());
- else
- return true;
- }
- return false;
+ return entry_count_;
}
uint32_t GpuControlList::max_entry_id() const {
@@ -1635,6 +613,7 @@ std::string GpuControlList::version() const {
return version_;
}
+// static
GpuControlList::OsType GpuControlList::GetOsType() {
#if defined(OS_CHROMEOS)
return kOsChromeOS;
@@ -1647,44 +626,13 @@ GpuControlList::OsType GpuControlList::GetOsType() {
#elif defined(OS_MACOSX)
return kOsMacosx;
#else
- return kOsUnknown;
+ return kOsAny;
#endif
}
-void GpuControlList::Clear() {
- entries_.clear();
- active_entries_.clear();
- max_entry_id_ = 0;
-}
-
-// static
-GpuControlList::NumericOp GpuControlList::StringToNumericOp(
- const std::string& op) {
- if (op == "=")
- return kEQ;
- if (op == "<")
- return kLT;
- if (op == "<=")
- return kLE;
- if (op == ">")
- return kGT;
- if (op == ">=")
- return kGE;
- if (op == "any")
- return kAny;
- if (op == "between")
- return kBetween;
- return kUnknown;
-}
-
void GpuControlList::AddSupportedFeature(
const std::string& feature_name, int feature_id) {
- feature_map_[feature_name] = feature_id;
-}
-
-void GpuControlList::set_supports_feature_type_all(bool supported) {
- supports_feature_type_all_ = supported;
+ feature_map_[feature_id] = feature_name;
}
} // namespace gpu
-
diff --git a/chromium/gpu/config/gpu_control_list.h b/chromium/gpu/config/gpu_control_list.h
index 3158e1bb372..46199f41562 100644
--- a/chromium/gpu/config/gpu_control_list.h
+++ b/chromium/gpu/config/gpu_control_list.h
@@ -6,33 +6,24 @@
#define GPU_CONFIG_GPU_CONTROL_LIST_H_
#include <stddef.h>
-#include <stdint.h>
-#include <memory>
#include <set>
#include <string>
#include <vector>
#include "base/containers/hash_tables.h"
-#include "base/memory/ref_counted.h"
#include "base/values.h"
-#include "build/build_config.h"
#include "gpu/gpu_export.h"
namespace gpu {
+struct GpuControlListData;
struct GPUInfo;
class GPU_EXPORT GpuControlList {
public:
- enum OsType {
- kOsLinux,
- kOsMacosx,
- kOsWin,
- kOsChromeOS,
- kOsAndroid,
- kOsAny,
- kOsUnknown
- };
+ typedef base::hash_map<int, std::string> FeatureMap;
+
+ enum OsType { kOsLinux, kOsMacosx, kOsWin, kOsChromeOS, kOsAndroid, kOsAny };
enum OsFilter {
// In loading, ignore all entries that belong to other OS.
@@ -41,79 +32,6 @@ class GPU_EXPORT GpuControlList {
kAllOs
};
- GpuControlList();
- virtual ~GpuControlList();
-
- // Loads control list information from a json file.
- // If failed, the current GpuControlList is un-touched.
- bool LoadList(const std::string& json_context, OsFilter os_filter);
-
- // Collects system information and combines them with gpu_info and control
- // list information to decide which entries are applied to the current
- // system and returns the union of features specified in each entry.
- // If os is kOsAny, use the current OS; if os_version is empty, use the
- // current OS version.
- std::set<int> MakeDecision(
- OsType os, std::string os_version, const GPUInfo& gpu_info);
-
- // Collects the active entries from the last MakeDecision() call.
- // If disabled set to true, return entries that are disabled; otherwise,
- // return enabled entries.
- void GetDecisionEntries(std::vector<uint32_t>* entry_ids,
- bool disabled) const;
-
- // Collects all disabled extensions.
- std::vector<std::string> GetDisabledExtensions();
-
- // Returns the description and bugs from active entries from the last
- // MakeDecision() call.
- //
- // Each problems has:
- // {
- // "description": "Your GPU is too old",
- // "crBugs": [1234],
- // "webkitBugs": []
- // }
- void GetReasons(
- base::ListValue* problem_list, const std::string& tag) const;
-
- // Return the largest entry id. This is used for histogramming.
- uint32_t max_entry_id() const;
-
- // Returns the version of the control list.
- std::string version() const;
-
- // Check if we need more gpu info to make the decisions.
- // This is computed from the last MakeDecision() call.
- // If yes, we should create a gl context and do a full gpu info collection.
- bool needs_more_info() const { return needs_more_info_; }
-
- // Returns the number of entries. This is only for tests.
- size_t num_entries() const;
-
- // This is only for tests.
- bool has_duplicated_entry_id() const;
-
- // Register a feature to FeatureMap - used to construct a GpuControlList.
- void AddSupportedFeature(const std::string& feature_name, int feature_id);
- // Register whether "all" is recognized as all features.
- void set_supports_feature_type_all(bool supported);
-
- // Enables logging of control list decisions.
- void enable_control_list_logging(
- const std::string& control_list_logging_name) {
- control_list_logging_enabled_ = true;
- control_list_logging_name_ = control_list_logging_name;
- }
-
- private:
- friend class GpuControlListEntryTest;
- friend class MachineModelInfoTest;
- friend class NumberInfoTest;
- friend class OsInfoTest;
- friend class StringInfoTest;
- friend class VersionInfoTest;
-
enum NumericOp {
kBetween, // <= * <=
kEQ, // =
@@ -125,35 +43,52 @@ class GPU_EXPORT GpuControlList {
kUnknown // Indicates the data is invalid.
};
- class GPU_EXPORT VersionInfo {
- public:
- // If version_style is empty, it defaults to kNumerical.
- VersionInfo(const std::string& version_op,
- const std::string& version_style,
- const std::string& version_string,
- const std::string& version_string2);
- ~VersionInfo();
+ enum MultiGpuStyle {
+ kMultiGpuStyleOptimus,
+ kMultiGpuStyleAMDSwitchable,
+ kMultiGpuStyleAMDSwitchableIntegrated,
+ kMultiGpuStyleAMDSwitchableDiscrete,
+ kMultiGpuStyleNone
+ };
- // Determines if a given version is included in the VersionInfo range.
- // "splitter" divides version string into segments.
- bool Contains(const std::string& version, char splitter) const;
- // Same as above, using '.' as splitter.
- bool Contains(const std::string& version) const;
+ enum MultiGpuCategory {
+ // This entry applies if this is the primary GPU on the system.
+ kMultiGpuCategoryPrimary,
+ // This entry applies if this is a secondary GPU on the system.
+ kMultiGpuCategorySecondary,
+ // This entry applies if this is the active GPU on the system.
+ kMultiGpuCategoryActive,
+ // This entry applies if this is any of the GPUs on the system.
+ kMultiGpuCategoryAny,
+ kMultiGpuCategoryNone
+ };
- // Determine if the version_style is lexical.
- bool IsLexical() const;
+ enum GLType {
+ kGLTypeGL, // This is default on MacOSX, Linux, ChromeOS
+ kGLTypeGLES, // This is default on Android
+ kGLTypeANGLE, // This is default on Windows
+ kGLTypeNone
+ };
- // Determines if the VersionInfo contains valid information.
- bool IsValid() const;
+ enum VersionStyle {
+ kVersionStyleNumerical,
+ kVersionStyleLexical,
+ kVersionStyleUnknown
+ };
+
+ struct GPU_EXPORT Version {
+ NumericOp op;
+ VersionStyle style;
+ const char* value1;
+ const char* value2;
- private:
- enum VersionStyle {
- kVersionStyleNumerical,
- kVersionStyleLexical,
- kVersionStyleUnknown
- };
+ bool IsSpecified() const { return op != kUnknown; }
- static VersionStyle StringToVersionStyle(const std::string& version_style);
+ bool Contains(const std::string& version_string, char splitter) const;
+
+ bool Contains(const std::string& version_string) const {
+ return Contains(version_string, '.');
+ }
// Compare two version strings.
// Return 1 if version > version_ref,
@@ -168,329 +103,172 @@ class GPU_EXPORT GpuControlList {
static int Compare(const std::vector<std::string>& version,
const std::vector<std::string>& version_ref,
VersionStyle version_style);
-
- NumericOp op_;
- VersionStyle version_style_;
- std::vector<std::string> version_;
- std::vector<std::string> version2_;
};
- class GPU_EXPORT OsInfo {
- public:
- OsInfo(const std::string& os,
- const std::string& version_op,
- const std::string& version_string,
- const std::string& version_string2);
- ~OsInfo();
+ struct GPU_EXPORT DriverInfo {
+ const char* driver_vendor;
+ Version driver_version;
+ Version driver_date;
- // Determines if a given os/version is included in the OsInfo set.
- bool Contains(OsType type, const std::string& version) const;
-
- // Determines if the VersionInfo contains valid information.
- bool IsValid() const;
-
- OsType type() const;
-
- // Maps string to OsType; returns kOsUnknown if it's not a valid os.
- static OsType StringToOsType(const std::string& os);
-
- private:
- OsType type_;
- std::unique_ptr<VersionInfo> version_info_;
+ bool Contains(const GPUInfo& gpu_info) const;
};
- class GPU_EXPORT FloatInfo {
- public:
- FloatInfo(const std::string& float_op,
- const std::string& float_value,
- const std::string& float_value2);
+ struct GPU_EXPORT GLStrings {
+ const char* gl_vendor;
+ const char* gl_renderer;
+ const char* gl_extensions;
+ const char* gl_version;
- // Determines if a given float is included in the FloatInfo.
- bool Contains(float value) const;
+ bool Contains(const GPUInfo& gpu_info) const;
+ };
- // Determines if the FloatInfo contains valid information.
- bool IsValid() const;
+ struct GPU_EXPORT MachineModelInfo {
+ size_t machine_model_name_size;
+ const char** machine_model_names;
+ Version machine_model_version;
- private:
- NumericOp op_;
- float value_;
- float value2_;
+ bool Contains(const GPUInfo& gpu_info) const;
};
- class GPU_EXPORT IntInfo {
- public:
- IntInfo(const std::string& int_op,
- const std::string& int_value,
- const std::string& int_value2);
+ struct GPU_EXPORT More {
+ // These are just part of Entry fields that are less common.
+ // Putting them to a separate struct to save Entry data size.
+ GLType gl_type;
+ Version gl_version;
+ Version pixel_shader_version;
+ bool in_process_gpu;
+ uint32_t gl_reset_notification_strategy;
+ bool direct_rendering;
+ Version gpu_count;
- // Determines if a given int is included in the IntInfo.
- bool Contains(int value) const;
+ // Return true if GL_VERSION string does not fit the entry info
+ // on GL type and GL version.
+ bool GLVersionInfoMismatch(const std::string& gl_version_string) const;
- // Determines if the IntInfo contains valid information.
- bool IsValid() const;
+ bool Contains(const GPUInfo& gpu_info) const;
- private:
- NumericOp op_;
- int value_;
- int value2_;
+ // Return the default GL type, depending on the OS.
+ // See GLType declaration.
+ static GLType GetDefaultGLType();
};
- class GPU_EXPORT BoolInfo {
- public:
- explicit BoolInfo(bool value);
-
- // Determines if a given bool is included in the BoolInfo.
- bool Contains(bool value) const;
+ struct GPU_EXPORT Conditions {
+ OsType os_type;
+ Version os_version;
+ uint32_t vendor_id;
+ size_t device_id_size;
+ const uint32_t* device_ids;
+ MultiGpuCategory multi_gpu_category;
+ MultiGpuStyle multi_gpu_style;
+ const DriverInfo* driver_info;
+ const GLStrings* gl_strings;
+ const MachineModelInfo* machine_model_info;
+ const More* more;
+
+ bool Contains(OsType os_type,
+ const std::string& os_version,
+ const GPUInfo& gpu_info) const;
- private:
- bool value_;
+ // Determines whether we needs more gpu info to make the blacklisting
+ // decision. It should only be checked if Contains() returns true.
+ bool NeedsMoreInfo(const GPUInfo& gpu_info) const;
};
- class GpuControlListEntry;
- typedef scoped_refptr<GpuControlListEntry> ScopedGpuControlListEntry;
-
- typedef base::hash_map<std::string, int> FeatureMap;
-
- class GPU_EXPORT GpuControlListEntry
- : public base::RefCounted<GpuControlListEntry> {
- public:
- // Constructs GpuControlListEntry from DictionaryValue loaded from json.
- // Top-level entry must have an id number. Others are exceptions.
- static ScopedGpuControlListEntry GetEntryFromValue(
- const base::DictionaryValue* value, bool top_level,
- const FeatureMap& feature_map,
- bool supports_feature_type_all);
-
- // Logs a control list match for this rule in the list identified by
- // |control_list_logging_name|.
- void LogControlListMatch(
- const std::string& control_list_logging_name) const;
-
- // Determines if a given os/gc/machine_model/driver is included in the
- // Entry set.
- bool Contains(OsType os_type, const std::string& os_version,
+ struct GPU_EXPORT Entry {
+ uint32_t id;
+ const char* description;
+ size_t feature_size;
+ const int* features;
+ size_t disabled_extension_size;
+ const char** disabled_extensions;
+ size_t cr_bug_size;
+ const uint32_t* cr_bugs;
+ Conditions conditions;
+ size_t exception_size;
+ const Conditions* exceptions;
+
+ bool Contains(OsType os_type,
+ const std::string& os_version,
const GPUInfo& gpu_info) const;
// Determines whether we needs more gpu info to make the blacklisting
// decision. It should only be checked if Contains() returns true.
bool NeedsMoreInfo(const GPUInfo& gpu_info, bool consider_exceptions) const;
- // Returns the OsType.
- OsType GetOsType() const;
-
- // Returns the entry's unique id. 0 is reserved.
- uint32_t id() const;
-
- // Returns whether the entry is disabled.
- bool disabled() const;
-
- // Returns the description of the entry
- const std::string& description() const { return description_; }
-
- // Returns a list of Chromium and Webkit bugs applicable to this entry
- const std::vector<int>& cr_bugs() const { return cr_bugs_; }
- const std::vector<int>& webkit_bugs() const { return webkit_bugs_; }
- const std::vector<std::string>& disabled_extensions() const {
- return disabled_extensions_;
- }
-
- // Returns the blacklisted features in this entry.
- const std::set<int>& features() const;
-
- // Returns a list of blacklisted feature names in this entry.
void GetFeatureNames(base::ListValue* feature_names,
- const FeatureMap& feature_map,
- bool supports_feature_type_all) const;
-
- private:
- friend class base::RefCounted<GpuControlListEntry>;
-
- enum MultiGpuStyle {
- kMultiGpuStyleOptimus,
- kMultiGpuStyleAMDSwitchable,
- kMultiGpuStyleAMDSwitchableIntegrated,
- kMultiGpuStyleAMDSwitchableDiscrete,
- kMultiGpuStyleNone
- };
-
- enum MultiGpuCategory {
- // This entry applies if this is the primary GPU on the system.
- kMultiGpuCategoryPrimary,
- // This entry applies if this is a secondary GPU on the system.
- kMultiGpuCategorySecondary,
- // This entry applies if this is the active GPU on the system.
- kMultiGpuCategoryActive,
- // This entry applies if this is any of the GPUs on the system.
- kMultiGpuCategoryAny,
- kMultiGpuCategoryNone
- };
-
- enum GLType {
- kGLTypeGL, // This is default on MacOSX, Linux, ChromeOS
- kGLTypeGLES, // This is default on Android
- kGLTypeANGLE, // This is default on Windows
- kGLTypeNone
- };
-
- GpuControlListEntry();
- ~GpuControlListEntry();
-
- bool SetId(uint32_t id);
-
- void SetDisabled(bool disabled);
-
- bool SetOsInfo(const std::string& os,
- const std::string& version_op,
- const std::string& version_string,
- const std::string& version_string2);
-
- bool SetVendorId(const std::string& vendor_id_string);
-
- bool AddDeviceId(const std::string& device_id_string);
-
- bool SetMultiGpuStyle(const std::string& multi_gpu_style_string);
-
- bool SetMultiGpuCategory(const std::string& multi_gpu_category_string);
-
- bool SetGLType(const std::string& gl_type_string);
-
- bool SetDriverVendorInfo(const std::string& vendor_value);
-
- bool SetDriverVersionInfo(const std::string& version_op,
- const std::string& version_style,
- const std::string& version_string,
- const std::string& version_string2);
-
- bool SetDriverDateInfo(const std::string& date_op,
- const std::string& date_string,
- const std::string& date_string2);
-
- bool SetGLVersionInfo(const std::string& version_op,
- const std::string& version_string,
- const std::string& version_string2);
-
- bool SetGLVersionStringInfo(const std::string& version_string_value);
-
- bool SetGLVendorInfo(const std::string& vendor_value);
+ const FeatureMap& feature_map) const;
- bool SetGLRendererInfo(const std::string& renderer_value);
-
- bool SetGLExtensionsInfo(const std::string& extensions_value);
-
- bool SetGLResetNotificationStrategyInfo(const std::string& op,
- const std::string& int_string,
- const std::string& int_string2);
-
- bool SetCpuBrand(const std::string& cpu_value);
-
- bool SetPerfGraphicsInfo(const std::string& op,
- const std::string& float_string,
- const std::string& float_string2);
-
- bool SetPerfGamingInfo(const std::string& op,
- const std::string& float_string,
- const std::string& float_string2);
-
- bool SetPerfOverallInfo(const std::string& op,
- const std::string& float_string,
- const std::string& float_string2);
-
- bool AddMachineModelName(const std::string& model_name);
-
- bool SetMachineModelVersionInfo(const std::string& version_op,
- const std::string& version_string,
- const std::string& version_string2);
+ // Logs a control list match for this rule in the list identified by
+ // |control_list_logging_name|.
+ void LogControlListMatch(
+ const std::string& control_list_logging_name) const;
+ };
- bool SetGpuCountInfo(const std::string& op,
- const std::string& int_string,
- const std::string& int_string2);
+ explicit GpuControlList(const GpuControlListData& data);
+ virtual ~GpuControlList();
- void SetDirectRenderingInfo(bool value);
- void SetInProcessGPUInfo(bool value);
+ // Collects system information and combines them with gpu_info and control
+ // list information to decide which entries are applied to the current
+ // system and returns the union of features specified in each entry.
+ // If os is kOsAny, use the current OS; if os_version is empty, use the
+ // current OS version.
+ std::set<int> MakeDecision(OsType os,
+ const std::string& os_version,
+ const GPUInfo& gpu_info);
- bool SetPixelShaderVersionInfo(const std::string& version_op,
- const std::string& version_string,
- const std::string& version_string2);
+ // Collects the active entries from the last MakeDecision() call.
+ void GetDecisionEntries(std::vector<uint32_t>* entry_ids) const;
- bool SetFeatures(const std::vector<std::string>& features,
- const std::vector<std::string>& exceptions,
- const FeatureMap& feature_map,
- bool supports_feature_type_all);
+ // Collects all disabled extensions.
+ std::vector<std::string> GetDisabledExtensions();
- void AddException(ScopedGpuControlListEntry exception);
+ // Returns the description and bugs from active entries from the last
+ // MakeDecision() call.
+ //
+ // Each problems has:
+ // {
+ // "description": "Your GPU is too old",
+ // "crBugs": [1234],
+ // }
+ void GetReasons(base::ListValue* problem_list, const std::string& tag) const;
- // Return true if GL_VERSION string does not fit the entry info
- // on GL type and GL version.
- bool GLVersionInfoMismatch(const std::string& gl_version) const;
+ // Return the largest entry id. This is used for histogramming.
+ uint32_t max_entry_id() const;
- static MultiGpuStyle StringToMultiGpuStyle(const std::string& style);
+ // Returns the version of the control list.
+ std::string version() const;
- static MultiGpuCategory StringToMultiGpuCategory(
- const std::string& category);
+ // Check if we need more gpu info to make the decisions.
+ // This is computed from the last MakeDecision() call.
+ // If yes, we should create a gl context and do a full gpu info collection.
+ bool needs_more_info() const { return needs_more_info_; }
- static GLType StringToGLType(const std::string& gl_type);
+ // Returns the number of entries. This is only for tests.
+ size_t num_entries() const;
- // map a feature_name to feature_id. If the string is not a registered
- // feature name, return false.
- static bool StringToFeature(const std::string& feature_name,
- int* feature_id,
- const FeatureMap& feature_map);
+ // Register a feature to FeatureMap.
+ void AddSupportedFeature(const std::string& feature_name, int feature_id);
- // Return the default GL type, depending on the OS.
- // See GLType declaration.
- static GLType GetDefaultGLType();
+ // Enables logging of control list decisions.
+ void EnableControlListLogging(const std::string& control_list_logging_name) {
+ control_list_logging_enabled_ = true;
+ control_list_logging_name_ = control_list_logging_name;
+ }
- uint32_t id_;
- bool disabled_;
- std::string description_;
- std::vector<int> cr_bugs_;
- std::vector<int> webkit_bugs_;
- std::vector<std::string> disabled_extensions_;
- std::unique_ptr<OsInfo> os_info_;
- uint32_t vendor_id_;
- std::vector<uint32_t> device_id_list_;
- MultiGpuStyle multi_gpu_style_;
- MultiGpuCategory multi_gpu_category_;
- GLType gl_type_;
- std::string driver_vendor_info_;
- std::unique_ptr<VersionInfo> driver_version_info_;
- std::unique_ptr<VersionInfo> driver_date_info_;
- std::unique_ptr<VersionInfo> gl_version_info_;
- std::string gl_version_string_info_;
- std::string gl_vendor_info_;
- std::string gl_renderer_info_;
- std::string gl_extensions_info_;
- std::unique_ptr<IntInfo> gl_reset_notification_strategy_info_;
- std::string cpu_brand_;
- std::unique_ptr<FloatInfo> perf_graphics_info_;
- std::unique_ptr<FloatInfo> perf_gaming_info_;
- std::unique_ptr<FloatInfo> perf_overall_info_;
- std::vector<std::string> machine_model_name_list_;
- std::unique_ptr<VersionInfo> machine_model_version_info_;
- std::unique_ptr<IntInfo> gpu_count_info_;
- std::unique_ptr<BoolInfo> direct_rendering_info_;
- std::unique_ptr<BoolInfo> in_process_gpu_info_;
- std::unique_ptr<VersionInfo> pixel_shader_version_info_;
- std::set<int> features_;
- std::vector<ScopedGpuControlListEntry> exceptions_;
- };
+ private:
+ friend class GpuControlListEntryTest;
+ friend class VersionInfoTest;
// Gets the current OS type.
static OsType GetOsType();
- bool LoadList(const base::DictionaryValue& parsed_json, OsFilter os_filter);
-
- void Clear();
-
- static NumericOp StringToNumericOp(const std::string& op);
-
std::string version_;
- std::vector<ScopedGpuControlListEntry> entries_;
-
- // This records all the blacklist entries that are appliable to the current
- // user machine. It is updated everytime MakeDecision() is called and is
- // used later by GetDecisionEntries().
- std::vector<ScopedGpuControlListEntry> active_entries_;
+ size_t entry_count_;
+ const Entry* entries_;
+ // This records all the entries that are appliable to the current user
+ // machine. It is updated everytime MakeDecision() is called and is used
+ // later by GetDecisionEntries().
+ std::vector<size_t> active_entries_;
uint32_t max_entry_id_;
@@ -498,13 +276,24 @@ class GPU_EXPORT GpuControlList {
// The features a GpuControlList recognizes and handles.
FeatureMap feature_map_;
- bool supports_feature_type_all_;
bool control_list_logging_enabled_;
std::string control_list_logging_name_;
};
+struct GPU_EXPORT GpuControlListData {
+ const char* version;
+ size_t entry_count;
+ const GpuControlList::Entry* entries;
+
+ GpuControlListData() : version(nullptr), entry_count(0u), entries(nullptr) {}
+
+ GpuControlListData(const char* a_version,
+ size_t a_entry_count,
+ const GpuControlList::Entry* a_entries)
+ : version(a_version), entry_count(a_entry_count), entries(a_entries) {}
+};
+
} // namespace gpu
#endif // GPU_CONFIG_GPU_CONTROL_LIST_H_
-
diff --git a/chromium/gpu/config/gpu_control_list_entry_unittest.cc b/chromium/gpu/config/gpu_control_list_entry_unittest.cc
index 7c63438e291..2ea1feca67f 100644
--- a/chromium/gpu/config/gpu_control_list_entry_unittest.cc
+++ b/chromium/gpu/config/gpu_control_list_entry_unittest.cc
@@ -4,54 +4,52 @@
#include <stddef.h>
-#include <memory>
-
-#include "base/json/json_reader.h"
#include "gpu/config/gpu_control_list.h"
+#include "gpu/config/gpu_control_list_testing_data.h"
#include "gpu/config/gpu_info.h"
#include "testing/gtest/include/gtest/gtest.h"
-#define LONG_STRING_CONST(...) #__VA_ARGS__
-
namespace gpu {
-enum TestFeatureType {
- TEST_FEATURE_0 = 0,
- TEST_FEATURE_1,
- TEST_FEATURE_2
-};
+namespace {
+
+constexpr auto kOsLinux = GpuControlList::kOsLinux;
+constexpr auto kOsMacosx = GpuControlList::kOsMacosx;
+constexpr auto kOsWin = GpuControlList::kOsWin;
+constexpr auto kOsChromeOS = GpuControlList::kOsChromeOS;
+constexpr auto kOsAndroid = GpuControlList::kOsAndroid;
+constexpr auto kOsAny = GpuControlList::kOsAny;
+
+} // namespace anonymous
class GpuControlListEntryTest : public testing::Test {
public:
- GpuControlListEntryTest() { }
+ typedef GpuControlList::Entry Entry;
+
+ GpuControlListEntryTest() {}
~GpuControlListEntryTest() override {}
const GPUInfo& gpu_info() const {
return gpu_info_;
}
- typedef GpuControlList::ScopedGpuControlListEntry ScopedEntry;
-
- static ScopedEntry GetEntryFromString(
- const std::string& json, bool supports_feature_type_all) {
- std::unique_ptr<base::Value> root = base::JSONReader::Read(json);
- base::DictionaryValue* value = NULL;
- if (!root || !root->GetAsDictionary(&value))
- return NULL;
-
- GpuControlList::FeatureMap feature_map;
- feature_map["test_feature_0"] = TEST_FEATURE_0;
- feature_map["test_feature_1"] = TEST_FEATURE_1;
- feature_map["test_feature_2"] = TEST_FEATURE_2;
-
- return GpuControlList::GpuControlListEntry::GetEntryFromValue(
- value, true, feature_map, supports_feature_type_all);
+ const Entry& GetEntry(size_t index) {
+ EXPECT_LT(index, kGpuControlListTestingEntryCount);
+ EXPECT_EQ(index + 1, kGpuControlListTestingEntries[index].id);
+ return kGpuControlListTestingEntries[index];
}
- static ScopedEntry GetEntryFromString(const std::string& json) {
- return GetEntryFromString(json, false);
+ size_t CountFeature(const Entry& entry, int feature) {
+ size_t count = 0;
+ for (size_t ii = 0; ii < entry.feature_size; ++ii) {
+ if (entry.features[ii] == feature) {
+ ++count;
+ }
+ }
+ return count;
}
+ protected:
void SetUp() override {
gpu_info_.gpu.vendor_id = 0x10de;
gpu_info_.gpu.device_id = 0x0640;
@@ -64,1057 +62,362 @@ class GpuControlListEntryTest : public testing::Test {
gpu_info_.gl_renderer = "NVIDIA GeForce GT 120 OpenGL Engine";
}
- protected:
GPUInfo gpu_info_;
};
TEST_F(GpuControlListEntryTest, DetailedEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 5,
- "description": "test entry",
- "cr_bugs": [1024, 678],
- "webkit_bugs": [1950],
- "os": {
- "type": "macosx",
- "version": {
- "op": "=",
- "value": "10.6.4"
- }
- },
- "vendor_id": "0x10de",
- "device_id": ["0x0640"],
- "driver_version": {
- "op": "=",
- "value": "1.6.18"
- },
- "features": [
- "test_feature_0"
- ],
- "disabled_extensions": [
- "test_extension1",
- "test_extension2"
- ]
- }
- );
-
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsMacosx, entry->GetOsType());
- EXPECT_FALSE(entry->disabled());
- EXPECT_EQ(5u, entry->id());
- EXPECT_STREQ("test entry", entry->description().c_str());
- EXPECT_EQ(2u, entry->cr_bugs().size());
- EXPECT_EQ(1024, entry->cr_bugs()[0]);
- EXPECT_EQ(678, entry->cr_bugs()[1]);
- EXPECT_EQ(1u, entry->webkit_bugs().size());
- EXPECT_EQ(1950, entry->webkit_bugs()[0]);
- EXPECT_EQ(1u, entry->features().size());
- EXPECT_EQ(1u, entry->features().count(TEST_FEATURE_0));
- EXPECT_FALSE(entry->NeedsMoreInfo(gpu_info(), true));
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.6.4", gpu_info()));
- EXPECT_STREQ("test_extension1", entry->disabled_extensions()[0].c_str());
- EXPECT_STREQ("test_extension2", entry->disabled_extensions()[1].c_str());
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_DetailedEntry);
+ EXPECT_EQ(kOsMacosx, entry.conditions.os_type);
+ EXPECT_STREQ("GpuControlListEntryTest.DetailedEntry", entry.description);
+ EXPECT_EQ(2u, entry.cr_bug_size);
+ EXPECT_EQ(1024u, entry.cr_bugs[0]);
+ EXPECT_EQ(678u, entry.cr_bugs[1]);
+ EXPECT_EQ(1u, entry.feature_size);
+ EXPECT_EQ(1u, CountFeature(entry, TEST_FEATURE_0));
+ EXPECT_FALSE(entry.NeedsMoreInfo(gpu_info(), true));
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.6.4", gpu_info()));
+ EXPECT_EQ(2u, entry.disabled_extension_size);
+ EXPECT_STREQ("test_extension1", entry.disabled_extensions[0]);
+ EXPECT_STREQ("test_extension2", entry.disabled_extensions[1]);
}
TEST_F(GpuControlListEntryTest, VendorOnAllOsEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "vendor_id": "0x10de",
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsAny, entry->GetOsType());
-
- const GpuControlList::OsType os_type[] = {
- GpuControlList::kOsMacosx,
- GpuControlList::kOsWin,
- GpuControlList::kOsLinux,
- GpuControlList::kOsChromeOS,
- GpuControlList::kOsAndroid
- };
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_VendorOnAllOsEntry);
+ EXPECT_EQ(kOsAny, entry.conditions.os_type);
+ const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsLinux,
+ kOsChromeOS, kOsAndroid};
for (size_t i = 0; i < arraysize(os_type); ++i)
- EXPECT_TRUE(entry->Contains(os_type[i], "10.6", gpu_info()));
+ EXPECT_TRUE(entry.Contains(os_type[i], "10.6", gpu_info()));
}
TEST_F(GpuControlListEntryTest, VendorOnLinuxEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "linux"
- },
- "vendor_id": "0x10de",
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsLinux, entry->GetOsType());
-
- const GpuControlList::OsType os_type[] = {
- GpuControlList::kOsMacosx,
- GpuControlList::kOsWin,
- GpuControlList::kOsChromeOS,
- GpuControlList::kOsAndroid
- };
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_VendorOnLinuxEntry);
+ EXPECT_EQ(kOsLinux, entry.conditions.os_type);
+ const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsChromeOS,
+ kOsAndroid};
for (size_t i = 0; i < arraysize(os_type); ++i)
- EXPECT_FALSE(entry->Contains(os_type[i], "10.6", gpu_info()));
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsLinux, "10.6", gpu_info()));
+ EXPECT_FALSE(entry.Contains(os_type[i], "10.6", gpu_info()));
+ EXPECT_TRUE(entry.Contains(kOsLinux, "10.6", gpu_info()));
}
TEST_F(GpuControlListEntryTest, AllExceptNVidiaOnLinuxEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "linux"
- },
- "exceptions": [
- {
- "vendor_id": "0x10de"
- }
- ],
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsLinux, entry->GetOsType());
-
- const GpuControlList::OsType os_type[] = {
- GpuControlList::kOsMacosx,
- GpuControlList::kOsWin,
- GpuControlList::kOsLinux,
- GpuControlList::kOsChromeOS,
- GpuControlList::kOsAndroid
- };
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_AllExceptNVidiaOnLinuxEntry);
+ EXPECT_EQ(kOsLinux, entry.conditions.os_type);
+ const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsLinux,
+ kOsChromeOS, kOsAndroid};
for (size_t i = 0; i < arraysize(os_type); ++i)
- EXPECT_FALSE(entry->Contains(os_type[i], "10.6", gpu_info()));
+ EXPECT_FALSE(entry.Contains(os_type[i], "10.6", gpu_info()));
}
TEST_F(GpuControlListEntryTest, AllExceptIntelOnLinuxEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "linux"
- },
- "exceptions": [
- {
- "vendor_id": "0x8086"
- }
- ],
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsLinux, entry->GetOsType());
-
- const GpuControlList::OsType os_type[] = {
- GpuControlList::kOsMacosx,
- GpuControlList::kOsWin,
- GpuControlList::kOsChromeOS,
- GpuControlList::kOsAndroid
- };
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_AllExceptIntelOnLinuxEntry);
+ EXPECT_EQ(kOsLinux, entry.conditions.os_type);
+ const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsChromeOS,
+ kOsAndroid};
for (size_t i = 0; i < arraysize(os_type); ++i)
- EXPECT_FALSE(entry->Contains(os_type[i], "10.6", gpu_info()));
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsLinux, "10.6", gpu_info()));
+ EXPECT_FALSE(entry.Contains(os_type[i], "10.6", gpu_info()));
+ EXPECT_TRUE(entry.Contains(kOsLinux, "10.6", gpu_info()));
}
TEST_F(GpuControlListEntryTest, DateOnWindowsEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "win"
- },
- "driver_date": {
- "op": "<",
- "value": "2010.5.8"
- },
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsWin, entry->GetOsType());
-
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_DateOnWindowsEntry);
+ EXPECT_EQ(kOsWin, entry.conditions.os_type);
GPUInfo gpu_info;
gpu_info.driver_date = "4-12-2010";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsWin, "10.6", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsWin, "10.6", gpu_info));
gpu_info.driver_date = "5-8-2010";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsWin, "10.6", gpu_info));
+ EXPECT_FALSE(entry.Contains(kOsWin, "10.6", gpu_info));
gpu_info.driver_date = "5-9-2010";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsWin, "10.6", gpu_info));
+ EXPECT_FALSE(entry.Contains(kOsWin, "10.6", gpu_info));
}
TEST_F(GpuControlListEntryTest, MultipleDevicesEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "vendor_id": "0x10de",
- "device_id": ["0x1023", "0x0640"],
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsAny, entry->GetOsType());
-
- const GpuControlList::OsType os_type[] = {
- GpuControlList::kOsMacosx,
- GpuControlList::kOsWin,
- GpuControlList::kOsLinux,
- GpuControlList::kOsChromeOS,
- GpuControlList::kOsAndroid
- };
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_MultipleDevicesEntry);
+ EXPECT_EQ(kOsAny, entry.conditions.os_type);
+ const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsLinux,
+ kOsChromeOS, kOsAndroid};
for (size_t i = 0; i < arraysize(os_type); ++i)
- EXPECT_TRUE(entry->Contains(os_type[i], "10.6", gpu_info()));
+ EXPECT_TRUE(entry.Contains(os_type[i], "10.6", gpu_info()));
}
TEST_F(GpuControlListEntryTest, ChromeOSEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "chromeos"
- },
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsChromeOS, entry->GetOsType());
-
- const GpuControlList::OsType os_type[] = {
- GpuControlList::kOsMacosx,
- GpuControlList::kOsWin,
- GpuControlList::kOsLinux,
- GpuControlList::kOsAndroid
- };
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_ChromeOSEntry);
+ EXPECT_EQ(kOsChromeOS, entry.conditions.os_type);
+ const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsLinux,
+ kOsAndroid};
for (size_t i = 0; i < arraysize(os_type); ++i)
- EXPECT_FALSE(entry->Contains(os_type[i], "10.6", gpu_info()));
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsChromeOS, "10.6", gpu_info()));
-}
-
-TEST_F(GpuControlListEntryTest, MalformedVendor) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "vendor_id": "[0x10de]",
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() == NULL);
-}
-
-TEST_F(GpuControlListEntryTest, UnknownFieldEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "unknown_field": 0,
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() == NULL);
-}
-
-TEST_F(GpuControlListEntryTest, UnknownExceptionFieldEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 2,
- "exceptions": [
- {
- "unknown_field": 0
- }
- ],
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() == NULL);
-}
-
-TEST_F(GpuControlListEntryTest, UnknownFeatureEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "features": [
- "some_unknown_feature",
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() == NULL);
+ EXPECT_FALSE(entry.Contains(os_type[i], "10.6", gpu_info()));
+ EXPECT_TRUE(entry.Contains(kOsChromeOS, "10.6", gpu_info()));
}
TEST_F(GpuControlListEntryTest, GlVersionGLESEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "gl_type": "gles",
- "gl_version": {
- "op": "=",
- "value": "3.0"
- },
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_GlVersionGLESEntry);
GPUInfo gpu_info;
gpu_info.gl_version = "OpenGL ES 3.0 V@66.0 AU@ (CL@)";
- EXPECT_TRUE(entry->Contains(GpuControlList::kOsAndroid, "4.4.2", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsAndroid, "4.4.2", gpu_info));
gpu_info.gl_version = "OpenGL ES 3.0V@66.0 AU@ (CL@)";
- EXPECT_TRUE(entry->Contains(GpuControlList::kOsAndroid, "4.4.2", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsAndroid, "4.4.2", gpu_info));
gpu_info.gl_version = "OpenGL ES 3.1 V@66.0 AU@ (CL@)";
- EXPECT_FALSE(entry->Contains(GpuControlList::kOsAndroid, "4.4.2", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "4.4.2", gpu_info));
gpu_info.gl_version = "3.0 NVIDIA-8.24.11 310.90.9b01";
- EXPECT_FALSE(entry->Contains(GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.gl_version = "OpenGL ES 3.0 (ANGLE 1.2.0.2450)";
- EXPECT_FALSE(entry->Contains(GpuControlList::kOsWin, "6.1", gpu_info));
+ EXPECT_FALSE(entry.Contains(kOsWin, "6.1", gpu_info));
}
TEST_F(GpuControlListEntryTest, GlVersionANGLEEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "gl_type": "angle",
- "gl_version": {
- "op": ">",
- "value": "2.0"
- },
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_GlVersionANGLEEntry);
GPUInfo gpu_info;
gpu_info.gl_version = "OpenGL ES 3.0 V@66.0 AU@ (CL@)";
- EXPECT_FALSE(entry->Contains(GpuControlList::kOsAndroid, "4.4.2", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "4.4.2", gpu_info));
gpu_info.gl_version = "3.0 NVIDIA-8.24.11 310.90.9b01";
- EXPECT_FALSE(entry->Contains(GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.gl_version = "OpenGL ES 3.0 (ANGLE 1.2.0.2450)";
- EXPECT_TRUE(entry->Contains(GpuControlList::kOsWin, "6.1", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsWin, "6.1", gpu_info));
gpu_info.gl_version = "OpenGL ES 2.0 (ANGLE 1.2.0.2450)";
- EXPECT_FALSE(entry->Contains(GpuControlList::kOsWin, "6.1", gpu_info));
+ EXPECT_FALSE(entry.Contains(kOsWin, "6.1", gpu_info));
}
TEST_F(GpuControlListEntryTest, GlVersionGLEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "gl_type": "gl",
- "gl_version": {
- "op": "<",
- "value": "4.0"
- },
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_GlVersionGLEntry);
GPUInfo gpu_info;
gpu_info.gl_version = "OpenGL ES 3.0 V@66.0 AU@ (CL@)";
- EXPECT_FALSE(entry->Contains(GpuControlList::kOsAndroid, "4.4.2", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "4.4.2", gpu_info));
gpu_info.gl_version = "3.0 NVIDIA-8.24.11 310.90.9b01";
- EXPECT_TRUE(entry->Contains(GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.gl_version = "4.0 NVIDIA-8.24.11 310.90.9b01";
- EXPECT_FALSE(entry->Contains(GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.gl_version = "OpenGL ES 3.0 (ANGLE 1.2.0.2450)";
- EXPECT_FALSE(entry->Contains(GpuControlList::kOsWin, "6.1", gpu_info));
+ EXPECT_FALSE(entry.Contains(kOsWin, "6.1", gpu_info));
}
TEST_F(GpuControlListEntryTest, GlVendorEqual) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "gl_vendor": "NVIDIA",
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_GlVendorEqual);
GPUInfo gpu_info;
gpu_info.gl_vendor = "NVIDIA";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.9", gpu_info));
// Case sensitive.
gpu_info.gl_vendor = "NVidia";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.gl_vendor = "NVIDIA-x";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.9", gpu_info));
}
TEST_F(GpuControlListEntryTest, GlVendorWithDot) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "gl_vendor": "X\\.Org.*",
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_GlVendorWithDot);
GPUInfo gpu_info;
gpu_info.gl_vendor = "X.Org R300 Project";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsLinux, "", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsLinux, "", gpu_info));
gpu_info.gl_vendor = "X.Org";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsLinux, "", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsLinux, "", gpu_info));
}
TEST_F(GpuControlListEntryTest, GlRendererContains) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "gl_renderer": ".*GeForce.*",
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_GlRendererContains);
GPUInfo gpu_info;
gpu_info.gl_renderer = "NVIDIA GeForce GT 120 OpenGL Engine";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.9", gpu_info));
// Case sensitive.
gpu_info.gl_renderer = "NVIDIA GEFORCE GT 120 OpenGL Engine";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.gl_renderer = "GeForce GT 120 OpenGL Engine";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.gl_renderer = "NVIDIA GeForce";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.gl_renderer = "NVIDIA Ge Force";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.9", gpu_info));
}
TEST_F(GpuControlListEntryTest, GlRendererCaseInsensitive) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "gl_renderer": "(?i).*software.*",
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_GlRendererCaseInsensitive);
GPUInfo gpu_info;
gpu_info.gl_renderer = "software rasterizer";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.gl_renderer = "Software Rasterizer";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.9", gpu_info));
}
TEST_F(GpuControlListEntryTest, GlExtensionsEndWith) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "gl_extensions": ".*GL_SUN_slice_accum",
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_GlExtensionsEndWith);
GPUInfo gpu_info;
gpu_info.gl_extensions = "GL_SGIS_generate_mipmap "
"GL_SGIX_shadow "
"GL_SUN_slice_accum";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.gl_extensions = "GL_SGIS_generate_mipmap "
"GL_SUN_slice_accum "
"GL_SGIX_shadow";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-}
-
-TEST_F(GpuControlListEntryTest, DisabledEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "disabled": true,
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_TRUE(entry->disabled());
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.9", gpu_info));
}
TEST_F(GpuControlListEntryTest, OptimusEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "linux"
- },
- "multi_gpu_style": "optimus",
- "features": [
- "test_feature_0"
- ]
- }
- );
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_OptimusEntry);
+ EXPECT_EQ(kOsLinux, entry.conditions.os_type);
GPUInfo gpu_info;
gpu_info.optimus = true;
-
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsLinux, entry->GetOsType());
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsLinux, "10.6", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsLinux, "10.6", gpu_info));
}
TEST_F(GpuControlListEntryTest, AMDSwitchableEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "multi_gpu_style": "amd_switchable",
- "features": [
- "test_feature_0"
- ]
- }
- );
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_AMDSwitchableEntry);
+ EXPECT_EQ(kOsMacosx, entry.conditions.os_type);
GPUInfo gpu_info;
gpu_info.amd_switchable = true;
-
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsMacosx, entry->GetOsType());
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.6", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.6", gpu_info));
}
TEST_F(GpuControlListEntryTest, DriverVendorBeginWith) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "driver_vendor": "NVIDIA.*",
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_DriverVendorBeginWith);
GPUInfo gpu_info;
gpu_info.driver_vendor = "NVIDIA Corporation";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.9", gpu_info));
// Case sensitive.
gpu_info.driver_vendor = "NVidia Corporation";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.driver_vendor = "NVIDIA";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.9", gpu_info));
gpu_info.driver_vendor = "USA NVIDIA";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsMacosx, "10.9", gpu_info));
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.9", gpu_info));
}
TEST_F(GpuControlListEntryTest, LexicalDriverVersionEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "linux"
- },
- "vendor_id": "0x1002",
- "driver_version": {
- "op": "=",
- "style": "lexical",
- "value": "8.76"
- },
- "features": [
- "test_feature_0"
- ]
- }
- );
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_LexicalDriverVersionEntry);
+ EXPECT_EQ(kOsLinux, entry.conditions.os_type);
GPUInfo gpu_info;
gpu_info.gpu.vendor_id = 0x1002;
-
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsLinux, entry->GetOsType());
-
gpu_info.driver_version = "8.76";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsLinux, "10.6", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsLinux, "10.6", gpu_info));
gpu_info.driver_version = "8.768";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsLinux, "10.6", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsLinux, "10.6", gpu_info));
gpu_info.driver_version = "8.76.8";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsLinux, "10.6", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsLinux, "10.6", gpu_info));
}
TEST_F(GpuControlListEntryTest, NeedsMoreInfoEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "vendor_id": "0x8086",
- "driver_version": {
- "op": "<",
- "value": "10.7"
- },
- "features": [
- "test_feature_1"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_NeedsMoreInfoEntry);
GPUInfo gpu_info;
gpu_info.gpu.vendor_id = 0x8086;
- EXPECT_TRUE(entry->NeedsMoreInfo(gpu_info, true));
-
+ EXPECT_TRUE(entry.NeedsMoreInfo(gpu_info, true));
gpu_info.driver_version = "10.6";
- EXPECT_FALSE(entry->NeedsMoreInfo(gpu_info, true));
+ EXPECT_FALSE(entry.NeedsMoreInfo(gpu_info, true));
}
TEST_F(GpuControlListEntryTest, NeedsMoreInfoForExceptionsEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "vendor_id": "0x8086",
- "exceptions": [
- {
- "gl_renderer": ".*mesa.*"
- }
- ],
- "features": [
- "test_feature_1"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_NeedsMoreInfoForExceptionsEntry);
GPUInfo gpu_info;
gpu_info.gpu.vendor_id = 0x8086;
- EXPECT_TRUE(entry->NeedsMoreInfo(gpu_info, true));
- EXPECT_FALSE(entry->NeedsMoreInfo(gpu_info, false));
-
+ EXPECT_TRUE(entry.NeedsMoreInfo(gpu_info, true));
+ EXPECT_FALSE(entry.NeedsMoreInfo(gpu_info, false));
gpu_info.gl_renderer = "mesa";
- EXPECT_FALSE(entry->NeedsMoreInfo(gpu_info, true));
+ EXPECT_FALSE(entry.NeedsMoreInfo(gpu_info, true));
}
TEST_F(GpuControlListEntryTest, NeedsMoreInfoForGlVersionEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id" : 1,
- "gl_type": "gl",
- "gl_version": {
- "op": "<",
- "value" : "3.5"
- },
- "features" : [
- "test_feature_1"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
-
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_NeedsMoreInfoForGlVersionEntry);
GPUInfo gpu_info;
- EXPECT_TRUE(entry->NeedsMoreInfo(gpu_info, true));
- EXPECT_TRUE(
- entry->Contains(GpuControlList::kOsUnknown, std::string(), gpu_info));
-
+ EXPECT_TRUE(entry.NeedsMoreInfo(gpu_info, true));
+ EXPECT_TRUE(entry.Contains(kOsLinux, std::string(), gpu_info));
gpu_info.gl_version = "3.1 Mesa 11.1.0";
- EXPECT_FALSE(entry->NeedsMoreInfo(gpu_info, false));
- EXPECT_TRUE(
- entry->Contains(GpuControlList::kOsUnknown, std::string(), gpu_info));
-
+ EXPECT_FALSE(entry.NeedsMoreInfo(gpu_info, false));
+ EXPECT_TRUE(entry.Contains(kOsLinux, std::string(), gpu_info));
gpu_info.gl_version = "4.1 Mesa 12.1.0";
- EXPECT_FALSE(entry->NeedsMoreInfo(gpu_info, false));
- EXPECT_FALSE(
- entry->Contains(GpuControlList::kOsUnknown, std::string(), gpu_info));
-
+ EXPECT_FALSE(entry.NeedsMoreInfo(gpu_info, false));
+ EXPECT_FALSE(entry.Contains(kOsLinux, std::string(), gpu_info));
gpu_info.gl_version = "OpenGL ES 2.0 Mesa 12.1.0";
- EXPECT_FALSE(entry->NeedsMoreInfo(gpu_info, false));
- EXPECT_FALSE(
- entry->Contains(GpuControlList::kOsUnknown, std::string(), gpu_info));
+ EXPECT_FALSE(entry.NeedsMoreInfo(gpu_info, false));
+ EXPECT_FALSE(entry.Contains(kOsLinux, std::string(), gpu_info));
}
TEST_F(GpuControlListEntryTest, FeatureTypeAllEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "features": [
- "all"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json, true));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(3u, entry->features().size());
- EXPECT_EQ(1u, entry->features().count(TEST_FEATURE_0));
- EXPECT_EQ(1u, entry->features().count(TEST_FEATURE_1));
- EXPECT_EQ(1u, entry->features().count(TEST_FEATURE_2));
-}
-
-TEST_F(GpuControlListEntryTest, FeatureTypeAllEntryWithExceptions) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "features": [
- "all",
- {"exceptions" : [
- "test_feature_0"
- ]}
- ]
- }
- );
- bool supports_feature_type_all = true;
- ScopedEntry entry(GetEntryFromString(json, supports_feature_type_all));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(1u, entry->features().count(TEST_FEATURE_1));
- EXPECT_EQ(1u, entry->features().count(TEST_FEATURE_2));
- EXPECT_EQ(2u, entry->features().size());
-
- supports_feature_type_all = false;
- entry = ScopedEntry(GetEntryFromString(json, supports_feature_type_all));
- EXPECT_TRUE(entry.get() == NULL);
-}
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_FeatureTypeAllEntry);
-TEST_F(GpuControlListEntryTest, FeatureTypeAllEntryWithUnknownField) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "features": [
- "all", {
- "exceptions" : [
- "test_feature_0"
- ],
- "unknown_field" : 0
- }
- ]
- }
- );
- bool supports_feature_type_all = true;
- ScopedEntry entry(GetEntryFromString(json, supports_feature_type_all));
- EXPECT_TRUE(entry.get() == NULL);
-
- supports_feature_type_all = false;
- entry = ScopedEntry(GetEntryFromString(json, supports_feature_type_all));
- EXPECT_TRUE(entry.get() == NULL);
+ EXPECT_EQ(3u, entry.feature_size);
+ EXPECT_EQ(1u, CountFeature(entry, TEST_FEATURE_0));
+ EXPECT_EQ(1u, CountFeature(entry, TEST_FEATURE_1));
+ EXPECT_EQ(1u, CountFeature(entry, TEST_FEATURE_2));
}
-TEST_F(GpuControlListEntryTest, InvalidVendorIdEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "vendor_id": "0x0000",
- "features": [
- "test_feature_1"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() == NULL);
-}
-
-TEST_F(GpuControlListEntryTest, InvalidDeviceIdEntry) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "vendor_id": "0x10de",
- "device_id": ["0x1023", "0x0000"],
- "features": [
- "test_feature_1"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() == NULL);
+TEST_F(GpuControlListEntryTest, FeatureTypeAllEntryWithExceptions) {
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_FeatureTypeAllEntryWithExceptions);
+ EXPECT_EQ(2u, entry.feature_size);
+ EXPECT_EQ(1u, CountFeature(entry, TEST_FEATURE_1));
+ EXPECT_EQ(1u, CountFeature(entry, TEST_FEATURE_2));
}
TEST_F(GpuControlListEntryTest, SingleActiveGPU) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "vendor_id": "0x10de",
- "device_id": ["0x0640"],
- "multi_gpu_category": "active",
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsMacosx, entry->GetOsType());
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.6", gpu_info()));
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_SingleActiveGPU);
+ EXPECT_EQ(kOsMacosx, entry.conditions.os_type);
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.6", gpu_info()));
}
TEST_F(GpuControlListEntryTest, MachineModelName) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "android"
- },
- "machine_model_name": [
- "Nexus 4", "XT1032", "GT-.*", "SCH-.*"
- ],
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsAndroid, entry->GetOsType());
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_MachineModelName);
+ EXPECT_EQ(kOsAndroid, entry.conditions.os_type);
GPUInfo gpu_info;
-
gpu_info.machine_model_name = "Nexus 4";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsAndroid, "4.1", gpu_info));
gpu_info.machine_model_name = "XT1032";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsAndroid, "4.1", gpu_info));
gpu_info.machine_model_name = "XT1032i";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "4.1", gpu_info));
gpu_info.machine_model_name = "Nexus 5";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "4.1", gpu_info));
gpu_info.machine_model_name = "Nexus";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "4.1", gpu_info));
gpu_info.machine_model_name = "";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "4.1", gpu_info));
gpu_info.machine_model_name = "GT-N7100";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsAndroid, "4.1", gpu_info));
gpu_info.machine_model_name = "GT-I9300";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsAndroid, "4.1", gpu_info));
gpu_info.machine_model_name = "SCH-I545";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsAndroid, "4.1", gpu_info));
}
TEST_F(GpuControlListEntryTest, MachineModelNameException) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "exceptions": [
- {
- "os": {
- "type": "android"
- },
- "machine_model_name": ["Nexus.*"]
- }
- ],
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsAny, entry->GetOsType());
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_MachineModelNameException);
+ EXPECT_EQ(kOsAny, entry.conditions.os_type);
GPUInfo gpu_info;
-
gpu_info.machine_model_name = "Nexus 4";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsLinux, "4.1", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "4.1", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsLinux, "4.1", gpu_info));
gpu_info.machine_model_name = "Nexus 7";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsLinux, "4.1", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "4.1", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsLinux, "4.1", gpu_info));
gpu_info.machine_model_name = "";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsAndroid, "4.1", gpu_info));
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsLinux, "4.1", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsAndroid, "4.1", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsLinux, "4.1", gpu_info));
}
TEST_F(GpuControlListEntryTest, MachineModelVersion) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "machine_model_name": ["MacBookPro"],
- "machine_model_version": {
- "op": "=",
- "value": "7.1"
- },
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_MachineModelVersion);
GPUInfo gpu_info;
gpu_info.machine_model_name = "MacBookPro";
gpu_info.machine_model_version = "7.1";
- EXPECT_EQ(GpuControlList::kOsMacosx, entry->GetOsType());
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.6", gpu_info));
+ EXPECT_EQ(kOsMacosx, entry.conditions.os_type);
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.6", gpu_info));
}
TEST_F(GpuControlListEntryTest, MachineModelVersionException) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "machine_model_name": ["MacBookPro"],
- "exceptions": [
- {
- "machine_model_version": {
- "op": ">",
- "value": "7.1"
- }
- }
- ],
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsMacosx, entry->GetOsType());
-
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_MachineModelVersionException);
+ EXPECT_EQ(kOsMacosx, entry.conditions.os_type);
GPUInfo gpu_info;
gpu_info.machine_model_name = "MacBookPro";
gpu_info.machine_model_version = "7.0";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.6", gpu_info));
-
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.6", gpu_info));
gpu_info.machine_model_version = "7.2";
- EXPECT_FALSE(entry->Contains(
- GpuControlList::kOsMacosx, "10.6", gpu_info));
-
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.6", gpu_info));
gpu_info.machine_model_version = "";
- EXPECT_TRUE(entry->Contains(
- GpuControlList::kOsMacosx, "10.6", gpu_info));
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.6", gpu_info));
}
class GpuControlListEntryDualGPUTest : public GpuControlListEntryTest {
@@ -1140,238 +443,226 @@ class GpuControlListEntryDualGPUTest : public GpuControlListEntryTest {
gpu_info_.secondary_gpus[0].active = false;
}
- void EntryShouldApply(const std::string& entry_json) const {
- EXPECT_TRUE(EntryApplies(entry_json));
+ void EntryShouldApply(const Entry& entry) const {
+ EXPECT_TRUE(EntryApplies(entry));
}
- void EntryShouldNotApply(const std::string& entry_json) const {
- EXPECT_FALSE(EntryApplies(entry_json));
+ void EntryShouldNotApply(const Entry& entry) const {
+ EXPECT_FALSE(EntryApplies(entry));
}
private:
- bool EntryApplies(const std::string& entry_json) const {
- ScopedEntry entry(GetEntryFromString(entry_json));
- EXPECT_TRUE(entry.get());
- EXPECT_EQ(GpuControlList::kOsMacosx, entry->GetOsType());
- return entry->Contains(GpuControlList::kOsMacosx, "10.6", gpu_info());
+ bool EntryApplies(const Entry& entry) const {
+ EXPECT_EQ(kOsMacosx, entry.conditions.os_type);
+ return entry.Contains(kOsMacosx, "10.6", gpu_info());
}
};
TEST_F(GpuControlListEntryDualGPUTest, CategoryAny) {
- const std::string json_intel = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "vendor_id": "0x8086",
- "device_id": ["0x0166"],
- "multi_gpu_category": "any",
- "features": [
- "test_feature_0"
- ]
- }
- );
- EntryShouldApply(json_intel);
-
- const std::string json_nvidia = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "vendor_id": "0x10de",
- "device_id": ["0x0640"],
- "multi_gpu_category": "any",
- "features": [
- "test_feature_0"
- ]
- }
- );
- EntryShouldApply(json_nvidia);
+ const Entry& entry_intel =
+ GetEntry(kGpuControlListEntryDualGPUTest_CategoryAny_Intel);
+ EntryShouldApply(entry_intel);
+ const Entry& entry_nvidia =
+ GetEntry(kGpuControlListEntryDualGPUTest_CategoryAny_NVidia);
+ EntryShouldApply(entry_nvidia);
}
TEST_F(GpuControlListEntryDualGPUTest, CategoryPrimarySecondary) {
- const std::string json_secondary = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "vendor_id": "0x8086",
- "device_id": ["0x0166"],
- "multi_gpu_category": "secondary",
- "features": [
- "test_feature_0"
- ]
- }
- );
- EntryShouldApply(json_secondary);
-
- const std::string json_primary = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "vendor_id": "0x8086",
- "device_id": ["0x0166"],
- "multi_gpu_category": "primary",
- "features": [
- "test_feature_0"
- ]
- }
- );
- EntryShouldNotApply(json_primary);
-
- const std::string json_default = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "vendor_id": "0x8086",
- "device_id": ["0x0166"],
- "features": [
- "test_feature_0"
- ]
- }
- );
+ const Entry& entry_secondary =
+ GetEntry(kGpuControlListEntryDualGPUTest_CategorySecondary);
+ EntryShouldApply(entry_secondary);
+ const Entry& entry_primary =
+ GetEntry(kGpuControlListEntryDualGPUTest_CategoryPrimary);
+ EntryShouldNotApply(entry_primary);
+ const Entry& entry_default =
+ GetEntry(kGpuControlListEntryDualGPUTest_CategoryDefault);
// Default is active, and the secondary Intel GPU is active.
- EntryShouldApply(json_default);
+ EntryShouldApply(entry_default);
}
TEST_F(GpuControlListEntryDualGPUTest, ActiveSecondaryGPU) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "vendor_id": "0x8086",
- "device_id": ["0x0166", "0x0168"],
- "multi_gpu_category": "active",
- "features": [
- "test_feature_0"
- ]
- }
- );
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryDualGPUTest_ActiveSecondaryGPU);
// By default, secondary GPU is active.
- EntryShouldApply(json);
-
+ EntryShouldApply(entry);
ActivatePrimaryGPU();
- EntryShouldNotApply(json);
+ EntryShouldNotApply(entry);
}
TEST_F(GpuControlListEntryDualGPUTest, VendorOnlyActiveSecondaryGPU) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "vendor_id": "0x8086",
- "multi_gpu_category": "active",
- "features": [
- "test_feature_0"
- ]
- }
- );
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryDualGPUTest_VendorOnlyActiveSecondaryGPU);
// By default, secondary GPU is active.
- EntryShouldApply(json);
-
+ EntryShouldApply(entry);
ActivatePrimaryGPU();
- EntryShouldNotApply(json);
+ EntryShouldNotApply(entry);
}
TEST_F(GpuControlListEntryDualGPUTest, ActivePrimaryGPU) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "vendor_id": "0x10de",
- "device_id": ["0x0640"],
- "multi_gpu_category": "active",
- "features": [
- "test_feature_0"
- ]
- }
- );
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryDualGPUTest_ActivePrimaryGPU);
// By default, secondary GPU is active.
- EntryShouldNotApply(json);
-
+ EntryShouldNotApply(entry);
ActivatePrimaryGPU();
- EntryShouldApply(json);
+ EntryShouldApply(entry);
}
TEST_F(GpuControlListEntryDualGPUTest, VendorOnlyActivePrimaryGPU) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "vendor_id": "0x10de",
- "multi_gpu_category": "active",
- "features": [
- "test_feature_0"
- ]
- }
- );
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryDualGPUTest_VendorOnlyActivePrimaryGPU);
// By default, secondary GPU is active.
- EntryShouldNotApply(json);
-
+ EntryShouldNotApply(entry);
ActivatePrimaryGPU();
- EntryShouldApply(json);
+ EntryShouldApply(entry);
}
-TEST_F(GpuControlListEntryTest, LinuxKernelVersion) {
- const std::string json = LONG_STRING_CONST(
- {
- "id": 1,
- "os": {
- "type": "linux",
- "version": {
- "op": "<",
- "value": "3.19.1"
- }
- },
- "vendor_id": "0x8086",
- "features": [
- "test_feature_0"
- ]
- }
- );
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsLinux, entry->GetOsType());
+TEST_F(GpuControlListEntryTest, PixelShaderVersion) {
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_PixelShaderVersion);
+ EXPECT_EQ(kOsAny, entry.conditions.os_type);
+ GPUInfo gpu_info;
+ gpu_info.pixel_shader_version = "3.2";
+ EXPECT_TRUE(entry.Contains(kOsMacosx, "10.9", gpu_info));
+ gpu_info.pixel_shader_version = "4.9";
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.9", gpu_info));
+}
+TEST_F(GpuControlListEntryTest, OsVersionZero) {
+ {
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_OsVersionZeroLT);
+ // All forms of version 0 is considered invalid.
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "0", gpu_info()));
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "0.0", gpu_info()));
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "0.00.0", gpu_info()));
+ }
+ {
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_OsVersionZeroAny);
+ EXPECT_TRUE(entry.Contains(kOsAndroid, "0", gpu_info()));
+ EXPECT_TRUE(entry.Contains(kOsAndroid, "0.0", gpu_info()));
+ EXPECT_TRUE(entry.Contains(kOsAndroid, "0.00.0", gpu_info()));
+ }
+}
+
+TEST_F(GpuControlListEntryTest, OsComparison) {
+ {
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_OsComparisonAny);
+ const GpuControlList::OsType os_type[] = {kOsWin, kOsLinux, kOsMacosx,
+ kOsChromeOS, kOsAndroid};
+ for (size_t i = 0; i < arraysize(os_type); ++i) {
+ EXPECT_TRUE(entry.Contains(os_type[i], std::string(), gpu_info()));
+ EXPECT_TRUE(entry.Contains(os_type[i], "7.8", gpu_info()));
+ }
+ }
+ {
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_OsComparisonGE);
+ EXPECT_FALSE(entry.Contains(kOsMacosx, "10.8.3", gpu_info()));
+ EXPECT_FALSE(entry.Contains(kOsLinux, "10", gpu_info()));
+ EXPECT_FALSE(entry.Contains(kOsChromeOS, "13", gpu_info()));
+ EXPECT_FALSE(entry.Contains(kOsAndroid, "7", gpu_info()));
+ EXPECT_FALSE(entry.Contains(kOsWin, std::string(), gpu_info()));
+ EXPECT_TRUE(entry.Contains(kOsWin, "6", gpu_info()));
+ EXPECT_TRUE(entry.Contains(kOsWin, "6.1", gpu_info()));
+ EXPECT_TRUE(entry.Contains(kOsWin, "7", gpu_info()));
+ EXPECT_FALSE(entry.Contains(kOsWin, "5", gpu_info()));
+ }
+}
+
+TEST_F(GpuControlListEntryTest, ExceptionWithoutVendorId) {
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_ExceptionWithoutVendorId);
+ EXPECT_EQ(0x8086u, entry.exceptions[0].vendor_id);
+ EXPECT_EQ(0x8086u, entry.exceptions[1].vendor_id);
GPUInfo gpu_info;
gpu_info.gpu.vendor_id = 0x8086;
+ gpu_info.gpu.device_id = 0x2a02;
+ gpu_info.driver_version = "9.1";
+ EXPECT_FALSE(entry.Contains(kOsLinux, "2.1", gpu_info));
+ gpu_info.driver_version = "9.0";
+ EXPECT_TRUE(entry.Contains(kOsLinux, "2.1", gpu_info));
+}
- EXPECT_TRUE(entry->Contains(GpuControlList::kOsLinux,
- "3.13.0-63-generic",
- gpu_info));
- EXPECT_FALSE(entry->Contains(GpuControlList::kOsLinux,
- "3.19.2-1-generic",
- gpu_info));
+TEST_F(GpuControlListEntryTest, MultiGpuStyleAMDSwitchable) {
+ GPUInfo gpu_info;
+ gpu_info.amd_switchable = true;
+ gpu_info.gpu.vendor_id = 0x1002;
+ gpu_info.gpu.device_id = 0x6760;
+ GPUInfo::GPUDevice integrated_gpu;
+ integrated_gpu.vendor_id = 0x8086;
+ integrated_gpu.device_id = 0x0116;
+ gpu_info.secondary_gpus.push_back(integrated_gpu);
+
+ { // amd_switchable_discrete entry
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_MultiGpuStyleAMDSwitchableDiscrete);
+ // Integrated GPU is active
+ gpu_info.gpu.active = false;
+ gpu_info.secondary_gpus[0].active = true;
+ EXPECT_FALSE(entry.Contains(kOsWin, "6.0", gpu_info));
+ // Discrete GPU is active
+ gpu_info.gpu.active = true;
+ gpu_info.secondary_gpus[0].active = false;
+ EXPECT_TRUE(entry.Contains(kOsWin, "6.0", gpu_info));
+ }
+
+ { // amd_switchable_integrated entry
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_MultiGpuStyleAMDSwitchableIntegrated);
+ // Discrete GPU is active
+ gpu_info.gpu.active = true;
+ gpu_info.secondary_gpus[0].active = false;
+ EXPECT_FALSE(entry.Contains(kOsWin, "6.0", gpu_info));
+ // Integrated GPU is active
+ gpu_info.gpu.active = false;
+ gpu_info.secondary_gpus[0].active = true;
+ EXPECT_TRUE(entry.Contains(kOsWin, "6.0", gpu_info));
+ // For non AMD switchable
+ gpu_info.amd_switchable = false;
+ EXPECT_FALSE(entry.Contains(kOsWin, "6.0", gpu_info));
+ }
}
-TEST_F(GpuControlListEntryTest, PixelShaderVersion) {
- const std::string json = LONG_STRING_CONST(
- {"id" : 1, "pixel_shader_version" : {"op" : "<", "value" : "4.1"}});
- ScopedEntry entry(GetEntryFromString(json));
- EXPECT_TRUE(entry.get() != NULL);
- EXPECT_EQ(GpuControlList::kOsAny, entry->GetOsType());
+TEST_F(GpuControlListEntryTest, InProcessGPU) {
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_InProcessGPU);
+ GPUInfo gpu_info;
+ gpu_info.in_process_gpu = true;
+ EXPECT_TRUE(entry.Contains(kOsWin, "6.1", gpu_info));
+ gpu_info.in_process_gpu = false;
+ EXPECT_FALSE(entry.Contains(kOsWin, "6.1", gpu_info));
+}
+TEST_F(GpuControlListEntryTest, SameGPUTwiceTest) {
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_SameGPUTwiceTest);
GPUInfo gpu_info;
- gpu_info.pixel_shader_version = "3.2";
- EXPECT_TRUE(entry->Contains(GpuControlList::kOsMacosx, "10.9", gpu_info));
- gpu_info.pixel_shader_version = "4.9";
- EXPECT_FALSE(entry->Contains(GpuControlList::kOsMacosx, "10.9", gpu_info));
+ gpu_info.gpu.vendor_id = 0x8086;
+ // Real case on Intel GMA* on Windows
+ gpu_info.secondary_gpus.push_back(gpu_info.gpu);
+ EXPECT_TRUE(entry.Contains(kOsWin, "6.1", gpu_info));
}
-} // namespace gpu
+TEST_F(GpuControlListEntryTest, NVidiaNumberingScheme) {
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_NVidiaNumberingScheme);
+ GPUInfo gpu_info;
+ gpu_info.gl_vendor = "NVIDIA";
+ gpu_info.gl_renderer = "NVIDIA GeForce GT 120 OpenGL Engine";
+ gpu_info.gpu.vendor_id = 0x10de;
+ gpu_info.gpu.device_id = 0x0640;
+ // test the same driver version number
+ gpu_info.driver_version = "8.17.12.6973";
+ EXPECT_TRUE(entry.Contains(kOsWin, "7.0", gpu_info));
+ // test a lower driver version number
+ gpu_info.driver_version = "8.15.11.8647";
+ EXPECT_TRUE(entry.Contains(kOsWin, "7.0", gpu_info));
+ // test a higher driver version number
+ gpu_info.driver_version = "9.18.13.2723";
+ EXPECT_FALSE(entry.Contains(kOsWin, "7.0", gpu_info));
+}
+
+TEST_F(GpuControlListEntryTest, DirectRendering) {
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_DirectRendering);
+ GPUInfo gpu_info;
+ gpu_info.direct_rendering = true;
+ EXPECT_FALSE(entry.Contains(kOsLinux, "7.0", gpu_info));
+ gpu_info.direct_rendering = false;
+ EXPECT_TRUE(entry.Contains(kOsLinux, "7.0", gpu_info));
+}
+} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list_jsons.h b/chromium/gpu/config/gpu_control_list_jsons.h
deleted file mode 100644
index 71d574cbe82..00000000000
--- a/chromium/gpu/config/gpu_control_list_jsons.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_CONFIG_GPU_CONTROL_LIST_JSONS_H_
-#define GPU_CONFIG_GPU_CONTROL_LIST_JSONS_H_
-
-#include "gpu/gpu_export.h"
-
-namespace gpu {
-
-GPU_EXPORT extern const char kGpuDriverBugListJson[];
-GPU_EXPORT extern const char kSoftwareRenderingListJson[];
-
-} // namespace gpu
-
-#endif // GPU_CONFIG_GPU_CONTROL_LIST_JSONS_H_
-
diff --git a/chromium/gpu/config/gpu_control_list_number_info_unittest.cc b/chromium/gpu/config/gpu_control_list_number_info_unittest.cc
deleted file mode 100644
index f2fffffe043..00000000000
--- a/chromium/gpu/config/gpu_control_list_number_info_unittest.cc
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stddef.h>
-
-#include "gpu/config/gpu_control_list.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace gpu {
-
-class NumberInfoTest : public testing::Test {
- public:
- NumberInfoTest() { }
- ~NumberInfoTest() override {}
-
- typedef GpuControlList::FloatInfo FloatInfo;
- typedef GpuControlList::IntInfo IntInfo;
- typedef GpuControlList::BoolInfo BoolInfo;
-};
-
-TEST_F(NumberInfoTest, ValidFloatInfo) {
- const std::string op[] = {
- "=",
- "<",
- "<=",
- ">",
- ">=",
- "any",
- "between"
- };
- for (size_t i = 0; i < arraysize(op); ++i) {
- std::string value1;
- std::string value2;
- if (op[i] != "any")
- value1 = "3.14";
- if (op[i] == "between")
- value2 = "4.21";
- FloatInfo info(op[i], value1, value2);
- EXPECT_TRUE(info.IsValid());
- }
-
- const std::string value[] = {
- "1.0E12",
- "1.0e12",
- "2013",
- "1.0e-12",
- "2.1400",
- "-2.14",
- };
- for (size_t i = 0; i < arraysize(value); ++i) {
- FloatInfo info("=", value[i], std::string());
- EXPECT_TRUE(info.IsValid());
- }
-}
-
-TEST_F(NumberInfoTest, InvalidFloatInfo) {
- const std::string op[] = {
- "=",
- "<",
- "<=",
- ">",
- ">=",
- };
- for (size_t i = 0; i < arraysize(op); ++i) {
- FloatInfo info(op[i], std::string(), std::string());
- EXPECT_FALSE(info.IsValid());
- }
- {
- FloatInfo info("between", "3.14", std::string());
- EXPECT_FALSE(info.IsValid());
- }
- const std::string value[] = {
- "1.0 E12",
- "1.0e 12",
- " 2013",
- "2013 ",
- "- 2.14",
- };
- for (size_t i = 0; i < arraysize(value); ++i) {
- FloatInfo info("=", value[i], std::string());
- EXPECT_FALSE(info.IsValid());
- }
-}
-
-TEST_F(NumberInfoTest, FloatComparison) {
- {
- FloatInfo info("=", "3.14", std::string());
- EXPECT_TRUE(info.Contains(3.14f));
- EXPECT_TRUE(info.Contains(3.1400f));
- EXPECT_FALSE(info.Contains(3.1f));
- EXPECT_FALSE(info.Contains(3));
- }
- {
- FloatInfo info(">", "3.14", std::string());
- EXPECT_FALSE(info.Contains(3.14f));
- EXPECT_TRUE(info.Contains(3.141f));
- EXPECT_FALSE(info.Contains(3.1f));
- }
- {
- FloatInfo info("<=", "3.14", std::string());
- EXPECT_TRUE(info.Contains(3.14f));
- EXPECT_FALSE(info.Contains(3.141f));
- EXPECT_TRUE(info.Contains(3.1f));
- }
- {
- FloatInfo info("any", std::string(), std::string());
- EXPECT_TRUE(info.Contains(3.14f));
- }
- {
- FloatInfo info("between", "3.14", "5.4");
- EXPECT_TRUE(info.Contains(3.14f));
- EXPECT_TRUE(info.Contains(5.4f));
- EXPECT_TRUE(info.Contains(4));
- EXPECT_FALSE(info.Contains(5.6f));
- EXPECT_FALSE(info.Contains(3.12f));
- }
-}
-
-TEST_F(NumberInfoTest, ValidIntInfo) {
- const std::string op[] = {
- "=",
- "<",
- "<=",
- ">",
- ">=",
- "any",
- "between"
- };
- for (size_t i = 0; i < arraysize(op); ++i) {
- std::string value1;
- std::string value2;
- if (op[i] != "any")
- value1 = "3";
- if (op[i] == "between")
- value2 = "9";
- IntInfo info(op[i], value1, value2);
- EXPECT_TRUE(info.IsValid());
- }
-
- const std::string value[] = {
- "12",
- "-12",
- };
- for (size_t i = 0; i < arraysize(value); ++i) {
- IntInfo info("=", value[i], std::string());
- EXPECT_TRUE(info.IsValid());
- }
-}
-
-TEST_F(NumberInfoTest, InvalidIntInfo) {
- const std::string op[] = {
- "=",
- "<",
- "<=",
- ">",
- ">=",
- };
- for (size_t i = 0; i < arraysize(op); ++i) {
- IntInfo info(op[i], std::string(), std::string());
- EXPECT_FALSE(info.IsValid());
- }
- {
- IntInfo info("between", "3", std::string());
- EXPECT_FALSE(info.IsValid());
- }
- const std::string value[] = {
- " 12",
- "12 ",
- "- 12",
- " -12",
- "3.14"
- };
- for (size_t i = 0; i < arraysize(value); ++i) {
- IntInfo info("=", value[i], std::string());
- EXPECT_FALSE(info.IsValid());
- }
-}
-
-TEST_F(NumberInfoTest, IntComparison) {
- {
- IntInfo info("=", "3", std::string());
- EXPECT_TRUE(info.Contains(3));
- EXPECT_FALSE(info.Contains(4));
- }
- {
- IntInfo info(">", "3", std::string());
- EXPECT_FALSE(info.Contains(2));
- EXPECT_FALSE(info.Contains(3));
- EXPECT_TRUE(info.Contains(4));
- }
- {
- IntInfo info("<=", "3", std::string());
- EXPECT_TRUE(info.Contains(2));
- EXPECT_TRUE(info.Contains(3));
- EXPECT_FALSE(info.Contains(4));
- }
- {
- IntInfo info("any", std::string(), std::string());
- EXPECT_TRUE(info.Contains(3));
- }
- {
- IntInfo info("between", "3", "5");
- EXPECT_TRUE(info.Contains(3));
- EXPECT_TRUE(info.Contains(5));
- EXPECT_TRUE(info.Contains(4));
- EXPECT_FALSE(info.Contains(6));
- EXPECT_FALSE(info.Contains(2));
- }
-}
-
-TEST_F(NumberInfoTest, Bool) {
- {
- BoolInfo info(true);
- EXPECT_TRUE(info.Contains(true));
- EXPECT_FALSE(info.Contains(false));
- }
- {
- BoolInfo info(false);
- EXPECT_FALSE(info.Contains(true));
- EXPECT_TRUE(info.Contains(false));
- }
-}
-
-} // namespace gpu
-
diff --git a/chromium/gpu/config/gpu_control_list_os_info_unittest.cc b/chromium/gpu/config/gpu_control_list_os_info_unittest.cc
deleted file mode 100644
index 510487c4dea..00000000000
--- a/chromium/gpu/config/gpu_control_list_os_info_unittest.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stddef.h>
-
-#include "gpu/config/gpu_control_list.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace gpu {
-
-class OsInfoTest : public testing::Test {
- public:
- OsInfoTest() { }
- ~OsInfoTest() override {}
-
- typedef GpuControlList::OsInfo OsInfo;
-};
-
-TEST_F(OsInfoTest, ValidOsInfo) {
- const std::string os[] = {
- "win",
- "linux",
- "macosx",
- "chromeos",
- "android",
- "any"
- };
- const GpuControlList::OsType os_type[] = {
- GpuControlList::kOsWin,
- GpuControlList::kOsLinux,
- GpuControlList::kOsMacosx,
- GpuControlList::kOsChromeOS,
- GpuControlList::kOsAndroid,
- GpuControlList::kOsAny
- };
- for (size_t i = 0; i < arraysize(os); ++i) {
- OsInfo info(os[i], "=", "10.6", std::string());
- EXPECT_TRUE(info.IsValid());
- EXPECT_EQ(os_type[i], info.type());
- }
- {
- OsInfo info("any", "any", std::string(), std::string());
- EXPECT_TRUE(info.IsValid());
- }
-}
-
-TEST_F(OsInfoTest, InvalidOsInfo) {
- const std::string os[] = {
- "win",
- "linux",
- "macosx",
- "chromeos",
- "android",
- "any"
- };
- for (size_t i = 0; i < arraysize(os); ++i) {
- {
- OsInfo info(os[i], std::string(), std::string(), std::string());
- EXPECT_FALSE(info.IsValid());
- }
- {
- OsInfo info(os[i], "=", std::string(), std::string());
- EXPECT_FALSE(info.IsValid());
- }
- {
- OsInfo info(os[i], std::string(), "10.6", std::string());
- EXPECT_FALSE(info.IsValid());
- }
- }
- const std::string os_cap[] = {
- "Win",
- "Linux",
- "MacOSX",
- "ChromeOS",
- "Android",
- };
- for (size_t i = 0; i < arraysize(os_cap); ++i) {
- OsInfo info(os_cap[i], "=", "10.6", std::string());
- EXPECT_FALSE(info.IsValid());
- }
-}
-
-TEST_F(OsInfoTest, NonNumericOsVersion) {
- {
- OsInfo info("android", "<", "4.2", std::string());
- EXPECT_TRUE(info.IsValid());
- // The expectation is the version number first, then extra info.
- EXPECT_TRUE(info.Contains(
- GpuControlList::kOsAndroid, "4.0 bug fix version 5.2"));
- EXPECT_FALSE(info.Contains(GpuControlList::kOsAndroid, "F"));
- EXPECT_FALSE(info.Contains(GpuControlList::kOsAndroid, "F 4.0"));
- EXPECT_FALSE(info.Contains(GpuControlList::kOsAndroid, std::string()));
- }
- {
- OsInfo info("android", "any", std::string(), std::string());
- EXPECT_TRUE(info.IsValid());
- EXPECT_TRUE(info.Contains(
- GpuControlList::kOsAndroid, "4.0 bug fix version 5.2"));
- EXPECT_TRUE(info.Contains(GpuControlList::kOsAndroid, "F"));
- EXPECT_TRUE(info.Contains(GpuControlList::kOsAndroid, "F 4.0"));
- EXPECT_TRUE(info.Contains(GpuControlList::kOsAndroid, std::string()));
- }
-}
-
-TEST_F(OsInfoTest, OsVersionZero) {
- {
- OsInfo info("android", "<", "4.2", std::string());
- EXPECT_TRUE(info.IsValid());
- // All forms of version 0 is considered invalid.
- EXPECT_FALSE(info.Contains(GpuControlList::kOsAndroid, "0"));
- EXPECT_FALSE(info.Contains(GpuControlList::kOsAndroid, "0.0"));
- EXPECT_FALSE(info.Contains(GpuControlList::kOsAndroid, "0.00.0"));
- }
- {
- OsInfo info("android", "any", std::string(), std::string());
- EXPECT_TRUE(info.IsValid());
- EXPECT_TRUE(info.Contains(GpuControlList::kOsAndroid, "0"));
- EXPECT_TRUE(info.Contains(GpuControlList::kOsAndroid, "0.0"));
- EXPECT_TRUE(info.Contains(GpuControlList::kOsAndroid, "0.00.0"));
- }
-}
-
-TEST_F(OsInfoTest, OsComparison) {
- {
- OsInfo info("any", "any", std::string(), std::string());
- const GpuControlList::OsType os_type[] = {
- GpuControlList::kOsWin, GpuControlList::kOsLinux,
- GpuControlList::kOsMacosx, GpuControlList::kOsChromeOS,
- GpuControlList::kOsAndroid,
- };
- for (size_t i = 0; i < arraysize(os_type); ++i) {
- EXPECT_TRUE(info.Contains(os_type[i], std::string()));
- EXPECT_TRUE(info.Contains(os_type[i], "7.8"));
- }
- }
- {
- OsInfo info("win", ">=", "6", std::string());
- EXPECT_FALSE(info.Contains(GpuControlList::kOsMacosx, "10.8.3"));
- EXPECT_FALSE(info.Contains(GpuControlList::kOsLinux, "10"));
- EXPECT_FALSE(info.Contains(GpuControlList::kOsChromeOS, "13"));
- EXPECT_FALSE(info.Contains(GpuControlList::kOsAndroid, "7"));
- EXPECT_FALSE(info.Contains(GpuControlList::kOsAny, "7"));
- EXPECT_FALSE(info.Contains(GpuControlList::kOsWin, std::string()));
- EXPECT_TRUE(info.Contains(GpuControlList::kOsWin, "6"));
- EXPECT_TRUE(info.Contains(GpuControlList::kOsWin, "6.1"));
- EXPECT_TRUE(info.Contains(GpuControlList::kOsWin, "7"));
- EXPECT_FALSE(info.Contains(GpuControlList::kOsWin, "5"));
- }
-}
-
-} // namespace gpu
-
diff --git a/chromium/gpu/config/gpu_control_list_testing.json b/chromium/gpu/config/gpu_control_list_testing.json
new file mode 100644
index 00000000000..e49b0f9023f
--- /dev/null
+++ b/chromium/gpu/config/gpu_control_list_testing.json
@@ -0,0 +1,722 @@
+{
+ "name": "gpu control list testing",
+ "version": "1.0",
+ "entries": [
+ {
+ "id": 1,
+ "description": "GpuControlListEntryTest.DetailedEntry",
+ "cr_bugs": [1024, 678],
+ "webkit_bugs": [1950],
+ "os": {
+ "type": "macosx",
+ "version": {
+ "op": "=",
+ "value": "10.6.4"
+ }
+ },
+ "vendor_id": "0x10de",
+ "device_id": ["0x0640"],
+ "driver_version": {
+ "op": "=",
+ "value": "1.6.18"
+ },
+ "features": [
+ "test_feature_0"
+ ],
+ "disabled_extensions": [
+ "test_extension1",
+ "test_extension2"
+ ]
+ },
+ {
+ "id": 2,
+ "description": "GpuControlListEntryTest.VendorOnAllOsEntry",
+ "vendor_id": "0x10de",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 3,
+ "description": "GpuControlListEntryTest.VendorOnLinuxEntry",
+ "os": {
+ "type": "linux"
+ },
+ "vendor_id": "0x10de",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 4,
+ "description": "GpuControlListEntryTest.AllExceptNVidiaOnLinuxEntry",
+ "os": {
+ "type": "linux"
+ },
+ "exceptions": [
+ {
+ "vendor_id": "0x10de"
+ }
+ ],
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 5,
+ "description": "GpuControlListEntryTest.AllExceptIntelOnLinuxEntry",
+ "os": {
+ "type": "linux"
+ },
+ "exceptions": [
+ {
+ "vendor_id": "0x8086"
+ }
+ ],
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 6,
+ "description": "GpuControlListEntryTest.DateOnWindowsEntry",
+ "os": {
+ "type": "win"
+ },
+ "driver_date": {
+ "op": "<",
+ "value": "2010.5.8"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 7,
+ "description": "GpuControlListEntryTest.MultipleDevicesEntry",
+ "vendor_id": "0x10de",
+ "device_id": ["0x1023", "0x0640"],
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 8,
+ "description": "GpuControlListEntryTest.ChromeOSEntry",
+ "os": {
+ "type": "chromeos"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 9,
+ "description": "GpuControlListEntryTest.GlVersionGLESEntry",
+ "gl_type": "gles",
+ "gl_version": {
+ "op": "=",
+ "value": "3.0"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 10,
+ "description": "GpuControlListEntryTest.GlVersionANGLEEntry",
+ "gl_type": "angle",
+ "gl_version": {
+ "op": ">",
+ "value": "2.0"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 11,
+ "description": "GpuControlListEntryTest.GlVersionGLEntry",
+ "gl_type": "gl",
+ "gl_version": {
+ "op": "<",
+ "value": "4.0"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 12,
+ "description": "GpuControlListEntryTest.GlVendorEqual",
+ "gl_vendor": "NVIDIA",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 13,
+ "description": "GpuControlListEntryTest.GlVendorWithDot",
+ "gl_vendor": "X\\.Org.*",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 14,
+ "description": "GpuControlListEntryTest.GlRendererContains",
+ "gl_renderer": ".*GeForce.*",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 15,
+ "description": "GpuControlListEntryTest.GlRendererCaseInsensitive",
+ "gl_renderer": "(?i).*software.*",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 16,
+ "description": "GpuControlListEntryTest.GlExtensionsEndWith",
+ "gl_extensions": ".*GL_SUN_slice_accum",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 17,
+ "description": "GpuControlListEntryTest.OptimusEntry",
+ "os": {
+ "type": "linux"
+ },
+ "multi_gpu_style": "optimus",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 18,
+ "description": "GpuControlListEntryTest.AMDSwitchableEntry",
+ "os": {
+ "type": "macosx"
+ },
+ "multi_gpu_style": "amd_switchable",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 19,
+ "description": "GpuControlListEntryTest.DriverVendorBeginWith",
+ "driver_vendor": "NVIDIA.*",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 20,
+ "description": "GpuControlListEntryTest.LexicalDriverVersionEntry",
+ "os": {
+ "type": "linux"
+ },
+ "vendor_id": "0x1002",
+ "driver_version": {
+ "op": "=",
+ "style": "lexical",
+ "value": "8.76"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 21,
+ "description": "GpuControlListEntryTest.NeedsMoreInfoEntry",
+ "vendor_id": "0x8086",
+ "driver_version": {
+ "op": "<",
+ "value": "10.7"
+ },
+ "features": [
+ "test_feature_1"
+ ]
+ },
+ {
+ "id": 22,
+ "description": "GpuControlListEntryTest.NeedsMoreInfoForExceptionsEntry",
+ "vendor_id": "0x8086",
+ "exceptions": [
+ {
+ "gl_renderer": ".*mesa.*"
+ }
+ ],
+ "features": [
+ "test_feature_1"
+ ]
+ },
+ {
+ "id" : 23,
+ "description": "GpuControlListEntryTest.NeedsMoreInfoForGlVersionEntry",
+ "gl_type": "gl",
+ "gl_version": {
+ "op": "<",
+ "value" : "3.5"
+ },
+ "features" : [
+ "test_feature_1"
+ ]
+ },
+ {
+ "id": 24,
+ "description": "GpuControlListEntryTest.FeatureTypeAllEntry",
+ "features": [
+ "all"
+ ]
+ },
+ {
+ "id": 25,
+ "description": "GpuControlListEntryTest.FeatureTypeAllEntryWithExceptions",
+ "features": [
+ "all",
+ {
+ "exceptions" : [
+ "test_feature_0"
+ ]
+ }
+ ]
+ },
+ {
+ "id": 26,
+ "description": "GpuControlListEntryTest.SingleActiveGPU",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x10de",
+ "device_id": ["0x0640"],
+ "multi_gpu_category": "active",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 27,
+ "description": "GpuControlListEntryTest.MachineModelName",
+ "os": {
+ "type": "android"
+ },
+ "machine_model_name": [
+ "Nexus 4", "XT1032", "GT-.*", "SCH-.*"
+ ],
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 28,
+ "description": "GpuControlListEntryTest.MachineModelNameException",
+ "exceptions": [
+ {
+ "os": {
+ "type": "android"
+ },
+ "machine_model_name": ["Nexus.*"]
+ }
+ ],
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 29,
+ "description": "GpuControlListEntryTest.MachineModelVersion",
+ "os": {
+ "type": "macosx"
+ },
+ "machine_model_name": ["MacBookPro"],
+ "machine_model_version": {
+ "op": "=",
+ "value": "7.1"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 30,
+ "description": "GpuControlListEntryTest.MachineModelVersionException",
+ "os": {
+ "type": "macosx"
+ },
+ "machine_model_name": ["MacBookPro"],
+ "exceptions": [
+ {
+ "machine_model_version": {
+ "op": ">",
+ "value": "7.1"
+ }
+ }
+ ],
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 31,
+ "description": "GpuControlListEntryDualGPUTest.CategoryAny.Intel",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x8086",
+ "device_id": ["0x0166"],
+ "multi_gpu_category": "any",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 32,
+ "description": "GpuControlListEntryDualGPUTest.CategoryAny.NVidia",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x10de",
+ "device_id": ["0x0640"],
+ "multi_gpu_category": "any",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 33,
+ "description": "GpuControlListEntryDualGPUTest.CategorySecondary",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x8086",
+ "device_id": ["0x0166"],
+ "multi_gpu_category": "secondary",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 34,
+ "description": "GpuControlListEntryDualGPUTest.CategoryPrimary",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x8086",
+ "device_id": ["0x0166"],
+ "multi_gpu_category": "primary",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 35,
+ "description": "GpuControlListEntryDualGPUTest.CategoryDefault",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x8086",
+ "device_id": ["0x0166"],
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 36,
+ "description": "GpuControlListEntryDualGPUTest.ActiveSecondaryGPU",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x8086",
+ "device_id": ["0x0166", "0x0168"],
+ "multi_gpu_category": "active",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 37,
+ "description": "GpuControlListEntryDualGPUTest.VendorOnlyActiveSecondaryGPU",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x8086",
+ "multi_gpu_category": "active",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 38,
+ "description": "GpuControlListEntryDualGPUTest.ActivePrimaryGPU",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x10de",
+ "device_id": ["0x0640"],
+ "multi_gpu_category": "active",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 39,
+ "description": "GpuControlListEntryDualGPUTest.VendorOnlyActivePrimaryGPU",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x10de",
+ "multi_gpu_category": "active",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 40,
+ "description": "GpuControlListEntryTest.PixelShaderVersion",
+ "pixel_shader_version": {
+ "op": "<",
+ "value": "4.1"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 41,
+ "description": "GpuControlListEntryTest.OsVersionZeroLT",
+ "os": {
+ "type": "android",
+ "version": {
+ "op": "<",
+ "value": "4.2"
+ }
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 42,
+ "description": "GpuControlListEntryTest.OsVersionZeroAny",
+ "os": {
+ "type": "android"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 43,
+ "description": "GpuControlListEntryTest.OsComparisonAny",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 44,
+ "description": "GpuControlListEntryTest.OsComparisonGE",
+ "os": {
+ "type": "win",
+ "version": {
+ "op": ">=",
+ "value": "6"
+ }
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 45,
+ "description": "GpuControlListEntryTest.ExceptionWithoutVendorId",
+ "os": {
+ "type": "linux"
+ },
+ "vendor_id": "0x8086",
+ "exceptions": [
+ {
+ "device_id": ["0x2a06"],
+ "driver_version": {
+ "op": ">=",
+ "value": "8.1"
+ }
+ },
+ {
+ "device_id": ["0x2a02"],
+ "driver_version": {
+ "op": ">=",
+ "value": "9.1"
+ }
+ }
+ ],
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 46,
+ "description": "GpuControlListEntryTest.MultiGpuStyleAMDSwitchableDiscrete",
+ "multi_gpu_style": "amd_switchable_discrete",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 47,
+ "description": "GpuControlListEntryTest.MultiGpuStyleAMDSwitchableIntegrated",
+ "multi_gpu_style": "amd_switchable_integrated",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 48,
+ "description": "GpuControlListEntryTest.InProcessGPU",
+ "os": {
+ "type": "win"
+ },
+ "in_process_gpu": true,
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 49,
+ "description": "GpuControlListEntryTest.SameGPUTwiceTest",
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x8086",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 50,
+ "description": "GpuControlListEntryTest.NVidiaNumberingScheme",
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x10de",
+ "driver_version": {
+ "op": "<=",
+ "value": "8.17.12.6973"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 51,
+ "description": "GpuControlListTest.NeedsMoreInfo",
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x10de",
+ "driver_version": {
+ "op": "<",
+ "value": "12"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 52,
+ "description": "GpuControlListTest.NeedsMoreInfoForExceptions",
+ "os": {
+ "type": "linux"
+ },
+ "vendor_id": "0x8086",
+ "exceptions": [
+ {
+ "gl_renderer": ".*mesa.*"
+ }
+ ],
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 53,
+ "description": "GpuControlListTest.IgnorableEntries.0",
+ "os": {
+ "type": "linux"
+ },
+ "vendor_id": "0x8086",
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 54,
+ "description": "GpuControlListTest.IgnorableEntries.1",
+ "os": {
+ "type": "linux"
+ },
+ "vendor_id": "0x8086",
+ "driver_version": {
+ "op": "<",
+ "value": "10.7"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 55,
+ "description": "GpuControlListTest.DisabledExtensionTest.0",
+ "os": {
+ "type": "win"
+ },
+ "disabled_extensions": [
+ "test_extension2",
+ "test_extension1"
+ ]
+ },
+ {
+ "id": 56,
+ "description": "GpuControlListTest.DisabledExtensionTest.1",
+ "os": {
+ "type": "win"
+ },
+ "disabled_extensions": [
+ "test_extension3",
+ "test_extension2"
+ ]
+ },
+ {
+ "id": 57,
+ "description": "GpuControlListEntryTest.DirectRendering",
+ "os": {
+ "type": "linux"
+ },
+ "direct_rendering": false,
+ "features": [
+ "test_feature_1"
+ ]
+ },
+ {
+ "id": 58,
+ "description": "GpuControlListTest.LinuxKernelVersion",
+ "os": {
+ "type": "linux",
+ "version": {
+ "op": "<",
+ "value": "3.19.1"
+ }
+ },
+ "vendor_id": "0x8086",
+ "features": [
+ "test_feature_0"
+ ]
+ }
+ ]
+}
diff --git a/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h b/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h
new file mode 100644
index 00000000000..fea4dd4081f
--- /dev/null
+++ b/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h
@@ -0,0 +1,558 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/config/process_json.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_CONFIG_GPU_CONTROL_LIST_TESTING_ARRAYS_AND_STRUCTS_AUTOGEN_H_
+#define GPU_CONFIG_GPU_CONTROL_LIST_TESTING_ARRAYS_AND_STRUCTS_AUTOGEN_H_
+
+#include "gpu/config/gpu_control_list_testing_data.h"
+
+namespace gpu {
+const int kFeatureListForEntry1[1] = {
+ TEST_FEATURE_0,
+};
+
+const char* kDisabledExtensionsForEntry1[2] = {
+ "test_extension1", "test_extension2",
+};
+
+const uint32_t kCrBugsForEntry1[2] = {
+ 1024, 678,
+};
+
+const uint32_t kDeviceIDsForEntry1[1] = {
+ 0x0640,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForEntry1 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kEQ, GpuControlList::kVersionStyleNumerical, "1.6.18",
+ nullptr}, // driver_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // driver_date
+};
+
+const int kFeatureListForEntry2[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry3[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry4[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry5[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry6[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForEntry6 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // driver_version
+ {GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "2010.5.8",
+ nullptr}, // driver_date
+};
+
+const int kFeatureListForEntry7[1] = {
+ TEST_FEATURE_0,
+};
+
+const uint32_t kDeviceIDsForEntry7[2] = {
+ 0x1023, 0x0640,
+};
+
+const int kFeatureListForEntry8[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry9[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::More kMoreForEntry9 = {
+ GpuControlList::kGLTypeGLES, // gl_type
+ {GpuControlList::kEQ, GpuControlList::kVersionStyleNumerical, "3.0",
+ nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ true, // direct_rendering
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gpu_count
+};
+
+const int kFeatureListForEntry10[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::More kMoreForEntry10 = {
+ GpuControlList::kGLTypeANGLE, // gl_type
+ {GpuControlList::kGT, GpuControlList::kVersionStyleNumerical, "2.0",
+ nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ true, // direct_rendering
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gpu_count
+};
+
+const int kFeatureListForEntry11[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::More kMoreForEntry11 = {
+ GpuControlList::kGLTypeGL, // gl_type
+ {GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "4.0",
+ nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ true, // direct_rendering
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gpu_count
+};
+
+const int kFeatureListForEntry12[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::GLStrings kGLStringsForEntry12 = {
+ "NVIDIA", nullptr, nullptr, nullptr,
+};
+
+const int kFeatureListForEntry13[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::GLStrings kGLStringsForEntry13 = {
+ "X\\.Org.*", nullptr, nullptr, nullptr,
+};
+
+const int kFeatureListForEntry14[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::GLStrings kGLStringsForEntry14 = {
+ nullptr, ".*GeForce.*", nullptr, nullptr,
+};
+
+const int kFeatureListForEntry15[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::GLStrings kGLStringsForEntry15 = {
+ nullptr, "(?i).*software.*", nullptr, nullptr,
+};
+
+const int kFeatureListForEntry16[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::GLStrings kGLStringsForEntry16 = {
+ nullptr, nullptr, ".*GL_SUN_slice_accum", nullptr,
+};
+
+const int kFeatureListForEntry17[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry18[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry19[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForEntry19 = {
+ "NVIDIA.*", // driver_vendor
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // driver_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // driver_date
+};
+
+const int kFeatureListForEntry20[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForEntry20 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kEQ, GpuControlList::kVersionStyleLexical, "8.76",
+ nullptr}, // driver_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // driver_date
+};
+
+const int kFeatureListForEntry21[1] = {
+ TEST_FEATURE_1,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForEntry21 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "10.7",
+ nullptr}, // driver_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // driver_date
+};
+
+const int kFeatureListForEntry22[1] = {
+ TEST_FEATURE_1,
+};
+
+const GpuControlList::GLStrings kGLStringsForEntry22Exception0 = {
+ nullptr, ".*mesa.*", nullptr, nullptr,
+};
+
+const int kFeatureListForEntry23[1] = {
+ TEST_FEATURE_1,
+};
+
+const GpuControlList::More kMoreForEntry23 = {
+ GpuControlList::kGLTypeGL, // gl_type
+ {GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "3.5",
+ nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ true, // direct_rendering
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gpu_count
+};
+
+const int kFeatureListForEntry24[3] = {
+ TEST_FEATURE_0, TEST_FEATURE_1, TEST_FEATURE_2,
+};
+
+const int kFeatureListForEntry25[2] = {
+ TEST_FEATURE_1, TEST_FEATURE_2,
+};
+
+const int kFeatureListForEntry26[1] = {
+ TEST_FEATURE_0,
+};
+
+const uint32_t kDeviceIDsForEntry26[1] = {
+ 0x0640,
+};
+
+const int kFeatureListForEntry27[1] = {
+ TEST_FEATURE_0,
+};
+
+const char* kMachineModelNameForEntry27[4] = {
+ "Nexus 4", "XT1032", "GT-.*", "SCH-.*",
+};
+
+const GpuControlList::MachineModelInfo kMachineModelInfoForEntry27 = {
+ arraysize(kMachineModelNameForEntry27), // machine model name size
+ kMachineModelNameForEntry27, // machine model names
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // machine model version
+};
+
+const int kFeatureListForEntry28[1] = {
+ TEST_FEATURE_0,
+};
+
+const char* kMachineModelNameForEntry28Exception0[1] = {
+ "Nexus.*",
+};
+
+const GpuControlList::MachineModelInfo kMachineModelInfoForEntry28Exception0 = {
+ arraysize(
+ kMachineModelNameForEntry28Exception0), // machine model name size
+ kMachineModelNameForEntry28Exception0, // machine model names
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // machine model version
+};
+
+const int kFeatureListForEntry29[1] = {
+ TEST_FEATURE_0,
+};
+
+const char* kMachineModelNameForEntry29[1] = {
+ "MacBookPro",
+};
+
+const GpuControlList::MachineModelInfo kMachineModelInfoForEntry29 = {
+ arraysize(kMachineModelNameForEntry29), // machine model name size
+ kMachineModelNameForEntry29, // machine model names
+ {GpuControlList::kEQ, GpuControlList::kVersionStyleNumerical, "7.1",
+ nullptr}, // machine model version
+};
+
+const int kFeatureListForEntry30[1] = {
+ TEST_FEATURE_0,
+};
+
+const char* kMachineModelNameForEntry30[1] = {
+ "MacBookPro",
+};
+
+const GpuControlList::MachineModelInfo kMachineModelInfoForEntry30 = {
+ arraysize(kMachineModelNameForEntry30), // machine model name size
+ kMachineModelNameForEntry30, // machine model names
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // machine model version
+};
+
+const GpuControlList::MachineModelInfo kMachineModelInfoForEntry30Exception0 = {
+ 0, // machine model name size
+ nullptr, // machine model names
+ {GpuControlList::kGT, GpuControlList::kVersionStyleNumerical, "7.1",
+ nullptr}, // machine model version
+};
+
+const int kFeatureListForEntry31[1] = {
+ TEST_FEATURE_0,
+};
+
+const uint32_t kDeviceIDsForEntry31[1] = {
+ 0x0166,
+};
+
+const int kFeatureListForEntry32[1] = {
+ TEST_FEATURE_0,
+};
+
+const uint32_t kDeviceIDsForEntry32[1] = {
+ 0x0640,
+};
+
+const int kFeatureListForEntry33[1] = {
+ TEST_FEATURE_0,
+};
+
+const uint32_t kDeviceIDsForEntry33[1] = {
+ 0x0166,
+};
+
+const int kFeatureListForEntry34[1] = {
+ TEST_FEATURE_0,
+};
+
+const uint32_t kDeviceIDsForEntry34[1] = {
+ 0x0166,
+};
+
+const int kFeatureListForEntry35[1] = {
+ TEST_FEATURE_0,
+};
+
+const uint32_t kDeviceIDsForEntry35[1] = {
+ 0x0166,
+};
+
+const int kFeatureListForEntry36[1] = {
+ TEST_FEATURE_0,
+};
+
+const uint32_t kDeviceIDsForEntry36[2] = {
+ 0x0166, 0x0168,
+};
+
+const int kFeatureListForEntry37[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry38[1] = {
+ TEST_FEATURE_0,
+};
+
+const uint32_t kDeviceIDsForEntry38[1] = {
+ 0x0640,
+};
+
+const int kFeatureListForEntry39[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry40[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::More kMoreForEntry40 = {
+ GpuControlList::kGLTypeNone, // gl_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gl_version
+ {GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "4.1",
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ true, // direct_rendering
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gpu_count
+};
+
+const int kFeatureListForEntry41[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry42[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry43[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry44[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry45[1] = {
+ TEST_FEATURE_0,
+};
+
+const uint32_t kDeviceIDsForEntry45Exception0[1] = {
+ 0x2a06,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForEntry45Exception0 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kGE, GpuControlList::kVersionStyleNumerical, "8.1",
+ nullptr}, // driver_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // driver_date
+};
+
+const uint32_t kDeviceIDsForEntry45Exception1[1] = {
+ 0x2a02,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForEntry45Exception1 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kGE, GpuControlList::kVersionStyleNumerical, "9.1",
+ nullptr}, // driver_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // driver_date
+};
+
+const int kFeatureListForEntry46[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry47[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry48[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::More kMoreForEntry48 = {
+ GpuControlList::kGLTypeNone, // gl_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // pixel_shader_version
+ true, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ true, // direct_rendering
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gpu_count
+};
+
+const int kFeatureListForEntry49[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry50[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForEntry50 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kLE, GpuControlList::kVersionStyleNumerical,
+ "8.17.12.6973", nullptr}, // driver_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // driver_date
+};
+
+const int kFeatureListForEntry51[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForEntry51 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "12",
+ nullptr}, // driver_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // driver_date
+};
+
+const int kFeatureListForEntry52[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::GLStrings kGLStringsForEntry52Exception0 = {
+ nullptr, ".*mesa.*", nullptr, nullptr,
+};
+
+const int kFeatureListForEntry53[1] = {
+ TEST_FEATURE_0,
+};
+
+const int kFeatureListForEntry54[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForEntry54 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "10.7",
+ nullptr}, // driver_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // driver_date
+};
+
+const char* kDisabledExtensionsForEntry55[2] = {
+ "test_extension2", "test_extension1",
+};
+
+const char* kDisabledExtensionsForEntry56[2] = {
+ "test_extension3", "test_extension2",
+};
+
+const int kFeatureListForEntry57[1] = {
+ TEST_FEATURE_1,
+};
+
+const GpuControlList::More kMoreForEntry57 = {
+ GpuControlList::kGLTypeNone, // gl_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ false, // direct_rendering
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gpu_count
+};
+
+const int kFeatureListForEntry58[1] = {
+ TEST_FEATURE_0,
+};
+
+} // namespace gpu
+
+#endif // GPU_CONFIG_GPU_CONTROL_LIST_TESTING_ARRAYS_AND_STRUCTS_AUTOGEN_H_
diff --git a/chromium/gpu/config/gpu_control_list_testing_autogen.cc b/chromium/gpu/config/gpu_control_list_testing_autogen.cc
new file mode 100644
index 00000000000..fc93fbfa7d9
--- /dev/null
+++ b/chromium/gpu/config/gpu_control_list_testing_autogen.cc
@@ -0,0 +1,1533 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/config/process_json.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#include "gpu/config/gpu_control_list_testing_autogen.h"
+
+#include "gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h"
+#include "gpu/config/gpu_control_list_testing_exceptions_autogen.h"
+
+namespace gpu {
+
+const char kGpuControlListTestingVersion[] = "1.0";
+
+const size_t kGpuControlListTestingEntryCount = 58;
+const GpuControlList::Entry kGpuControlListTestingEntries[58] = {
+ {
+ 1, // id
+ "GpuControlListEntryTest.DetailedEntry",
+ arraysize(kFeatureListForEntry1), // features size
+ kFeatureListForEntry1, // features
+ arraysize(kDisabledExtensionsForEntry1), // DisabledExtensions size
+ kDisabledExtensionsForEntry1, // DisabledExtensions
+ arraysize(kCrBugsForEntry1), // CrBugs size
+ kCrBugsForEntry1, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kEQ, GpuControlList::kVersionStyleNumerical,
+ "10.6.4", nullptr}, // os_version
+ 0x10de, // vendor_id
+ arraysize(kDeviceIDsForEntry1), // DeviceIDs size
+ kDeviceIDsForEntry1, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForEntry1, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 2, // id
+ "GpuControlListEntryTest.VendorOnAllOsEntry",
+ arraysize(kFeatureListForEntry2), // features size
+ kFeatureListForEntry2, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x10de, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 3, // id
+ "GpuControlListEntryTest.VendorOnLinuxEntry",
+ arraysize(kFeatureListForEntry3), // features size
+ kFeatureListForEntry3, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsLinux, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x10de, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 4, // id
+ "GpuControlListEntryTest.AllExceptNVidiaOnLinuxEntry",
+ arraysize(kFeatureListForEntry4), // features size
+ kFeatureListForEntry4, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsLinux, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ arraysize(kExceptionsForEntry4), // exceptions count
+ kExceptionsForEntry4, // exceptions
+ },
+ {
+ 5, // id
+ "GpuControlListEntryTest.AllExceptIntelOnLinuxEntry",
+ arraysize(kFeatureListForEntry5), // features size
+ kFeatureListForEntry5, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsLinux, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ arraysize(kExceptionsForEntry5), // exceptions count
+ kExceptionsForEntry5, // exceptions
+ },
+ {
+ 6, // id
+ "GpuControlListEntryTest.DateOnWindowsEntry",
+ arraysize(kFeatureListForEntry6), // features size
+ kFeatureListForEntry6, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsWin, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForEntry6, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 7, // id
+ "GpuControlListEntryTest.MultipleDevicesEntry",
+ arraysize(kFeatureListForEntry7), // features size
+ kFeatureListForEntry7, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x10de, // vendor_id
+ arraysize(kDeviceIDsForEntry7), // DeviceIDs size
+ kDeviceIDsForEntry7, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 8, // id
+ "GpuControlListEntryTest.ChromeOSEntry",
+ arraysize(kFeatureListForEntry8), // features size
+ kFeatureListForEntry8, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsChromeOS, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 9, // id
+ "GpuControlListEntryTest.GlVersionGLESEntry",
+ arraysize(kFeatureListForEntry9), // features size
+ kFeatureListForEntry9, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ &kMoreForEntry9, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 10, // id
+ "GpuControlListEntryTest.GlVersionANGLEEntry",
+ arraysize(kFeatureListForEntry10), // features size
+ kFeatureListForEntry10, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ &kMoreForEntry10, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 11, // id
+ "GpuControlListEntryTest.GlVersionGLEntry",
+ arraysize(kFeatureListForEntry11), // features size
+ kFeatureListForEntry11, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ &kMoreForEntry11, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 12, // id
+ "GpuControlListEntryTest.GlVendorEqual",
+ arraysize(kFeatureListForEntry12), // features size
+ kFeatureListForEntry12, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ &kGLStringsForEntry12, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 13, // id
+ "GpuControlListEntryTest.GlVendorWithDot",
+ arraysize(kFeatureListForEntry13), // features size
+ kFeatureListForEntry13, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ &kGLStringsForEntry13, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 14, // id
+ "GpuControlListEntryTest.GlRendererContains",
+ arraysize(kFeatureListForEntry14), // features size
+ kFeatureListForEntry14, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ &kGLStringsForEntry14, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 15, // id
+ "GpuControlListEntryTest.GlRendererCaseInsensitive",
+ arraysize(kFeatureListForEntry15), // features size
+ kFeatureListForEntry15, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ &kGLStringsForEntry15, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 16, // id
+ "GpuControlListEntryTest.GlExtensionsEndWith",
+ arraysize(kFeatureListForEntry16), // features size
+ kFeatureListForEntry16, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ &kGLStringsForEntry16, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 17, // id
+ "GpuControlListEntryTest.OptimusEntry",
+ arraysize(kFeatureListForEntry17), // features size
+ kFeatureListForEntry17, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsLinux, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleOptimus, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 18, // id
+ "GpuControlListEntryTest.AMDSwitchableEntry",
+ arraysize(kFeatureListForEntry18), // features size
+ kFeatureListForEntry18, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleAMDSwitchable, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 19, // id
+ "GpuControlListEntryTest.DriverVendorBeginWith",
+ arraysize(kFeatureListForEntry19), // features size
+ kFeatureListForEntry19, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForEntry19, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 20, // id
+ "GpuControlListEntryTest.LexicalDriverVersionEntry",
+ arraysize(kFeatureListForEntry20), // features size
+ kFeatureListForEntry20, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsLinux, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x1002, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForEntry20, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 21, // id
+ "GpuControlListEntryTest.NeedsMoreInfoEntry",
+ arraysize(kFeatureListForEntry21), // features size
+ kFeatureListForEntry21, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForEntry21, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 22, // id
+ "GpuControlListEntryTest.NeedsMoreInfoForExceptionsEntry",
+ arraysize(kFeatureListForEntry22), // features size
+ kFeatureListForEntry22, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ arraysize(kExceptionsForEntry22), // exceptions count
+ kExceptionsForEntry22, // exceptions
+ },
+ {
+ 23, // id
+ "GpuControlListEntryTest.NeedsMoreInfoForGlVersionEntry",
+ arraysize(kFeatureListForEntry23), // features size
+ kFeatureListForEntry23, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ &kMoreForEntry23, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 24, // id
+ "GpuControlListEntryTest.FeatureTypeAllEntry",
+ arraysize(kFeatureListForEntry24), // features size
+ kFeatureListForEntry24, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 25, // id
+ "GpuControlListEntryTest.FeatureTypeAllEntryWithExceptions",
+ arraysize(kFeatureListForEntry25), // features size
+ kFeatureListForEntry25, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 26, // id
+ "GpuControlListEntryTest.SingleActiveGPU",
+ arraysize(kFeatureListForEntry26), // features size
+ kFeatureListForEntry26, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x10de, // vendor_id
+ arraysize(kDeviceIDsForEntry26), // DeviceIDs size
+ kDeviceIDsForEntry26, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 27, // id
+ "GpuControlListEntryTest.MachineModelName",
+ arraysize(kFeatureListForEntry27), // features size
+ kFeatureListForEntry27, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAndroid, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ &kMachineModelInfoForEntry27, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 28, // id
+ "GpuControlListEntryTest.MachineModelNameException",
+ arraysize(kFeatureListForEntry28), // features size
+ kFeatureListForEntry28, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ arraysize(kExceptionsForEntry28), // exceptions count
+ kExceptionsForEntry28, // exceptions
+ },
+ {
+ 29, // id
+ "GpuControlListEntryTest.MachineModelVersion",
+ arraysize(kFeatureListForEntry29), // features size
+ kFeatureListForEntry29, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ &kMachineModelInfoForEntry29, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 30, // id
+ "GpuControlListEntryTest.MachineModelVersionException",
+ arraysize(kFeatureListForEntry30), // features size
+ kFeatureListForEntry30, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ &kMachineModelInfoForEntry30, // machine model info
+ nullptr, // more conditions
+ },
+ arraysize(kExceptionsForEntry30), // exceptions count
+ kExceptionsForEntry30, // exceptions
+ },
+ {
+ 31, // id
+ "GpuControlListEntryDualGPUTest.CategoryAny.Intel",
+ arraysize(kFeatureListForEntry31), // features size
+ kFeatureListForEntry31, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ arraysize(kDeviceIDsForEntry31), // DeviceIDs size
+ kDeviceIDsForEntry31, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 32, // id
+ "GpuControlListEntryDualGPUTest.CategoryAny.NVidia",
+ arraysize(kFeatureListForEntry32), // features size
+ kFeatureListForEntry32, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x10de, // vendor_id
+ arraysize(kDeviceIDsForEntry32), // DeviceIDs size
+ kDeviceIDsForEntry32, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 33, // id
+ "GpuControlListEntryDualGPUTest.CategorySecondary",
+ arraysize(kFeatureListForEntry33), // features size
+ kFeatureListForEntry33, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ arraysize(kDeviceIDsForEntry33), // DeviceIDs size
+ kDeviceIDsForEntry33, // DeviceIDs
+ GpuControlList::kMultiGpuCategorySecondary, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 34, // id
+ "GpuControlListEntryDualGPUTest.CategoryPrimary",
+ arraysize(kFeatureListForEntry34), // features size
+ kFeatureListForEntry34, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ arraysize(kDeviceIDsForEntry34), // DeviceIDs size
+ kDeviceIDsForEntry34, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryPrimary, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 35, // id
+ "GpuControlListEntryDualGPUTest.CategoryDefault",
+ arraysize(kFeatureListForEntry35), // features size
+ kFeatureListForEntry35, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ arraysize(kDeviceIDsForEntry35), // DeviceIDs size
+ kDeviceIDsForEntry35, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 36, // id
+ "GpuControlListEntryDualGPUTest.ActiveSecondaryGPU",
+ arraysize(kFeatureListForEntry36), // features size
+ kFeatureListForEntry36, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ arraysize(kDeviceIDsForEntry36), // DeviceIDs size
+ kDeviceIDsForEntry36, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 37, // id
+ "GpuControlListEntryDualGPUTest.VendorOnlyActiveSecondaryGPU",
+ arraysize(kFeatureListForEntry37), // features size
+ kFeatureListForEntry37, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 38, // id
+ "GpuControlListEntryDualGPUTest.ActivePrimaryGPU",
+ arraysize(kFeatureListForEntry38), // features size
+ kFeatureListForEntry38, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x10de, // vendor_id
+ arraysize(kDeviceIDsForEntry38), // DeviceIDs size
+ kDeviceIDsForEntry38, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 39, // id
+ "GpuControlListEntryDualGPUTest.VendorOnlyActivePrimaryGPU",
+ arraysize(kFeatureListForEntry39), // features size
+ kFeatureListForEntry39, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsMacosx, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x10de, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 40, // id
+ "GpuControlListEntryTest.PixelShaderVersion",
+ arraysize(kFeatureListForEntry40), // features size
+ kFeatureListForEntry40, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ &kMoreForEntry40, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 41, // id
+ "GpuControlListEntryTest.OsVersionZeroLT",
+ arraysize(kFeatureListForEntry41), // features size
+ kFeatureListForEntry41, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAndroid, // os_type
+ {GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "4.2",
+ nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 42, // id
+ "GpuControlListEntryTest.OsVersionZeroAny",
+ arraysize(kFeatureListForEntry42), // features size
+ kFeatureListForEntry42, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAndroid, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 43, // id
+ "GpuControlListEntryTest.OsComparisonAny",
+ arraysize(kFeatureListForEntry43), // features size
+ kFeatureListForEntry43, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 44, // id
+ "GpuControlListEntryTest.OsComparisonGE",
+ arraysize(kFeatureListForEntry44), // features size
+ kFeatureListForEntry44, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsWin, // os_type
+ {GpuControlList::kGE, GpuControlList::kVersionStyleNumerical, "6",
+ nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 45, // id
+ "GpuControlListEntryTest.ExceptionWithoutVendorId",
+ arraysize(kFeatureListForEntry45), // features size
+ kFeatureListForEntry45, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsLinux, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ arraysize(kExceptionsForEntry45), // exceptions count
+ kExceptionsForEntry45, // exceptions
+ },
+ {
+ 46, // id
+ "GpuControlListEntryTest.MultiGpuStyleAMDSwitchableDiscrete",
+ arraysize(kFeatureListForEntry46), // features size
+ kFeatureListForEntry46, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::
+ kMultiGpuStyleAMDSwitchableDiscrete, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 47, // id
+ "GpuControlListEntryTest.MultiGpuStyleAMDSwitchableIntegrated",
+ arraysize(kFeatureListForEntry47), // features size
+ kFeatureListForEntry47, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::
+ kMultiGpuStyleAMDSwitchableIntegrated, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 48, // id
+ "GpuControlListEntryTest.InProcessGPU",
+ arraysize(kFeatureListForEntry48), // features size
+ kFeatureListForEntry48, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsWin, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ &kMoreForEntry48, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 49, // id
+ "GpuControlListEntryTest.SameGPUTwiceTest",
+ arraysize(kFeatureListForEntry49), // features size
+ kFeatureListForEntry49, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsWin, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 50, // id
+ "GpuControlListEntryTest.NVidiaNumberingScheme",
+ arraysize(kFeatureListForEntry50), // features size
+ kFeatureListForEntry50, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsWin, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x10de, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForEntry50, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 51, // id
+ "GpuControlListTest.NeedsMoreInfo",
+ arraysize(kFeatureListForEntry51), // features size
+ kFeatureListForEntry51, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsWin, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x10de, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForEntry51, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 52, // id
+ "GpuControlListTest.NeedsMoreInfoForExceptions",
+ arraysize(kFeatureListForEntry52), // features size
+ kFeatureListForEntry52, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsLinux, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ arraysize(kExceptionsForEntry52), // exceptions count
+ kExceptionsForEntry52, // exceptions
+ },
+ {
+ 53, // id
+ "GpuControlListTest.IgnorableEntries.0",
+ arraysize(kFeatureListForEntry53), // features size
+ kFeatureListForEntry53, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsLinux, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 54, // id
+ "GpuControlListTest.IgnorableEntries.1",
+ arraysize(kFeatureListForEntry54), // features size
+ kFeatureListForEntry54, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsLinux, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForEntry54, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 55, // id
+ "GpuControlListTest.DisabledExtensionTest.0",
+ 0, // feature size
+ nullptr, // features
+ arraysize(kDisabledExtensionsForEntry55), // DisabledExtensions size
+ kDisabledExtensionsForEntry55, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsWin, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 56, // id
+ "GpuControlListTest.DisabledExtensionTest.1",
+ 0, // feature size
+ nullptr, // features
+ arraysize(kDisabledExtensionsForEntry56), // DisabledExtensions size
+ kDisabledExtensionsForEntry56, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsWin, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 57, // id
+ "GpuControlListEntryTest.DirectRendering",
+ arraysize(kFeatureListForEntry57), // features size
+ kFeatureListForEntry57, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsLinux, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ &kMoreForEntry57, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 58, // id
+ "GpuControlListTest.LinuxKernelVersion",
+ arraysize(kFeatureListForEntry58), // features size
+ kFeatureListForEntry58, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsLinux, // os_type
+ {GpuControlList::kLT, GpuControlList::kVersionStyleNumerical,
+ "3.19.1", nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+};
+} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list_testing_autogen.h b/chromium/gpu/config/gpu_control_list_testing_autogen.h
new file mode 100644
index 00000000000..7e8c012bb97
--- /dev/null
+++ b/chromium/gpu/config/gpu_control_list_testing_autogen.h
@@ -0,0 +1,22 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/config/process_json.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_CONFIG_GPU_CONTROL_LIST_TESTING_AUTOGEN_H_
+#define GPU_CONFIG_GPU_CONTROL_LIST_TESTING_AUTOGEN_H_
+
+#include "gpu/config/gpu_control_list.h"
+
+namespace gpu {
+extern const char kGpuControlListTestingVersion[];
+extern const size_t kGpuControlListTestingEntryCount;
+extern const GpuControlList::Entry kGpuControlListTestingEntries[];
+} // namespace gpu
+
+#endif // GPU_CONFIG_GPU_CONTROL_LIST_TESTING_AUTOGEN_H_
diff --git a/chromium/gpu/config/gpu_control_list_testing_data.h b/chromium/gpu/config/gpu_control_list_testing_data.h
new file mode 100644
index 00000000000..51649d3c570
--- /dev/null
+++ b/chromium/gpu/config/gpu_control_list_testing_data.h
@@ -0,0 +1,15 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_CONFIG_GPU_CONTROL_LIST_TESTING_DATA_H_
+#define GPU_CONFIG_GPU_CONTROL_LIST_TESTING_DATA_H_
+
+#include "gpu/config/gpu_control_list_testing_autogen.h"
+#include "gpu/config/gpu_control_list_testing_entry_enums_autogen.h"
+
+namespace gpu {
+enum TestFeatureType { TEST_FEATURE_0 = 0, TEST_FEATURE_1, TEST_FEATURE_2 };
+} // namespace gpu
+
+#endif // GPU_CONFIG_GPU_CONTROL_LIST_TESTING_DATA_H_
diff --git a/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h b/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h
new file mode 100644
index 00000000000..d61c5e0bd33
--- /dev/null
+++ b/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h
@@ -0,0 +1,77 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/config/process_json.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_CONFIG_GPU_CONTROL_LIST_TESTING_ENTRY_ENUMS_AUTOGEN_H_
+#define GPU_CONFIG_GPU_CONTROL_LIST_TESTING_ENTRY_ENUMS_AUTOGEN_H_
+
+namespace gpu {
+enum GpuControlListTestingEntryEnum {
+ kGpuControlListEntryTest_DetailedEntry = 0,
+ kGpuControlListEntryTest_VendorOnAllOsEntry = 1,
+ kGpuControlListEntryTest_VendorOnLinuxEntry = 2,
+ kGpuControlListEntryTest_AllExceptNVidiaOnLinuxEntry = 3,
+ kGpuControlListEntryTest_AllExceptIntelOnLinuxEntry = 4,
+ kGpuControlListEntryTest_DateOnWindowsEntry = 5,
+ kGpuControlListEntryTest_MultipleDevicesEntry = 6,
+ kGpuControlListEntryTest_ChromeOSEntry = 7,
+ kGpuControlListEntryTest_GlVersionGLESEntry = 8,
+ kGpuControlListEntryTest_GlVersionANGLEEntry = 9,
+ kGpuControlListEntryTest_GlVersionGLEntry = 10,
+ kGpuControlListEntryTest_GlVendorEqual = 11,
+ kGpuControlListEntryTest_GlVendorWithDot = 12,
+ kGpuControlListEntryTest_GlRendererContains = 13,
+ kGpuControlListEntryTest_GlRendererCaseInsensitive = 14,
+ kGpuControlListEntryTest_GlExtensionsEndWith = 15,
+ kGpuControlListEntryTest_OptimusEntry = 16,
+ kGpuControlListEntryTest_AMDSwitchableEntry = 17,
+ kGpuControlListEntryTest_DriverVendorBeginWith = 18,
+ kGpuControlListEntryTest_LexicalDriverVersionEntry = 19,
+ kGpuControlListEntryTest_NeedsMoreInfoEntry = 20,
+ kGpuControlListEntryTest_NeedsMoreInfoForExceptionsEntry = 21,
+ kGpuControlListEntryTest_NeedsMoreInfoForGlVersionEntry = 22,
+ kGpuControlListEntryTest_FeatureTypeAllEntry = 23,
+ kGpuControlListEntryTest_FeatureTypeAllEntryWithExceptions = 24,
+ kGpuControlListEntryTest_SingleActiveGPU = 25,
+ kGpuControlListEntryTest_MachineModelName = 26,
+ kGpuControlListEntryTest_MachineModelNameException = 27,
+ kGpuControlListEntryTest_MachineModelVersion = 28,
+ kGpuControlListEntryTest_MachineModelVersionException = 29,
+ kGpuControlListEntryDualGPUTest_CategoryAny_Intel = 30,
+ kGpuControlListEntryDualGPUTest_CategoryAny_NVidia = 31,
+ kGpuControlListEntryDualGPUTest_CategorySecondary = 32,
+ kGpuControlListEntryDualGPUTest_CategoryPrimary = 33,
+ kGpuControlListEntryDualGPUTest_CategoryDefault = 34,
+ kGpuControlListEntryDualGPUTest_ActiveSecondaryGPU = 35,
+ kGpuControlListEntryDualGPUTest_VendorOnlyActiveSecondaryGPU = 36,
+ kGpuControlListEntryDualGPUTest_ActivePrimaryGPU = 37,
+ kGpuControlListEntryDualGPUTest_VendorOnlyActivePrimaryGPU = 38,
+ kGpuControlListEntryTest_PixelShaderVersion = 39,
+ kGpuControlListEntryTest_OsVersionZeroLT = 40,
+ kGpuControlListEntryTest_OsVersionZeroAny = 41,
+ kGpuControlListEntryTest_OsComparisonAny = 42,
+ kGpuControlListEntryTest_OsComparisonGE = 43,
+ kGpuControlListEntryTest_ExceptionWithoutVendorId = 44,
+ kGpuControlListEntryTest_MultiGpuStyleAMDSwitchableDiscrete = 45,
+ kGpuControlListEntryTest_MultiGpuStyleAMDSwitchableIntegrated = 46,
+ kGpuControlListEntryTest_InProcessGPU = 47,
+ kGpuControlListEntryTest_SameGPUTwiceTest = 48,
+ kGpuControlListEntryTest_NVidiaNumberingScheme = 49,
+ kGpuControlListTest_NeedsMoreInfo = 50,
+ kGpuControlListTest_NeedsMoreInfoForExceptions = 51,
+ kGpuControlListTest_IgnorableEntries_0 = 52,
+ kGpuControlListTest_IgnorableEntries_1 = 53,
+ kGpuControlListTest_DisabledExtensionTest_0 = 54,
+ kGpuControlListTest_DisabledExtensionTest_1 = 55,
+ kGpuControlListEntryTest_DirectRendering = 56,
+ kGpuControlListTest_LinuxKernelVersion = 57,
+};
+} // namespace gpu
+
+#endif // GPU_CONFIG_GPU_CONTROL_LIST_TESTING_ENTRY_ENUMS_AUTOGEN_H_
diff --git a/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h b/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h
new file mode 100644
index 00000000000..b1590ae499c
--- /dev/null
+++ b/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h
@@ -0,0 +1,150 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/config/process_json.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_CONFIG_GPU_CONTROL_LIST_TESTING_EXCEPTIONS_AUTOGEN_H_
+#define GPU_CONFIG_GPU_CONTROL_LIST_TESTING_EXCEPTIONS_AUTOGEN_H_
+
+namespace gpu {
+const GpuControlList::Conditions kExceptionsForEntry4[1] = {
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x10de, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+};
+
+const GpuControlList::Conditions kExceptionsForEntry5[1] = {
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+};
+
+const GpuControlList::Conditions kExceptionsForEntry22[1] = {
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ &kGLStringsForEntry22Exception0, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+};
+
+const GpuControlList::Conditions kExceptionsForEntry28[1] = {
+ {
+ GpuControlList::kOsAndroid, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ &kMachineModelInfoForEntry28Exception0, // machine model info
+ nullptr, // more conditions
+ },
+};
+
+const GpuControlList::Conditions kExceptionsForEntry30[1] = {
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ &kMachineModelInfoForEntry30Exception0, // machine model info
+ nullptr, // more conditions
+ },
+};
+
+const GpuControlList::Conditions kExceptionsForEntry45[2] = {
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ arraysize(kDeviceIDsForEntry45Exception0), // DeviceIDs size
+ kDeviceIDsForEntry45Exception0, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForEntry45Exception0, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ arraysize(kDeviceIDsForEntry45Exception1), // DeviceIDs size
+ kDeviceIDsForEntry45Exception1, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForEntry45Exception1, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+};
+
+const GpuControlList::Conditions kExceptionsForEntry52[1] = {
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ &kGLStringsForEntry52Exception0, // GL strings
+ nullptr, // machine model info
+ nullptr, // more conditions
+ },
+};
+
+} // namespace gpu
+
+#endif // GPU_CONFIG_GPU_CONTROL_LIST_TESTING_EXCEPTIONS_AUTOGEN_H_
diff --git a/chromium/gpu/config/gpu_control_list_unittest.cc b/chromium/gpu/config/gpu_control_list_unittest.cc
index f3a3c099d61..81de77debbb 100644
--- a/chromium/gpu/config/gpu_control_list_unittest.cc
+++ b/chromium/gpu/config/gpu_control_list_unittest.cc
@@ -8,13 +8,13 @@
#include <vector>
#include "gpu/config/gpu_control_list.h"
+#include "gpu/config/gpu_control_list_testing_data.h"
#include "gpu/config/gpu_info.h"
#include "testing/gtest/include/gtest/gtest.h"
const char kOsVersion[] = "10.6.4";
const uint32_t kIntelVendorId = 0x8086;
const uint32_t kNvidiaVendorId = 0x10de;
-const uint32_t kAmdVendorId = 0x10de;
#define LONG_STRING_CONST(...) #__VA_ARGS__
@@ -24,24 +24,21 @@ const uint32_t kAmdVendorId = 0x10de;
namespace gpu {
-enum TestFeatureType {
- TEST_FEATURE_0 = 1,
- TEST_FEATURE_1 = 1 << 2,
- TEST_FEATURE_2 = 1 << 3,
-};
-
class GpuControlListTest : public testing::Test {
public:
- GpuControlListTest() { }
+ typedef GpuControlList::Entry Entry;
+ GpuControlListTest() {}
~GpuControlListTest() override {}
const GPUInfo& gpu_info() const {
return gpu_info_;
}
- GpuControlList* Create() {
- GpuControlList* rt = new GpuControlList();
+ std::unique_ptr<GpuControlList> Create(size_t entry_count,
+ const Entry* entries) {
+ GpuControlListData data("0.1", entry_count, entries);
+ std::unique_ptr<GpuControlList> rt(new GpuControlList(data));
rt->AddSupportedFeature("test_feature_0", TEST_FEATURE_0);
rt->AddSupportedFeature("test_feature_1", TEST_FEATURE_1);
rt->AddSupportedFeature("test_feature_2", TEST_FEATURE_2);
@@ -67,261 +64,20 @@ class GpuControlListTest : public testing::Test {
GPUInfo gpu_info_;
};
-TEST_F(GpuControlListTest, DefaultControlListSettings) {
- std::unique_ptr<GpuControlList> control_list(Create());
- // Default control list settings: all feature are allowed.
- std::set<int> features = control_list->MakeDecision(
- GpuControlList::kOsMacosx, kOsVersion, gpu_info());
- EXPECT_EMPTY_SET(features);
-}
-
-TEST_F(GpuControlListTest, EmptyControlList) {
- // Empty list: all features are allowed.
- const std::string empty_list_json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "2.5",
- "entries": [
- ]
- }
- );
- std::unique_ptr<GpuControlList> control_list(Create());
-
- EXPECT_TRUE(control_list->LoadList(empty_list_json,
- GpuControlList::kAllOs));
- EXPECT_EQ("2.5", control_list->version());
- std::set<int> features = control_list->MakeDecision(
- GpuControlList::kOsMacosx, kOsVersion, gpu_info());
- EXPECT_EMPTY_SET(features);
-}
-
-TEST_F(GpuControlListTest, DetailedEntryAndInvalidJson) {
- // exact setting.
- const std::string exact_list_json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 5,
- "os": {
- "type": "macosx",
- "version": {
- "op": "=",
- "value": "10.6.4"
- }
- },
- "vendor_id": "0x10de",
- "device_id": ["0x0640"],
- "driver_version": {
- "op": "=",
- "value": "1.6.18"
- },
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
- std::unique_ptr<GpuControlList> control_list(Create());
-
- EXPECT_TRUE(control_list->LoadList(exact_list_json, GpuControlList::kAllOs));
- std::set<int> features = control_list->MakeDecision(
- GpuControlList::kOsMacosx, kOsVersion, gpu_info());
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
-
- // Invalid json input should not change the current control_list settings.
- const std::string invalid_json = "invalid";
-
- EXPECT_FALSE(control_list->LoadList(invalid_json, GpuControlList::kAllOs));
- features = control_list->MakeDecision(
- GpuControlList::kOsMacosx, kOsVersion, gpu_info());
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
- std::vector<uint32_t> entries;
- control_list->GetDecisionEntries(&entries, false);
- ASSERT_EQ(1u, entries.size());
- EXPECT_EQ(5u, entries[0]);
- EXPECT_EQ(5u, control_list->max_entry_id());
-}
-
-TEST_F(GpuControlListTest, VendorOnAllOsEntry) {
- // ControlList a vendor on all OS.
- const std::string vendor_json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "vendor_id": "0x10de",
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
- std::unique_ptr<GpuControlList> control_list(Create());
-
- // ControlList entries won't be filtered to the current OS only upon loading.
- EXPECT_TRUE(control_list->LoadList(vendor_json, GpuControlList::kAllOs));
- std::set<int> features = control_list->MakeDecision(
- GpuControlList::kOsMacosx, kOsVersion, gpu_info());
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
- features = control_list->MakeDecision(
- GpuControlList::kOsWin, kOsVersion, gpu_info());
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
- features = control_list->MakeDecision(
- GpuControlList::kOsLinux, kOsVersion, gpu_info());
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
-#if defined(OS_WIN) || defined(OS_LINUX) || defined(OS_MACOSX) || \
- defined(OS_OPENBSD)
- // ControlList entries will be filtered to the current OS only upon loading.
- EXPECT_TRUE(control_list->LoadList(
- vendor_json, GpuControlList::kCurrentOsOnly));
- features = control_list->MakeDecision(
- GpuControlList::kOsMacosx, kOsVersion, gpu_info());
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
- features = control_list->MakeDecision(
- GpuControlList::kOsWin, kOsVersion, gpu_info());
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
- features = control_list->MakeDecision(
- GpuControlList::kOsLinux, kOsVersion, gpu_info());
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
-#endif
-}
-
-TEST_F(GpuControlListTest, UnknownField) {
- const std::string unknown_field_json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "unknown_field": 0,
- "features": [
- "test_feature_1"
- ]
- },
- {
- "id": 2,
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
- std::unique_ptr<GpuControlList> control_list(Create());
-
- EXPECT_FALSE(control_list->LoadList(
- unknown_field_json, GpuControlList::kAllOs));
-}
-
-TEST_F(GpuControlListTest, UnknownExceptionField) {
- const std::string unknown_exception_field_json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "unknown_field": 0,
- "features": [
- "test_feature_2"
- ]
- },
- {
- "id": 2,
- "exceptions": [
- {
- "unknown_field": 0
- }
- ],
- "features": [
- "test_feature_1"
- ]
- },
- {
- "id": 3,
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
- std::unique_ptr<GpuControlList> control_list(Create());
-
- EXPECT_FALSE(control_list->LoadList(
- unknown_exception_field_json, GpuControlList::kAllOs));
-}
-
-TEST_F(GpuControlListTest, DisabledEntry) {
- const std::string disabled_json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "disabled": true,
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
- std::unique_ptr<GpuControlList> control_list(Create());
- EXPECT_TRUE(control_list->LoadList(disabled_json, GpuControlList::kAllOs));
- std::set<int> features = control_list->MakeDecision(
- GpuControlList::kOsWin, kOsVersion, gpu_info());
- EXPECT_EMPTY_SET(features);
- std::vector<uint32_t> flag_entries;
- control_list->GetDecisionEntries(&flag_entries, false);
- EXPECT_EQ(0u, flag_entries.size());
- control_list->GetDecisionEntries(&flag_entries, true);
- EXPECT_EQ(1u, flag_entries.size());
-}
-
TEST_F(GpuControlListTest, NeedsMoreInfo) {
- const std::string json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "os": {
- "type": "win"
- },
- "vendor_id": "0x10de",
- "driver_version": {
- "op": "<",
- "value": "12"
- },
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
+ const Entry kEntries[1] = {
+ kGpuControlListTestingEntries[kGpuControlListTest_NeedsMoreInfo]};
+ std::unique_ptr<GpuControlList> control_list = Create(1, kEntries);
+
GPUInfo gpu_info;
gpu_info.gpu.vendor_id = kNvidiaVendorId;
- std::unique_ptr<GpuControlList> control_list(Create());
- EXPECT_TRUE(control_list->LoadList(json, GpuControlList::kAllOs));
-
std::set<int> features = control_list->MakeDecision(
GpuControlList::kOsWin, kOsVersion, gpu_info);
EXPECT_EMPTY_SET(features);
EXPECT_TRUE(control_list->needs_more_info());
std::vector<uint32_t> decision_entries;
- control_list->GetDecisionEntries(&decision_entries, false);
+ control_list->GetDecisionEntries(&decision_entries);
EXPECT_EQ(0u, decision_entries.size());
gpu_info.driver_version = "11";
@@ -329,40 +85,19 @@ TEST_F(GpuControlListTest, NeedsMoreInfo) {
GpuControlList::kOsWin, kOsVersion, gpu_info);
EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
EXPECT_FALSE(control_list->needs_more_info());
- control_list->GetDecisionEntries(&decision_entries, false);
+ control_list->GetDecisionEntries(&decision_entries);
EXPECT_EQ(1u, decision_entries.size());
}
TEST_F(GpuControlListTest, NeedsMoreInfoForExceptions) {
- const std::string json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "os": {
- "type": "linux"
- },
- "vendor_id": "0x8086",
- "exceptions": [
- {
- "gl_renderer": ".*mesa.*"
- }
- ],
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
+ const Entry kEntries[1] = {
+ kGpuControlListTestingEntries
+ [kGpuControlListTest_NeedsMoreInfoForExceptions]};
+ std::unique_ptr<GpuControlList> control_list = Create(1, kEntries);
+
GPUInfo gpu_info;
gpu_info.gpu.vendor_id = kIntelVendorId;
- std::unique_ptr<GpuControlList> control_list(Create());
- EXPECT_TRUE(control_list->LoadList(json, GpuControlList::kAllOs));
-
// The case this entry does not apply.
std::set<int> features = control_list->MakeDecision(
GpuControlList::kOsMacosx, kOsVersion, gpu_info);
@@ -395,228 +130,28 @@ TEST_F(GpuControlListTest, NeedsMoreInfoForExceptions) {
TEST_F(GpuControlListTest, IgnorableEntries) {
// If an entry will not change the control_list decisions, then it should not
// trigger the needs_more_info flag.
- const std::string json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "os": {
- "type": "linux"
- },
- "vendor_id": "0x8086",
- "features": [
- "test_feature_0"
- ]
- },
- {
- "id": 2,
- "os": {
- "type": "linux"
- },
- "vendor_id": "0x8086",
- "driver_version": {
- "op": "<",
- "value": "10.7"
- },
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
- GPUInfo gpu_info;
- gpu_info.gpu.vendor_id = kIntelVendorId;
+ const Entry kEntries[2] = {
+ kGpuControlListTestingEntries[kGpuControlListTest_IgnorableEntries_0],
+ kGpuControlListTestingEntries[kGpuControlListTest_IgnorableEntries_1]};
+ std::unique_ptr<GpuControlList> control_list = Create(2, kEntries);
- std::unique_ptr<GpuControlList> control_list(Create());
- EXPECT_TRUE(control_list->LoadList(json, GpuControlList::kAllOs));
- std::set<int> features = control_list->MakeDecision(
- GpuControlList::kOsLinux, kOsVersion, gpu_info);
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
- EXPECT_FALSE(control_list->needs_more_info());
-}
-
-TEST_F(GpuControlListTest, ExceptionWithoutVendorId) {
- const std::string json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "os": {
- "type": "linux"
- },
- "vendor_id": "0x8086",
- "exceptions": [
- {
- "device_id": ["0x2a06"],
- "driver_version": {
- "op": ">=",
- "value": "8.1"
- }
- },
- {
- "device_id": ["0x2a02"],
- "driver_version": {
- "op": ">=",
- "value": "9.1"
- }
- }
- ],
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
GPUInfo gpu_info;
gpu_info.gpu.vendor_id = kIntelVendorId;
- gpu_info.gpu.device_id = 0x2a02;
- gpu_info.driver_version = "9.1";
-
- std::unique_ptr<GpuControlList> control_list(Create());
- EXPECT_TRUE(control_list->LoadList(json, GpuControlList::kAllOs));
std::set<int> features = control_list->MakeDecision(
GpuControlList::kOsLinux, kOsVersion, gpu_info);
- EXPECT_EMPTY_SET(features);
-
- gpu_info.driver_version = "9.0";
- features = control_list->MakeDecision(
- GpuControlList::kOsLinux, kOsVersion, gpu_info);
EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
-}
-
-TEST_F(GpuControlListTest, AMDSwitchable) {
- GPUInfo gpu_info;
- gpu_info.amd_switchable = true;
- gpu_info.gpu.vendor_id = kAmdVendorId;
- gpu_info.gpu.device_id = 0x6760;
- GPUInfo::GPUDevice integrated_gpu;
- integrated_gpu.vendor_id = kIntelVendorId;
- integrated_gpu.device_id = 0x0116;
- gpu_info.secondary_gpus.push_back(integrated_gpu);
-
- { // amd_switchable_discrete entry
- const std::string json= LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "os": {
- "type": "win"
- },
- "multi_gpu_style": "amd_switchable_discrete",
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
-
- std::unique_ptr<GpuControlList> control_list(Create());
- EXPECT_TRUE(control_list->LoadList(json, GpuControlList::kAllOs));
-
- // Integrated GPU is active
- gpu_info.gpu.active = false;
- gpu_info.secondary_gpus[0].active = true;
- std::set<int> features = control_list->MakeDecision(
- GpuControlList::kOsWin, kOsVersion, gpu_info);
- EXPECT_EMPTY_SET(features);
-
- // Discrete GPU is active
- gpu_info.gpu.active = true;
- gpu_info.secondary_gpus[0].active = false;
- features = control_list->MakeDecision(
- GpuControlList::kOsWin, kOsVersion, gpu_info);
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
- }
-
- { // amd_switchable_integrated entry
- const std::string json= LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "os": {
- "type": "win"
- },
- "multi_gpu_style": "amd_switchable_integrated",
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
-
- std::unique_ptr<GpuControlList> control_list(Create());
- EXPECT_TRUE(control_list->LoadList(json, GpuControlList::kAllOs));
-
- // Discrete GPU is active
- gpu_info.gpu.active = true;
- gpu_info.secondary_gpus[0].active = false;
- std::set<int> features = control_list->MakeDecision(
- GpuControlList::kOsWin, kOsVersion, gpu_info);
- EXPECT_EMPTY_SET(features);
-
- // Integrated GPU is active
- gpu_info.gpu.active = false;
- gpu_info.secondary_gpus[0].active = true;
- features = control_list->MakeDecision(
- GpuControlList::kOsWin, kOsVersion, gpu_info);
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
-
- // For non AMD switchable
- gpu_info.amd_switchable = false;
- features = control_list->MakeDecision(
- GpuControlList::kOsWin, kOsVersion, gpu_info);
- EXPECT_EMPTY_SET(features);
- }
+ EXPECT_FALSE(control_list->needs_more_info());
}
TEST_F(GpuControlListTest, DisabledExtensionTest) {
// exact setting.
- const std::string exact_list_json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "os": {
- "type": "win"
- },
- "disabled_extensions": [
- "test_extension2",
- "test_extension1"
- ]
- },
- {
- "id": 2,
- "os": {
- "type": "win"
- },
- "disabled_extensions": [
- "test_extension3",
- "test_extension2"
- ]
- }
- ]
- }
- );
- std::unique_ptr<GpuControlList> control_list(Create());
-
- EXPECT_TRUE(control_list->LoadList(exact_list_json, GpuControlList::kAllOs));
+ const Entry kEntries[2] = {kGpuControlListTestingEntries
+ [kGpuControlListTest_DisabledExtensionTest_0],
+ kGpuControlListTestingEntries
+ [kGpuControlListTest_DisabledExtensionTest_1]};
+ std::unique_ptr<GpuControlList> control_list = Create(2, kEntries);
+
GPUInfo gpu_info;
control_list->MakeDecision(GpuControlList::kOsWin, kOsVersion, gpu_info);
@@ -629,71 +164,21 @@ TEST_F(GpuControlListTest, DisabledExtensionTest) {
ASSERT_STREQ("test_extension3", disabled_extensions[2].c_str());
}
-TEST_F(GpuControlListTest, DisabledInProcessGPUTest) {
- const std::string exact_list_json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "os": {
- "type": "win"
- },
- "in_process_gpu": true,
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
- std::unique_ptr<GpuControlList> control_list(Create());
-
- EXPECT_TRUE(control_list->LoadList(exact_list_json, GpuControlList::kAllOs));
+TEST_F(GpuControlListTest, LinuxKernelVersion) {
+ const Entry kEntries[1] = {
+ kGpuControlListTestingEntries[kGpuControlListTest_LinuxKernelVersion]};
+ std::unique_ptr<GpuControlList> control_list = Create(1, kEntries);
+
GPUInfo gpu_info;
+ gpu_info.gpu.vendor_id = 0x8086;
- gpu_info.in_process_gpu = true;
std::set<int> features = control_list->MakeDecision(
- GpuControlList::kOsWin, kOsVersion, gpu_info);
+ GpuControlList::kOsLinux, "3.13.0-63-generic", gpu_info);
EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
- gpu_info.in_process_gpu = false;
- features = control_list->MakeDecision(
- GpuControlList::kOsWin, kOsVersion, gpu_info);
+ features = control_list->MakeDecision(GpuControlList::kOsLinux,
+ "3.19.2-1-generic", gpu_info);
EXPECT_EMPTY_SET(features);
}
-TEST_F(GpuControlListTest, SameGPUTwiceTest) {
- const std::string json = LONG_STRING_CONST(
- {
- "name": "gpu control list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "os": {
- "type": "win"
- },
- "vendor_id": "0x8086",
- "features": [
- "test_feature_0"
- ]
- }
- ]
- }
- );
- GPUInfo gpu_info;
- gpu_info.gpu.vendor_id = kIntelVendorId;
- // Real case on Intel GMA* on Windows
- gpu_info.secondary_gpus.push_back(gpu_info.gpu);
-
- std::unique_ptr<GpuControlList> control_list(Create());
- EXPECT_TRUE(control_list->LoadList(json, GpuControlList::kAllOs));
- std::set<int> features = control_list->MakeDecision(
- GpuControlList::kOsWin, kOsVersion, gpu_info);
- EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
- EXPECT_FALSE(control_list->needs_more_info());
-}
-
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list_version_info_unittest.cc b/chromium/gpu/config/gpu_control_list_version_unittest.cc
index bde9d0c2f56..0a1bf03c6ee 100644
--- a/chromium/gpu/config/gpu_control_list_version_info_unittest.cc
+++ b/chromium/gpu/config/gpu_control_list_version_unittest.cc
@@ -9,133 +9,52 @@
namespace gpu {
-class VersionInfoTest : public testing::Test {
- public:
- VersionInfoTest() { }
- ~VersionInfoTest() override {}
+namespace {
- typedef GpuControlList::VersionInfo VersionInfo;
-};
+constexpr auto kNumerical = GpuControlList::kVersionStyleNumerical;
+constexpr auto kLexical = GpuControlList::kVersionStyleLexical;
-TEST_F(VersionInfoTest, ValidVersionInfo) {
- const std::string op[] = {
- "=",
- "<",
- "<=",
- ">",
- ">=",
- "any",
- "between"
- };
- for (size_t i = 0; i < arraysize(op); ++i) {
- std::string string1;
- std::string string2;
- if (op[i] != "any")
- string1 = "8.9";
- if (op[i] == "between")
- string2 = "9.0";
- VersionInfo info(op[i], std::string(), string1, string2);
- EXPECT_TRUE(info.IsValid());
- }
+constexpr auto kBetween = GpuControlList::kBetween;
+constexpr auto kEQ = GpuControlList::kEQ;
+constexpr auto kLT = GpuControlList::kLT;
+constexpr auto kLE = GpuControlList::kLE;
+constexpr auto kGT = GpuControlList::kGT;
+constexpr auto kGE = GpuControlList::kGE;
+constexpr auto kAny = GpuControlList::kAny;
- const std::string style[] = {
- "lexical",
- "numerical",
- "" // Default, same as "numerical"
- };
- for (size_t i =0; i < arraysize(style); ++i) {
- VersionInfo info("=", style[i], "8.9", std::string());
- EXPECT_TRUE(info.IsValid());
- if (style[i] == "lexical")
- EXPECT_TRUE(info.IsLexical());
- else
- EXPECT_FALSE(info.IsLexical());
- }
-
- const std::string number[] = {
- "10",
- "10.9",
- "10.0",
- "10.0.9",
- "0.8",
- // Leading 0s are valid.
- "10.09",
- // Whitespaces are ignored.
- " 10.9",
- "10.9 ",
- "10 .9",
- "10. 9",
- };
- for (size_t i =0; i < arraysize(number); ++i) {
- VersionInfo info("=", std::string(), number[i], std::string());
- EXPECT_TRUE(info.IsValid());
- }
-}
+} // namespace anonymous
-TEST_F(VersionInfoTest, InvalidVersionInfo) {
- const std::string op[] = {
- "=",
- "<",
- "<=",
- ">",
- ">=",
- "any",
- "between"
- };
- for (size_t i = 0; i < arraysize(op); ++i) {
- {
- VersionInfo info(op[i], std::string(), "8.9", std::string());
- if (op[i] == "between")
- EXPECT_FALSE(info.IsValid());
- else
- EXPECT_TRUE(info.IsValid());
- }
- {
- VersionInfo info(op[i], std::string(), std::string(), std::string());
- if (op[i] == "any")
- EXPECT_TRUE(info.IsValid());
- else
- EXPECT_FALSE(info.IsValid());
- }
- {
- VersionInfo info(op[i], std::string(), "8.9", "9.0");
- EXPECT_TRUE(info.IsValid());
- }
- }
+class VersionTest : public testing::Test {
+ public:
+ VersionTest() {}
+ ~VersionTest() override {}
- const std::string number[] = {
- "8.E",
- "8-9",
- };
- for (size_t i = 0; i < arraysize(number); ++i) {
- VersionInfo info("=", std::string(), number[i], std::string());
- EXPECT_FALSE(info.IsValid());
- }
-}
+ typedef GpuControlList::Version Version;
+};
-TEST_F(VersionInfoTest, VersionComparison) {
+TEST_F(VersionTest, VersionComparison) {
{
- VersionInfo info("any", std::string(), std::string(), std::string());
+ Version info = {kAny, kNumerical, nullptr, nullptr};
EXPECT_TRUE(info.Contains("0"));
EXPECT_TRUE(info.Contains("8.9"));
EXPECT_TRUE(info.Contains("100"));
}
{
- VersionInfo info(">", std::string(), "8.9", std::string());
+ Version info = {kGT, kNumerical, "8.9", nullptr};
EXPECT_FALSE(info.Contains("7"));
EXPECT_FALSE(info.Contains("8.9"));
EXPECT_FALSE(info.Contains("8.9.1"));
EXPECT_TRUE(info.Contains("9"));
}
{
- VersionInfo info(">=", std::string(), "8.9", std::string());
+ Version info = {kGE, kNumerical, "8.9", nullptr};
EXPECT_FALSE(info.Contains("7"));
EXPECT_TRUE(info.Contains("8.9"));
EXPECT_TRUE(info.Contains("8.9.1"));
EXPECT_TRUE(info.Contains("9"));
}
{
- VersionInfo info("=", std::string(), "8.9", std::string());
+ Version info = {kEQ, kNumerical, "8.9", nullptr};
EXPECT_FALSE(info.Contains("7"));
EXPECT_TRUE(info.Contains("8"));
EXPECT_TRUE(info.Contains("8.9"));
@@ -143,7 +62,7 @@ TEST_F(VersionInfoTest, VersionComparison) {
EXPECT_FALSE(info.Contains("9"));
}
{
- VersionInfo info("<", std::string(), "8.9", std::string());
+ Version info = {kLT, kNumerical, "8.9", nullptr};
EXPECT_TRUE(info.Contains("7"));
EXPECT_TRUE(info.Contains("8.8"));
EXPECT_FALSE(info.Contains("8"));
@@ -152,7 +71,7 @@ TEST_F(VersionInfoTest, VersionComparison) {
EXPECT_FALSE(info.Contains("9"));
}
{
- VersionInfo info("<=", std::string(), "8.9", std::string());
+ Version info = {kLE, kNumerical, "8.9", nullptr};
EXPECT_TRUE(info.Contains("7"));
EXPECT_TRUE(info.Contains("8.8"));
EXPECT_TRUE(info.Contains("8"));
@@ -161,7 +80,7 @@ TEST_F(VersionInfoTest, VersionComparison) {
EXPECT_FALSE(info.Contains("9"));
}
{
- VersionInfo info("between", std::string(), "8.9", "9.1");
+ Version info = {kBetween, kNumerical, "8.9", "9.1"};
EXPECT_FALSE(info.Contains("7"));
EXPECT_FALSE(info.Contains("8.8"));
EXPECT_TRUE(info.Contains("8"));
@@ -175,18 +94,18 @@ TEST_F(VersionInfoTest, VersionComparison) {
}
}
-TEST_F(VersionInfoTest, DateComparison) {
+TEST_F(VersionTest, DateComparison) {
// When we use '-' as splitter, we assume a format of mm-dd-yyyy
// or mm-yyyy, i.e., a date.
{
- VersionInfo info("=", std::string(), "1976.3.21", std::string());
+ Version info = {kEQ, kNumerical, "1976.3.21", nullptr};
EXPECT_TRUE(info.Contains("3-21-1976", '-'));
EXPECT_TRUE(info.Contains("3-1976", '-'));
EXPECT_TRUE(info.Contains("03-1976", '-'));
EXPECT_FALSE(info.Contains("21-3-1976", '-'));
}
{
- VersionInfo info(">", std::string(), "1976.3.21", std::string());
+ Version info = {kGT, kNumerical, "1976.3.21", nullptr};
EXPECT_TRUE(info.Contains("3-22-1976", '-'));
EXPECT_TRUE(info.Contains("4-1976", '-'));
EXPECT_TRUE(info.Contains("04-1976", '-'));
@@ -194,7 +113,7 @@ TEST_F(VersionInfoTest, DateComparison) {
EXPECT_FALSE(info.Contains("2-1976", '-'));
}
{
- VersionInfo info("between", std::string(), "1976.3.21", "2012.12.25");
+ Version info = {kBetween, kNumerical, "1976.3.21", "2012.12.25"};
EXPECT_FALSE(info.Contains("3-20-1976", '-'));
EXPECT_TRUE(info.Contains("3-21-1976", '-'));
EXPECT_TRUE(info.Contains("3-22-1976", '-'));
@@ -213,11 +132,11 @@ TEST_F(VersionInfoTest, DateComparison) {
}
}
-TEST_F(VersionInfoTest, LexicalComparison) {
+TEST_F(VersionTest, LexicalComparison) {
// When we use lexical style, we assume a format major.minor.*.
// We apply numerical comparison to major, lexical comparison to others.
{
- VersionInfo info("<", "lexical", "8.201", std::string());
+ Version info = {kLT, kLexical, "8.201", nullptr};
EXPECT_TRUE(info.Contains("8.001.100"));
EXPECT_TRUE(info.Contains("8.109"));
EXPECT_TRUE(info.Contains("8.10900"));
@@ -236,7 +155,7 @@ TEST_F(VersionInfoTest, LexicalComparison) {
EXPECT_FALSE(info.Contains("12.201"));
}
{
- VersionInfo info("<", "lexical", "9.002", std::string());
+ Version info = {kLT, kLexical, "9.002", nullptr};
EXPECT_TRUE(info.Contains("8.001.100"));
EXPECT_TRUE(info.Contains("8.109"));
EXPECT_TRUE(info.Contains("8.10900"));
@@ -257,4 +176,3 @@ TEST_F(VersionInfoTest, LexicalComparison) {
}
} // namespace gpu
-
diff --git a/chromium/gpu/config/gpu_crash_keys.cc b/chromium/gpu/config/gpu_crash_keys.cc
new file mode 100644
index 00000000000..427bb950d46
--- /dev/null
+++ b/chromium/gpu/config/gpu_crash_keys.cc
@@ -0,0 +1,23 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/config/gpu_crash_keys.h"
+
+namespace gpu {
+namespace crash_keys {
+#if !defined(OS_ANDROID)
+const char kGPUVendorID[] = "gpu-venid";
+const char kGPUDeviceID[] = "gpu-devid";
+#endif
+const char kGPUDriverVersion[] = "gpu-driver";
+const char kGPUPixelShaderVersion[] = "gpu-psver";
+const char kGPUVertexShaderVersion[] = "gpu-vsver";
+#if defined(OS_MACOSX)
+const char kGPUGLVersion[] = "gpu-glver";
+#elif defined(OS_POSIX)
+const char kGPUVendor[] = "gpu-gl-vendor";
+const char kGPURenderer[] = "gpu-gl-renderer";
+#endif
+} // namespace crash_keys
+} // namespace gpu
diff --git a/chromium/gpu/config/gpu_crash_keys.h b/chromium/gpu/config/gpu_crash_keys.h
new file mode 100644
index 00000000000..b7ee75c09c0
--- /dev/null
+++ b/chromium/gpu/config/gpu_crash_keys.h
@@ -0,0 +1,30 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_CONFIG_GPU_CRASH_KEYS_H_
+#define GPU_CONFIG_GPU_CRASH_KEYS_H_
+
+#include "build/build_config.h"
+
+namespace gpu {
+namespace crash_keys {
+
+// Keys that can be used for crash reporting.
+#if !defined(OS_ANDROID)
+extern const char kGPUVendorID[];
+extern const char kGPUDeviceID[];
+#endif
+extern const char kGPUDriverVersion[];
+extern const char kGPUPixelShaderVersion[];
+extern const char kGPUVertexShaderVersion[];
+#if defined(OS_MACOSX)
+extern const char kGPUGLVersion[];
+#elif defined(OS_POSIX)
+extern const char kGPUVendor[];
+extern const char kGPURenderer[];
+#endif
+} // namespace crash_keys
+} // namespace gpu
+
+#endif // GPU_CONFIG_GPU_CRASH_KEYS_H_
diff --git a/chromium/gpu/config/gpu_driver_bug_list.README b/chromium/gpu/config/gpu_driver_bug_list.README
new file mode 100644
index 00000000000..fad15de3919
--- /dev/null
+++ b/chromium/gpu/config/gpu_driver_bug_list.README
@@ -0,0 +1,7 @@
+Determines whether a certain driver bug exists in the current system.
+
+The format of a valid gpu_driver_bug_list.json file is defined in
+ <gpu/config/gpu_control_list_format.txt>.
+
+The supported "features" can be found in
+ <gpu/config/gpu_driver_bug_workaround_type.h>.
diff --git a/chromium/gpu/config/gpu_driver_bug_list.cc b/chromium/gpu/config/gpu_driver_bug_list.cc
index be0bc1ba07a..7a32c28569e 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.cc
+++ b/chromium/gpu/config/gpu_driver_bug_list.cc
@@ -5,6 +5,7 @@
#include "gpu/config/gpu_driver_bug_list.h"
#include "base/logging.h"
+#include "gpu/config/gpu_driver_bug_list_autogen.h"
#include "gpu/config/gpu_driver_bug_workaround_type.h"
#include "gpu/config/gpu_switches.h"
#include "gpu/config/gpu_util.h"
@@ -26,16 +27,23 @@ const GpuDriverBugWorkaroundInfo kFeatureList[] = {
} // namespace anonymous
-GpuDriverBugList::GpuDriverBugList()
- : GpuControlList() {
-}
+GpuDriverBugList::GpuDriverBugList(const GpuControlListData& data)
+ : GpuControlList(data) {}
GpuDriverBugList::~GpuDriverBugList() {
}
// static
-GpuDriverBugList* GpuDriverBugList::Create() {
- GpuDriverBugList* list = new GpuDriverBugList();
+std::unique_ptr<GpuDriverBugList> GpuDriverBugList::Create() {
+ GpuControlListData data(kGpuDriverBugListVersion, kGpuDriverBugListEntryCount,
+ kGpuDriverBugListEntries);
+ return Create(data);
+}
+
+// static
+std::unique_ptr<GpuDriverBugList> GpuDriverBugList::Create(
+ const GpuControlListData& data) {
+ std::unique_ptr<GpuDriverBugList> list(new GpuDriverBugList(data));
DCHECK_EQ(static_cast<int>(arraysize(kFeatureList)),
NUMBER_OF_GPU_DRIVER_BUG_WORKAROUND_TYPES);
@@ -106,4 +114,3 @@ void GpuDriverBugList::AppendAllWorkarounds(
}
} // namespace gpu
-
diff --git a/chromium/gpu/config/gpu_driver_bug_list.h b/chromium/gpu/config/gpu_driver_bug_list.h
index 0ec839ed4ee..c30a019d87e 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.h
+++ b/chromium/gpu/config/gpu_driver_bug_list.h
@@ -5,13 +5,12 @@
#ifndef GPU_CONFIG_GPU_DRIVER_BUG_LIST_H_
#define GPU_CONFIG_GPU_DRIVER_BUG_LIST_H_
+#include <memory>
#include <set>
-#include <string>
#include "base/command_line.h"
#include "base/macros.h"
#include "gpu/config/gpu_control_list.h"
-#include "gpu/config/gpu_driver_bug_workaround_type.h"
#include "gpu/gpu_export.h"
namespace gpu {
@@ -20,7 +19,9 @@ class GPU_EXPORT GpuDriverBugList : public GpuControlList {
public:
~GpuDriverBugList() override;
- static GpuDriverBugList* Create();
+ static std::unique_ptr<GpuDriverBugList> Create();
+ static std::unique_ptr<GpuDriverBugList> Create(
+ const GpuControlListData& data);
// Append |workarounds| with these passed in through the
// |command_line|.
@@ -34,7 +35,7 @@ class GPU_EXPORT GpuDriverBugList : public GpuControlList {
static void AppendAllWorkarounds(std::vector<const char*>* workarounds);
private:
- GpuDriverBugList();
+ explicit GpuDriverBugList(const GpuControlListData& data);
DISALLOW_COPY_AND_ASSIGN(GpuDriverBugList);
};
@@ -42,4 +43,3 @@ class GPU_EXPORT GpuDriverBugList : public GpuControlList {
} // namespace gpu
#endif // GPU_CONFIG_GPU_DRIVER_BUG_LIST_H_
-
diff --git a/chromium/gpu/config/gpu_driver_bug_list_json.cc b/chromium/gpu/config/gpu_driver_bug_list.json
index ac67cf4d53c..1d933dbcb6b 100644
--- a/chromium/gpu/config/gpu_driver_bug_list_json.cc
+++ b/chromium/gpu/config/gpu_driver_bug_list.json
@@ -1,25 +1,6 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Determines whether a certain driver bug exists in the current system.
-// The format of a valid gpu_driver_bug_list.json file is defined in
-// <gpu/config/gpu_control_list_format.txt>.
-// The supported "features" can be found in
-// <gpu/config/gpu_driver_bug_workaround_type.h>.
-
-#include "gpu/config/gpu_control_list_jsons.h"
-
-#define LONG_STRING_CONST(...) #__VA_ARGS__
-
-namespace gpu {
-
-const char kGpuDriverBugListJson[] = LONG_STRING_CONST(
-
{
"name": "gpu driver bug list",
- // Please update the version number whenever you change this file.
- "version": "9.36",
+ "version": "10.102",
"entries": [
{
"id": 1,
@@ -83,7 +64,8 @@ const char kGpuDriverBugListJson[] = LONG_STRING_CONST(
},
{
"id": 19,
- "description": "Disable depth textures on Android with Qualcomm GPUs",
+ "comment": "Corresponds to software rendering list #140",
+ "description": "Disable depth textures on older Qualcomm GPUs (legacy blacklist entry, original problem unclear)",
"cr_bugs": [682075],
"gl_renderer": "Adreno \\(TM\\) [23].*",
"features": [
@@ -416,22 +398,6 @@ const char kGpuDriverBugListJson[] = LONG_STRING_CONST(
]
},
{
- "id": 52,
- "cr_bugs": [449116, 471200, 612474, 682075],
- "description": "ES3 MSAA is broken on Qualcomm",
- "os": {
- "type": "android",
- "version": {
- "op": "<",
- "value": "6.0"
- }
- },
- "gl_renderer": "Adreno \\(TM\\) 4.*",
- "features": [
- "disable_chromium_framebuffer_multisample"
- ]
- },
- {
"id": 54,
"cr_bugs": [124764, 349137],
"description": "Clear uniforms before first program use on all platforms",
@@ -518,7 +484,7 @@ const char kGpuDriverBugListJson[] = LONG_STRING_CONST(
"gl_type": "gl",
"gl_renderer": ".*Mesa.*",
"features": [
- "disable_non_empty_post_sub_buffers_for_onscreen_surfaces"
+ "disable_post_sub_buffers_for_onscreen_surfaces"
]
},
{
@@ -577,9 +543,6 @@ const char kGpuDriverBugListJson[] = LONG_STRING_CONST(
"use_virtualized_gl_contexts"
]
},
-) // LONG_STRING_CONST macro
-// Avoid C2026 (string too big) error on VisualStudio.
-LONG_STRING_CONST(
{
"id": 74,
"cr_bugs": [278606, 382686],
@@ -891,7 +854,7 @@ LONG_STRING_CONST(
"gl_vendor": "ARM.*",
"gl_renderer": "Mali-T.*",
"features": [
- "disable_post_sub_buffers_for_onscreen_surfaces"
+ "disable_non_empty_post_sub_buffers_for_onscreen_surfaces"
]
},
{
@@ -1090,7 +1053,10 @@ LONG_STRING_CONST(
{
"id": 116,
"description": "Adreno 420 support for EXT_multisampled_render_to_texture is buggy on Android < 5.1",
- "cr_bugs": [490379],
+ "comment": [
+ "Disabling EXT_multisampled_render_to_texture triggers the explicit multisample resolve path, which is broken on Adreno 4xx/5xx."
+ ],
+ "cr_bugs": [490379, 696126],
"os": {
"type": "android",
"version": {
@@ -1101,6 +1067,9 @@ LONG_STRING_CONST(
"gl_renderer": "Adreno \\(TM\\) 4.*",
"disabled_extensions": [
"GL_EXT_multisampled_render_to_texture"
+ ],
+ "features": [
+ "disable_chromium_framebuffer_multisample"
]
},
{
@@ -1227,7 +1196,7 @@ LONG_STRING_CONST(
},
{
"id": 129,
- // TODO(dshwang): Fix ANGLE crash. crbug.com/518889
+ "comment": "TODO(dshwang): Fix ANGLE crash. crbug.com/518889",
"description": "ANGLE crash on glReadPixels from incomplete cube map texture",
"cr_bugs": [518889],
"os": {
@@ -1380,11 +1349,12 @@ LONG_STRING_CONST(
"description": "glReadPixels fails on FBOs with SRGB_ALPHA textures, Nexus 5X",
"cr_bugs": [550292, 565179],
"os": {
- "type": "android"
- // Originally on Android 6.0. Expect it to fail in later versions.
+ "type": "android",
+ "comment": "Originally on Android 6.0. Expect it to fail in later versions."
},
"gl_vendor": "Qualcomm",
- "gl_renderer": "Adreno \\(TM\\) 4.*", // Originally on 418.
+ "gl_renderer": "Adreno \\(TM\\) 4.*",
+ "comment": "Originally on 418.",
"disabled_extensions": ["GL_EXT_sRGB"]
},
{
@@ -1449,9 +1419,6 @@ LONG_STRING_CONST(
"broken_egl_image_ref_counting"
]
},
-) // LONG_STRING_CONST macro
-// Avoid C2026 (string too big) error on VisualStudio.
-LONG_STRING_CONST(
{
"id": 147,
"description": "Limit max texure size to 4096 on all of Android",
@@ -1751,19 +1718,6 @@ LONG_STRING_CONST(
]
},
{
- "id": 171,
- "description": "NV12 DXGI video hangs or displays incorrect colors on AMD drivers",
- "cr_bugs": [623029, 644293],
- "os": {
- "type": "win"
- },
- "vendor_id": "0x1002",
- "features": [
- "disable_dxgi_zero_copy_video",
- "disable_nv12_dxgi_video"
- ]
- },
- {
"id": 172,
"description": "Limited enabling of Chromium GL_INTEL_framebuffer_CMAA",
"cr_bugs": [535198],
@@ -1790,25 +1744,34 @@ LONG_STRING_CONST(
{
"id": 174,
"description": "Adreno 4xx support for EXT_multisampled_render_to_texture is buggy on Android 7.0",
- "cr_bugs": [612474],
+ "comment": [
+ "Disabling EXT_multisampled_render_to_texture triggers the explicit multisample resolve path, which is broken on Adreno 4xx/5xx."
+ ],
+ "cr_bugs": [612474, 696126],
"os": {
"type": "android",
"version": {
"op": "between",
"value": "7.0.0",
- "value2": "7.0.99"
- // Only initial version of N.
+ "value2": "7.0.99",
+ "comment": "Only initial version of N."
}
},
"gl_renderer": "Adreno \\(TM\\) 4.*",
"disabled_extensions": [
"GL_EXT_multisampled_render_to_texture"
+ ],
+ "features": [
+ "disable_chromium_framebuffer_multisample"
]
},
{
"id": 175,
"description": "Adreno 5xx support for EXT_multisampled_render_to_texture is buggy on Android < 7.0",
- "cr_bugs": [612474],
+ "comment": [
+ "Disabling EXT_multisampled_render_to_texture triggers the explicit multisample resolve path, which is broken on Adreno 4xx/5xx."
+ ],
+ "cr_bugs": [612474, 696126],
"os": {
"type": "android",
"version": {
@@ -1819,6 +1782,9 @@ LONG_STRING_CONST(
"gl_renderer": "Adreno \\(TM\\) 5.*",
"disabled_extensions": [
"GL_EXT_multisampled_render_to_texture"
+ ],
+ "features": [
+ "disable_chromium_framebuffer_multisample"
]
},
{
@@ -1892,7 +1858,11 @@ LONG_STRING_CONST(
"description": "glTexStorage* are buggy when base mipmap level is not 0",
"cr_bugs": [640506],
"os": {
- "type": "macosx"
+ "type": "macosx",
+ "version": {
+ "op": "<",
+ "value": "10.12.4"
+ }
},
"features": [
"reset_base_mipmap_level_before_texstorage"
@@ -2202,7 +2172,10 @@ LONG_STRING_CONST(
{
"id": 205,
"description": "Adreno 5xx support for EXT_multisampled_render_to_texture is buggy on Android 7.1",
- "cr_bugs": [663811],
+ "comment": [
+ "Disabling EXT_multisampled_render_to_texture triggers the explicit multisample resolve path, which is broken on Adreno 4xx/5xx."
+ ],
+ "cr_bugs": [663811, 696126],
"os": {
"type": "android",
"version": {
@@ -2213,6 +2186,9 @@ LONG_STRING_CONST(
"gl_renderer": "Adreno \\(TM\\) 5.*",
"disabled_extensions": [
"GL_EXT_multisampled_render_to_texture"
+ ],
+ "features": [
+ "disable_chromium_framebuffer_multisample"
]
},
{
@@ -2310,19 +2286,18 @@ LONG_STRING_CONST(
"use_virtualized_gl_contexts"
]
},
-) // LONG_STRING_CONST macro
-// Avoid C2026 (string too big) error on VisualStudio.
-LONG_STRING_CONST(
{
- // Corresponds to software rendering list #140.
"id": 214,
- "description": "Certain versions of Qualcomm driver don't setup scissor state correctly when FBO0 is bound.",
- "cr_bugs": [670607, 696627, 698197],
+ "comment": [
+ "Corresponds to software rendering list #140",
+ "Mysteriously, the first workaround won't work without the second. crbug.com/698197#c10",
+ "MSAA workaround shouldn't be needed beyond Adreno 3xx. crbug.com/682075#c17"
+ ],
+ "description": "Some Adreno 3xx don't setup scissor state correctly when FBO0 is bound, nor support MSAA properly.",
+ "cr_bugs": [670607, 682075, 696627, 698197, 707839],
"gl_renderer": "Adreno \\(TM\\) 3.*",
"features": [
"force_update_scissor_state_when_binding_fbo0",
- // Somehow the main workaround above won't work without the one below.
- // See https://crbug.com/698197 for details.
"disable_chromium_framebuffer_multisample"
]
},
@@ -2375,12 +2350,182 @@ LONG_STRING_CONST(
"features": [
"disable_program_disk_cache"
]
+ },
+ {
+ "id": 219,
+ "description": "Zero-copy DXGI video hangs or displays incorrect colors on AMD drivers",
+ "cr_bugs": [623029],
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x1002",
+ "features": [
+ "disable_dxgi_zero_copy_video"
+ ]
+ },
+ {
+ "id": 220,
+ "description": "NV12 DXGI video displays incorrect colors on older AMD drivers",
+ "cr_bugs": [644293],
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x1002",
+ "driver_version": {
+ "op": "<",
+ "value": "21.19.519.2"
+ },
+ "features": [
+ "disable_nv12_dxgi_video"
+ ]
+ },
+ {
+ "id": 221,
+ "description": "Very large instanced draw calls crash on some Adreno 3xx drivers",
+ "cr_bugs": [701682],
+ "os": {
+ "type": "android"
+ },
+ "gl_renderer": "Adreno \\(TM\\) 3.*",
+ "features": [
+ "disallow_large_instanced_draw"
+ ]
+ },
+ {
+ "id": 222,
+ "description": "Software to Accelerated canvas update breaks Linux AMD",
+ "cr_bugs": [710029],
+ "os": {
+ "type": "linux"
+ },
+ "vendor_id": "0x1002",
+ "features": [
+ "disable_software_to_accelerated_canvas_upgrade"
+ ]
+ },
+ {
+ "id": 227,
+ "description": "Certain Apple devices leak stencil buffers",
+ "cr_bugs": [713854],
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x8086",
+ "device_id": ["0x0412", "0x0d26", "0x0a2e", "0x0a26", "0x0d22", "0x161e",
+ "0x1626", "0x162b", "0x1622"],
+ "features": [
+ "avoid_stencil_buffers"
+ ]
+ },
+ {
+ "id": 224,
+ "description": "VPx decoding isn't supported before Windows 10 anniversary update.",
+ "cr_bugs": [616318],
+ "os": {
+ "type": "win",
+ "version": {
+ "op": "<",
+ "value": "10.0.14393"
+ }
+ },
+ "features": [
+ "disable_accelerated_vpx_decode"
+ ]
+ },
+ {
+ "id": 225,
+ "description": "VPx decoding is too slow on Intel Broadwell, Skylake, and CherryView",
+ "cr_bugs": [616318],
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x8086",
+ "device_id": ["0x1602", "0x1606", "0x160a", "0x160b", "0x160d",
+ "0x160e", "0x1612", "0x1616", "0x161a", "0x161b",
+ "0x161d", "0x161e", "0x1622", "0x1626", "0x162a",
+ "0x162b", "0x162d", "0x162e", "0x22b0", "0x22b1",
+ "0x22b2", "0x22b3", "0x1902", "0x1906", "0x190a",
+ "0x190b", "0x190e", "0x1912", "0x1913", "0x1915",
+ "0x1916", "0x1917", "0x191a", "0x191b", "0x191d",
+ "0x191e", "0x1921", "0x1923", "0x1926", "0x1927",
+ "0x192a", "0x192b", "0x192d", "0x1932", "0x193a",
+ "0x193b", "0x193d"],
+ "features": [
+ "disable_accelerated_vpx_decode"
+ ]
+ },
+ {
+ "id": 226,
+ "description": "Accelerated VPx decoding is hanging on some videos.",
+ "cr_bugs": [654111],
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x8086",
+ "driver_version": {
+ "op": "<",
+ "value": "21.20.16.4542"
+ },
+ "features": [
+ "disable_accelerated_vpx_decode"
+ ]
+ },
+ {
+ "id": 229,
+ "description": "Overlay sizes bigger than screen aren't accelerated on some Intel drivers",
+ "cr_bugs": [720059],
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x8086",
+ "driver_version": {
+ "op": "<",
+ "value": "21.20.16.4542"
+ },
+ "features": [
+ "disable_larger_than_screen_overlays"
+ ]
+ },
+ {
+ "id": 230,
+ "description": "Adreno 4xx support for EXT_multisampled_render_to_texture is broken on Android 8.0",
+ "comment": [
+ "Disabling EXT_multisampled_render_to_texture triggers the explicit multisample resolve path, which is broken on Adreno 4xx/5xx."
+ ],
+ "cr_bugs": [722962, 696126],
+ "os": {
+ "type": "android",
+ "version": {
+ "op": "between",
+ "value": "8.0.0",
+ "value2": "8.0.99",
+ "comment": "Only initial version of O."
+ }
+ },
+ "gl_renderer": "Adreno \\(TM\\) 4.*",
+ "disabled_extensions": [
+ "GL_EXT_multisampled_render_to_texture"
+ ],
+ "features": [
+ "disable_chromium_framebuffer_multisample"
+ ]
+ },
+ {
+ "id": 231,
+ "description": "Disable use of Direct3D 11 on Intel Cherryview due to visual glitches.",
+ "cr_bugs": [730126],
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x8086",
+ "device_id": ["0x22B0", "0x22B1", "0x22B2", "0x22B3"],
+ "features": [
+ "disable_d3d11"
+ ]
}
+ ],
+ "comment": [
+ "Please update the version number on top whenever you change this file",
+ "Please run gpu/config/process_json.py whenever you change this file"
]
- // Please update the version number at beginning of this file whenever you
- // change this file.
}
-
-); // LONG_STRING_CONST macro
-
-} // namespace gpu
diff --git a/chromium/gpu/config/gpu_driver_bug_list_unittest.cc b/chromium/gpu/config/gpu_driver_bug_list_unittest.cc
index 03bf5a3e66e..5ed55ecdd4a 100644
--- a/chromium/gpu/config/gpu_driver_bug_list_unittest.cc
+++ b/chromium/gpu/config/gpu_driver_bug_list_unittest.cc
@@ -2,61 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <stdint.h>
-
-#include <memory>
-
#include "base/command_line.h"
-#include "base/logging.h"
-#include "gpu/config/gpu_control_list_jsons.h"
#include "gpu/config/gpu_driver_bug_list.h"
#include "gpu/config/gpu_driver_bug_workaround_type.h"
#include "gpu/config/gpu_info.h"
#include "testing/gtest/include/gtest/gtest.h"
-#define LONG_STRING_CONST(...) #__VA_ARGS__
-
namespace gpu {
class GpuDriverBugListTest : public testing::Test {
public:
- GpuDriverBugListTest() { }
-
+ GpuDriverBugListTest() {}
~GpuDriverBugListTest() override {}
-
- const GPUInfo& gpu_info() const {
- return gpu_info_;
- }
-
- protected:
- void SetUp() override {
- gpu_info_.gpu.vendor_id = 0x10de;
- gpu_info_.gpu.device_id = 0x0640;
- gpu_info_.driver_vendor = "NVIDIA";
- gpu_info_.driver_version = "1.6.18";
- gpu_info_.driver_date = "7-14-2009";
- gpu_info_.machine_model_name = "MacBookPro";
- gpu_info_.machine_model_version = "7.1";
- gpu_info_.gl_vendor = "NVIDIA Corporation";
- gpu_info_.gl_renderer = "NVIDIA GeForce GT 120 OpenGL Engine";
- }
-
- void TearDown() override {}
-
- private:
- GPUInfo gpu_info_;
};
-TEST_F(GpuDriverBugListTest, CurrentDriverBugListValidation) {
- std::unique_ptr<GpuDriverBugList> list(GpuDriverBugList::Create());
- std::string json;
- EXPECT_TRUE(list->LoadList(kGpuDriverBugListJson, GpuControlList::kAllOs));
-}
-
TEST_F(GpuDriverBugListTest, CurrentListForARM) {
- std::unique_ptr<GpuDriverBugList> list(GpuDriverBugList::Create());
- EXPECT_TRUE(list->LoadList(kGpuDriverBugListJson, GpuControlList::kAllOs));
-
+ std::unique_ptr<GpuDriverBugList> list = GpuDriverBugList::Create();
GPUInfo gpu_info;
gpu_info.gl_vendor = "ARM";
gpu_info.gl_renderer = "MALi_T604";
@@ -66,9 +27,7 @@ TEST_F(GpuDriverBugListTest, CurrentListForARM) {
}
TEST_F(GpuDriverBugListTest, CurrentListForImagination) {
- std::unique_ptr<GpuDriverBugList> list(GpuDriverBugList::Create());
- EXPECT_TRUE(list->LoadList(kGpuDriverBugListJson, GpuControlList::kAllOs));
-
+ std::unique_ptr<GpuDriverBugList> list = GpuDriverBugList::Create();
GPUInfo gpu_info;
gpu_info.gl_vendor = "Imagination Technologies";
gpu_info.gl_renderer = "PowerVR SGX 540";
@@ -77,55 +36,6 @@ TEST_F(GpuDriverBugListTest, CurrentListForImagination) {
EXPECT_EQ(1u, bugs.count(USE_CLIENT_SIDE_ARRAYS_FOR_STREAM_BUFFERS));
}
-TEST_F(GpuDriverBugListTest, GpuSwitching) {
- const std::string json = LONG_STRING_CONST(
- {
- "name": "gpu driver bug list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "os": {
- "type": "macosx"
- },
- "features": [
- "force_discrete_gpu"
- ]
- },
- {
- "id": 2,
- "os": {
- "type": "win"
- },
- "features": [
- "force_integrated_gpu"
- ]
- }
- ]
- }
- );
- std::unique_ptr<GpuDriverBugList> driver_bug_list(GpuDriverBugList::Create());
- EXPECT_TRUE(driver_bug_list->LoadList(json, GpuControlList::kAllOs));
- std::set<int> switching = driver_bug_list->MakeDecision(
- GpuControlList::kOsMacosx, "10.8", gpu_info());
- EXPECT_EQ(1u, switching.size());
- EXPECT_EQ(1u, switching.count(FORCE_DISCRETE_GPU));
- std::vector<uint32_t> entries;
- driver_bug_list->GetDecisionEntries(&entries, false);
- ASSERT_EQ(1u, entries.size());
- EXPECT_EQ(1u, entries[0]);
-
- driver_bug_list.reset(GpuDriverBugList::Create());
- EXPECT_TRUE(driver_bug_list->LoadList(json, GpuControlList::kAllOs));
- switching = driver_bug_list->MakeDecision(
- GpuControlList::kOsWin, "6.1", gpu_info());
- EXPECT_EQ(1u, switching.size());
- EXPECT_EQ(1u, switching.count(FORCE_INTEGRATED_GPU));
- driver_bug_list->GetDecisionEntries(&entries, false);
- ASSERT_EQ(1u, entries.size());
- EXPECT_EQ(2u, entries[0]);
-}
-
TEST_F(GpuDriverBugListTest, AppendSingleWorkaround) {
base::CommandLine command_line(0, NULL);
command_line.AppendSwitch(GpuDriverBugWorkaroundTypeToString(
@@ -156,62 +66,4 @@ TEST_F(GpuDriverBugListTest, AppendForceGPUWorkaround) {
EXPECT_EQ(1u, workarounds.count(FORCE_DISCRETE_GPU));
}
-TEST_F(GpuDriverBugListTest, NVIDIANumberingScheme) {
- const std::string json = LONG_STRING_CONST(
- {
- "name": "gpu driver bug list",
- "version": "0.1",
- "entries": [
- {
- "id": 1,
- "os": {
- "type": "win"
- },
- "vendor_id": "0x10de",
- "driver_version": {
- "op": "<=",
- "value": "8.17.12.6973"
- },
- "features": [
- "disable_d3d11"
- ]
- }
- ]
- }
- );
-
- std::unique_ptr<GpuDriverBugList> list(GpuDriverBugList::Create());
- EXPECT_TRUE(list->LoadList(json, GpuControlList::kAllOs));
-
- GPUInfo gpu_info;
- gpu_info.gl_vendor = "NVIDIA";
- gpu_info.gl_renderer = "NVIDIA GeForce GT 120 OpenGL Engine";
- gpu_info.gpu.vendor_id = 0x10de;
- gpu_info.gpu.device_id = 0x0640;
-
- // test the same driver version number
- gpu_info.driver_version = "8.17.12.6973";
- std::set<int> bugs = list->MakeDecision(
- GpuControlList::kOsWin, "7.0", gpu_info);
- EXPECT_EQ(1u, bugs.count(DISABLE_D3D11));
-
- // test a lower driver version number
- gpu_info.driver_version = "8.15.11.8647";
-
- bugs = list->MakeDecision(GpuControlList::kOsWin, "7.0", gpu_info);
- EXPECT_EQ(1u, bugs.count(DISABLE_D3D11));
-
- // test a higher driver version number
- gpu_info.driver_version = "9.18.13.2723";
- bugs = list->MakeDecision(GpuControlList::kOsWin, "7.0", gpu_info);
- EXPECT_EQ(0u, bugs.count(DISABLE_D3D11));
-}
-
-TEST_F(GpuDriverBugListTest, DuplicatedBugIDValidation) {
- std::unique_ptr<GpuDriverBugList> list(GpuDriverBugList::Create());
- EXPECT_TRUE(list->LoadList(kGpuDriverBugListJson, GpuControlList::kAllOs));
- EXPECT_FALSE(list->has_duplicated_entry_id());
-}
-
} // namespace gpu
-
diff --git a/chromium/gpu/config/gpu_driver_bug_workaround_type.h b/chromium/gpu/config/gpu_driver_bug_workaround_type.h
index 3e68fd3b472..82ff977484a 100644
--- a/chromium/gpu/config/gpu_driver_bug_workaround_type.h
+++ b/chromium/gpu/config/gpu_driver_bug_workaround_type.h
@@ -35,6 +35,8 @@
create_default_gl_context) \
GPU_OP(DECODE_ENCODE_SRGB_FOR_GENERATEMIPMAP, \
decode_encode_srgb_for_generatemipmap) \
+ GPU_OP(DISABLE_ACCELERATED_VPX_DECODE, \
+ disable_accelerated_vpx_decode) \
GPU_OP(DISABLE_ANGLE_INSTANCED_ARRAYS, \
disable_angle_instanced_arrays) \
GPU_OP(DISABLE_ASYNC_READPIXELS, \
@@ -63,6 +65,8 @@
disable_framebuffer_cmaa) \
GPU_OP(DISABLE_GL_RGB_FORMAT, \
disable_gl_rgb_format) \
+ GPU_OP(DISABLE_LARGER_THAN_SCREEN_OVERLAYS, \
+ disable_larger_than_screen_overlays) \
GPU_OP(DISABLE_MULTIMONITOR_MULTISAMPLING, \
disable_multimonitor_multisampling) \
GPU_OP(DISABLE_NV12_DXGI_VIDEO, \
@@ -205,10 +209,16 @@
validate_multisample_buffer_allocation) \
GPU_OP(WAKE_UP_GPU_BEFORE_DRAWING, \
wake_up_gpu_before_drawing) \
- GPU_OP(USE_TESTING_GPU_DRIVER_WORKAROUND, \
+ GPU_OP(USE_GPU_DRIVER_WORKAROUND_FOR_TESTING, \
use_gpu_driver_workaround_for_testing) \
+ GPU_OP(DISALLOW_LARGE_INSTANCED_DRAW, \
+ disallow_large_instanced_draw) \
+ GPU_OP(DISABLE_SOFTWARE_TO_ACCELERATED_CANVAS_UPGRADE, \
+ disable_software_to_accelerated_canvas_upgrade) \
GPU_OP(DISABLE_NON_EMPTY_POST_SUB_BUFFERS_FOR_ONSCREEN_SURFACES, \
disable_non_empty_post_sub_buffers_for_onscreen_surfaces) \
+ GPU_OP(AVOID_STENCIL_BUFFERS, \
+ avoid_stencil_buffers) \
// clang-format on
namespace gpu {
diff --git a/chromium/gpu/config/gpu_feature_type.h b/chromium/gpu/config/gpu_feature_type.h
index e52440324cc..226b3c0f350 100644
--- a/chromium/gpu/config/gpu_feature_type.h
+++ b/chromium/gpu/config/gpu_feature_type.h
@@ -13,7 +13,7 @@ namespace gpu {
enum GpuFeatureType {
GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS = 0,
GPU_FEATURE_TYPE_GPU_COMPOSITING,
- GPU_FEATURE_TYPE_WEBGL,
+ GPU_FEATURE_TYPE_ACCELERATED_WEBGL,
GPU_FEATURE_TYPE_FLASH3D,
GPU_FEATURE_TYPE_FLASH_STAGE3D,
GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE,
@@ -21,7 +21,6 @@ enum GpuFeatureType {
GPU_FEATURE_TYPE_PANEL_FITTING,
GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE,
GPU_FEATURE_TYPE_GPU_RASTERIZATION,
- GPU_FEATURE_TYPE_ACCELERATED_VPX_DECODE,
GPU_FEATURE_TYPE_WEBGL2,
NUMBER_OF_GPU_FEATURE_TYPES
};
diff --git a/chromium/gpu/config/gpu_finch_features.cc b/chromium/gpu/config/gpu_finch_features.cc
index f3b9685d473..b5b60aa934d 100644
--- a/chromium/gpu/config/gpu_finch_features.cc
+++ b/chromium/gpu/config/gpu_finch_features.cc
@@ -9,8 +9,8 @@ namespace features {
// Enable GPU Rasterization by default. This can still be overridden by
// --force-gpu-rasterization or --disable-gpu-rasterization.
-#if defined(OS_ANDROID) || defined(OS_MACOSX)
-// DefaultEnableGpuRasterization has launched on Android and Mac.
+#if defined(OS_MACOSX)
+// DefaultEnableGpuRasterization has launched on Mac.
const base::Feature kDefaultEnableGpuRasterization{
"DefaultEnableGpuRasterization", base::FEATURE_ENABLED_BY_DEFAULT};
#else
diff --git a/chromium/gpu/config/gpu_info.cc b/chromium/gpu/config/gpu_info.cc
index 025cd456522..bf4e07dbecf 100644
--- a/chromium/gpu/config/gpu_info.cc
+++ b/chromium/gpu/config/gpu_info.cc
@@ -93,6 +93,17 @@ GPUInfo::GPUInfo(const GPUInfo& other) = default;
GPUInfo::~GPUInfo() { }
+const GPUInfo::GPUDevice& GPUInfo::active_gpu() const {
+ if (gpu.active)
+ return gpu;
+ for (const GPUDevice& secondary_gpu : secondary_gpus) {
+ if (secondary_gpu.active)
+ return secondary_gpu;
+ }
+ DLOG(ERROR) << "No active GPU found, returning primary GPU.";
+ return gpu;
+}
+
void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
struct GPUInfoKnownFields {
base::TimeDelta initialization_time;
@@ -122,6 +133,7 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
int process_crash_count;
bool in_process_gpu;
bool passthrough_cmd_decoder;
+ bool supports_overlays;
CollectInfoResult basic_info_state;
CollectInfoResult context_info_state;
#if defined(OS_WIN)
@@ -180,6 +192,7 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
enumerator->AddInt("processCrashCount", process_crash_count);
enumerator->AddBool("inProcessGpu", in_process_gpu);
enumerator->AddBool("passthroughCmdDecoder", passthrough_cmd_decoder);
+ enumerator->AddBool("supportsOverlays", supports_overlays);
enumerator->AddInt("basicInfoState", basic_info_state);
enumerator->AddInt("contextInfoState", context_info_state);
#if defined(OS_WIN)
diff --git a/chromium/gpu/config/gpu_info.h b/chromium/gpu/config/gpu_info.h
index d736ee3f077..8331c582086 100644
--- a/chromium/gpu/config/gpu_info.h
+++ b/chromium/gpu/config/gpu_info.h
@@ -143,6 +143,9 @@ struct GPU_EXPORT GPUInfo {
// Secondary GPUs, for example, the integrated GPU in a dual GPU machine.
std::vector<GPUDevice> secondary_gpus;
+ // The currently active gpu.
+ const GPUDevice& active_gpu() const;
+
// The vendor of the graphics driver currently installed.
std::string driver_vendor;
@@ -217,6 +220,9 @@ struct GPU_EXPORT GPUInfo {
// True if the GPU process is using the passthrough command decoder.
bool passthrough_cmd_decoder;
+ // True if the current set of outputs supports overlays.
+ bool supports_overlays = false;
+
// The state of whether the basic/context/DxDiagnostics info is collected and
// if the collection fails or not.
CollectInfoResult basic_info_state;
diff --git a/chromium/gpu/config/gpu_info_collector.cc b/chromium/gpu/config/gpu_info_collector.cc
index 2d2dfe2db47..3889ef6a462 100644
--- a/chromium/gpu/config/gpu_info_collector.cc
+++ b/chromium/gpu/config/gpu_info_collector.cc
@@ -19,6 +19,7 @@
#include "base/strings/string_util.h"
#include "base/trace_event/trace_event.h"
#include "gpu/config/gpu_switches.h"
+#include "third_party/angle/src/gpu_info_util/SystemInfo.h" // nogncheck
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_implementation.h"
@@ -230,6 +231,7 @@ void MergeGPUInfoGL(GPUInfo* basic_gpu_info,
basic_gpu_info->in_process_gpu = context_gpu_info.in_process_gpu;
basic_gpu_info->passthrough_cmd_decoder =
context_gpu_info.passthrough_cmd_decoder;
+ basic_gpu_info->supports_overlays = context_gpu_info.supports_overlays;
basic_gpu_info->context_info_state = context_gpu_info.context_info_state;
basic_gpu_info->initialization_time = context_gpu_info.initialization_time;
basic_gpu_info->video_decode_accelerator_capabilities =
@@ -304,4 +306,43 @@ void IdentifyActiveGPU(GPUInfo* gpu_info) {
}
}
+void FillGPUInfoFromSystemInfo(GPUInfo* gpu_info,
+ angle::SystemInfo* system_info) {
+ DCHECK(system_info->primaryGPUIndex >= 0);
+
+ angle::GPUDeviceInfo* primary =
+ &system_info->gpus[system_info->primaryGPUIndex];
+
+ gpu_info->gpu.vendor_id = primary->vendorId;
+ gpu_info->gpu.device_id = primary->deviceId;
+ if (system_info->primaryGPUIndex == system_info->activeGPUIndex) {
+ gpu_info->gpu.active = true;
+ }
+
+ gpu_info->driver_vendor = std::move(primary->driverVendor);
+ gpu_info->driver_version = std::move(primary->driverVersion);
+ gpu_info->driver_date = std::move(primary->driverDate);
+
+ for (size_t i = 0; i < system_info->gpus.size(); i++) {
+ if (static_cast<int>(i) == system_info->primaryGPUIndex) {
+ continue;
+ }
+
+ GPUInfo::GPUDevice device;
+ device.vendor_id = system_info->gpus[i].vendorId;
+ device.device_id = system_info->gpus[i].deviceId;
+ if (static_cast<int>(i) == system_info->activeGPUIndex) {
+ device.active = true;
+ }
+
+ gpu_info->secondary_gpus.push_back(device);
+ }
+
+ gpu_info->optimus = system_info->isOptimus;
+ gpu_info->amd_switchable = system_info->isAMDSwitchable;
+
+ gpu_info->machine_model_name = system_info->machineModelName;
+ gpu_info->machine_model_version = system_info->machineModelVersion;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_info_collector.h b/chromium/gpu/config/gpu_info_collector.h
index 840dc1d3be0..aa873336213 100644
--- a/chromium/gpu/config/gpu_info_collector.h
+++ b/chromium/gpu/config/gpu_info_collector.h
@@ -11,11 +11,11 @@
#include "gpu/config/gpu_info.h"
#include "gpu/gpu_export.h"
-namespace gpu {
+namespace angle {
+struct SystemInfo;
+}
-// Collect GPU vendor_id and device ID.
-GPU_EXPORT CollectInfoResult CollectGpuID(uint32_t* vendor_id,
- uint32_t* device_id);
+namespace gpu {
// Collects basic GPU info without creating a GL/DirectX context (and without
// the danger of crashing), including vendor_id and device_id.
@@ -52,6 +52,11 @@ GPU_EXPORT void MergeGPUInfoGL(GPUInfo* basic_gpu_info,
// identify the active GPU based on GL strings.
GPU_EXPORT void IdentifyActiveGPU(GPUInfo* gpu_info);
+// Helper function to convert data from ANGLE's system info gathering library
+// into a GPUInfo
+void FillGPUInfoFromSystemInfo(GPUInfo* gpu_info,
+ angle::SystemInfo* system_info);
+
} // namespace gpu
#endif // GPU_CONFIG_GPU_INFO_COLLECTOR_H_
diff --git a/chromium/gpu/config/gpu_info_collector_android.cc b/chromium/gpu/config/gpu_info_collector_android.cc
index faeb664b63c..e17a32ac2a8 100644
--- a/chromium/gpu/config/gpu_info_collector_android.cc
+++ b/chromium/gpu/config/gpu_info_collector_android.cc
@@ -271,13 +271,6 @@ CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
return CollectBasicGraphicsInfo(gpu_info);
}
-CollectInfoResult CollectGpuID(uint32_t* vendor_id, uint32_t* device_id) {
- DCHECK(vendor_id && device_id);
- *vendor_id = 0;
- *device_id = 0;
- return kCollectInfoNonFatalFailure;
-}
-
CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
// When command buffer is compiled as a standalone library, the process might
// not have a Java environment.
diff --git a/chromium/gpu/config/gpu_info_collector_linux.cc b/chromium/gpu/config/gpu_info_collector_linux.cc
index fbd46de372d..a5d1b66cfe6 100644
--- a/chromium/gpu/config/gpu_info_collector_linux.cc
+++ b/chromium/gpu/config/gpu_info_collector_linux.cc
@@ -2,185 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "gpu/config/gpu_info_collector_linux.h"
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <vector>
-
#include "base/command_line.h"
-#include "base/files/file_util.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
-#include "base/strings/string_tokenizer.h"
-#include "base/strings/string_util.h"
#include "base/trace_event/trace_event.h"
#include "gpu/config/gpu_info_collector.h"
#include "gpu/config/gpu_switches.h"
+#include "third_party/angle/src/gpu_info_util/SystemInfo.h"
#include "third_party/re2/src/re2/re2.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_implementation.h"
-#include "ui/gl/gl_surface.h"
-#include "ui/gl/gl_switches.h"
-
-#if defined(USE_LIBPCI)
-#include "library_loaders/libpci.h" // nogncheck
-#endif
namespace gpu {
-namespace {
-
-#if defined(USE_LIBPCI)
-// This checks if a system supports PCI bus.
-// We check the existence of /sys/bus/pci or /sys/bug/pci_express.
-bool IsPciSupported() {
- const base::FilePath pci_path("/sys/bus/pci/");
- const base::FilePath pcie_path("/sys/bus/pci_express/");
- return (base::PathExists(pci_path) ||
- base::PathExists(pcie_path));
-}
-#endif // defined(USE_LIBPCI)
-
-// Scan /sys/module/amdgpu/version.
-// Return empty string on failing.
-std::string CollectDriverVersionAMDBrahma() {
- const base::FilePath ati_file_path("/sys/module/amdgpu/version");
- if (!base::PathExists(ati_file_path))
- return std::string();
- std::string contents;
- if (!base::ReadFileToString(ati_file_path, &contents))
- return std::string();
- size_t begin = contents.find_first_of("0123456789");
- if (begin != std::string::npos) {
- size_t end = contents.find_first_not_of("0123456789.", begin);
- if (end == std::string::npos)
- return contents.substr(begin);
- else
- return contents.substr(begin, end - begin);
- }
- return std::string();
-}
-
-// Scan /etc/ati/amdpcsdb.default for "ReleaseVersion".
-// Return empty string on failing.
-std::string CollectDriverVersionAMDCatalyst() {
- const base::FilePath ati_file_path("/etc/ati/amdpcsdb.default");
- if (!base::PathExists(ati_file_path))
- return std::string();
- std::string contents;
- if (!base::ReadFileToString(ati_file_path, &contents))
- return std::string();
- base::StringTokenizer t(contents, "\r\n");
- while (t.GetNext()) {
- std::string line = t.token();
- if (base::StartsWith(line, "ReleaseVersion=",
- base::CompareCase::SENSITIVE)) {
- size_t begin = line.find_first_of("0123456789");
- if (begin != std::string::npos) {
- size_t end = line.find_first_not_of("0123456789.", begin);
- if (end == std::string::npos)
- return line.substr(begin);
- else
- return line.substr(begin, end - begin);
- }
- }
- }
- return std::string();
-}
-
-const uint32_t kVendorIDIntel = 0x8086;
-const uint32_t kVendorIDNVidia = 0x10de;
-const uint32_t kVendorIDAMD = 0x1002;
-
-CollectInfoResult CollectPCIVideoCardInfo(GPUInfo* gpu_info) {
- DCHECK(gpu_info);
-
-#if !defined(USE_LIBPCI)
- return kCollectInfoNonFatalFailure;
-#else
-
- if (IsPciSupported() == false) {
- VLOG(1) << "PCI bus scanning is not supported";
- return kCollectInfoNonFatalFailure;
- }
-
- // TODO(zmo): be more flexible about library name.
- LibPciLoader libpci_loader;
- if (!libpci_loader.Load("libpci.so.3") &&
- !libpci_loader.Load("libpci.so")) {
- VLOG(1) << "Failed to locate libpci";
- return kCollectInfoNonFatalFailure;
- }
-
- pci_access* access = (libpci_loader.pci_alloc)();
- DCHECK(access != NULL);
- (libpci_loader.pci_init)(access);
- (libpci_loader.pci_scan_bus)(access);
- bool primary_gpu_identified = false;
- for (pci_dev* device = access->devices;
- device != NULL; device = device->next) {
- // Fill the IDs and class fields.
- (libpci_loader.pci_fill_info)(device, 33);
- bool is_gpu = false;
- switch (device->device_class) {
- case PCI_CLASS_DISPLAY_VGA:
- case PCI_CLASS_DISPLAY_XGA:
- case PCI_CLASS_DISPLAY_3D:
- is_gpu = true;
- break;
- case PCI_CLASS_DISPLAY_OTHER:
- default:
- break;
- }
- if (!is_gpu)
- continue;
- if (device->vendor_id == 0 || device->device_id == 0)
- continue;
-
- GPUInfo::GPUDevice gpu;
- gpu.vendor_id = device->vendor_id;
- gpu.device_id = device->device_id;
-
- if (!primary_gpu_identified) {
- primary_gpu_identified = true;
- gpu_info->gpu = gpu;
- } else {
- // TODO(zmo): if there are multiple GPUs, we assume the non Intel
- // one is primary. Revisit this logic because we actually don't know
- // which GPU we are using at this point.
- if (gpu_info->gpu.vendor_id == kVendorIDIntel &&
- gpu.vendor_id != kVendorIDIntel) {
- gpu_info->secondary_gpus.push_back(gpu_info->gpu);
- gpu_info->gpu = gpu;
- } else {
- gpu_info->secondary_gpus.push_back(gpu);
- }
- }
- }
-
- // Detect Optimus or AMD Switchable GPU.
- if (gpu_info->secondary_gpus.size() == 1 &&
- gpu_info->secondary_gpus[0].vendor_id == kVendorIDIntel) {
- if (gpu_info->gpu.vendor_id == kVendorIDNVidia)
- gpu_info->optimus = true;
- if (gpu_info->gpu.vendor_id == kVendorIDAMD)
- gpu_info->amd_switchable = true;
- }
-
- (libpci_loader.pci_cleanup)(access);
- if (!primary_gpu_identified)
- return kCollectInfoNonFatalFailure;
- return kCollectInfoSuccess;
-#endif
-}
-
-} // namespace anonymous
-
CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
@@ -191,68 +22,18 @@ CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
return result;
}
-CollectInfoResult CollectGpuID(uint32_t* vendor_id, uint32_t* device_id) {
- DCHECK(vendor_id && device_id);
- *vendor_id = 0;
- *device_id = 0;
-
- GPUInfo gpu_info;
- CollectInfoResult result = CollectPCIVideoCardInfo(&gpu_info);
- if (result == kCollectInfoSuccess) {
- *vendor_id = gpu_info.gpu.vendor_id;
- *device_id = gpu_info.gpu.device_id;
- }
- return result;
-}
-
CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
- CollectInfoResult result = CollectPCIVideoCardInfo(gpu_info);
-
- std::string driver_version;
- switch (gpu_info->gpu.vendor_id) {
- case kVendorIDAMD:
- driver_version = CollectDriverVersionAMDBrahma();
- if (!driver_version.empty()) {
- gpu_info->driver_vendor = "ATI / AMD (Brahma)";
- gpu_info->driver_version = driver_version;
- } else {
- driver_version = CollectDriverVersionAMDCatalyst();
- if (!driver_version.empty()) {
- gpu_info->driver_vendor = "ATI / AMD (Catalyst)";
- gpu_info->driver_version = driver_version;
- }
- }
- break;
- case kVendorIDNVidia:
- driver_version = CollectDriverVersionNVidia();
- if (!driver_version.empty()) {
- gpu_info->driver_vendor = "NVIDIA";
- gpu_info->driver_version = driver_version;
- }
- break;
- case kVendorIDIntel:
- // In dual-GPU cases, sometimes PCI scan only gives us the
- // integrated GPU (i.e., the Intel one).
- if (gpu_info->secondary_gpus.size() == 0) {
- driver_version = CollectDriverVersionNVidia();
- if (!driver_version.empty()) {
- gpu_info->driver_vendor = "NVIDIA";
- gpu_info->driver_version = driver_version;
- gpu_info->optimus = true;
- // Put Intel to the secondary GPU list.
- gpu_info->secondary_gpus.push_back(gpu_info->gpu);
- // Put NVIDIA as the primary GPU.
- gpu_info->gpu.vendor_id = kVendorIDNVidia;
- gpu_info->gpu.device_id = 0; // Unknown Device.
- }
- }
- break;
+ angle::SystemInfo system_info;
+ if (angle::GetSystemInfo(&system_info)) {
+ gpu_info->basic_info_state = kCollectInfoSuccess;
+ FillGPUInfoFromSystemInfo(gpu_info, &system_info);
+ } else {
+ gpu_info->basic_info_state = kCollectInfoNonFatalFailure;
}
- gpu_info->basic_info_state = result;
- return result;
+ return gpu_info->basic_info_state;
}
CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info) {
diff --git a/chromium/gpu/config/gpu_info_collector_linux.h b/chromium/gpu/config/gpu_info_collector_linux.h
deleted file mode 100644
index 2fca7fb3ea0..00000000000
--- a/chromium/gpu/config/gpu_info_collector_linux.h
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_CONFIG_GPU_INFO_COLLECTOR_LINUX_H_
-#define GPU_CONFIG_GPU_INFO_COLLECTOR_LINUX_H_
-
-#include <string>
-
-namespace gpu {
-
-// Queries for the driver version. Returns an empty string on failure.
-std::string CollectDriverVersionNVidia();
-
-} // namespace gpu
-
-#endif // GPU_CONFIG_GPU_INFO_COLLECTOR_LINUX_H_
diff --git a/chromium/gpu/config/gpu_info_collector_mac.mm b/chromium/gpu/config/gpu_info_collector_mac.mm
index 7fecec0ec99..ab6f732fe2e 100644
--- a/chromium/gpu/config/gpu_info_collector_mac.mm
+++ b/chromium/gpu/config/gpu_info_collector_mac.mm
@@ -4,174 +4,11 @@
#include "gpu/config/gpu_info_collector.h"
-#include <vector>
-
-#include "base/logging.h"
-#include "base/mac/mac_util.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "base/mac/scoped_ioobject.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_piece.h"
-#include "base/strings/string_util.h"
-#include "base/strings/sys_string_conversions.h"
#include "base/trace_event/trace_event.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_implementation.h"
-
-#import <Cocoa/Cocoa.h>
-#import <Foundation/Foundation.h>
-#import <IOKit/IOKitLib.h>
-#include <stddef.h>
-#include <stdint.h>
+#include "third_party/angle/src/gpu_info_util/SystemInfo.h"
namespace gpu {
-namespace {
-
-const UInt32 kVendorIDIntel = 0x8086;
-const UInt32 kVendorIDNVidia = 0x10de;
-const UInt32 kVendorIDAMD = 0x1002;
-
-// Return 0 if we couldn't find the property.
-// The property values we use should not be 0, so it's OK to use 0 as failure.
-UInt32 GetEntryProperty(io_registry_entry_t entry, CFStringRef property_name) {
- base::ScopedCFTypeRef<CFDataRef> data_ref(
- static_cast<CFDataRef>(IORegistryEntrySearchCFProperty(
- entry,
- kIOServicePlane,
- property_name,
- kCFAllocatorDefault,
- kIORegistryIterateRecursively | kIORegistryIterateParents)));
- if (!data_ref)
- return 0;
-
- UInt32 value = 0;
- const UInt32* value_pointer =
- reinterpret_cast<const UInt32*>(CFDataGetBytePtr(data_ref));
- if (value_pointer != NULL)
- value = *value_pointer;
- return value;
-}
-
-// CGDisplayIOServicePort is deprecated as of macOS 10.9, but has no
-// replacement.
-// https://crbug.com/650837
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
-
-// Find the info of the current GPU.
-GPUInfo::GPUDevice GetActiveGPU() {
- GPUInfo::GPUDevice gpu;
- io_registry_entry_t dsp_port = CGDisplayIOServicePort(kCGDirectMainDisplay);
- gpu.vendor_id = GetEntryProperty(dsp_port, CFSTR("vendor-id"));
- gpu.device_id = GetEntryProperty(dsp_port, CFSTR("device-id"));
- return gpu;
-}
-
-#pragma clang diagnostic pop
-
-// Scan IO registry for PCI video cards.
-CollectInfoResult CollectPCIVideoCardInfo(GPUInfo* gpu_info) {
- DCHECK(gpu_info);
- GPUInfo::GPUDevice active_gpu = GetActiveGPU();
-
- // Collect all GPUs' info.
- // match_dictionary will be consumed by IOServiceGetMatchingServices, no need
- // to release it.
- CFMutableDictionaryRef match_dictionary = IOServiceMatching("IOPCIDevice");
- io_iterator_t entry_iterator;
- std::vector<GPUInfo::GPUDevice> gpu_list;
- if (IOServiceGetMatchingServices(kIOMasterPortDefault,
- match_dictionary,
- &entry_iterator) == kIOReturnSuccess) {
-
- base::mac::ScopedIOObject<io_registry_entry_t> entry;
- while (entry.reset(IOIteratorNext(entry_iterator)), entry) {
- GPUInfo::GPUDevice gpu;
- if (GetEntryProperty(entry, CFSTR("class-code")) != 0x30000) {
- // 0x30000 : DISPLAY_VGA
- continue;
- }
- gpu.vendor_id = GetEntryProperty(entry, CFSTR("vendor-id"));
- gpu.device_id = GetEntryProperty(entry, CFSTR("device-id"));
- if (gpu.vendor_id && gpu.device_id) {
- if (gpu.vendor_id == active_gpu.vendor_id &&
- gpu.device_id == active_gpu.device_id) {
- gpu.active = true;
- }
- gpu_list.push_back(gpu);
- }
- }
- IOObjectRelease(entry_iterator);
- }
-
- switch (gpu_list.size()) {
- case 0:
- return kCollectInfoNonFatalFailure;
- case 1:
- gpu_info->gpu = gpu_list[0];
- break;
- case 2:
- {
- int integrated = -1;
- int discrete = -1;
- if (gpu_list[0].vendor_id == kVendorIDIntel)
- integrated = 0;
- else if (gpu_list[1].vendor_id == kVendorIDIntel)
- integrated = 1;
- if (integrated >= 0) {
- switch (gpu_list[1 - integrated].vendor_id) {
- case kVendorIDAMD:
- gpu_info->amd_switchable = true;
- discrete = 1 - integrated;
- break;
- case kVendorIDNVidia:
- gpu_info->optimus = true;
- discrete = 1 - integrated;
- break;
- default:
- break;
- }
- }
- if (integrated >= 0 && discrete >= 0) {
- // We always put discrete GPU as primary for blacklisting purpose.
- gpu_info->gpu = gpu_list[discrete];
- gpu_info->secondary_gpus.push_back(gpu_list[integrated]);
- break;
- }
- // If it's not optimus or amd_switchable, we put the current GPU as
- // primary. Fall through to default.
- }
- default:
- {
- size_t current = gpu_list.size();
- for (size_t i = 0; i < gpu_list.size(); ++i) {
- if (gpu_list[i].active) {
- current = i;
- break;
- }
- }
- if (current == gpu_list.size()) {
- // If we fail to identify the current GPU, select any one as primary.
- current = 0;
- }
- for (size_t i = 0; i < gpu_list.size(); ++i) {
- if (i == current)
- gpu_info->gpu = gpu_list[i];
- else
- gpu_info->secondary_gpus.push_back(gpu_list[i]);
- }
- }
- break;
- }
- if (gpu_info->gpu.vendor_id == 0 || gpu_info->gpu.device_id == 0)
- return kCollectInfoNonFatalFailure;
- return kCollectInfoSuccess;
-}
-
-} // namespace anonymous
-
CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
@@ -182,31 +19,18 @@ CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
return result;
}
-CollectInfoResult CollectGpuID(uint32_t* vendor_id, uint32_t* device_id) {
- DCHECK(vendor_id && device_id);
-
- GPUInfo::GPUDevice gpu = GetActiveGPU();
- *vendor_id = gpu.vendor_id;
- *device_id = gpu.device_id;
-
- if (*vendor_id != 0 && *device_id != 0)
- return kCollectInfoSuccess;
- return kCollectInfoNonFatalFailure;
-}
-
CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
- int32_t model_major = 0, model_minor = 0;
- base::mac::ParseModelIdentifier(base::mac::GetModelIdentifier(),
- &gpu_info->machine_model_name,
- &model_major, &model_minor);
- gpu_info->machine_model_version =
- base::IntToString(model_major) + "." + base::IntToString(model_minor);
+ angle::SystemInfo system_info;
+ if (angle::GetSystemInfo(&system_info)) {
+ gpu_info->basic_info_state = kCollectInfoSuccess;
+ FillGPUInfoFromSystemInfo(gpu_info, &system_info);
+ } else {
+ gpu_info->basic_info_state = kCollectInfoNonFatalFailure;
+ }
- CollectInfoResult result = CollectPCIVideoCardInfo(gpu_info);
- gpu_info->basic_info_state = result;
- return result;
+ return gpu_info->basic_info_state;
}
CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info) {
diff --git a/chromium/gpu/config/gpu_info_collector_ozone.cc b/chromium/gpu/config/gpu_info_collector_ozone.cc
deleted file mode 100644
index d5cc632aae4..00000000000
--- a/chromium/gpu/config/gpu_info_collector_ozone.cc
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/logging.h"
-#include "gpu/config/gpu_info_collector_linux.h"
-
-namespace gpu {
-
-std::string CollectDriverVersionNVidia() {
- return std::string();
-}
-
-} // namespace gpu
diff --git a/chromium/gpu/config/gpu_info_collector_win.cc b/chromium/gpu/config/gpu_info_collector_win.cc
index 8dab759f23c..febd6e014ac 100644
--- a/chromium/gpu/config/gpu_info_collector_win.cc
+++ b/chromium/gpu/config/gpu_info_collector_win.cc
@@ -36,7 +36,6 @@
#include "base/win/scoped_com_initializer.h"
#include "base/win/scoped_comptr.h"
#include "base/win/windows_version.h"
-#include "third_party/libxml/chromium/libxml_utils.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface_egl.h"
@@ -305,30 +304,6 @@ CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
return kCollectInfoSuccess;
}
-CollectInfoResult CollectGpuID(uint32_t* vendor_id, uint32_t* device_id) {
- DCHECK(vendor_id && device_id);
- *vendor_id = 0;
- *device_id = 0;
-
- // Taken from http://www.nvidia.com/object/device_ids.html
- DISPLAY_DEVICE dd;
- dd.cb = sizeof(DISPLAY_DEVICE);
- std::wstring id;
- for (int i = 0; EnumDisplayDevices(NULL, i, &dd, 0); ++i) {
- if (dd.StateFlags & DISPLAY_DEVICE_PRIMARY_DEVICE) {
- id = dd.DeviceID;
- break;
- }
- }
-
- if (id.length() > 20) {
- DeviceIDToVendorAndDevice(id, vendor_id, device_id);
- if (*vendor_id != 0 && *device_id != 0)
- return kCollectInfoSuccess;
- }
- return kCollectInfoNonFatalFailure;
-}
-
CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
TRACE_EVENT0("gpu", "CollectPreliminaryGraphicsInfo");
diff --git a/chromium/gpu/config/gpu_info_collector_x11.cc b/chromium/gpu/config/gpu_info_collector_x11.cc
deleted file mode 100644
index 38fa72f74dc..00000000000
--- a/chromium/gpu/config/gpu_info_collector_x11.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <X11/Xlib.h>
-
-#include "base/logging.h"
-#include "gpu/config/gpu_info_collector_linux.h"
-#include "third_party/libXNVCtrl/NVCtrl.h"
-#include "third_party/libXNVCtrl/NVCtrlLib.h"
-#include "ui/gfx/x/x11_types.h"
-
-namespace gpu {
-
-// Use NVCtrl extention to query NV driver version.
-// Return empty string on failing.
-std::string CollectDriverVersionNVidia() {
- Display* display = gfx::GetXDisplay();
- if (!display) {
- LOG(ERROR) << "XOpenDisplay failed.";
- return std::string();
- }
- int event_base = 0, error_base = 0;
- if (!XNVCTRLQueryExtension(display, &event_base, &error_base)) {
- VLOG(1) << "NVCtrl extension does not exist.";
- return std::string();
- }
- int screen_count = ScreenCount(display);
- for (int screen = 0; screen < screen_count; ++screen) {
- char* buffer = NULL;
- if (XNVCTRLIsNvScreen(display, screen) &&
- XNVCTRLQueryStringAttribute(display, screen, 0,
- NV_CTRL_STRING_NVIDIA_DRIVER_VERSION,
- &buffer)) {
- std::string driver_version(buffer);
- XFree(buffer);
- return driver_version;
- }
- }
- return std::string();
-}
-
-} // namespace gpu
diff --git a/chromium/gpu/config/gpu_test_config.cc b/chromium/gpu/config/gpu_test_config.cc
index 8d5cb59f42d..d63e283a638 100644
--- a/chromium/gpu/config/gpu_test_config.cc
+++ b/chromium/gpu/config/gpu_test_config.cc
@@ -255,8 +255,7 @@ bool GPUTestBotConfig::LoadCurrentConfig(const GPUInfo* gpu_info) {
bool rt;
if (gpu_info == NULL) {
GPUInfo my_gpu_info;
- CollectInfoResult result = CollectGpuID(
- &my_gpu_info.gpu.vendor_id, &my_gpu_info.gpu.device_id);
+ CollectInfoResult result = CollectBasicGraphicsInfo(&my_gpu_info);
if (result != kCollectInfoSuccess) {
LOG(ERROR) << "Fail to identify GPU";
DisableGPUInfoValidation();
diff --git a/chromium/gpu/config/gpu_util.cc b/chromium/gpu/config/gpu_util.cc
index a305c29698a..ad57fc043ac 100644
--- a/chromium/gpu/config/gpu_util.cc
+++ b/chromium/gpu/config/gpu_util.cc
@@ -8,14 +8,18 @@
#include <vector>
#include "base/command_line.h"
+#include "base/debug/crash_logging.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "gpu/config/gpu_blacklist.h"
-#include "gpu/config/gpu_control_list_jsons.h"
+#include "gpu/config/gpu_crash_keys.h"
#include "gpu/config/gpu_driver_bug_list.h"
+#include "gpu/config/gpu_driver_bug_workaround_type.h"
#include "gpu/config/gpu_feature_type.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/config/gpu_info_collector.h"
@@ -75,10 +79,10 @@ GpuFeatureStatus GetGpuRasterizationFeatureStatus(
return kGpuFeatureStatusBlacklisted;
#if defined(OS_ANDROID)
- // We can't use GPU rasterization on low-end devices, because the Ganesh
- // cache would consume too much memory.
- if (base::SysInfo::IsLowEndDevice())
- return kGpuFeatureStatusBlacklisted;
+ // GPU Raster is always enabled on non-low-end Android. On low-end, it is
+ // controlled by a Finch experiment.
+ if (!base::SysInfo::IsLowEndDevice())
+ return kGpuFeatureStatusEnabled;
#endif // defined(OS_ANDROID)
// Gpu Rasterization on platforms that are not fully enabled is controlled by
@@ -94,8 +98,6 @@ GpuFeatureStatus GetGpuRasterizationFeatureStatus(
void ApplyGpuDriverBugWorkarounds(const GPUInfo& gpu_info,
base::CommandLine* command_line) {
std::unique_ptr<GpuDriverBugList> list(GpuDriverBugList::Create());
- list->LoadList(kGpuDriverBugListJson,
- GpuControlList::kCurrentOsOnly);
std::set<int> workarounds = list->MakeDecision(
GpuControlList::kOsAny, std::string(), gpu_info);
GpuDriverBugList::AppendWorkaroundsFromCommandLine(
@@ -105,23 +107,28 @@ void ApplyGpuDriverBugWorkarounds(const GPUInfo& gpu_info,
IntSetToString(workarounds));
}
- std::set<std::string> disabled_extensions;
std::vector<std::string> buglist_disabled_extensions =
list->GetDisabledExtensions();
- disabled_extensions.insert(buglist_disabled_extensions.begin(),
- buglist_disabled_extensions.end());
+ std::set<base::StringPiece> disabled_extensions(
+ buglist_disabled_extensions.begin(), buglist_disabled_extensions.end());
+ // Must be outside if statement to remain in scope (referenced by
+ // |disabled_extensions|).
+ std::string command_line_disable_gl_extensions;
if (command_line->HasSwitch(switches::kDisableGLExtensions)) {
- std::vector<std::string> existing_disabled_extensions = base::SplitString(
- command_line->GetSwitchValueASCII(switches::kDisableGLExtensions), " ",
- base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+ command_line_disable_gl_extensions =
+ command_line->GetSwitchValueASCII(switches::kDisableGLExtensions);
+ std::vector<base::StringPiece> existing_disabled_extensions =
+ base::SplitStringPiece(command_line_disable_gl_extensions, " ",
+ base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
disabled_extensions.insert(existing_disabled_extensions.begin(),
existing_disabled_extensions.end());
}
if (!disabled_extensions.empty()) {
- std::vector<std::string> v(disabled_extensions.begin(),
- disabled_extensions.end());
+ std::vector<base::StringPiece> v(disabled_extensions.begin(),
+ disabled_extensions.end());
command_line->AppendSwitchASCII(switches::kDisableGLExtensions,
base::JoinString(v, " "));
}
@@ -187,7 +194,6 @@ GpuFeatureInfo GetGpuFeatureInfo(const GPUInfo& gpu_info,
std::set<int> blacklisted_features;
if (!command_line.HasSwitch(switches::kIgnoreGpuBlacklist)) {
std::unique_ptr<GpuBlacklist> list(GpuBlacklist::Create());
- list->LoadList(kSoftwareRenderingListJson, GpuControlList::kCurrentOsOnly);
blacklisted_features =
list->MakeDecision(GpuControlList::kOsAny, std::string(), gpu_info);
}
@@ -199,4 +205,27 @@ GpuFeatureInfo GetGpuFeatureInfo(const GPUInfo& gpu_info,
return gpu_feature_info;
}
+void SetKeysForCrashLogging(const GPUInfo& gpu_info) {
+#if !defined(OS_ANDROID)
+ base::debug::SetCrashKeyValue(
+ crash_keys::kGPUVendorID,
+ base::StringPrintf("0x%04x", gpu_info.gpu.vendor_id));
+ base::debug::SetCrashKeyValue(
+ crash_keys::kGPUDeviceID,
+ base::StringPrintf("0x%04x", gpu_info.gpu.device_id));
+#endif
+ base::debug::SetCrashKeyValue(crash_keys::kGPUDriverVersion,
+ gpu_info.driver_version);
+ base::debug::SetCrashKeyValue(crash_keys::kGPUPixelShaderVersion,
+ gpu_info.pixel_shader_version);
+ base::debug::SetCrashKeyValue(crash_keys::kGPUVertexShaderVersion,
+ gpu_info.vertex_shader_version);
+#if defined(OS_MACOSX)
+ base::debug::SetCrashKeyValue(crash_keys::kGPUGLVersion, gpu_info.gl_version);
+#elif defined(OS_POSIX)
+ base::debug::SetCrashKeyValue(crash_keys::kGPUVendor, gpu_info.gl_vendor);
+ base::debug::SetCrashKeyValue(crash_keys::kGPURenderer, gpu_info.gl_renderer);
+#endif
+}
+
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_util.h b/chromium/gpu/config/gpu_util.h
index f0cd1794316..b6aa37e4c2b 100644
--- a/chromium/gpu/config/gpu_util.h
+++ b/chromium/gpu/config/gpu_util.h
@@ -47,6 +47,8 @@ GPU_EXPORT GpuFeatureInfo
GetGpuFeatureInfo(const GPUInfo& gpu_info,
const base::CommandLine& command_line);
+GPU_EXPORT void SetKeysForCrashLogging(const GPUInfo& gpu_info);
+
} // namespace gpu
#endif // GPU_CONFIG_GPU_UTIL_H_
diff --git a/chromium/gpu/config/gpu_util_unittest.cc b/chromium/gpu/config/gpu_util_unittest.cc
index 4b28ac649d1..271d821efd4 100644
--- a/chromium/gpu/config/gpu_util_unittest.cc
+++ b/chromium/gpu/config/gpu_util_unittest.cc
@@ -8,7 +8,6 @@
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
-#include "gpu/config/gpu_control_list_jsons.h"
#include "gpu/config/gpu_driver_bug_list.h"
#include "gpu/config/gpu_info.h"
#include "gpu/config/gpu_info_collector.h"
@@ -44,7 +43,6 @@ TEST(GpuUtilTest,
GPUInfo gpu_info;
CollectBasicGraphicsInfo(&gpu_info);
std::unique_ptr<GpuDriverBugList> list(GpuDriverBugList::Create());
- list->LoadList(kGpuDriverBugListJson, GpuControlList::kCurrentOsOnly);
list->MakeDecision(GpuControlList::kOsAny, std::string(), gpu_info);
std::vector<std::string> expected_disabled_extensions =
list->GetDisabledExtensions();
diff --git a/chromium/gpu/config/process_json.py b/chromium/gpu/config/process_json.py
new file mode 100755
index 00000000000..05ec4a26e17
--- /dev/null
+++ b/chromium/gpu/config/process_json.py
@@ -0,0 +1,803 @@
+#!/usr/bin/env python
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import platform
+import sys
+from optparse import OptionParser
+from subprocess import call
+
+"""Generate data struct from GPU blacklist and driver bug workarounds json."""
+
+_LICENSE = """// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"""
+
+_DO_NOT_EDIT_WARNING = """// This file is auto-generated from
+// gpu/config/process_json.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+"""
+
+
+def load_software_rendering_list_features(feature_type_filename):
+ header_file = open(feature_type_filename, 'rb')
+ start = False
+ features = []
+ for line in header_file:
+ if line.startswith('enum GpuFeatureType {'):
+ assert not start
+ start = True
+ continue
+ if not start:
+ continue
+ line = line.strip()
+ line = line.split(' ', 1)[0]
+ line = line.split(',', 1)[0]
+ if line.startswith('NUMBER_OF_GPU_FEATURE_TYPES'):
+ assert start
+ start = False
+ break
+ elif line.startswith('GPU_FEATURE_TYPE_'):
+ name = line[len('GPU_FEATURE_TYPE_'):]
+ features.append(name.lower())
+ else:
+ assert False
+ assert not start
+ assert len(features) > 0
+ header_file.close()
+ return features
+
+
+def load_gpu_driver_bug_workarounds(workaround_type_filename):
+ header_file = open(workaround_type_filename, 'rb')
+ start = False
+ workaround = None
+ workarounds = []
+ for line in header_file:
+ if line.startswith('#define GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)'):
+ assert not start
+ start = True
+ continue
+ if not start:
+ continue
+ line = line.strip()
+ if line.startswith('GPU_OP('):
+ assert not workaround
+ workaround = line[len('GPU_OP('):]
+ workaround = workaround.split(',', 1)[0].lower()
+ continue
+ if workaround:
+ line = line.split(')', 1)[0]
+ assert line == workaround
+ workarounds.append(line)
+ workaround = None
+ continue
+ start = False
+ break
+ assert not start
+ assert len(workarounds) > 0
+ header_file.close()
+ return workarounds
+
+
+def get_feature_set(features, total_feature_set):
+ assert len(features) > 0
+ feature_set = set([])
+ for feature in features:
+ if feature == 'all':
+ feature_set = set(total_feature_set)
+ elif isinstance(feature, dict):
+ for key in feature:
+ if key == 'exceptions':
+ for exception in feature['exceptions']:
+ assert exception in feature_set
+ feature_set.remove(exception)
+ else:
+ raise KeyException('only exceptions are allowed')
+ else:
+ assert feature in total_feature_set
+ feature_set.add(feature)
+ return feature_set
+
+
+def write_features(entry_id, feature_set, feature_name_prefix,
+ data_helper_file):
+ data_helper_file.write('const int kFeatureListForEntry%d[%d] = {\n' %
+ (entry_id, len(feature_set)))
+ for feature in feature_set:
+ data_helper_file.write(feature_name_prefix + feature.upper())
+ data_helper_file.write(',\n')
+ data_helper_file.write('};\n\n')
+
+
+def write_disabled_extension_list(entry_id, data, data_file, data_helper_file):
+ if data:
+ var_name = 'kDisabledExtensionsForEntry' + str(entry_id)
+ # define the list
+ data_helper_file.write('const char* %s[%d] = {\n' % (var_name, len(data)))
+ for item in data:
+ write_string(item, data_helper_file)
+ data_helper_file.write(',\n')
+ data_helper_file.write('};\n\n')
+ # use the list
+ data_file.write('arraysize(%s), // DisabledExtensions size\n' % var_name)
+ data_file.write('%s, // DisabledExtensions\n' % var_name)
+ else:
+ data_file.write('0, // DisabledExtensions size\n')
+ data_file.write('nullptr, // DisabledExtensions\n')
+
+
+def write_gl_strings(entry_id, is_exception, exception_id, data,
+ data_file, data_helper_file):
+ if data:
+ var_name = 'kGLStringsForEntry' + str(entry_id)
+ if is_exception:
+ var_name += 'Exception' + str(exception_id)
+ # define the GL strings
+ data_helper_file.write('const GpuControlList::GLStrings %s = {\n' %
+ var_name)
+ for item in data:
+ write_string(item, data_helper_file)
+ data_helper_file.write(',\n')
+ data_helper_file.write('};\n\n')
+ # reference the GL strings
+ data_file.write('&%s, // GL strings\n' % var_name)
+ else:
+ data_file.write('nullptr, // GL strings\n')
+
+
+def write_version(version_info, name_tag, data_file):
+ op = ''
+ style = ''
+ version1 = ''
+ version2 = ''
+ if version_info:
+ op = version_info['op']
+ if 'style' in version_info:
+ style = version_info['style']
+ version1 = version_info['value']
+ if 'value2' in version_info:
+ version2 = version_info['value2']
+ data_file.write('{')
+ op_map = {
+ '=': 'kEQ',
+ '<': 'kLT',
+ '<=': 'kLE',
+ '>': 'kGT',
+ '>=': 'kGE',
+ 'any': 'kAny',
+ 'between': 'kBetween',
+ '': 'kUnknown',
+ }
+ assert op_map.has_key(op)
+ data_file.write('GpuControlList::%s, ' % op_map[op])
+ style_map = {
+ 'lexical': 'Lexical',
+ 'numerical': 'Numerical',
+ '': 'Numerical',
+ }
+ assert style_map.has_key(style)
+ data_file.write('GpuControlList::kVersionStyle%s, ' % style_map[style])
+ write_string(version1, data_file)
+ data_file.write(', ')
+ write_string(version2, data_file)
+ data_file.write('}, // %s\n' % name_tag)
+
+
+def write_driver_info(entry_id, is_exception, exception_id, driver_vendor,
+ driver_version, driver_date, data_file, data_helper_file):
+ var_name = 'kDriverInfoForEntry' + str(entry_id)
+ if is_exception:
+ var_name += 'Exception' + str(exception_id)
+ # define the GL strings
+ data_helper_file.write('const GpuControlList::DriverInfo %s = {\n' %
+ var_name)
+ write_string_value(driver_vendor, 'driver_vendor', data_helper_file)
+ write_version(driver_version, 'driver_version', data_helper_file)
+ write_version(driver_date, 'driver_date', data_helper_file)
+ data_helper_file.write('};\n\n')
+ # reference the GL strings
+ data_file.write('&%s, // driver info\n' % var_name)
+
+
+def write_number_list(entry_id, data_type, name_tag, data, is_exception,
+ exception_id, data_file, data_helper_file):
+ if data:
+ var_name = 'k' + name_tag + 'ForEntry' + str(entry_id)
+ if is_exception:
+ var_name += 'Exception' + str(exception_id)
+ # define the list
+ data_helper_file.write('const %s %s[%d] = {\n' %
+ (data_type, var_name, len(data)))
+ for item in data:
+ data_helper_file.write(str(item))
+ data_helper_file.write(',\n')
+ data_helper_file.write('};\n\n')
+ # reference the list
+ data_file.write('arraysize(%s), // %s size\n' % (var_name, name_tag))
+ data_file.write('%s, // %s\n' % (var_name, name_tag))
+ else:
+ data_file.write('0, // %s size\n' % name_tag)
+ data_file.write('nullptr, // %s\n' % name_tag)
+
+
+def write_string(string, data_file):
+ if string == '':
+ data_file.write('nullptr')
+ else:
+ data_file.write('"%s"' % string.replace('\\', '\\\\'))
+
+
+def write_string_value(string, name_tag, data_file):
+ write_string(string, data_file)
+ data_file.write(', // %s\n' % name_tag)
+
+
+def write_boolean_value(value, name_tag, data_file):
+ data_file.write('%s, // %s\n' % (str(value).lower(), name_tag))
+
+
+def write_machine_model_info(entry_id, is_exception, exception_id,
+ machine_model_name, machine_model_version,
+ data_file, data_helper_file):
+ model_name_var_name = None
+ if machine_model_name:
+ model_name_var_name = 'kMachineModelNameForEntry' + str(entry_id)
+ if is_exception:
+ model_name_var_name += 'Exception' + str(exception_id)
+ data_helper_file.write('const char* %s[%d] = {\n' %
+ (model_name_var_name, len(machine_model_name)))
+ for item in machine_model_name:
+ write_string(item, data_helper_file)
+ data_helper_file.write(',\n')
+ data_helper_file.write('};\n\n')
+ var_name = None
+ if machine_model_name or machine_model_version:
+ var_name = 'kMachineModelInfoForEntry' + str(entry_id)
+ if is_exception:
+ var_name += 'Exception' + str(exception_id)
+ # define machine model info
+ data_helper_file.write(
+ 'const GpuControlList::MachineModelInfo %s = {\n' % var_name)
+ if machine_model_name:
+ data_helper_file.write('arraysize(%s), // machine model name size\n' %
+ model_name_var_name)
+ data_helper_file.write('%s, // machine model names\n' %
+ model_name_var_name)
+ else:
+ data_helper_file.write('0, // machine model name size\n')
+ data_helper_file.write('nullptr, // machine model names\n')
+ write_version(machine_model_version, 'machine model version',
+ data_helper_file)
+ data_helper_file.write('};\n\n')
+ # reference the machine model info
+ data_file.write('&%s, // machine model info\n' % var_name)
+ else:
+ data_file.write('nullptr, // machine model info\n')
+
+
+def write_os_type(os_type, data_file):
+ map = {
+ 'win': 'kOsWin',
+ 'macosx': 'kOsMacosx',
+ 'android': 'kOsAndroid',
+ 'linux': 'kOsLinux',
+ 'chromeos': 'kOsChromeOS',
+ '': 'kOsAny',
+ }
+ assert map.has_key(os_type)
+ data_file.write('GpuControlList::%s, // os_type\n' % map[os_type])
+
+
+def write_multi_gpu_category(multi_gpu_category, data_file):
+ map = {
+ 'primary': 'Primary',
+ 'secondary': 'Secondary',
+ 'active': 'Active',
+ 'any': 'Any',
+ '': 'None',
+ }
+ assert map.has_key(multi_gpu_category)
+ data_file.write(
+ 'GpuControlList::kMultiGpuCategory%s, // multi_gpu_category\n' %
+ map[multi_gpu_category])
+
+
+def write_multi_gpu_style(multi_gpu_style, data_file):
+ map = {
+ 'optimus': 'Optimus',
+ 'amd_switchable': 'AMDSwitchable',
+ 'amd_switchable_discrete': 'AMDSwitchableDiscrete',
+ 'amd_switchable_integrated': 'AMDSwitchableIntegrated',
+ '': 'None',
+ }
+ assert map.has_key(multi_gpu_style)
+ data_file.write(
+ 'GpuControlList::kMultiGpuStyle%s, // multi_gpu_style\n' %
+ map[multi_gpu_style])
+
+
+def write_gl_type(gl_type, data_file):
+ map = {
+ 'gl': 'GL',
+ 'gles': 'GLES',
+ 'angle': 'ANGLE',
+ '': 'None',
+ }
+ assert map.has_key(gl_type)
+ data_file.write('GpuControlList::kGLType%s, // gl_type\n' % map[gl_type])
+
+
+def write_conditions(entry_id, is_exception, exception_id, entry,
+ data_file, data_helper_file, data_exception_file):
+ os_type = ''
+ os_version = None
+ vendor_id = 0
+ device_id = None
+ multi_gpu_category = ''
+ multi_gpu_style = ''
+ driver_vendor = ''
+ driver_version = None
+ driver_date = None
+ gl_renderer = ''
+ gl_vendor = ''
+ gl_extensions = ''
+ gl_version_string = ''
+ gl_type = ''
+ gl_version = None
+ pixel_shader_version = None
+ in_process_gpu = False
+ gl_reset_notification_strategy = None
+ direct_rendering = True
+ gpu_count = None
+ machine_model_name = None
+ machine_model_version = None
+ exception_count = 0
+ exception_var = 'nullptr'
+ # process the entry
+ for key in entry:
+ if key == 'id':
+ assert not is_exception
+ assert entry['id'] == entry_id
+ continue
+ elif key == 'description':
+ assert not is_exception
+ continue
+ elif key == 'features':
+ assert not is_exception
+ continue
+ elif key == 'disabled_extensions':
+ assert not is_exception
+ continue
+ elif key == 'comment':
+ continue
+ elif key == 'webkit_bugs':
+ assert not is_exception
+ continue
+ elif key == 'cr_bugs':
+ assert not is_exception
+ continue
+ elif key == 'os':
+ os_info = entry[key]
+ os_type = os_info['type']
+ if 'version' in os_info:
+ os_version = os_info['version']
+ elif key == 'vendor_id':
+ vendor_id = int(entry[key], 0)
+ elif key == 'device_id':
+ device_id = entry[key]
+ elif key == 'multi_gpu_category':
+ multi_gpu_category = entry[key]
+ elif key == 'multi_gpu_style':
+ multi_gpu_style = entry[key]
+ elif key == 'driver_vendor':
+ driver_vendor = entry[key]
+ elif key == 'driver_version':
+ driver_version = entry[key]
+ elif key == 'driver_date':
+ driver_date = entry[key]
+ elif key == 'gl_vendor':
+ gl_vendor = entry[key]
+ elif key == 'gl_renderer':
+ gl_renderer = entry[key]
+ elif key == 'gl_version_string':
+ gl_version_string = entry[key]
+ elif key == 'gl_type':
+ gl_type = entry[key]
+ elif key == 'gl_version':
+ gl_version = entry[key]
+ elif key == 'gl_extensions':
+ gl_extensions = entry[key]
+ elif key == 'pixel_shader_version':
+ pixel_shader_version = entry[key]
+ elif key == 'in_process_gpu':
+ assert entry[key]
+ in_process_gpu = True
+ elif key == 'gl_reset_notification_strategy':
+ gl_reset_notification_strategy = entry[key]
+ elif key == 'direct_rendering':
+ assert not entry[key]
+ direct_rendering = False
+ elif key == 'gpu_count':
+ gpu_count = entry[key]
+ elif key == 'machine_model_name':
+ machine_model_name = entry[key]
+ elif key == 'machine_model_version':
+ machine_model_version = entry[key]
+ elif key == 'exceptions':
+ assert not is_exception
+ assert exception_count == 0
+ else:
+ raise ValueError('unknown key: ' + key + ' in entry ' + str(entry))
+ # write out the entry
+ write_os_type(os_type, data_file)
+ write_version(os_version, 'os_version', data_file)
+ data_file.write(format(vendor_id, '#04x'))
+ data_file.write(', // vendor_id\n')
+ write_number_list(entry_id, 'uint32_t', 'DeviceIDs', device_id, is_exception,
+ exception_id, data_file, data_helper_file)
+ write_multi_gpu_category(multi_gpu_category, data_file)
+ write_multi_gpu_style(multi_gpu_style, data_file)
+ # group driver info
+ if driver_vendor != '' or driver_version != None or driver_date != None:
+ write_driver_info(entry_id, is_exception, exception_id, driver_vendor,
+ driver_version, driver_date, data_file, data_helper_file)
+ else:
+ data_file.write('nullptr, // driver info\n')
+ # group GL strings
+ gl_strings = None
+ if (gl_vendor != '' or gl_renderer != '' or gl_extensions != '' or
+ gl_version_string != ''):
+ gl_strings = [gl_vendor, gl_renderer, gl_extensions, gl_version_string]
+ write_gl_strings(entry_id, is_exception, exception_id, gl_strings,
+ data_file, data_helper_file)
+ # group machine model info
+ write_machine_model_info(entry_id, is_exception, exception_id,
+ machine_model_name, machine_model_version,
+ data_file, data_helper_file)
+ # group a bunch of less used conditions
+ if (gl_version != None or pixel_shader_version != None or in_process_gpu or
+ gl_reset_notification_strategy != None or (not direct_rendering) or
+ gpu_count != None):
+ write_entry_more_data(entry_id, is_exception, exception_id, gl_type,
+ gl_version, pixel_shader_version, in_process_gpu,
+ gl_reset_notification_strategy, direct_rendering,
+ gpu_count, data_file, data_helper_file)
+ else:
+ data_file.write('nullptr, // more conditions\n')
+
+
+def write_entry_more_data(entry_id, is_exception, exception_id, gl_type,
+ gl_version, pixel_shader_version, in_process_gpu,
+ gl_reset_notification_strategy, direct_rendering,
+ gpu_count, data_file, data_helper_file):
+ # write more data
+ var_name = 'kMoreForEntry' + str(entry_id)
+ if is_exception:
+ var_name += 'Exception' + str(exception_id)
+ data_helper_file.write('const GpuControlList::More %s = {\n' % var_name)
+ write_gl_type(gl_type, data_helper_file)
+ write_version(gl_version, 'gl_version', data_helper_file)
+ write_version(pixel_shader_version, 'pixel_shader_version', data_helper_file)
+ write_boolean_value(in_process_gpu, 'in_process_gpu', data_helper_file)
+ if not gl_reset_notification_strategy:
+ gl_reset_notification_strategy = '0'
+ data_helper_file.write('%s, // gl_reset_notification_strategy\n' %
+ gl_reset_notification_strategy)
+ write_boolean_value(direct_rendering, 'direct_rendering', data_helper_file)
+ write_version(gpu_count, 'gpu_count', data_helper_file)
+ data_helper_file.write('};\n\n')
+ # reference more data in entry
+ data_file.write('&%s, // more data\n' % var_name)
+
+
+def write_entry(entry, total_feature_set, feature_name_prefix,
+ data_file, data_helper_file, data_exception_file):
+ data_file.write('{\n')
+ # ID
+ entry_id = entry['id']
+ data_file.write('%d, // id\n' % entry_id)
+ data_file.write('"%s",\n' % entry['description']);
+ # Features
+ if 'features' in entry:
+ features = entry['features']
+ feature_set = get_feature_set(features, total_feature_set)
+ data_file.write('arraysize(kFeatureListForEntry%d), // features size\n' %
+ entry_id)
+ data_file.write('kFeatureListForEntry%d, // features\n' % entry_id)
+ write_features(entry_id, feature_set, feature_name_prefix, data_helper_file)
+ else:
+ data_file.write('0, // feature size\n')
+ data_file.write('nullptr, // features\n')
+ # Disabled extensions
+ disabled_extensions = None
+ if 'disabled_extensions' in entry:
+ disabled_extensions = entry['disabled_extensions']
+ write_disabled_extension_list(entry_id, disabled_extensions,
+ data_file, data_helper_file)
+ # webkit_bugs are skipped because there is only one entry that has it.
+ # cr_bugs
+ cr_bugs = None
+ if 'cr_bugs' in entry:
+ cr_bugs = entry['cr_bugs']
+ write_number_list(entry_id, 'uint32_t', 'CrBugs', cr_bugs, False, -1,
+ data_file, data_helper_file)
+ # Conditions
+ data_file.write('{\n')
+ write_conditions(entry_id, False, -1, entry, data_file, data_helper_file,
+ data_exception_file)
+ data_file.write('},\n')
+ # Exceptions
+ if 'exceptions' in entry:
+ exceptions = entry['exceptions']
+ exception_count = len(exceptions)
+ exception_var = 'kExceptionsForEntry' + str(entry_id)
+ data_exception_file.write('const GpuControlList::Conditions %s[%d] = {\n' %
+ (exception_var, exception_count))
+ for index in range(exception_count):
+ exception = exceptions[index]
+ if 'device_id' in exception and 'vendor_id' not in exception:
+ assert 'vendor_id' in entry
+ exception['vendor_id'] = entry['vendor_id']
+ data_exception_file.write('{\n')
+ write_conditions(entry_id, True, index, exception,
+ data_exception_file, data_helper_file, None)
+ data_exception_file.write('},\n')
+ data_exception_file.write('};\n\n')
+ data_file.write('arraysize(%s), // exceptions count\n' % exception_var)
+ data_file.write('%s, // exceptions\n' % exception_var)
+ else:
+ data_file.write('0, // exceptions count\n')
+ data_file.write('nullptr, // exceptions\n')
+ # END
+ data_file.write('},\n')
+
+
+def format_files(generated_files):
+ formatter = "clang-format"
+ if platform.system() == "Windows":
+ formatter += ".bat"
+ for filename in generated_files:
+ call([formatter, "-i", "-style=chromium", filename])
+
+
+def write_header_file_guard(file, filename, path, begin):
+ token = (path.upper().replace('/', '_') + '_' +
+ filename.upper().replace('.', '_') + '_')
+ if begin:
+ file.write('#ifndef %s\n#define %s\n\n' % (token, token))
+ else:
+ file.write('\n#endif // %s\n' % token)
+
+
+def process_json_file(json_filepath, list_tag,
+ feature_header_filename, total_features, feature_tag,
+ output_header_filepath, output_data_filepath,
+ output_helper_filepath, output_exception_filepath, path,
+ export_tag, git_format):
+ output_header_filename = os.path.basename(output_header_filepath)
+ output_helper_filename = os.path.basename(output_helper_filepath)
+ output_exception_filename = os.path.basename(output_exception_filepath)
+ json_file = open(json_filepath, 'rb')
+ json_data = json.load(json_file)
+ json_file.close()
+ data_file = open(output_data_filepath, 'wb')
+ data_file.write(_LICENSE)
+ data_file.write(_DO_NOT_EDIT_WARNING)
+ data_file.write('#include "%s/%s"\n\n' % (path, output_header_filename))
+ data_file.write('#include "%s/%s"\n' % (path, output_helper_filename))
+ data_file.write('#include "%s/%s"\n\n' % (path, output_exception_filename))
+ data_helper_file = open(output_helper_filepath, 'wb')
+ data_helper_file.write(_LICENSE)
+ data_helper_file.write(_DO_NOT_EDIT_WARNING)
+ write_header_file_guard(data_helper_file, output_helper_filename, path, True)
+ data_helper_file.write('#include "gpu/config/%s"\n\n' %
+ feature_header_filename)
+ data_helper_file.write('namespace gpu {\n')
+ data_exception_file = open(output_exception_filepath, 'wb')
+ data_exception_file.write(_LICENSE)
+ data_exception_file.write(_DO_NOT_EDIT_WARNING)
+ write_header_file_guard(data_exception_file, output_exception_filename, path,
+ True)
+ data_exception_file.write('namespace gpu {\n')
+ data_file.write('namespace gpu {\n\n')
+ data_file.write('const char k%sVersion[] = "%s";\n\n' %
+ (list_tag, json_data['version']))
+ entry_count = len(json_data['entries'])
+ data_file.write('const size_t k%sEntryCount = %d;\n' %
+ (list_tag, entry_count))
+ data_file.write('const GpuControlList::Entry k%sEntries[%d] = {\n' %
+ (list_tag, entry_count))
+ ids = []
+ for index in range(entry_count):
+ entry = json_data['entries'][index]
+ entry_id = entry['id']
+ assert entry_id not in ids
+ ids.append(entry_id)
+ write_entry(entry, total_features, feature_tag,
+ data_file, data_helper_file, data_exception_file)
+ data_file.write('};\n')
+ data_file.write('} // namespace gpu\n')
+ data_file.close()
+ data_helper_file.write('} // namespace gpu\n')
+ write_header_file_guard(data_helper_file, output_helper_filename, path, False)
+ data_helper_file.close()
+ data_exception_file.write('} // namespace gpu\n')
+ write_header_file_guard(data_exception_file, output_exception_filename, path,
+ False)
+ data_exception_file.close()
+ data_header_file = open(output_header_filepath, 'wb')
+ data_header_file.write(_LICENSE)
+ data_header_file.write(_DO_NOT_EDIT_WARNING)
+ write_header_file_guard(data_header_file, output_header_filename, path, True)
+ if export_tag == 'CONTENT_EXPORT ':
+ data_header_file.write('#include "content/common/content_export.h"\n')
+ data_header_file.write('#include "gpu/config/gpu_control_list.h"\n\n')
+ data_header_file.write('\n')
+ data_header_file.write('namespace gpu {\n')
+ data_header_file.write('%sextern const char k%sVersion[];\n' %
+ (export_tag, list_tag))
+ data_header_file.write('%sextern const size_t k%sEntryCount;\n' %
+ (export_tag, list_tag))
+ data_header_file.write(
+ '%sextern const GpuControlList::Entry k%sEntries[];\n' %
+ (export_tag, list_tag))
+ data_header_file.write('} // namespace gpu\n')
+ write_header_file_guard(data_header_file, output_header_filename, path, False)
+ data_header_file.close()
+ if git_format:
+ format_files([output_header_filepath, output_data_filepath,
+ output_helper_filepath, output_exception_filepath])
+
+
+def process_software_rendering_list(script_dir, output_dir):
+ total_features = load_software_rendering_list_features(
+ os.path.join(script_dir, 'gpu_feature_type.h'))
+ process_json_file(
+ os.path.join(script_dir, 'software_rendering_list.json'),
+ 'SoftwareRenderingList',
+ 'gpu_feature_type.h',
+ total_features,
+ 'GPU_FEATURE_TYPE_',
+ os.path.join(output_dir, 'software_rendering_list_autogen.h'),
+ os.path.join(output_dir, 'software_rendering_list_autogen.cc'),
+ os.path.join(output_dir,
+ 'software_rendering_list_arrays_and_structs_autogen.h'),
+ os.path.join(output_dir, 'software_rendering_list_exceptions_autogen.h'),
+ 'gpu/config',
+ 'GPU_EXPORT ',
+ False)
+
+
+def process_gpu_driver_bug_list(script_dir, output_dir):
+ total_features = load_gpu_driver_bug_workarounds(
+ os.path.join(script_dir, 'gpu_driver_bug_workaround_type.h'))
+ process_json_file(
+ os.path.join(script_dir, 'gpu_driver_bug_list.json'),
+ 'GpuDriverBugList',
+ 'gpu_driver_bug_workaround_type.h',
+ total_features,
+ '',
+ os.path.join(output_dir, 'gpu_driver_bug_list_autogen.h'),
+ os.path.join(output_dir, 'gpu_driver_bug_list_autogen.cc'),
+ os.path.join(output_dir,
+ 'gpu_driver_bug_list_arrays_and_structs_autogen.h'),
+ os.path.join(output_dir, 'gpu_driver_bug_list_exceptions_autogen.h'),
+ 'gpu/config',
+ 'GPU_EXPORT ',
+ False)
+
+
+def process_gpu_control_list_testing(script_dir, output_dir):
+ total_features = ['test_feature_0', 'test_feature_1', 'test_feature_2']
+ process_json_file(
+ os.path.join(script_dir, 'gpu_control_list_testing.json'),
+ 'GpuControlListTesting',
+ 'gpu_control_list_testing_data.h',
+ total_features,
+ '',
+ os.path.join(output_dir, 'gpu_control_list_testing_autogen.h'),
+ os.path.join(output_dir, 'gpu_control_list_testing_autogen.cc'),
+ os.path.join(output_dir,
+ 'gpu_control_list_testing_arrays_and_structs_autogen.h'),
+ os.path.join(output_dir, 'gpu_control_list_testing_exceptions_autogen.h'),
+ 'gpu/config',
+ '',
+ True)
+
+
+def process_gpu_data_manager_testing(script_dir, output_dir):
+ total_features = load_software_rendering_list_features(
+ os.path.join(script_dir, 'gpu_feature_type.h'))
+ process_json_file(
+ os.path.join(output_dir, 'gpu_data_manager_testing.json'),
+ 'GpuDataManagerTesting',
+ 'gpu_feature_type.h',
+ total_features,
+ 'GPU_FEATURE_TYPE_',
+ os.path.join(output_dir, 'gpu_data_manager_testing_autogen.h'),
+ os.path.join(output_dir, 'gpu_data_manager_testing_autogen.cc'),
+ os.path.join(output_dir,
+ 'gpu_data_manager_testing_arrays_and_structs_autogen.h'),
+ os.path.join(output_dir, 'gpu_data_manager_testing_exceptions_autogen.h'),
+ 'content/browser/gpu',
+ '',
+ True)
+
+
+def write_test_entry_enums(input_json_filepath, output_entry_enums_filepath,
+ path, list_tag):
+ json_file = open(input_json_filepath, 'rb')
+ json_data = json.load(json_file)
+ json_file.close()
+
+ output_entry_enums_filename = os.path.basename(output_entry_enums_filepath)
+ enum_file = open(output_entry_enums_filepath, 'wb')
+ enum_file.write(_LICENSE)
+ enum_file.write(_DO_NOT_EDIT_WARNING)
+ write_header_file_guard(enum_file, output_entry_enums_filename, path, True)
+ enum_file.write('namespace gpu {\n')
+ enum_file.write('enum %sEntryEnum {\n' % list_tag)
+ entry_count = len(json_data['entries'])
+ for index in range(entry_count):
+ entry = json_data['entries'][index]
+ entry_id = entry['id']
+ description = entry['description']
+ assert(index + 1 == int(entry_id))
+ description = 'k' + description
+ description = description.replace('.', '_')
+ enum_file.write(' %s = %d,\n' % (description, index))
+ enum_file.write('};\n')
+ enum_file.write('} // namespace gpu\n')
+ write_header_file_guard(enum_file, output_entry_enums_filename, path, False)
+ enum_file.close()
+ format_files([output_entry_enums_filepath])
+
+
+def main(argv):
+ parser = OptionParser()
+ parser.add_option("--output-dir",
+ help="output directory for SoftwareRenderingList and "
+ "GpuDriverBugList data files. "
+ "If unspecified, these files are not generated.")
+ parser.add_option("--skip-testing-data", action="store_false",
+ dest="generate_testing_data", default=True,
+ help="skip testing data generation.")
+ (options, args) = parser.parse_args(args=argv)
+
+ script_dir = os.path.dirname(os.path.realpath(__file__))
+
+ if options.output_dir != None:
+ process_software_rendering_list(script_dir, options.output_dir)
+ process_gpu_driver_bug_list(script_dir, options.output_dir)
+
+ if options.generate_testing_data:
+ # Testing data files are generated by calling the script manually.
+ process_gpu_control_list_testing(script_dir, script_dir)
+ write_test_entry_enums(
+ os.path.join(script_dir, 'gpu_control_list_testing.json'),
+ os.path.join(script_dir,
+ 'gpu_control_list_testing_entry_enums_autogen.h'),
+ 'gpu/config',
+ 'GpuControlListTesting')
+ chrome_root_dir = os.path.abspath(os.path.join(script_dir, '../../'))
+ gpu_data_manager_dir = os.path.join(chrome_root_dir, 'content/browser/gpu')
+ process_gpu_data_manager_testing(script_dir, gpu_data_manager_dir)
+ write_test_entry_enums(
+ os.path.join(gpu_data_manager_dir, 'gpu_data_manager_testing.json'),
+ os.path.join(gpu_data_manager_dir,
+ 'gpu_data_manager_testing_entry_enums_autogen.h'),
+ 'content/browser/gpu',
+ 'GpuDataManagerTesting')
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/chromium/gpu/config/software_rendering_list.README b/chromium/gpu/config/software_rendering_list.README
new file mode 100644
index 00000000000..00149c9825f
--- /dev/null
+++ b/chromium/gpu/config/software_rendering_list.README
@@ -0,0 +1,7 @@
+Determines whether certain gpu-related features are blacklisted or not.
+
+The format of a valid software_rendering_list.json file is defined in
+ <gpu/config/gpu_control_list_format.txt>.
+
+The supported "features" can be found in
+ <gpu/config/gpu_blacklist.cc>.
diff --git a/chromium/gpu/config/software_rendering_list_json.cc b/chromium/gpu/config/software_rendering_list.json
index df24f4fe7fc..ebdfca0b9d7 100644
--- a/chromium/gpu/config/software_rendering_list_json.cc
+++ b/chromium/gpu/config/software_rendering_list.json
@@ -1,24 +1,6 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Determines whether certain gpu-related features are blacklisted or not.
-// The format of a valid software_rendering_list.json file is defined in
-// <gpu/config/gpu_control_list_format.txt>.
-// The supported "features" can be found in <gpu/config/gpu_blacklist.cc>.
-
-#include "gpu/config/gpu_control_list_jsons.h"
-
-#define LONG_STRING_CONST(...) #__VA_ARGS__
-
-namespace gpu {
-
-const char kSoftwareRenderingListJson[] = LONG_STRING_CONST(
-
{
"name": "software rendering list",
- // Please update the version number whenever you change this file.
- "version": "12.20",
+ "version": "13.8",
"entries": [
{
"id": 1,
@@ -31,8 +13,8 @@ const char kSoftwareRenderingListJson[] = LONG_STRING_CONST(
"device_id": ["0x7249"],
"multi_gpu_category": "any",
"features": [
- "webgl",
- "flash_3d",
+ "accelerated_webgl",
+ "flash3d",
"flash_stage3d",
"gpu_rasterization"
]
@@ -53,14 +35,11 @@ const char kSoftwareRenderingListJson[] = LONG_STRING_CONST(
"id": 4,
"description": "The Intel Mobile 945 Express family of chipsets is not compatible with WebGL",
"cr_bugs": [232035],
- "os": {
- "type": "any"
- },
"vendor_id": "0x8086",
"device_id": ["0x27AE", "0x27A2"],
"features": [
- "webgl",
- "flash_3d",
+ "accelerated_webgl",
+ "flash3d",
"flash_stage3d",
"accelerated_2d_canvas"
]
@@ -101,9 +80,6 @@ const char kSoftwareRenderingListJson[] = LONG_STRING_CONST(
"id": 8,
"description": "NVIDIA GeForce FX Go5200 is assumed to be buggy",
"cr_bugs": [72938],
- "os": {
- "type": "any"
- },
"vendor_id": "0x10de",
"device_id": ["0x0324"],
"features": [
@@ -121,8 +97,8 @@ const char kSoftwareRenderingListJson[] = LONG_STRING_CONST(
"device_id": ["0x0393"],
"multi_gpu_category": "any",
"features": [
- "webgl",
- "flash_3d",
+ "accelerated_webgl",
+ "flash3d",
"flash_stage3d",
"gpu_rasterization"
]
@@ -460,15 +436,18 @@ const char kSoftwareRenderingListJson[] = LONG_STRING_CONST(
},
"features": [
"accelerated_video_decode",
- "flash_3d",
+ "flash3d",
"flash_stage3d"
]
},
{
- // Panel fitting is only used with OS_CHROMEOS. To avoid displaying an
- // error in chrome:gpu on every other platform, this blacklist entry needs
- // to only match on chromeos. The drawback is that panel_fitting will not
- // appear to be blacklisted if accidentally queried on non-chromeos.
+ "comment": [
+ "Panel fitting is only used with OS_CHROMEOS. To avoid displaying an ",
+ "error in chrome:gpu on every other platform, this blacklist entry ",
+ "needs to only match on chromeos. The drawback is that panel_fitting ",
+ "will not appear to be blacklisted if accidentally queried on ",
+ "non-chromeos."
+ ],
"id": 57,
"description": "Chrome OS panel fitting is only supported for Intel IVB and SNB Graphics Controllers",
"os": {
@@ -593,7 +572,7 @@ const char kSoftwareRenderingListJson[] = LONG_STRING_CONST(
"vendor_id": "0x10de",
"device_id": ["0x0163"],
"features": [
- "webgl"
+ "accelerated_webgl"
]
},
{
@@ -614,8 +593,6 @@ const char kSoftwareRenderingListJson[] = LONG_STRING_CONST(
"all"
]
},
-) // String split to avoid MSVC char limit.
-LONG_STRING_CONST(
{
"id": 76,
"description": "WebGL is disabled on Android unless the GPU runs in a separate process or reset notification is supported",
@@ -625,10 +602,7 @@ LONG_STRING_CONST(
"in_process_gpu": true,
"exceptions": [
{
- "gl_reset_notification_strategy": {
- "op": "=",
- "value": "33362"
- }
+ "gl_reset_notification_strategy": "33362"
},
{
"gl_renderer": "Mali-4.*",
@@ -636,7 +610,7 @@ LONG_STRING_CONST(
}
],
"features": [
- "webgl"
+ "accelerated_webgl"
]
},
{
@@ -691,7 +665,7 @@ LONG_STRING_CONST(
"vendor_id": "0x8086",
"device_id": ["0xa011"],
"features": [
- "webgl"
+ "accelerated_webgl"
]
},
{
@@ -1024,7 +998,7 @@ LONG_STRING_CONST(
"features": [
"all",
{"exceptions": [
- "webgl"
+ "accelerated_webgl"
]}
]
},
@@ -1177,7 +1151,7 @@ LONG_STRING_CONST(
"gl_vendor": "Vivante.*",
"gl_renderer": ".*PXA.*",
"features": [
- "webgl",
+ "accelerated_webgl",
"accelerated_2d_canvas"
]
},
@@ -1197,43 +1171,6 @@ LONG_STRING_CONST(
]
},
{
- "id": 120,
- "description": "VPx decoding isn't supported before Windows 10 anniversary update.",
- "cr_bugs": [616318],
- "os": {
- "type": "win",
- "version": {
- "op": "<",
- "value": "10.0.14393"
- }
- },
- "features": [
- "accelerated_vpx_decode"
- ]
- },
- {
- "id": 121,
- "description": "VPx decoding is too slow on Intel Broadwell, Skylake, and CherryView",
- "cr_bugs": [616318],
- "os": {
- "type": "win"
- },
- "vendor_id": "0x8086",
- "device_id": ["0x1602", "0x1606", "0x160a", "0x160b", "0x160d",
- "0x160e", "0x1612", "0x1616", "0x161a", "0x161b",
- "0x161d", "0x161e", "0x1622", "0x1626", "0x162a",
- "0x162b", "0x162d", "0x162e", "0x22b0", "0x22b1",
- "0x22b2", "0x22b3", "0x1902", "0x1906", "0x190a",
- "0x190b", "0x190e", "0x1912", "0x1913", "0x1915",
- "0x1916", "0x1917", "0x191a", "0x191b", "0x191d",
- "0x191e", "0x1921", "0x1923", "0x1926", "0x1927",
- "0x192a", "0x192b", "0x192d", "0x1932", "0x193a",
- "0x193b", "0x193d"],
- "features": [
- "accelerated_vpx_decode"
- ]
- },
- {
"id": 122,
"description": "GPU rasterization should only be enabled on NVIDIA and Intel DX11+, and AMD RX-R2 GPUs for now.",
"cr_bugs": [643850],
@@ -1264,38 +1201,14 @@ LONG_STRING_CONST(
"op": ">=",
"value": "5.0"
},
- "device_id": ["0x1309", "0x130a", "0x130b", "0x130c", "0x130d",
- "0x130e", "0x130f", "0x1313", "0x1315", "0x1316",
- "0x1318", "0x131b", "0x131c", "0x131d", "0x6600",
- "0x6604", "0x6605", "0x6610", "0x6611", "0x6617",
- "0x6640", "0x6646", "0x6647", "0x6647", "0x6658",
- "0x665d", "0x665f", "0x6660", "0x6663", "0x6664",
- "0x6665", "0x6667", "0x67b0", "0x67b1", "0x67b9",
- "0x67df", "0x67ef", "0x6810", "0x6811", "0x6820",
- "0x6821", "0x682b", "0x6835", "0x6900", "0x6901",
- "0x6907", "0x6920", "0x6921", "0x6938", "0x6939",
- "0x7300", "0x9851", "0x9852", "0x9853", "0x9854",
- "0x9855", "0x9856", "0x9874", "0x98e4"]
+ "driver_version": {
+ "op": ">=",
+ "value": "15.201"
+ }
}
]
},
{
- "id": 123,
- "description": "Accelerated VPx decoding is hanging on some videos.",
- "cr_bugs": [654111],
- "os": {
- "type": "win"
- },
- "vendor_id": "0x8086",
- "driver_version": {
- "op": "<",
- "value": "21.20.16.4542"
- },
- "features": [
- "accelerated_vpx_decode"
- ]
- },
- {
"id": 124,
"description": "Some AMD drivers have rendering glitches with GPU Rasterization",
"cr_bugs": [653538],
@@ -1319,8 +1232,6 @@ LONG_STRING_CONST(
"gpu_rasterization"
]
},
-) // String split to avoid MSVC char limit.
-LONG_STRING_CONST(
{
"id": 125,
"description": "VirtualBox driver is unstable on linux.",
@@ -1450,22 +1361,6 @@ LONG_STRING_CONST(
]
},
{
- "id": 135,
- "description": "Key parts of WebGL 2 broken on old Qualcomm drivers (depth texture, MSAA)",
- "cr_bugs": [682753, 682075],
- "os": {
- "type": "android",
- "version": {
- "op": "<",
- "value": "6.0"
- }
- },
- "gl_renderer": "Adreno \\(TM\\) 4.*",
- "features": [
- "webgl2"
- ]
- },
- {
"id": 136,
"description": "GPU rasterization is blacklisted on NVidia Fermi architecture for now.",
"cr_bugs": [643850],
@@ -1497,18 +1392,78 @@ LONG_STRING_CONST(
]
},
{
- // Corresponds to GPU driver bug #214.
+ "id": 137,
+ "description": "GPU rasterization on CrOS is blacklisted on non-Intel GPUs for now.",
+ "cr_bugs": [684094],
+ "os": {
+ "type": "chromeos"
+ },
+ "features": [
+ "gpu_rasterization"
+ ],
+ "exceptions": [
+ { "vendor_id": "0x8086" }
+ ]
+ },
+ {
+ "id": 138,
+ "description": "Accelerated video encode is unavailable on Linux",
+ "os": {
+ "type": "linux"
+ },
+ "features": [
+ "accelerated_video_encode"
+ ]
+ },
+ {
+ "id": 139,
+ "description": "GPU Rasterization is disabled on pre-GCN AMD cards",
+ "cr_bugs": [643850],
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x1002",
+ "driver_version": {
+ "op": "between",
+ "value": "15.301",
+ "value2": "15.302"
+ },
+ "features": [
+ "gpu_rasterization"
+ ]
+ },
+ {
"id": 140,
- "description": "Some old Qualcomm scissor bug workaround needs disabling MSAA to work, which is a core part of WebGL 2.",
- "cr_bugs": [670607, 696627, 698197],
+ "comment": "Corresponds to GPU driver bugs #19, #214",
+ "description": "MSAA and depth texture buggy on Adreno 3xx, also disable WebGL2",
+ "cr_bugs": [449116, 698197],
"gl_renderer": "Adreno \\(TM\\) 3.*",
"features": [
"webgl2"
]
+ },
+ {
+ "id": 147,
+ "description": "Explicit multisample resolve is broken on Adreno 4xx on Android 7.0",
+ "comment": "Corresponds to GPU driver bug #174",
+ "cr_bugs": [696126],
+ "os": {
+ "type": "android",
+ "version": {
+ "op": "between",
+ "value": "7.0.0",
+ "value2": "7.0.99",
+ "comment": "Only initial version of N."
+ }
+ },
+ "gl_renderer": "Adreno \\(TM\\) 4.*",
+ "features": [
+ "webgl2"
+ ]
}
+ ],
+ "comment": [
+ "Please update the version number on top whenever you change this file",
+ "Please run gpu/config/process_json.py whenever you change this file"
]
}
-
-); // LONG_STRING_CONST macro
-
-} // namespace gpu
diff --git a/chromium/gpu/gles2_conform_support/BUILD.gn b/chromium/gpu/gles2_conform_support/BUILD.gn
index 272f40bcb55..88f324393b5 100644
--- a/chromium/gpu/gles2_conform_support/BUILD.gn
+++ b/chromium/gpu/gles2_conform_support/BUILD.gn
@@ -413,11 +413,6 @@ if (internal_gles2_conform_tests) {
# Must be done this way for warning flags to be ordered correctly.
":gles2_conform_test_warnings",
]
- if (is_linux) {
- if (!is_chromeos) {
- deps += [ "//build/config/linux/gtk2" ]
- }
- }
if (is_win) {
deps += [
"//third_party/angle:libEGL",
@@ -470,6 +465,7 @@ if (internal_gles2_conform_tests) {
test("gles2_conform_test") {
sources = [
"gles2_conform_test.cc",
+ "gles2_conform_test.h",
]
deps = [
"//base",
diff --git a/chromium/gpu/gles2_conform_support/egl/context.cc b/chromium/gpu/gles2_conform_support/egl/context.cc
index 6b71251858f..f617144c1aa 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.cc
+++ b/chromium/gpu/gles2_conform_support/egl/context.cc
@@ -223,7 +223,9 @@ void Context::SignalSyncToken(const gpu::SyncToken& sync_token,
NOTIMPLEMENTED();
}
-bool Context::CanWaitUnverifiedSyncToken(const gpu::SyncToken* sync_token) {
+void Context::WaitSyncTokenHint(const gpu::SyncToken& sync_token) {}
+
+bool Context::CanWaitUnverifiedSyncToken(const gpu::SyncToken& sync_token) {
return false;
}
diff --git a/chromium/gpu/gles2_conform_support/egl/context.h b/chromium/gpu/gles2_conform_support/egl/context.h
index cff597663d5..7a9d43555d4 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.h
+++ b/chromium/gpu/gles2_conform_support/egl/context.h
@@ -76,7 +76,8 @@ class Context : public base::RefCountedThreadSafe<Context>,
bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) override;
- bool CanWaitUnverifiedSyncToken(const gpu::SyncToken* sync_token) override;
+ void WaitSyncTokenHint(const gpu::SyncToken& sync_token) override;
+ bool CanWaitUnverifiedSyncToken(const gpu::SyncToken& sync_token) override;
// Called by ThreadState to set the needed global variables when this context
// is current.
diff --git a/chromium/gpu/ipc/client/BUILD.gn b/chromium/gpu/ipc/client/BUILD.gn
index 53775227407..96c2808b04a 100644
--- a/chromium/gpu/ipc/client/BUILD.gn
+++ b/chromium/gpu/ipc/client/BUILD.gn
@@ -35,10 +35,10 @@ source_set("ipc_client_sources") {
"gpu_process_hosted_ca_layer_tree_params.h",
]
}
- if (use_ozone) {
+ if (is_linux) {
sources += [
- "gpu_memory_buffer_impl_ozone_native_pixmap.cc",
- "gpu_memory_buffer_impl_ozone_native_pixmap.h",
+ "gpu_memory_buffer_impl_native_pixmap.cc",
+ "gpu_memory_buffer_impl_native_pixmap.h",
]
}
configs += [
@@ -51,14 +51,16 @@ source_set("ipc_client_sources") {
"//gpu/command_buffer/common:common_sources",
"//gpu/config:config_sources",
"//gpu/ipc/common:ipc_common_sources",
- "//ipc",
"//ui/base/",
- "//ui/events/ipc",
"//ui/gfx/ipc",
"//ui/gfx/ipc/geometry",
"//ui/gl",
+ "//ui/latency/ipc",
"//url/ipc:url_ipc",
]
+ public_deps = [
+ "//ipc",
+ ]
if (use_ozone) {
deps += [ "//ui/ozone" ]
}
diff --git a/chromium/gpu/ipc/client/DEPS b/chromium/gpu/ipc/client/DEPS
index 45e1d3de631..191b73ce946 100644
--- a/chromium/gpu/ipc/client/DEPS
+++ b/chromium/gpu/ipc/client/DEPS
@@ -1,6 +1,6 @@
include_rules = [
"+base",
"+ipc",
- "+ui/events",
"+ui/base",
+ "+ui/latency",
]
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
index 233df390153..8f43e18af59 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
@@ -44,6 +44,10 @@ gpu::CommandBufferId CommandBufferProxyID(int channel_id, int32_t route_id) {
(static_cast<uint64_t>(channel_id) << 32) | route_id);
}
+int GetChannelID(gpu::CommandBufferId command_buffer_id) {
+ return static_cast<int>(command_buffer_id.GetUnsafeValue() >> 32);
+}
+
} // namespace
CommandBufferProxyImpl::CommandBufferProxyImpl(int channel_id,
@@ -54,13 +58,6 @@ CommandBufferProxyImpl::CommandBufferProxyImpl(int channel_id,
command_buffer_id_(CommandBufferProxyID(channel_id, route_id)),
route_id_(route_id),
stream_id_(stream_id),
- flush_count_(0),
- last_put_offset_(-1),
- last_barrier_put_offset_(-1),
- next_fence_sync_release_(1),
- flushed_fence_sync_release_(0),
- verified_fence_sync_release_(0),
- next_signal_id_(0),
weak_this_(AsWeakPtr()) {
DCHECK(route_id);
DCHECK_NE(stream_id, GPU_STREAM_INVALID);
@@ -265,7 +262,8 @@ void CommandBufferProxyImpl::Flush(int32_t put_offset) {
uint32_t highest_verified_flush_id;
const uint32_t flush_id = channel_->OrderingBarrier(
route_id_, stream_id_, put_offset, ++flush_count_, latency_info_,
- put_offset_changed, true, &highest_verified_flush_id);
+ pending_sync_token_fences_, put_offset_changed, true,
+ &highest_verified_flush_id);
if (put_offset_changed) {
DCHECK(flush_id);
const uint64_t fence_sync_release = next_fence_sync_release_ - 1;
@@ -278,8 +276,10 @@ void CommandBufferProxyImpl::Flush(int32_t put_offset) {
CleanupFlushedReleases(highest_verified_flush_id);
}
- if (put_offset_changed)
+ if (put_offset_changed) {
latency_info_.clear();
+ pending_sync_token_fences_.clear();
+ }
}
void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) {
@@ -298,7 +298,8 @@ void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) {
uint32_t highest_verified_flush_id;
const uint32_t flush_id = channel_->OrderingBarrier(
route_id_, stream_id_, put_offset, ++flush_count_, latency_info_,
- put_offset_changed, false, &highest_verified_flush_id);
+ pending_sync_token_fences_, put_offset_changed, false,
+ &highest_verified_flush_id);
if (put_offset_changed) {
DCHECK(flush_id);
@@ -311,12 +312,13 @@ void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) {
}
CleanupFlushedReleases(highest_verified_flush_id);
}
-
- if (put_offset_changed)
+ if (put_offset_changed) {
latency_info_.clear();
+ pending_sync_token_fences_.clear();
+ }
}
-void CommandBufferProxyImpl::SetLatencyInfo(
+void CommandBufferProxyImpl::AddLatencyInfo(
const std::vector<ui::LatencyInfo>& latency_info) {
CheckLock();
for (size_t i = 0; i < latency_info.size(); i++)
@@ -415,6 +417,7 @@ void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) {
Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
last_put_offset_ = -1;
+ last_barrier_put_offset_ = -1;
}
scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
@@ -513,8 +516,8 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
DCHECK_EQ(image_fence_sync, flushed_fence_sync_release_ + 1);
}
- DCHECK(gpu::IsGpuMemoryBufferFormatSupported(gpu_memory_buffer->GetFormat(),
- capabilities_));
+ DCHECK(gpu::IsImageFromGpuMemoryBufferFormatSupported(
+ gpu_memory_buffer->GetFormat(), capabilities_));
DCHECK(gpu::IsImageSizeValidForGpuMemoryBufferFormat(
gfx::Size(width, height), gpu_memory_buffer->GetFormat()));
DCHECK(gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
@@ -654,19 +657,28 @@ void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token,
signal_tasks_.insert(std::make_pair(signal_id, callback));
}
+void CommandBufferProxyImpl::WaitSyncTokenHint(
+ const gpu::SyncToken& sync_token) {
+ CheckLock();
+ base::AutoLock lock(last_state_lock_);
+ if (last_state_.error != gpu::error::kNoError)
+ return;
+
+ pending_sync_token_fences_.push_back(sync_token);
+}
+
bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken(
- const gpu::SyncToken* sync_token) {
+ const gpu::SyncToken& sync_token) {
// Can only wait on an unverified sync token if it is from the same channel.
- const uint64_t token_channel =
- sync_token->command_buffer_id().GetUnsafeValue() >> 32;
- const uint64_t channel = command_buffer_id_.GetUnsafeValue() >> 32;
- if (sync_token->namespace_id() != gpu::CommandBufferNamespace::GPU_IO ||
- token_channel != channel) {
+ int sync_token_channel_id = GetChannelID(sync_token.command_buffer_id());
+ int channel_id = GetChannelID(command_buffer_id_);
+ if (sync_token.namespace_id() != gpu::CommandBufferNamespace::GPU_IO ||
+ sync_token_channel_id != channel_id) {
return false;
}
// If waiting on a different stream, flush pending commands on that stream.
- const int32_t release_stream_id = sync_token->extra_data_field();
+ int32_t release_stream_id = sync_token.extra_data_field();
if (release_stream_id == gpu::GPU_STREAM_INVALID)
return false;
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
index c35b70090ea..848f28bfd6b 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
@@ -31,9 +31,9 @@
#include "gpu/ipc/common/gpu_stream_constants.h"
#include "gpu/ipc/common/surface_handle.h"
#include "ipc/ipc_listener.h"
-#include "ui/events/latency_info.h"
#include "ui/gfx/swap_result.h"
#include "ui/gl/gpu_preference.h"
+#include "ui/latency/latency_info.h"
struct GPUCommandBufferConsoleMessage;
struct GPUCreateCommandBufferConfig;
@@ -125,7 +125,8 @@ class GPU_EXPORT CommandBufferProxyImpl
bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) override;
- bool CanWaitUnverifiedSyncToken(const gpu::SyncToken* sync_token) override;
+ void WaitSyncTokenHint(const gpu::SyncToken& sync_token) override;
+ bool CanWaitUnverifiedSyncToken(const gpu::SyncToken& sync_token) override;
void TakeFrontBuffer(const gpu::Mailbox& mailbox);
void ReturnFrontBuffer(const gpu::Mailbox& mailbox,
@@ -139,7 +140,7 @@ class GPU_EXPORT CommandBufferProxyImpl
void SetOnConsoleMessageCallback(const GpuConsoleMessageCallback& callback);
- void SetLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info);
+ void AddLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info);
using SwapBuffersCompletionCallback = base::Callback<void(
const std::vector<ui::LatencyInfo>& latency_info,
gfx::SwapResult result,
@@ -260,26 +261,28 @@ class GPU_EXPORT CommandBufferProxyImpl
const gpu::CommandBufferId command_buffer_id_;
const int32_t route_id_;
const int32_t stream_id_;
- uint32_t flush_count_;
- int32_t last_put_offset_;
- int32_t last_barrier_put_offset_;
+ uint32_t flush_count_ = 0;
+ int32_t last_put_offset_ = -1;
+ int32_t last_barrier_put_offset_ = -1;
// Next generated fence sync.
- uint64_t next_fence_sync_release_;
+ uint64_t next_fence_sync_release_ = 1;
+
+ std::vector<SyncToken> pending_sync_token_fences_;
// Unverified flushed fence syncs with their corresponding flush id.
std::queue<std::pair<uint64_t, uint32_t>> flushed_release_flush_id_;
// Last flushed fence sync release, same as last item in queue if not empty.
- uint64_t flushed_fence_sync_release_;
+ uint64_t flushed_fence_sync_release_ = 0;
// Last verified fence sync.
- uint64_t verified_fence_sync_release_;
+ uint64_t verified_fence_sync_release_ = 0;
GpuConsoleMessageCallback console_message_callback_;
// Tasks to be invoked in SignalSyncPoint responses.
- uint32_t next_signal_id_;
+ uint32_t next_signal_id_ = 0;
SignalTaskMap signal_tasks_;
gpu::Capabilities capabilities_;
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.cc b/chromium/gpu/ipc/client/gpu_channel_host.cc
index 5e41fd9e274..af0c167aff2 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.cc
+++ b/chromium/gpu/ipc/client/gpu_channel_host.cc
@@ -134,6 +134,7 @@ uint32_t GpuChannelHost::OrderingBarrier(
int32_t put_offset,
uint32_t flush_count,
const std::vector<ui::LatencyInfo>& latency_info,
+ const std::vector<SyncToken>& sync_token_fences,
bool put_offset_changed,
bool do_flush,
uint32_t* highest_verified_flush_id) {
@@ -153,6 +154,9 @@ uint32_t GpuChannelHost::OrderingBarrier(
flush_info.flush_id = flush_id;
flush_info.latency_info.insert(flush_info.latency_info.end(),
latency_info.begin(), latency_info.end());
+ flush_info.sync_token_fences.insert(flush_info.sync_token_fences.end(),
+ sync_token_fences.begin(),
+ sync_token_fences.end());
if (do_flush)
InternalFlush(&flush_info);
@@ -180,8 +184,9 @@ void GpuChannelHost::InternalFlush(StreamFlushInfo* flush_info) {
DCHECK_LT(flush_info->flushed_stream_flush_id, flush_info->flush_id);
Send(new GpuCommandBufferMsg_AsyncFlush(
flush_info->route_id, flush_info->put_offset, flush_info->flush_count,
- flush_info->latency_info));
+ flush_info->latency_info, flush_info->sync_token_fences));
flush_info->latency_info.clear();
+ flush_info->sync_token_fences.clear();
flush_info->flush_pending = false;
flush_info->flushed_stream_flush_id = flush_info->flush_id;
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.h b/chromium/gpu/ipc/client/gpu_channel_host.h
index 989f1df6a8d..88733e79991 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.h
+++ b/chromium/gpu/ipc/client/gpu_channel_host.h
@@ -25,8 +25,8 @@
#include "ipc/ipc_sync_channel.h"
#include "ipc/message_filter.h"
#include "ipc/message_router.h"
-#include "ui/events/latency_info.h"
#include "ui/gfx/gpu_memory_buffer.h"
+#include "ui/latency/latency_info.h"
namespace base {
class WaitableEvent;
@@ -41,7 +41,7 @@ class GpuMemoryBufferManager;
}
namespace gpu {
-
+struct SyncToken;
class GpuChannelHost;
using GpuChannelEstablishedCallback =
base::Callback<void(scoped_refptr<GpuChannelHost>)>;
@@ -106,6 +106,7 @@ class GPU_EXPORT GpuChannelHost
int32_t put_offset,
uint32_t flush_count,
const std::vector<ui::LatencyInfo>& latency_info,
+ const std::vector<SyncToken>& sync_token_fences,
bool put_offset_changed,
bool do_flush,
uint32_t* highest_verified_flush_id);
@@ -232,6 +233,7 @@ class GPU_EXPORT GpuChannelHost
uint32_t flush_count;
uint32_t flush_id;
std::vector<ui::LatencyInfo> latency_info;
+ std::vector<SyncToken> sync_token_fences;
};
GpuChannelHost(GpuChannelHostFactory* factory,
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc
index a673cbf2555..d83d8857709 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc
@@ -12,8 +12,8 @@
#include "gpu/ipc/client/gpu_memory_buffer_impl_io_surface.h"
#endif
-#if defined(USE_OZONE)
-#include "gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.h"
+#if defined(OS_LINUX)
+#include "gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap.h"
#endif
namespace gpu {
@@ -50,9 +50,9 @@ std::unique_ptr<GpuMemoryBufferImpl> GpuMemoryBufferImpl::CreateFromHandle(
return GpuMemoryBufferImplIOSurface::CreateFromHandle(
handle, size, format, usage, callback);
#endif
-#if defined(USE_OZONE)
- case gfx::OZONE_NATIVE_PIXMAP:
- return GpuMemoryBufferImplOzoneNativePixmap::CreateFromHandle(
+#if defined(OS_LINUX)
+ case gfx::NATIVE_PIXMAP:
+ return GpuMemoryBufferImplNativePixmap::CreateFromHandle(
handle, size, format, usage, callback);
#endif
default:
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc
index 2efc42d5c63..a87bd3046b6 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc
@@ -120,7 +120,7 @@ void GpuMemoryBufferImplIOSurface::SetColorSpaceForScanout(
// Retrieve the ICC profile data.
gfx::ICCProfile icc_profile;
- if (!color_space_.GetICCProfile(&icc_profile)) {
+ if (!color_space_.GetAsFullRangeRGB().GetICCProfile(&icc_profile)) {
DLOG(ERROR) << "Failed to set color space for scanout: no ICC profile.";
return;
}
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap.cc
index e85f23be46d..12b1da6fc2b 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap.cc
@@ -2,34 +2,38 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.h"
+#include "gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap.h"
#include <utility>
#include "base/memory/ptr_util.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
#include "ui/gfx/buffer_format_util.h"
-#include "ui/ozone/public/client_native_pixmap_factory.h"
-#include "ui/ozone/public/native_pixmap.h"
+#include "ui/gfx/client_native_pixmap_factory.h"
+#include "ui/gfx/native_pixmap.h"
+
+#if defined(USE_OZONE)
#include "ui/ozone/public/ozone_platform.h"
#include "ui/ozone/public/surface_factory_ozone.h"
+#endif
namespace gpu {
namespace {
-void FreeNativePixmapForTesting(scoped_refptr<ui::NativePixmap> native_pixmap) {
+void FreeNativePixmapForTesting(
+ scoped_refptr<gfx::NativePixmap> native_pixmap) {
// Nothing to do here. |native_pixmap| will be freed when this function
// returns and reference count drops to 0.
}
} // namespace
-GpuMemoryBufferImplOzoneNativePixmap::GpuMemoryBufferImplOzoneNativePixmap(
+GpuMemoryBufferImplNativePixmap::GpuMemoryBufferImplNativePixmap(
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
const DestructionCallback& callback,
- std::unique_ptr<ui::ClientNativePixmap> pixmap,
+ std::unique_ptr<gfx::ClientNativePixmap> pixmap,
const std::vector<gfx::NativePixmapPlane>& planes,
base::ScopedFD fd)
: GpuMemoryBufferImpl(id, size, format, callback),
@@ -37,20 +41,18 @@ GpuMemoryBufferImplOzoneNativePixmap::GpuMemoryBufferImplOzoneNativePixmap(
planes_(planes),
fd_(std::move(fd)) {}
-GpuMemoryBufferImplOzoneNativePixmap::~GpuMemoryBufferImplOzoneNativePixmap() {}
+GpuMemoryBufferImplNativePixmap::~GpuMemoryBufferImplNativePixmap() {}
// static
-std::unique_ptr<GpuMemoryBufferImplOzoneNativePixmap>
-GpuMemoryBufferImplOzoneNativePixmap::CreateFromHandle(
+std::unique_ptr<GpuMemoryBufferImplNativePixmap>
+GpuMemoryBufferImplNativePixmap::CreateFromHandle(
const gfx::GpuMemoryBufferHandle& handle,
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
const DestructionCallback& callback) {
- DCHECK_LE(handle.native_pixmap_handle.fds.size(), 1u);
-
// GpuMemoryBufferImpl needs the FD to implement GetHandle() but
- // ui::ClientNativePixmapFactory::ImportFromHandle is expected to take
+ // gfx::ClientNativePixmapFactory::ImportFromHandle is expected to take
// ownership of the FD passed in the handle so we have to dup it here in
// order to pass a valid FD to the GpuMemoryBufferImpl ctor.
base::ScopedFD scoped_fd;
@@ -68,66 +70,71 @@ GpuMemoryBufferImplOzoneNativePixmap::CreateFromHandle(
true /* auto_close */);
}
native_pixmap_handle.planes = handle.native_pixmap_handle.planes;
- std::unique_ptr<ui::ClientNativePixmap> native_pixmap =
- ui::ClientNativePixmapFactory::GetInstance()->ImportFromHandle(
+ std::unique_ptr<gfx::ClientNativePixmap> native_pixmap =
+ gfx::ClientNativePixmapFactory::GetInstance()->ImportFromHandle(
native_pixmap_handle, size, usage);
DCHECK(native_pixmap);
- return base::WrapUnique(new GpuMemoryBufferImplOzoneNativePixmap(
+ return base::WrapUnique(new GpuMemoryBufferImplNativePixmap(
handle.id, size, format, callback, std::move(native_pixmap),
handle.native_pixmap_handle.planes, std::move(scoped_fd)));
}
// static
-bool GpuMemoryBufferImplOzoneNativePixmap::IsConfigurationSupported(
+bool GpuMemoryBufferImplNativePixmap::IsConfigurationSupported(
gfx::BufferFormat format,
gfx::BufferUsage usage) {
return gpu::IsNativeGpuMemoryBufferConfigurationSupported(format, usage);
}
// static
-base::Closure GpuMemoryBufferImplOzoneNativePixmap::AllocateForTesting(
+base::Closure GpuMemoryBufferImplNativePixmap::AllocateForTesting(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
gfx::GpuMemoryBufferHandle* handle) {
DCHECK(IsConfigurationSupported(format, usage));
- scoped_refptr<ui::NativePixmap> pixmap =
+#if defined(USE_OZONE)
+ scoped_refptr<gfx::NativePixmap> pixmap =
ui::OzonePlatform::GetInstance()
->GetSurfaceFactoryOzone()
->CreateNativePixmap(gfx::kNullAcceleratedWidget, size, format,
usage);
- handle->type = gfx::OZONE_NATIVE_PIXMAP;
handle->native_pixmap_handle = pixmap->ExportHandle();
+#else
+ // TODO(j.isorce): use gbm_bo_create / gbm_bo_get_fd from system libgbm.
+ scoped_refptr<gfx::NativePixmap> pixmap;
+ NOTIMPLEMENTED();
+#endif
+ handle->type = gfx::NATIVE_PIXMAP;
return base::Bind(&FreeNativePixmapForTesting, pixmap);
}
-bool GpuMemoryBufferImplOzoneNativePixmap::Map() {
+bool GpuMemoryBufferImplNativePixmap::Map() {
DCHECK(!mapped_);
mapped_ = pixmap_->Map();
return mapped_;
}
-void* GpuMemoryBufferImplOzoneNativePixmap::memory(size_t plane) {
+void* GpuMemoryBufferImplNativePixmap::memory(size_t plane) {
DCHECK(mapped_);
return pixmap_->GetMemoryAddress(plane);
}
-void GpuMemoryBufferImplOzoneNativePixmap::Unmap() {
+void GpuMemoryBufferImplNativePixmap::Unmap() {
DCHECK(mapped_);
pixmap_->Unmap();
mapped_ = false;
}
-int GpuMemoryBufferImplOzoneNativePixmap::stride(size_t plane) const {
+int GpuMemoryBufferImplNativePixmap::stride(size_t plane) const {
DCHECK_LT(plane, gfx::NumberOfPlanesForBufferFormat(format_));
return pixmap_->GetStride(plane);
}
-gfx::GpuMemoryBufferHandle GpuMemoryBufferImplOzoneNativePixmap::GetHandle()
- const {
+gfx::GpuMemoryBufferHandle GpuMemoryBufferImplNativePixmap::GetHandle() const {
gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::OZONE_NATIVE_PIXMAP;
+ handle.type = gfx::NATIVE_PIXMAP;
handle.id = id_;
if (fd_.is_valid()) {
handle.native_pixmap_handle.fds.emplace_back(fd_.get(),
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.h b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap.h
index 50ececf706e..42d17dcd102 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.h
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef GPU_IPC_CLIENT_GPU_MEMORY_BUFFER_IMPL_OZONE_NATIVE_PIXMAP_H_
-#define GPU_IPC_CLIENT_GPU_MEMORY_BUFFER_IMPL_OZONE_NATIVE_PIXMAP_H_
+#ifndef GPU_IPC_CLIENT_GPU_MEMORY_BUFFER_IMPL_NATIVE_PIXMAP_H_
+#define GPU_IPC_CLIENT_GPU_MEMORY_BUFFER_IMPL_NATIVE_PIXMAP_H_
#include <stddef.h>
@@ -13,19 +13,18 @@
#include "gpu/gpu_export.h"
#include "gpu/ipc/client/gpu_memory_buffer_impl.h"
-namespace ui {
+namespace gfx {
class ClientNativePixmap;
}
namespace gpu {
// Implementation of GPU memory buffer based on Ozone native pixmap.
-class GPU_EXPORT GpuMemoryBufferImplOzoneNativePixmap
- : public GpuMemoryBufferImpl {
+class GPU_EXPORT GpuMemoryBufferImplNativePixmap : public GpuMemoryBufferImpl {
public:
- ~GpuMemoryBufferImplOzoneNativePixmap() override;
+ ~GpuMemoryBufferImplNativePixmap() override;
- static std::unique_ptr<GpuMemoryBufferImplOzoneNativePixmap> CreateFromHandle(
+ static std::unique_ptr<GpuMemoryBufferImplNativePixmap> CreateFromHandle(
const gfx::GpuMemoryBufferHandle& handle,
const gfx::Size& size,
gfx::BufferFormat format,
@@ -48,22 +47,22 @@ class GPU_EXPORT GpuMemoryBufferImplOzoneNativePixmap
gfx::GpuMemoryBufferHandle GetHandle() const override;
private:
- GpuMemoryBufferImplOzoneNativePixmap(
+ GpuMemoryBufferImplNativePixmap(
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
const DestructionCallback& callback,
- std::unique_ptr<ui::ClientNativePixmap> native_pixmap,
+ std::unique_ptr<gfx::ClientNativePixmap> native_pixmap,
const std::vector<gfx::NativePixmapPlane>& planes,
base::ScopedFD fd);
- std::unique_ptr<ui::ClientNativePixmap> pixmap_;
+ std::unique_ptr<gfx::ClientNativePixmap> pixmap_;
std::vector<gfx::NativePixmapPlane> planes_;
base::ScopedFD fd_;
- DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferImplOzoneNativePixmap);
+ DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferImplNativePixmap);
};
} // namespace gpu
-#endif // GPU_IPC_CLIENT_GPU_MEMORY_BUFFER_IMPL_OZONE_NATIVE_PIXMAP_H_
+#endif // GPU_IPC_CLIENT_GPU_MEMORY_BUFFER_IMPL_NATIVE_PIXMAP_H_
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap_unittest.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap_unittest.cc
index 28797daa65a..50fe30fa363 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap_unittest.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap_unittest.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.h"
+#include "gpu/ipc/client/gpu_memory_buffer_impl_native_pixmap.h"
#include "gpu/ipc/client/gpu_memory_buffer_impl_test_template.h"
namespace gpu {
namespace {
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplOzoneNativePixmap,
+INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplNativePixmap,
GpuMemoryBufferImplTest,
- GpuMemoryBufferImplOzoneNativePixmap);
+ GpuMemoryBufferImplNativePixmap);
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
index 8390d3809af..0d0e85f0fc5 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
@@ -135,6 +135,7 @@ bool GpuMemoryBufferImplSharedMemory::IsSizeValidForFormat(
case gfx::BufferFormat::RGBX_8888:
case gfx::BufferFormat::BGRA_8888:
case gfx::BufferFormat::BGRX_8888:
+ case gfx::BufferFormat::RGBA_F16:
return true;
case gfx::BufferFormat::YVU_420:
case gfx::BufferFormat::YUV_420_BIPLANAR: {
diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn
index 9ccc11a0ee6..38ba01f308d 100644
--- a/chromium/gpu/ipc/common/BUILD.gn
+++ b/chromium/gpu/ipc/common/BUILD.gn
@@ -87,18 +87,18 @@ source_set("ipc_common_sources") {
":command_buffer_traits_sources",
":surface_handle_type",
"//gpu/command_buffer/common:gles2_utils",
+ "//ipc",
]
deps = [
"//base",
"//gpu/command_buffer/common:common_sources",
"//gpu/config:config_sources",
- "//ipc",
"//ui/base",
- "//ui/events/ipc",
"//ui/gfx/ipc",
"//ui/gfx/ipc/geometry",
"//ui/gl",
+ "//ui/latency/ipc",
"//url/ipc:url_ipc",
]
@@ -138,6 +138,7 @@ mojom("interfaces") {
sources = [
"capabilities.mojom",
"dx_diag_node.mojom",
+ "gpu_feature_info.mojom",
"gpu_info.mojom",
"gpu_preferences.mojom",
"mailbox.mojom",
diff --git a/chromium/gpu/ipc/common/DEPS b/chromium/gpu/ipc/common/DEPS
index b5a9e303568..5f799947780 100644
--- a/chromium/gpu/ipc/common/DEPS
+++ b/chromium/gpu/ipc/common/DEPS
@@ -2,6 +2,6 @@ include_rules = [
"+base",
"+ipc",
"+mojo",
- "+ui/events",
"+ui/base",
+ "+ui/latency",
]
diff --git a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
index dacd67809fe..b6c98466e79 100644
--- a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
+++ b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
@@ -127,8 +127,10 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::Capabilities)
IPC_STRUCT_TRAITS_MEMBER(gpu_rasterization)
IPC_STRUCT_TRAITS_MEMBER(chromium_image_rgb_emulation)
IPC_STRUCT_TRAITS_MEMBER(emulate_rgb_buffer_with_rgba)
- IPC_STRUCT_TRAITS_MEMBER(set_draw_rectangle)
+ IPC_STRUCT_TRAITS_MEMBER(software_to_accelerated_canvas_upgrade)
+ IPC_STRUCT_TRAITS_MEMBER(dc_layers)
IPC_STRUCT_TRAITS_MEMBER(disable_non_empty_post_sub_buffers)
+ IPC_STRUCT_TRAITS_MEMBER(avoid_stencil_buffers)
IPC_STRUCT_TRAITS_MEMBER(major_version)
IPC_STRUCT_TRAITS_MEMBER(minor_version)
diff --git a/chromium/gpu/ipc/common/gpu_feature_info.mojom b/chromium/gpu/ipc/common/gpu_feature_info.mojom
new file mode 100644
index 00000000000..f7f6c14d5d9
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_feature_info.mojom
@@ -0,0 +1,23 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// gpu/config/gpu_feature_info.h
+module gpu.mojom;
+
+// gpu::GpuFeatureStatus
+enum GpuFeatureStatus {
+ Enabled,
+ Blacklisted,
+ Disabled,
+ Undefined,
+ Max
+};
+
+// gpu:GpuFeatureInfo
+struct GpuFeatureInfo {
+ // The array should have one entry for each GpuFeatureType. The size of the
+ // array should be gpu::NUMBER_OF_GPU_FEATURE_TYPES. This is enforced during
+ // deserialization.
+ array<GpuFeatureStatus> status_values;
+};
diff --git a/chromium/gpu/ipc/common/gpu_feature_info.typemap b/chromium/gpu/ipc/common/gpu_feature_info.typemap
new file mode 100644
index 00000000000..90d1958642f
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_feature_info.typemap
@@ -0,0 +1,17 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+mojom = "//gpu/ipc/common/gpu_feature_info.mojom"
+public_headers = [ "//gpu/config/gpu_feature_info.h" ]
+traits_headers = [ "//gpu/ipc/common/gpu_feature_info_struct_traits.h" ]
+public_deps = [
+ "//gpu/config",
+ "//mojo/common",
+ "//mojo/common:struct_traits",
+ "//ui/gfx/geometry/mojo",
+]
+type_mappings = [
+ "gpu.mojom.GpuFeatureStatus=gpu::GpuFeatureStatus",
+ "gpu.mojom.GpuFeatureInfo=gpu::GpuFeatureInfo",
+]
diff --git a/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h b/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h
new file mode 100644
index 00000000000..29ed84062b6
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h
@@ -0,0 +1,76 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_GPU_FEATURE_INFO_STRUCT_TRAITS_H_
+#define GPU_IPC_COMMON_GPU_FEATURE_INFO_STRUCT_TRAITS_H_
+
+#include "gpu/config/gpu_feature_info.h"
+
+namespace mojo {
+
+template <>
+struct EnumTraits<gpu::mojom::GpuFeatureStatus, gpu::GpuFeatureStatus> {
+ static gpu::mojom::GpuFeatureStatus ToMojom(gpu::GpuFeatureStatus status) {
+ switch (status) {
+ case gpu::kGpuFeatureStatusEnabled:
+ return gpu::mojom::GpuFeatureStatus::Enabled;
+ case gpu::kGpuFeatureStatusBlacklisted:
+ return gpu::mojom::GpuFeatureStatus::Blacklisted;
+ case gpu::kGpuFeatureStatusDisabled:
+ return gpu::mojom::GpuFeatureStatus::Disabled;
+ case gpu::kGpuFeatureStatusUndefined:
+ return gpu::mojom::GpuFeatureStatus::Undefined;
+ case gpu::kGpuFeatureStatusMax:
+ return gpu::mojom::GpuFeatureStatus::Max;
+ }
+ NOTREACHED();
+ return gpu::mojom::GpuFeatureStatus::Max;
+ }
+
+ static bool FromMojom(gpu::mojom::GpuFeatureStatus input,
+ gpu::GpuFeatureStatus* out) {
+ switch (input) {
+ case gpu::mojom::GpuFeatureStatus::Enabled:
+ *out = gpu::kGpuFeatureStatusEnabled;
+ return true;
+ case gpu::mojom::GpuFeatureStatus::Blacklisted:
+ *out = gpu::kGpuFeatureStatusBlacklisted;
+ return true;
+ case gpu::mojom::GpuFeatureStatus::Disabled:
+ *out = gpu::kGpuFeatureStatusDisabled;
+ return true;
+ case gpu::mojom::GpuFeatureStatus::Undefined:
+ *out = gpu::kGpuFeatureStatusUndefined;
+ return true;
+ case gpu::mojom::GpuFeatureStatus::Max:
+ *out = gpu::kGpuFeatureStatusMax;
+ return true;
+ }
+ return false;
+ }
+};
+
+template <>
+struct StructTraits<gpu::mojom::GpuFeatureInfoDataView, gpu::GpuFeatureInfo> {
+ static bool Read(gpu::mojom::GpuFeatureInfoDataView data,
+ gpu::GpuFeatureInfo* out) {
+ std::vector<gpu::GpuFeatureStatus> info_status;
+ if (!data.ReadStatusValues(&info_status))
+ return false;
+ if (info_status.size() != gpu::NUMBER_OF_GPU_FEATURE_TYPES)
+ return false;
+ std::copy(info_status.begin(), info_status.end(), out->status_values);
+ return true;
+ }
+
+ static std::vector<gpu::GpuFeatureStatus> status_values(
+ const gpu::GpuFeatureInfo& info) {
+ return std::vector<gpu::GpuFeatureStatus>(info.status_values,
+ std::end(info.status_values));
+ }
+};
+
+} // namespace mojo
+
+#endif // GPU_IPC_COMMON_GPU_FEATURE_INFO_STRUCT_TRAITS_H_
diff --git a/chromium/gpu/ipc/common/gpu_info.mojom b/chromium/gpu/ipc/common/gpu_info.mojom
index 7754e13204c..d34e40ab0b6 100644
--- a/chromium/gpu/ipc/common/gpu_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_info.mojom
@@ -106,6 +106,7 @@ struct GpuInfo {
int32 process_crash_count;
bool in_process_gpu;
bool passthrough_cmd_decoder;
+ bool supports_overlays;
CollectInfoResult basic_info_state;
CollectInfoResult context_info_state;
CollectInfoResult dx_diagnostics_info_state;
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
index d73b78a6afd..9b1803a9ba0 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
@@ -245,6 +245,7 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
out->sandboxed = data.sandboxed();
out->in_process_gpu = data.in_process_gpu();
out->passthrough_cmd_decoder = data.passthrough_cmd_decoder();
+ out->supports_overlays = data.supports_overlays();
out->process_crash_count = data.process_crash_count();
out->jpeg_decode_accelerator_supported =
data.jpeg_decode_accelerator_supported();
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.h b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
index 073ce7bcba3..415fc49e9d4 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
@@ -240,6 +240,10 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
return input.passthrough_cmd_decoder;
}
+ static bool supports_overlays(const gpu::GPUInfo& input) {
+ return input.supports_overlays;
+ }
+
static gpu::CollectInfoResult basic_info_state(const gpu::GPUInfo& input) {
return input.basic_info_state;
}
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
index aa3f179761d..77f98359d26 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
@@ -7,8 +7,8 @@
#include "base/logging.h"
#include "build/build_config.h"
-#if defined(USE_OZONE)
-#include "ui/ozone/public/client_native_pixmap_factory.h"
+#if defined(OS_LINUX)
+#include "ui/gfx/client_native_pixmap_factory.h"
#endif
namespace gpu {
@@ -17,8 +17,8 @@ gfx::GpuMemoryBufferType GetNativeGpuMemoryBufferType() {
#if defined(OS_MACOSX)
return gfx::IO_SURFACE_BUFFER;
#endif
-#if defined(USE_OZONE)
- return gfx::OZONE_NATIVE_PIXMAP;
+#if defined(OS_LINUX)
+ return gfx::NATIVE_PIXMAP;
#endif
return gfx::EMPTY_BUFFER;
}
@@ -39,6 +39,7 @@ bool IsNativeGpuMemoryBufferConfigurationSupported(gfx::BufferFormat format,
case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT:
return format == gfx::BufferFormat::R_8 ||
format == gfx::BufferFormat::BGRA_8888 ||
+ format == gfx::BufferFormat::RGBA_F16 ||
format == gfx::BufferFormat::UYVY_422 ||
format == gfx::BufferFormat::YUV_420_BIPLANAR;
}
@@ -46,13 +47,13 @@ bool IsNativeGpuMemoryBufferConfigurationSupported(gfx::BufferFormat format,
return false;
#endif
-#if defined(USE_OZONE)
- if (!ui::ClientNativePixmapFactory::GetInstance()) {
+#if defined(OS_LINUX)
+ if (!gfx::ClientNativePixmapFactory::GetInstance()) {
// unittests don't have to set ClientNativePixmapFactory.
return false;
}
- return ui::ClientNativePixmapFactory::GetInstance()->IsConfigurationSupported(
- format, usage);
+ return gfx::ClientNativePixmapFactory::GetInstance()
+ ->IsConfigurationSupported(format, usage);
#endif
NOTREACHED();
diff --git a/chromium/gpu/ipc/common/gpu_messages.h b/chromium/gpu/ipc/common/gpu_messages.h
index 770ab3d5a17..59b5c4932ad 100644
--- a/chromium/gpu/ipc/common/gpu_messages.h
+++ b/chromium/gpu/ipc/common/gpu_messages.h
@@ -27,14 +27,14 @@
#include "gpu/ipc/common/surface_handle.h"
#include "ipc/ipc_channel_handle.h"
#include "ipc/ipc_message_macros.h"
-#include "ui/events/ipc/latency_info_param_traits.h"
-#include "ui/events/latency_info.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/ipc/geometry/gfx_param_traits.h"
#include "ui/gfx/ipc/gfx_param_traits.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gfx/swap_result.h"
+#include "ui/latency/ipc/latency_info_param_traits.h"
+#include "ui/latency/latency_info.h"
#include "url/ipc/url_param_traits.h"
#if defined(OS_MACOSX)
@@ -175,10 +175,11 @@ IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_WaitForGetOffsetInRange,
// Caller passes its current put offset. Current state (including get offset)
// is returned in shared memory. The input latency info for the current
// frame is also sent to the GPU process.
-IPC_MESSAGE_ROUTED3(GpuCommandBufferMsg_AsyncFlush,
+IPC_MESSAGE_ROUTED4(GpuCommandBufferMsg_AsyncFlush,
int32_t /* put_offset */,
uint32_t /* flush_count */,
- std::vector<ui::LatencyInfo> /* latency_info */)
+ std::vector<ui::LatencyInfo> /* latency_info */,
+ std::vector<gpu::SyncToken> /* sync_token_fences */)
// Sent by the GPU process to display messages in the console.
IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_ConsoleMsg,
diff --git a/chromium/gpu/ipc/common/gpu_param_traits_macros.h b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
index 31f08b855f2..0b1ebc49ba3 100644
--- a/chromium/gpu/ipc/common/gpu_param_traits_macros.h
+++ b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
@@ -91,6 +91,7 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::GPUInfo)
IPC_STRUCT_TRAITS_MEMBER(process_crash_count)
IPC_STRUCT_TRAITS_MEMBER(in_process_gpu)
IPC_STRUCT_TRAITS_MEMBER(passthrough_cmd_decoder)
+ IPC_STRUCT_TRAITS_MEMBER(supports_overlays)
IPC_STRUCT_TRAITS_MEMBER(basic_info_state)
IPC_STRUCT_TRAITS_MEMBER(context_info_state)
#if defined(OS_WIN)
diff --git a/chromium/gpu/ipc/common/mailbox_holder.typemap b/chromium/gpu/ipc/common/mailbox_holder.typemap
index 4644f3219d3..06d9d72789e 100644
--- a/chromium/gpu/ipc/common/mailbox_holder.typemap
+++ b/chromium/gpu/ipc/common/mailbox_holder.typemap
@@ -8,4 +8,8 @@ traits_headers = [ "//gpu/ipc/common/mailbox_holder_struct_traits.h" ]
deps = [
"//gpu/ipc/common:struct_traits",
]
+public_deps = [
+ "//gpu/command_buffer/common",
+ "//mojo/public/cpp/bindings",
+]
type_mappings = [ "gpu.mojom.MailboxHolder=::gpu::MailboxHolder" ]
diff --git a/chromium/gpu/ipc/common/mailbox_holder_for_blink.typemap b/chromium/gpu/ipc/common/mailbox_holder_for_blink.typemap
new file mode 100644
index 00000000000..976595e73d7
--- /dev/null
+++ b/chromium/gpu/ipc/common/mailbox_holder_for_blink.typemap
@@ -0,0 +1,13 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+mojom = "//gpu/ipc/common/mailbox_holder.mojom"
+public_headers = [ "//gpu/command_buffer/common/mailbox_holder.h" ]
+traits_headers = [ "//gpu/ipc/common/mailbox_holder_struct_traits.h" ]
+public_deps = [
+ "//gpu/command_buffer/common",
+ "//gpu/ipc/common:interfaces",
+ "//mojo/public/cpp/bindings",
+]
+type_mappings = [ "gpu.mojom.MailboxHolder=::gpu::MailboxHolder" ]
diff --git a/chromium/gpu/ipc/common/struct_traits_unittest.cc b/chromium/gpu/ipc/common/struct_traits_unittest.cc
index cdd7dc00055..93adcb2df3f 100644
--- a/chromium/gpu/ipc/common/struct_traits_unittest.cc
+++ b/chromium/gpu/ipc/common/struct_traits_unittest.cc
@@ -2,9 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
#include <string>
#include "base/message_loop/message_loop.h"
+#include "gpu/config/gpu_feature_type.h"
+#include "gpu/ipc/common/gpu_feature_info.mojom.h"
+#include "gpu/ipc/common/gpu_feature_info_struct_traits.h"
#include "gpu/ipc/common/traits_test_service.mojom.h"
#include "mojo/public/cpp/bindings/binding_set.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -151,6 +155,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
const int process_crash_count = 0xdead;
const bool in_process_gpu = true;
const bool passthrough_cmd_decoder = true;
+ const bool supports_overlays = true;
const gpu::CollectInfoResult basic_info_state =
gpu::CollectInfoResult::kCollectInfoSuccess;
const gpu::CollectInfoResult context_info_state =
@@ -198,6 +203,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
input.process_crash_count = process_crash_count;
input.in_process_gpu = in_process_gpu;
input.passthrough_cmd_decoder = passthrough_cmd_decoder;
+ input.supports_overlays = supports_overlays;
input.basic_info_state = basic_info_state;
input.context_info_state = context_info_state;
#if defined(OS_WIN)
@@ -258,6 +264,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
EXPECT_EQ(process_crash_count, output.process_crash_count);
EXPECT_EQ(in_process_gpu, output.in_process_gpu);
EXPECT_EQ(passthrough_cmd_decoder, output.passthrough_cmd_decoder);
+ EXPECT_EQ(supports_overlays, output.supports_overlays);
EXPECT_EQ(basic_info_state, output.basic_info_state);
EXPECT_EQ(context_info_state, output.context_info_state);
#if defined(OS_WIN)
@@ -442,4 +449,21 @@ TEST_F(StructTraitsTest, GpuPreferences) {
#endif
}
+TEST_F(StructTraitsTest, GpuFeatureInfo) {
+ GpuFeatureInfo input;
+ input.status_values[GPU_FEATURE_TYPE_FLASH3D] =
+ gpu::kGpuFeatureStatusBlacklisted;
+ input.status_values[GPU_FEATURE_TYPE_PANEL_FITTING] =
+ gpu::kGpuFeatureStatusUndefined;
+ input.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] =
+ gpu::kGpuFeatureStatusDisabled;
+
+ GpuFeatureInfo output;
+ ASSERT_TRUE(mojom::GpuFeatureInfo::Deserialize(
+ mojom::GpuFeatureInfo::Serialize(&input), &output));
+ EXPECT_TRUE(std::equal(input.status_values,
+ input.status_values + NUMBER_OF_GPU_FEATURE_TYPES,
+ output.status_values));
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/common/typemaps.gni b/chromium/gpu/ipc/common/typemaps.gni
index 874b19f93b9..d89d86da749 100644
--- a/chromium/gpu/ipc/common/typemaps.gni
+++ b/chromium/gpu/ipc/common/typemaps.gni
@@ -4,6 +4,7 @@
typemaps = [
"//gpu/ipc/common/capabilities.typemap",
+ "//gpu/ipc/common/gpu_feature_info.typemap",
"//gpu/ipc/common/gpu_info.typemap",
"//gpu/ipc/common/gpu_preferences.typemap",
"//gpu/ipc/common/dx_diag_node.typemap",
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
index e0060837ab1..66884870a1c 100644
--- a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
@@ -38,15 +38,11 @@ GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations() {
#if defined(USE_OZONE) || defined(OS_MACOSX)
if (AreNativeGpuMemoryBuffersEnabled()) {
const gfx::BufferFormat kNativeFormats[] = {
- gfx::BufferFormat::R_8,
- gfx::BufferFormat::RG_88,
- gfx::BufferFormat::BGR_565,
- gfx::BufferFormat::RGBA_4444,
- gfx::BufferFormat::RGBA_8888,
- gfx::BufferFormat::BGRA_8888,
- gfx::BufferFormat::UYVY_422,
- gfx::BufferFormat::YVU_420,
- gfx::BufferFormat::YUV_420_BIPLANAR};
+ gfx::BufferFormat::R_8, gfx::BufferFormat::RG_88,
+ gfx::BufferFormat::BGR_565, gfx::BufferFormat::RGBA_4444,
+ gfx::BufferFormat::RGBA_8888, gfx::BufferFormat::BGRA_8888,
+ gfx::BufferFormat::RGBA_F16, gfx::BufferFormat::UYVY_422,
+ gfx::BufferFormat::YVU_420, gfx::BufferFormat::YUV_420_BIPLANAR};
const gfx::BufferUsage kNativeUsages[] = {
gfx::BufferUsage::GPU_READ, gfx::BufferUsage::SCANOUT,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
@@ -96,7 +92,7 @@ uint32_t GetImageTextureTarget(gfx::BufferFormat format,
}
switch (GetNativeGpuMemoryBufferType()) {
- case gfx::OZONE_NATIVE_PIXMAP:
+ case gfx::NATIVE_PIXMAP:
// GPU memory buffers that are shared with the GL using EGLImages
// require TEXTURE_EXTERNAL_OES.
return GL_TEXTURE_EXTERNAL_OES;
diff --git a/chromium/gpu/ipc/host/shader_disk_cache.cc b/chromium/gpu/ipc/host/shader_disk_cache.cc
index 7d1ad889c70..144f9af4783 100644
--- a/chromium/gpu/ipc/host/shader_disk_cache.cc
+++ b/chromium/gpu/ipc/host/shader_disk_cache.cc
@@ -496,6 +496,17 @@ void ShaderCacheFactory::ClearByPath(const base::FilePath& path,
helper_ptr->Clear();
}
+void ShaderCacheFactory::ClearByClientId(int32_t client_id,
+ const base::Time& delete_begin,
+ const base::Time& delete_end,
+ const base::Closure& callback) {
+ DCHECK(CalledOnValidThread());
+ ClientIdToPathMap::iterator iter = client_id_to_path_map_.find(client_id);
+ if (iter == client_id_to_path_map_.end())
+ return;
+ return ClearByPath(iter->second, delete_begin, delete_end, callback);
+}
+
void ShaderCacheFactory::CacheCleared(const base::FilePath& path) {
DCHECK(CalledOnValidThread());
diff --git a/chromium/gpu/ipc/host/shader_disk_cache.h b/chromium/gpu/ipc/host/shader_disk_cache.h
index 4080737d2fe..f8d6f5eb07c 100644
--- a/chromium/gpu/ipc/host/shader_disk_cache.h
+++ b/chromium/gpu/ipc/host/shader_disk_cache.h
@@ -116,6 +116,13 @@ class ShaderCacheFactory : NON_EXPORTED_BASE(public base::ThreadChecker) {
const base::Time& end_time,
const base::Closure& callback);
+ // Same as ClearByPath, but looks up the cache by |client_id|. The |callback|
+ // will be executed when the clear is complete.
+ void ClearByClientId(int32_t client_id,
+ const base::Time& begin_time,
+ const base::Time& end_time,
+ const base::Closure& callback);
+
// Retrieve the shader disk cache for the provided |client_id|.
scoped_refptr<ShaderDiskCache> Get(int32_t client_id);
diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc
index ec3464e2a55..833b985a548 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.cc
+++ b/chromium/gpu/ipc/in_process_command_buffer.cc
@@ -96,8 +96,8 @@ class GpuInProcessThreadHolder : public base::Thread {
scoped_refptr<InProcessCommandBuffer::Service> gpu_thread_service_;
};
-base::LazyInstance<GpuInProcessThreadHolder> g_default_service =
- LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<GpuInProcessThreadHolder>::DestructorAtExit
+ g_default_service = LAZY_INSTANCE_INITIALIZER;
class ScopedEvent {
public:
@@ -171,17 +171,17 @@ gpu::gles2::ProgramCache* InProcessCommandBuffer::Service::program_cache() {
gpu_preferences_.disable_gpu_shader_disk_cache ||
workarounds.disable_program_disk_cache;
program_cache_.reset(new gles2::MemoryProgramCache(
- gpu_preferences_.gpu_program_cache_size,
- disable_disk_cache,
- workarounds.disable_program_caching_for_transform_feedback));
+ gpu_preferences_.gpu_program_cache_size, disable_disk_cache,
+ workarounds.disable_program_caching_for_transform_feedback,
+ &activity_flags_));
}
return program_cache_.get();
}
InProcessCommandBuffer::InProcessCommandBuffer(
const scoped_refptr<Service>& service)
- : command_buffer_id_(
- CommandBufferId::FromUnsafeValue(g_next_command_buffer_id.GetNext())),
+ : command_buffer_id_(CommandBufferId::FromUnsafeValue(
+ g_next_command_buffer_id.GetNext() + 1)),
delayed_work_pending_(false),
image_factory_(nullptr),
gpu_control_client_(nullptr),
@@ -347,10 +347,12 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
return false;
}
- sync_point_order_data_ = SyncPointOrderData::Create();
- sync_point_client_ = base::MakeUnique<SyncPointClient>(
- service_->sync_point_manager(), sync_point_order_data_, GetNamespaceID(),
- GetCommandBufferID());
+ sync_point_order_data_ =
+ service_->sync_point_manager()->CreateSyncPointOrderData();
+ sync_point_client_state_ =
+ service_->sync_point_manager()->CreateSyncPointClientState(
+ GetNamespaceID(), GetCommandBufferID(),
+ sync_point_order_data_->sequence_id());
if (service_->UseVirtualizedGLContexts() ||
decoder_->GetContextGroup()
@@ -459,11 +461,14 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
}
context_ = nullptr;
surface_ = nullptr;
- sync_point_client_ = nullptr;
if (sync_point_order_data_) {
sync_point_order_data_->Destroy();
sync_point_order_data_ = nullptr;
}
+ if (sync_point_client_state_) {
+ sync_point_client_state_->Destroy();
+ sync_point_client_state_ = nullptr;
+ }
gl_share_group_ = nullptr;
context_group_ = nullptr;
@@ -507,9 +512,7 @@ void InProcessCommandBuffer::QueueTask(bool out_of_order,
}
// Release the |task_queue_lock_| before calling ScheduleTask because
// the callback may get called immediately and attempt to acquire the lock.
- SyncPointManager* sync_manager = service_->sync_point_manager();
- uint32_t order_num =
- sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
+ uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber();
{
base::AutoLock lock(task_queue_lock_);
task_queue_.push(base::MakeUnique<GpuTask>(task, order_num));
@@ -704,8 +707,8 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
int32_t new_id = next_image_id_.GetNext();
- DCHECK(gpu::IsGpuMemoryBufferFormatSupported(gpu_memory_buffer->GetFormat(),
- capabilities_));
+ DCHECK(gpu::IsImageFromGpuMemoryBufferFormatSupported(
+ gpu_memory_buffer->GetFormat(), capabilities_));
DCHECK(gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
internalformat, gpu_memory_buffer->GetFormat()));
@@ -802,7 +805,7 @@ void InProcessCommandBuffer::CreateImageOnGpuThread(
}
if (fence_sync)
- sync_point_client_->ReleaseFenceSync(fence_sync);
+ sync_point_client_state_->ReleaseFenceSync(fence_sync);
}
void InProcessCommandBuffer::DestroyImage(int32_t id) {
@@ -834,7 +837,7 @@ void InProcessCommandBuffer::FenceSyncReleaseOnGpuThread(uint64_t release) {
decoder_->GetContextGroup()->mailbox_manager();
mailbox_manager->PushTextureUpdates(sync_token);
- sync_point_client_->ReleaseFenceSync(release);
+ sync_point_client_state_->ReleaseFenceSync(release);
}
bool InProcessCommandBuffer::WaitSyncTokenOnGpuThread(
@@ -849,7 +852,7 @@ bool InProcessCommandBuffer::WaitSyncTokenOnGpuThread(
if (service_->BlockThreadOnWaitSyncToken()) {
// Wait if sync point wait is valid.
- if (sync_point_client_->Wait(
+ if (sync_point_client_state_->Wait(
sync_token,
base::Bind(&base::WaitableEvent::Signal,
base::Unretained(&fence_sync_wait_event_)))) {
@@ -860,7 +863,7 @@ bool InProcessCommandBuffer::WaitSyncTokenOnGpuThread(
return false;
}
- waiting_for_sync_point_ = sync_point_client_->Wait(
+ waiting_for_sync_point_ = sync_point_client_state_->Wait(
sync_token,
base::Bind(&InProcessCommandBuffer::OnWaitSyncTokenCompleted,
gpu_thread_weak_ptr_factory_.GetWeakPtr(), sync_token));
@@ -881,8 +884,8 @@ void InProcessCommandBuffer::OnWaitSyncTokenCompleted(
mailbox_manager->PullTextureUpdates(sync_token);
waiting_for_sync_point_ = false;
executor_->SetScheduled(true);
- QueueTask(false, base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
- gpu_thread_weak_ptr_, last_put_offset_));
+ service_->ScheduleTask(base::Bind(
+ &InProcessCommandBuffer::ProcessTasksOnGpuThread, gpu_thread_weak_ptr_));
}
void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() {
@@ -906,7 +909,7 @@ void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() {
void InProcessCommandBuffer::SignalSyncTokenOnGpuThread(
const SyncToken& sync_token,
const base::Closure& callback) {
- if (!sync_point_client_->Wait(sync_token, WrapCallback(callback)))
+ if (!sync_point_client_state_->Wait(sync_token, WrapCallback(callback)))
callback.Run();
}
@@ -981,9 +984,11 @@ void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token,
base::Unretained(this), sync_token, WrapCallback(callback)));
}
+void InProcessCommandBuffer::WaitSyncTokenHint(const SyncToken& sync_token) {}
+
bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken(
- const SyncToken* sync_token) {
- return sync_token->namespace_id() == GetNamespaceID();
+ const SyncToken& sync_token) {
+ return sync_token.namespace_id() == GetNamespaceID();
}
#if defined(OS_WIN)
diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h
index 6bc7066d72f..ff29663a925 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.h
+++ b/chromium/gpu/ipc/in_process_command_buffer.h
@@ -23,6 +23,7 @@
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/client/gpu_control.h"
+#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/service/command_executor.h"
#include "gpu/command_buffer/service/context_group.h"
@@ -51,7 +52,7 @@ class Size;
namespace gpu {
-class SyncPointClient;
+class SyncPointClientState;
class SyncPointOrderData;
class SyncPointManager;
struct GpuProcessHostedCALayerTreeParamsMac;
@@ -128,7 +129,8 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const SyncToken& sync_token,
const base::Closure& callback) override;
- bool CanWaitUnverifiedSyncToken(const SyncToken* sync_token) override;
+ void WaitSyncTokenHint(const SyncToken& sync_token) override;
+ bool CanWaitUnverifiedSyncToken(const SyncToken& sync_token) override;
// ImageTransportSurfaceDelegate implementation:
#if defined(OS_WIN)
@@ -199,6 +201,8 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
scoped_refptr<gles2::MailboxManager> mailbox_manager_;
scoped_refptr<gl::GLShareGroup> share_group_;
std::unique_ptr<gpu::gles2::ProgramCache> program_cache_;
+ // No-op default initialization is used in in-process mode.
+ GpuProcessActivityFlags activity_flags_;
};
private:
@@ -275,7 +279,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
scoped_refptr<gl::GLContext> context_;
scoped_refptr<gl::GLSurface> surface_;
scoped_refptr<SyncPointOrderData> sync_point_order_data_;
- std::unique_ptr<SyncPointClient> sync_point_client_;
+ scoped_refptr<SyncPointClientState> sync_point_client_state_;
base::Closure context_lost_callback_;
// Used to throttle PerformDelayedWorkOnGpuThread.
bool delayed_work_pending_;
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index ce81b1693d4..abec3f8b454 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -61,11 +61,12 @@ target(link_target_type, "ipc_service_sources") {
"//base",
"//ipc",
"//ui/base",
- "//ui/events:events_base",
+ "//ui/display",
"//ui/gfx",
"//ui/gfx/geometry",
"//ui/gl",
"//ui/gl/init",
+ "//ui/latency",
"//url",
]
deps = [
@@ -111,16 +112,16 @@ target(link_target_type, "ipc_service_sources") {
libs += [ "android" ]
}
if (is_linux) {
- sources += [ "image_transport_surface_linux.cc" ]
+ sources += [
+ "gpu_memory_buffer_factory_native_pixmap.cc",
+ "gpu_memory_buffer_factory_native_pixmap.h",
+ "image_transport_surface_linux.cc",
+ ]
}
if (use_x11) {
sources += [ "x_util.h" ]
}
if (use_ozone) {
- sources += [
- "gpu_memory_buffer_factory_ozone_native_pixmap.cc",
- "gpu_memory_buffer_factory_ozone_native_pixmap.h",
- ]
deps += [ "//ui/ozone" ]
}
}
@@ -153,7 +154,6 @@ test("gpu_ipc_service_unittests") {
":test_support",
"//base",
"//base/test:test_support",
- "//gpu:test_support",
"//gpu/command_buffer/common",
"//gpu/command_buffer/common:gles2_utils",
"//gpu/command_buffer/service",
@@ -177,8 +177,10 @@ test("gpu_ipc_service_unittests") {
if (is_mac) {
sources += [ "gpu_memory_buffer_factory_io_surface_unittest.cc" ]
}
+ if (is_linux) {
+ sources += [ "gpu_memory_buffer_factory_native_pixmap_unittest.cc" ]
+ }
if (use_ozone) {
- sources += [ "gpu_memory_buffer_factory_ozone_native_pixmap_unittest.cc" ]
deps += [ "//ui/ozone" ]
}
}
diff --git a/chromium/gpu/ipc/service/DEPS b/chromium/gpu/ipc/service/DEPS
index 0461b20ed01..d7b8d51689b 100644
--- a/chromium/gpu/ipc/service/DEPS
+++ b/chromium/gpu/ipc/service/DEPS
@@ -2,6 +2,8 @@ include_rules = [
"+third_party/skia",
"+ui/accelerated_widget_mac",
"+ui/base",
- "+ui/events",
+ "+ui/display",
+ "+ui/latency",
"+ui/ozone",
+ "+ui/platform_window",
]
diff --git a/chromium/gpu/ipc/service/child_window_surface_win.cc b/chromium/gpu/ipc/service/child_window_surface_win.cc
index f04f6749fda..b3dc4a99908 100644
--- a/chromium/gpu/ipc/service/child_window_surface_win.cc
+++ b/chromium/gpu/ipc/service/child_window_surface_win.cc
@@ -11,7 +11,7 @@
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
-#include "ui/base/ui_base_switches.h"
+#include "ui/display/display_switches.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gl/egl_util.h"
#include "ui/gl/gl_context.h"
@@ -21,9 +21,10 @@
namespace gpu {
ChildWindowSurfaceWin::ChildWindowSurfaceWin(
+ std::unique_ptr<gfx::VSyncProvider> vsync_provider,
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
HWND parent_window)
- : gl::NativeViewGLSurfaceEGL(0),
+ : gl::NativeViewGLSurfaceEGL(0, std::move(vsync_provider)),
child_window_(delegate, parent_window),
alpha_(true),
first_swap_(true) {
@@ -35,10 +36,10 @@ ChildWindowSurfaceWin::ChildWindowSurfaceWin(
EGLConfig ChildWindowSurfaceWin::GetConfig() {
if (!config_) {
int alpha_size = alpha_ ? 8 : EGL_DONT_CARE;
- int bits_per_channel = base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableHDROutput)
- ? 16
- : 8;
+ int bits_per_channel =
+ base::CommandLine::ForCurrentProcess()->HasSwitch(switches::kEnableHDR)
+ ? 16
+ : 8;
EGLint config_attribs[] = {EGL_ALPHA_SIZE,
alpha_size,
diff --git a/chromium/gpu/ipc/service/child_window_surface_win.h b/chromium/gpu/ipc/service/child_window_surface_win.h
index 9f5017b35e5..8304cd190a3 100644
--- a/chromium/gpu/ipc/service/child_window_surface_win.h
+++ b/chromium/gpu/ipc/service/child_window_surface_win.h
@@ -16,7 +16,8 @@ namespace gpu {
class ChildWindowSurfaceWin : public gl::NativeViewGLSurfaceEGL {
public:
- ChildWindowSurfaceWin(base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
+ ChildWindowSurfaceWin(std::unique_ptr<gfx::VSyncProvider> vsync_provider,
+ base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
HWND parent_window);
// GLSurface implementation.
diff --git a/chromium/gpu/ipc/service/child_window_win.cc b/chromium/gpu/ipc/service/child_window_win.cc
index 1246c577318..6c1dfdc957d 100644
--- a/chromium/gpu/ipc/service/child_window_win.cc
+++ b/chromium/gpu/ipc/service/child_window_win.cc
@@ -207,4 +207,9 @@ ChildWindowWin::~ChildWindowWin() {
}
}
+scoped_refptr<base::TaskRunner> ChildWindowWin::GetTaskRunnerForTesting() {
+ DCHECK(shared_data_);
+ return shared_data_->thread.task_runner();
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/child_window_win.h b/chromium/gpu/ipc/service/child_window_win.h
index 2bccf9ff926..c11202b12da 100644
--- a/chromium/gpu/ipc/service/child_window_win.h
+++ b/chromium/gpu/ipc/service/child_window_win.h
@@ -6,6 +6,7 @@
#define GPU_IPC_SERVICE_CHILD_WINDOW_WIN_H_
#include "base/memory/weak_ptr.h"
+#include "base/task_runner.h"
#include "gpu/ipc/service/image_transport_surface_delegate.h"
#include <windows.h>
@@ -27,6 +28,8 @@ class ChildWindowWin {
void ClearInvalidContents();
HWND window() const { return window_; }
+ scoped_refptr<base::TaskRunner> GetTaskRunnerForTesting();
+
private:
// This member contains all the data that can be accessed from the main or
// window owner threads.
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.cc b/chromium/gpu/ipc/service/direct_composition_surface_win.cc
index 317964e5222..2fd54a68bb7 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.cc
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win.cc
@@ -4,13 +4,33 @@
#include "gpu/ipc/service/direct_composition_surface_win.h"
+#include <d3d11_1.h>
+#include <dcomptypes.h>
+
+#include <deque>
+
+#include "base/feature_list.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
#include "base/optional.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/trace_event/trace_event.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_version.h"
+#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
+#include "gpu/ipc/service/switches.h"
+#include "ui/display/display_switches.h"
+#include "ui/gfx/color_space_win.h"
+#include "ui/gfx/geometry/size_conversions.h"
#include "ui/gfx/native_widget_types.h"
+#include "ui/gfx/transform.h"
+#include "ui/gl/dc_renderer_layer_params.h"
#include "ui/gl/egl_util.h"
#include "ui/gl/gl_angle_util_win.h"
#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_image_dxgi.h"
#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/scoped_make_current.h"
@@ -27,6 +47,11 @@
namespace gpu {
namespace {
+// Some drivers fail to correctly handle BT.709 video in overlays. This flag
+// converts them to BT.601 in the video processor.
+const base::Feature kFallbackBT709VideoToBT601{
+ "FallbackBT709VideoToBT601", base::FEATURE_DISABLED_BY_DEFAULT};
+
// This class is used to make sure a specified surface isn't current, and upon
// destruction it will make the surface current again if it had been before.
class ScopedReleaseCurrent {
@@ -45,6 +70,106 @@ class ScopedReleaseCurrent {
base::Optional<ui::ScopedMakeCurrent> make_current_;
};
+bool SizeContains(const gfx::Size& a, const gfx::Size& b) {
+ return gfx::Rect(a).Contains(gfx::Rect(b));
+}
+
+// This keeps track of whether the previous 30 frames used Overlays or GPU
+// composition to present.
+class PresentationHistory {
+ public:
+ static const int kPresentsToStore = 30;
+
+ PresentationHistory() {}
+
+ void AddSample(DXGI_FRAME_PRESENTATION_MODE mode) {
+ if (mode == DXGI_FRAME_PRESENTATION_MODE_COMPOSED)
+ composed_count_++;
+
+ presents_.push_back(mode);
+ if (presents_.size() > kPresentsToStore) {
+ DXGI_FRAME_PRESENTATION_MODE first_mode = presents_.front();
+ if (first_mode == DXGI_FRAME_PRESENTATION_MODE_COMPOSED)
+ composed_count_--;
+ presents_.pop_front();
+ }
+ }
+
+ bool valid() const { return presents_.size() >= kPresentsToStore; }
+ int composed_count() const { return composed_count_; }
+
+ private:
+ std::deque<DXGI_FRAME_PRESENTATION_MODE> presents_;
+ int composed_count_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(PresentationHistory);
+};
+
+gfx::Size g_overlay_monitor_size;
+
+// This is the raw support info, which shouldn't depend on field trial state.
+bool HardwareSupportsOverlays() {
+ if (!gl::GLSurfaceEGL::IsDirectCompositionSupported())
+ return false;
+
+ base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+ if (command_line->HasSwitch(switches::kEnableDirectCompositionLayers))
+ return true;
+ if (command_line->HasSwitch(switches::kDisableDirectCompositionLayers))
+ return false;
+
+ // Before Windows 10 Anniversary Update (Redstone 1), overlay planes
+ // wouldn't be assigned to non-UWP apps.
+ if (base::win::GetVersion() < base::win::VERSION_WIN10_R1)
+ return false;
+
+ base::win::ScopedComPtr<ID3D11Device> d3d11_device =
+ gl::QueryD3D11DeviceObjectFromANGLE();
+ if (!d3d11_device) {
+ DLOG(ERROR) << "Failing to create overlay swapchain because couldn't "
+ "retrieve D3D11 device from ANGLE.";
+ return false;
+ }
+
+ base::win::ScopedComPtr<IDXGIDevice> dxgi_device;
+ d3d11_device.QueryInterface(dxgi_device.Receive());
+ base::win::ScopedComPtr<IDXGIAdapter> dxgi_adapter;
+ dxgi_device->GetAdapter(dxgi_adapter.Receive());
+
+ unsigned int i = 0;
+ while (true) {
+ base::win::ScopedComPtr<IDXGIOutput> output;
+ if (FAILED(dxgi_adapter->EnumOutputs(i++, output.Receive())))
+ break;
+ base::win::ScopedComPtr<IDXGIOutput3> output3;
+ if (FAILED(output.QueryInterface(output3.Receive())))
+ continue;
+
+ UINT flags = 0;
+ if (FAILED(output3->CheckOverlaySupport(DXGI_FORMAT_YUY2,
+ d3d11_device.get(), &flags)))
+ continue;
+
+ UMA_HISTOGRAM_SPARSE_SLOWLY("GPU.DirectComposition.OverlaySupportFlags",
+ flags);
+
+ // Some new Intel drivers only claim to support unscaled overlays, but
+ // scaled overlays still work. Even when scaled overlays aren't actually
+ // supported, presentation using the overlay path should be relatively
+ // efficient.
+ if (flags & (DXGI_OVERLAY_SUPPORT_FLAG_SCALING |
+ DXGI_OVERLAY_SUPPORT_FLAG_DIRECT)) {
+ DXGI_OUTPUT_DESC monitor_desc = {};
+ if (FAILED(output3->GetDesc(&monitor_desc)))
+ continue;
+ g_overlay_monitor_size =
+ gfx::Rect(monitor_desc.DesktopCoordinates).size();
+ return true;
+ }
+ }
+ return false;
+}
+
// Only one DirectComposition surface can be rendered into at a time. Track
// here which IDCompositionSurface is being rendered into. If another context
// is made current, then this surface will be suspended.
@@ -52,15 +177,742 @@ IDCompositionSurface* g_current_surface;
} // namespace
+class DCLayerTree {
+ public:
+ DCLayerTree(DirectCompositionSurfaceWin* surface,
+ const base::win::ScopedComPtr<ID3D11Device>& d3d11_device,
+ const base::win::ScopedComPtr<IDCompositionDevice2>& dcomp_device)
+ : surface_(surface),
+ d3d11_device_(d3d11_device),
+ dcomp_device_(dcomp_device) {}
+
+ bool Initialize(HWND window);
+ bool CommitAndClearPendingOverlays();
+ bool ScheduleDCLayer(const ui::DCRendererLayerParams& params);
+ void InitializeVideoProcessor(const gfx::Size& input_size,
+ const gfx::Size& output_size);
+
+ const base::win::ScopedComPtr<ID3D11VideoProcessor>& video_processor() const {
+ return video_processor_;
+ }
+ const base::win::ScopedComPtr<ID3D11VideoProcessorEnumerator>&
+ video_processor_enumerator() const {
+ return video_processor_enumerator_;
+ }
+ base::win::ScopedComPtr<IDXGISwapChain1> GetLayerSwapChainForTesting(
+ size_t index) const;
+
+ const GpuDriverBugWorkarounds& workarounds() const {
+ return surface_->workarounds();
+ }
+
+ private:
+ class SwapChainPresenter;
+
+ // This struct is used to cache information about what visuals are currently
+ // being presented so that properties that aren't changed aren't sent to
+ // DirectComposition.
+ struct VisualInfo {
+ base::win::ScopedComPtr<IDCompositionVisual2> content_visual;
+ base::win::ScopedComPtr<IDCompositionVisual2> clip_visual;
+
+ std::unique_ptr<SwapChainPresenter> swap_chain_presenter;
+ base::win::ScopedComPtr<IDXGISwapChain1> swap_chain;
+ base::win::ScopedComPtr<IDCompositionSurface> surface;
+
+ gfx::Rect bounds;
+ float swap_chain_scale_x = 0.0f;
+ float swap_chain_scale_y = 0.0f;
+ bool is_clipped = false;
+ gfx::Rect clip_rect;
+ gfx::Transform transform;
+ };
+
+ void InitVisual(size_t i);
+ void UpdateVisualForVideo(VisualInfo* visual_info,
+ const ui::DCRendererLayerParams& params);
+ void UpdateVisualForBackbuffer(VisualInfo* visual_info,
+ const ui::DCRendererLayerParams& params);
+ void UpdateVisualClip(VisualInfo* visual_info,
+ const ui::DCRendererLayerParams& params);
+
+ DirectCompositionSurfaceWin* surface_;
+ std::vector<std::unique_ptr<ui::DCRendererLayerParams>> pending_overlays_;
+
+ base::win::ScopedComPtr<ID3D11Device> d3d11_device_;
+ base::win::ScopedComPtr<IDCompositionDevice2> dcomp_device_;
+ base::win::ScopedComPtr<IDCompositionTarget> dcomp_target_;
+ base::win::ScopedComPtr<IDCompositionVisual2> root_visual_;
+
+ // The video processor is cached so SwapChains don't have to recreate it
+ // whenever they're created.
+ base::win::ScopedComPtr<ID3D11VideoDevice> video_device_;
+ base::win::ScopedComPtr<ID3D11VideoContext> video_context_;
+ base::win::ScopedComPtr<ID3D11VideoProcessor> video_processor_;
+ base::win::ScopedComPtr<ID3D11VideoProcessorEnumerator>
+ video_processor_enumerator_;
+ gfx::Size video_input_size_;
+ gfx::Size video_output_size_;
+
+ std::vector<VisualInfo> visual_info_;
+
+ DISALLOW_COPY_AND_ASSIGN(DCLayerTree);
+};
+
+class DCLayerTree::SwapChainPresenter {
+ public:
+ SwapChainPresenter(DCLayerTree* surface,
+ base::win::ScopedComPtr<ID3D11Device> d3d11_device);
+
+ ~SwapChainPresenter();
+
+ void PresentToSwapChain(const ui::DCRendererLayerParams& overlay);
+
+ float swap_chain_scale_x() const { return swap_chain_scale_x_; }
+ float swap_chain_scale_y() const { return swap_chain_scale_y_; }
+ const base::win::ScopedComPtr<IDXGISwapChain1>& swap_chain() const {
+ return swap_chain_;
+ }
+
+ private:
+ using PFN_DCOMPOSITION_CREATE_SURFACE_HANDLE =
+ HRESULT(WINAPI*)(DWORD, SECURITY_ATTRIBUTES*, HANDLE*);
+
+ // Returns true if the video processor changed.
+ bool InitializeVideoProcessor(const gfx::Size& in_size,
+ const gfx::Size& out_size);
+ void ReallocateSwapChain(bool yuy2);
+ bool ShouldBeYUY2();
+
+ DCLayerTree* surface_;
+ PFN_DCOMPOSITION_CREATE_SURFACE_HANDLE create_surface_handle_function_;
+
+ gfx::Size swap_chain_size_;
+ gfx::Size processor_input_size_;
+ gfx::Size processor_output_size_;
+ bool is_yuy2_swapchain_ = false;
+
+ // This is the scale from the swapchain size to the size of the contents
+ // onscreen.
+ float swap_chain_scale_x_ = 0.0f;
+ float swap_chain_scale_y_ = 0.0f;
+
+ PresentationHistory presentation_history_;
+ bool failed_to_create_yuy2_swapchain_ = false;
+ int frames_since_color_space_change_ = 0;
+
+ // This is the GLImage that was presented in the last frame.
+ scoped_refptr<gl::GLImageDXGI> last_gl_image_;
+
+ base::win::ScopedComPtr<ID3D11Device> d3d11_device_;
+ base::win::ScopedComPtr<IDXGISwapChain1> swap_chain_;
+ base::win::ScopedComPtr<ID3D11VideoProcessorOutputView> out_view_;
+ base::win::ScopedComPtr<ID3D11VideoProcessor> video_processor_;
+ base::win::ScopedComPtr<ID3D11VideoProcessorEnumerator>
+ video_processor_enumerator_;
+ base::win::ScopedComPtr<ID3D11VideoDevice> video_device_;
+ base::win::ScopedComPtr<ID3D11VideoContext> video_context_;
+
+ base::win::ScopedHandle swap_chain_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(SwapChainPresenter);
+};
+
+bool DCLayerTree::Initialize(HWND window) {
+ d3d11_device_.QueryInterface(video_device_.Receive());
+ base::win::ScopedComPtr<ID3D11DeviceContext> context;
+ d3d11_device_->GetImmediateContext(context.Receive());
+ context.QueryInterface(video_context_.Receive());
+
+ base::win::ScopedComPtr<IDCompositionDesktopDevice> desktop_device;
+ dcomp_device_.QueryInterface(desktop_device.Receive());
+
+ HRESULT hr = desktop_device->CreateTargetForHwnd(window, TRUE,
+ dcomp_target_.Receive());
+ if (FAILED(hr))
+ return false;
+
+ hr = dcomp_device_->CreateVisual(root_visual_.Receive());
+ if (FAILED(hr))
+ return false;
+
+ dcomp_target_->SetRoot(root_visual_.get());
+ return true;
+}
+
+void DCLayerTree::InitializeVideoProcessor(const gfx::Size& input_size,
+ const gfx::Size& output_size) {
+ if (SizeContains(video_input_size_, input_size) &&
+ SizeContains(video_output_size_, output_size))
+ return;
+ video_input_size_ = input_size;
+ video_output_size_ = output_size;
+
+ video_processor_.Reset();
+ video_processor_enumerator_.Reset();
+ D3D11_VIDEO_PROCESSOR_CONTENT_DESC desc = {};
+ desc.InputFrameFormat = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
+ desc.InputFrameRate.Numerator = 60;
+ desc.InputFrameRate.Denominator = 1;
+ desc.InputWidth = input_size.width();
+ desc.InputHeight = input_size.height();
+ desc.OutputFrameRate.Numerator = 60;
+ desc.OutputFrameRate.Denominator = 1;
+ desc.OutputWidth = output_size.width();
+ desc.OutputHeight = output_size.height();
+ desc.Usage = D3D11_VIDEO_USAGE_PLAYBACK_NORMAL;
+ HRESULT hr = video_device_->CreateVideoProcessorEnumerator(
+ &desc, video_processor_enumerator_.Receive());
+ CHECK(SUCCEEDED(hr));
+
+ hr = video_device_->CreateVideoProcessor(video_processor_enumerator_.get(), 0,
+ video_processor_.Receive());
+ CHECK(SUCCEEDED(hr));
+}
+
+base::win::ScopedComPtr<IDXGISwapChain1>
+DCLayerTree::GetLayerSwapChainForTesting(size_t index) const {
+ if (index >= visual_info_.size())
+ return base::win::ScopedComPtr<IDXGISwapChain1>();
+ return visual_info_[index].swap_chain;
+}
+
+DCLayerTree::SwapChainPresenter::SwapChainPresenter(
+ DCLayerTree* surface,
+ base::win::ScopedComPtr<ID3D11Device> d3d11_device)
+ : surface_(surface), d3d11_device_(d3d11_device) {
+ d3d11_device_.QueryInterface(video_device_.Receive());
+ base::win::ScopedComPtr<ID3D11DeviceContext> context;
+ d3d11_device_->GetImmediateContext(context.Receive());
+ context.QueryInterface(video_context_.Receive());
+ HMODULE dcomp = ::GetModuleHandleA("dcomp.dll");
+ CHECK(dcomp);
+ create_surface_handle_function_ =
+ reinterpret_cast<PFN_DCOMPOSITION_CREATE_SURFACE_HANDLE>(
+ GetProcAddress(dcomp, "DCompositionCreateSurfaceHandle"));
+ CHECK(create_surface_handle_function_);
+}
+
+DCLayerTree::SwapChainPresenter::~SwapChainPresenter() {}
+
+bool DCLayerTree::SwapChainPresenter::ShouldBeYUY2() {
+ // Start out as YUY2.
+ if (!presentation_history_.valid())
+ return true;
+ int composition_count = presentation_history_.composed_count();
+
+ // It's more efficient to use a BGRA backbuffer instead of YUY2 if overlays
+ // aren't being used, as otherwise DWM will use the video processor a second
+ // time to convert it to BGRA before displaying it on screen.
+
+ if (is_yuy2_swapchain_) {
+ // Switch to BGRA once 3/4 of presents are composed.
+ return composition_count < (PresentationHistory::kPresentsToStore * 3 / 4);
+ } else {
+ // Switch to YUY2 once 3/4 are using overlays (or unknown).
+ return composition_count < (PresentationHistory::kPresentsToStore / 4);
+ }
+}
+
+void DCLayerTree::SwapChainPresenter::PresentToSwapChain(
+ const ui::DCRendererLayerParams& params) {
+ gl::GLImageDXGI* image_dxgi =
+ gl::GLImageDXGI::FromGLImage(params.image.get());
+ DCHECK(image_dxgi);
+
+ // Swap chain size is the minimum of the on-screen size and the source
+ // size so the video processor can do the minimal amount of work and
+ // the overlay has to read the minimal amount of data.
+ // DWM is also less likely to promote a surface to an overlay if it's
+ // much larger than its area on-screen.
+ gfx::Rect bounds_rect = params.rect;
+ gfx::Size ceiled_input_size = gfx::ToCeiledSize(params.contents_rect.size());
+ gfx::Size swap_chain_size = bounds_rect.size();
+ swap_chain_size.SetToMin(ceiled_input_size);
+
+ // YUY2 surfaces must have an even width.
+ if (swap_chain_size.width() % 2 == 1)
+ swap_chain_size.set_width(swap_chain_size.width() + 1);
+
+ InitializeVideoProcessor(ceiled_input_size, swap_chain_size);
+
+ if (surface_->workarounds().disable_larger_than_screen_overlays) {
+ // Because of the rounding when converting between pixels and DIPs, a
+ // fullscreen video can become slightly larger than the monitor - e.g. on
+ // a 3000x2000 monitor with a scale factor of 1.75 a 1920x1079 video can
+ // become 3002x1689.
+ // On older Intel drivers, swapchains that are bigger than the monitor
+ // won't be put into overlays, which will hurt power usage a lot. On those
+ // systems, the scaling can be adjusted very slightly so that it's less
+ // than the monitor size. This should be close to imperceptible.
+ // TODO(jbauman): Remove when http://crbug.com/668278 is fixed.
+ const int kOversizeMargin = 3;
+
+ if ((bounds_rect.x() >= 0) &&
+ (bounds_rect.width() > g_overlay_monitor_size.width()) &&
+ (bounds_rect.width() <=
+ g_overlay_monitor_size.width() + kOversizeMargin)) {
+ bounds_rect.set_width(g_overlay_monitor_size.width());
+ }
+
+ if ((bounds_rect.y() >= 0) &&
+ (bounds_rect.height() > g_overlay_monitor_size.height()) &&
+ (bounds_rect.height() <=
+ g_overlay_monitor_size.height() + kOversizeMargin)) {
+ bounds_rect.set_height(g_overlay_monitor_size.height());
+ }
+ }
+
+ swap_chain_scale_x_ = bounds_rect.width() * 1.0f / swap_chain_size.width();
+ swap_chain_scale_y_ = bounds_rect.height() * 1.0f / swap_chain_size.height();
+
+ bool yuy2_swapchain = ShouldBeYUY2();
+ bool first_present = false;
+ if (!swap_chain_ || swap_chain_size_ != swap_chain_size ||
+ ((yuy2_swapchain != is_yuy2_swapchain_) &&
+ !failed_to_create_yuy2_swapchain_)) {
+ first_present = true;
+ swap_chain_size_ = swap_chain_size;
+ swap_chain_.Reset();
+ ReallocateSwapChain(yuy2_swapchain);
+ } else if (last_gl_image_ == image_dxgi) {
+ // The swap chain is presenting the same image as last swap, which means
+ // that the image was never returned to the video decoder and should have
+ // the same contents as last time. It shouldn't need to be redrawn.
+ return;
+ }
+
+ last_gl_image_ = image_dxgi;
+
+ if (!out_view_) {
+ base::win::ScopedComPtr<ID3D11Texture2D> texture;
+ swap_chain_->GetBuffer(0, IID_PPV_ARGS(texture.Receive()));
+ D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC out_desc = {};
+ out_desc.ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D;
+ out_desc.Texture2D.MipSlice = 0;
+ HRESULT hr = video_device_->CreateVideoProcessorOutputView(
+ texture.get(), video_processor_enumerator_.get(), &out_desc,
+ out_view_.Receive());
+ CHECK(SUCCEEDED(hr));
+ }
+
+ // TODO(jbauman): Use correct colorspace.
+ gfx::ColorSpace src_color_space = gfx::ColorSpace::CreateREC709();
+ base::win::ScopedComPtr<ID3D11VideoContext1> context1;
+ if (SUCCEEDED(video_context_.QueryInterface(context1.Receive()))) {
+ context1->VideoProcessorSetStreamColorSpace1(
+ video_processor_.get(), 0,
+ gfx::ColorSpaceWin::GetDXGIColorSpace(src_color_space));
+ } else {
+ // This can't handle as many different types of color spaces, so use it
+ // only if ID3D11VideoContext1 isn't available.
+ D3D11_VIDEO_PROCESSOR_COLOR_SPACE color_space =
+ gfx::ColorSpaceWin::GetD3D11ColorSpace(src_color_space);
+ video_context_->VideoProcessorSetStreamColorSpace(video_processor_.get(), 0,
+ &color_space);
+ }
+
+ gfx::ColorSpace output_color_space =
+ is_yuy2_swapchain_ ? src_color_space : gfx::ColorSpace::CreateSRGB();
+ if (base::FeatureList::IsEnabled(kFallbackBT709VideoToBT601) &&
+ (output_color_space == gfx::ColorSpace::CreateREC709())) {
+ output_color_space = gfx::ColorSpace::CreateREC601();
+ }
+
+ base::win::ScopedComPtr<IDXGISwapChain3> swap_chain3;
+ if (SUCCEEDED(swap_chain_.QueryInterface(swap_chain3.Receive()))) {
+ DXGI_COLOR_SPACE_TYPE color_space =
+ gfx::ColorSpaceWin::GetDXGIColorSpace(output_color_space);
+ HRESULT hr = swap_chain3->SetColorSpace1(color_space);
+ CHECK(SUCCEEDED(hr));
+ if (context1) {
+ context1->VideoProcessorSetOutputColorSpace1(video_processor_.get(),
+ color_space);
+ } else {
+ D3D11_VIDEO_PROCESSOR_COLOR_SPACE d3d11_color_space =
+ gfx::ColorSpaceWin::GetD3D11ColorSpace(output_color_space);
+ video_context_->VideoProcessorSetOutputColorSpace(video_processor_.get(),
+ &d3d11_color_space);
+ }
+ }
+
+ {
+ D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC in_desc = {};
+ in_desc.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D;
+ in_desc.Texture2D.ArraySlice = (UINT)image_dxgi->level();
+ base::win::ScopedComPtr<ID3D11VideoProcessorInputView> in_view;
+ HRESULT hr = video_device_->CreateVideoProcessorInputView(
+ image_dxgi->texture().get(), video_processor_enumerator_.get(),
+ &in_desc, in_view.Receive());
+ CHECK(SUCCEEDED(hr));
+
+ D3D11_VIDEO_PROCESSOR_STREAM stream = {};
+ stream.Enable = true;
+ stream.OutputIndex = 0;
+ stream.InputFrameOrField = 0;
+ stream.PastFrames = 0;
+ stream.FutureFrames = 0;
+ stream.pInputSurface = in_view.get();
+ RECT dest_rect = gfx::Rect(swap_chain_size).ToRECT();
+ video_context_->VideoProcessorSetOutputTargetRect(video_processor_.get(),
+ TRUE, &dest_rect);
+ video_context_->VideoProcessorSetStreamDestRect(video_processor_.get(), 0,
+ TRUE, &dest_rect);
+ RECT source_rect = gfx::Rect(ceiled_input_size).ToRECT();
+ video_context_->VideoProcessorSetStreamSourceRect(video_processor_.get(), 0,
+ TRUE, &source_rect);
+
+ video_context_->VideoProcessorSetStreamAutoProcessingMode(
+ video_processor_.get(), 0, FALSE);
+
+ hr = video_context_->VideoProcessorBlt(video_processor_.get(),
+ out_view_.get(), 0, 1, &stream);
+ CHECK(SUCCEEDED(hr));
+ }
+
+ if (first_present) {
+ swap_chain_->Present(0, 0);
+
+ // DirectComposition can display black for a swapchain between the first
+ // and second time it's presented to - maybe the first Present can get
+ // lost somehow and it shows the wrong buffer. In that case copy the
+ // buffers so both have the correct contents, which seems to help. The
+ // first Present() after this needs to have SyncInterval > 0, or else the
+ // workaround doesn't help.
+ base::win::ScopedComPtr<ID3D11Texture2D> dest_texture;
+ HRESULT hr =
+ swap_chain_->GetBuffer(0, IID_PPV_ARGS(dest_texture.Receive()));
+ DCHECK(SUCCEEDED(hr));
+ base::win::ScopedComPtr<ID3D11Texture2D> src_texture;
+ hr = swap_chain_->GetBuffer(1, IID_PPV_ARGS(src_texture.Receive()));
+ DCHECK(SUCCEEDED(hr));
+ base::win::ScopedComPtr<ID3D11DeviceContext> context;
+ d3d11_device_->GetImmediateContext(context.Receive());
+ context->CopyResource(dest_texture.get(), src_texture.get());
+
+ // Additionally wait for the GPU to finish executing its commands, or
+ // there still may be a black flicker when presenting expensive content
+ // (e.g. 4k video).
+ base::win::ScopedComPtr<IDXGIDevice2> dxgi_device2;
+ hr = d3d11_device_.QueryInterface(dxgi_device2.Receive());
+ DCHECK(SUCCEEDED(hr));
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ dxgi_device2->EnqueueSetEvent(event.handle());
+ event.Wait();
+ }
+
+ swap_chain_->Present(1, 0);
+
+ UMA_HISTOGRAM_BOOLEAN("GPU.DirectComposition.SwapchainFormat",
+ is_yuy2_swapchain_);
+ frames_since_color_space_change_++;
+
+ base::win::ScopedComPtr<IDXGISwapChainMedia> swap_chain_media;
+ if (SUCCEEDED(swap_chain_.QueryInterface(swap_chain_media.Receive()))) {
+ DXGI_FRAME_STATISTICS_MEDIA stats = {};
+ if (SUCCEEDED(swap_chain_media->GetFrameStatisticsMedia(&stats))) {
+ UMA_HISTOGRAM_SPARSE_SLOWLY("GPU.DirectComposition.CompositionMode",
+ stats.CompositionMode);
+ presentation_history_.AddSample(stats.CompositionMode);
+ }
+ }
+}
+
+bool DCLayerTree::SwapChainPresenter::InitializeVideoProcessor(
+ const gfx::Size& in_size,
+ const gfx::Size& out_size) {
+ if (video_processor_ && SizeContains(processor_input_size_, in_size) &&
+ SizeContains(processor_output_size_, out_size))
+ return false;
+ processor_input_size_ = in_size;
+ processor_output_size_ = out_size;
+ surface_->InitializeVideoProcessor(in_size, out_size);
+
+ video_processor_enumerator_ = surface_->video_processor_enumerator();
+ video_processor_ = surface_->video_processor();
+ // out_view_ depends on video_processor_enumerator_, so ensure it's
+ // recreated if the enumerator is.
+ out_view_.Reset();
+ return true;
+}
+
+void DCLayerTree::SwapChainPresenter::ReallocateSwapChain(bool yuy2) {
+ TRACE_EVENT0("gpu", "DCLayerTree::SwapChainPresenter::ReallocateSwapChain");
+ DCHECK(!swap_chain_);
+
+ base::win::ScopedComPtr<IDXGIDevice> dxgi_device;
+ d3d11_device_.QueryInterface(dxgi_device.Receive());
+ base::win::ScopedComPtr<IDXGIAdapter> dxgi_adapter;
+ dxgi_device->GetAdapter(dxgi_adapter.Receive());
+ base::win::ScopedComPtr<IDXGIFactory2> dxgi_factory;
+ dxgi_adapter->GetParent(IID_PPV_ARGS(dxgi_factory.Receive()));
+
+ base::win::ScopedComPtr<IDXGIFactoryMedia> media_factory;
+ dxgi_factory.QueryInterface(media_factory.Receive());
+ DXGI_SWAP_CHAIN_DESC1 desc = {};
+ desc.Width = swap_chain_size_.width();
+ desc.Height = swap_chain_size_.height();
+ desc.Format = DXGI_FORMAT_YUY2;
+ desc.Stereo = FALSE;
+ desc.SampleDesc.Count = 1;
+ desc.BufferCount = 2;
+ desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
+ desc.Scaling = DXGI_SCALING_STRETCH;
+ desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
+ desc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
+ desc.Flags =
+ DXGI_SWAP_CHAIN_FLAG_YUV_VIDEO | DXGI_SWAP_CHAIN_FLAG_FULLSCREEN_VIDEO;
+
+ HANDLE handle;
+ create_surface_handle_function_(COMPOSITIONOBJECT_ALL_ACCESS, nullptr,
+ &handle);
+ swap_chain_handle_.Set(handle);
+
+ if (is_yuy2_swapchain_ != yuy2) {
+ UMA_HISTOGRAM_COUNTS_1000(
+ "GPU.DirectComposition.FramesSinceColorSpaceChange",
+ frames_since_color_space_change_);
+ }
+
+ frames_since_color_space_change_ = 0;
+
+ is_yuy2_swapchain_ = false;
+ // The composition surface handle isn't actually used, but
+ // CreateSwapChainForComposition can't create YUY2 swapchains.
+ HRESULT hr = E_FAIL;
+ if (yuy2) {
+ hr = media_factory->CreateSwapChainForCompositionSurfaceHandle(
+ d3d11_device_.get(), swap_chain_handle_.Get(), &desc, nullptr,
+ swap_chain_.Receive());
+ is_yuy2_swapchain_ = SUCCEEDED(hr);
+ failed_to_create_yuy2_swapchain_ = !is_yuy2_swapchain_;
+ }
+
+ if (!is_yuy2_swapchain_) {
+ if (yuy2) {
+ DLOG(ERROR) << "YUY2 creation failed with " << std::hex << hr
+ << ". Falling back to BGRA";
+ }
+ desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
+ desc.Flags = 0;
+ hr = media_factory->CreateSwapChainForCompositionSurfaceHandle(
+ d3d11_device_.get(), swap_chain_handle_.Get(), &desc, nullptr,
+ swap_chain_.Receive());
+ CHECK(SUCCEEDED(hr));
+ }
+ out_view_.Reset();
+}
+
+void DCLayerTree::InitVisual(size_t i) {
+ DCHECK_GT(visual_info_.size(), i);
+ VisualInfo* visual_info = &visual_info_[i];
+ if (visual_info->content_visual)
+ return;
+ DCHECK(!visual_info->clip_visual);
+ base::win::ScopedComPtr<IDCompositionVisual2> visual;
+ dcomp_device_->CreateVisual(visual_info->clip_visual.Receive());
+ dcomp_device_->CreateVisual(visual.Receive());
+ visual_info->content_visual = visual;
+ visual_info->clip_visual->AddVisual(visual.get(), FALSE, nullptr);
+
+ IDCompositionVisual2* last_visual =
+ (i > 0) ? visual_info_[i - 1].clip_visual.get() : nullptr;
+ root_visual_->AddVisual(visual_info->clip_visual.get(), TRUE, last_visual);
+}
+
+void DCLayerTree::UpdateVisualForVideo(
+ VisualInfo* visual_info,
+ const ui::DCRendererLayerParams& params) {
+ base::win::ScopedComPtr<IDCompositionVisual2> dc_visual =
+ visual_info->content_visual;
+
+ gfx::Rect bounds_rect = params.rect;
+ visual_info->surface.Reset();
+ if (!visual_info->swap_chain_presenter) {
+ visual_info->swap_chain_presenter =
+ base::MakeUnique<SwapChainPresenter>(this, d3d11_device_);
+ }
+ visual_info->swap_chain_presenter->PresentToSwapChain(params);
+ if (visual_info->swap_chain !=
+ visual_info->swap_chain_presenter->swap_chain()) {
+ visual_info->swap_chain = visual_info->swap_chain_presenter->swap_chain();
+ dc_visual->SetContent(visual_info->swap_chain.get());
+ }
+
+ if (visual_info->swap_chain_presenter->swap_chain_scale_x() !=
+ visual_info->swap_chain_scale_x ||
+ visual_info->swap_chain_presenter->swap_chain_scale_y() !=
+ visual_info->swap_chain_scale_y ||
+ params.transform != visual_info->transform ||
+ visual_info->bounds != bounds_rect) {
+ visual_info->swap_chain_scale_x =
+ visual_info->swap_chain_presenter->swap_chain_scale_x();
+ visual_info->swap_chain_scale_y =
+ visual_info->swap_chain_presenter->swap_chain_scale_y();
+ visual_info->transform = params.transform;
+ visual_info->bounds = bounds_rect;
+
+ gfx::Transform final_transform = params.transform;
+ gfx::Transform scale_transform;
+ scale_transform.Scale(
+ visual_info->swap_chain_presenter->swap_chain_scale_x(),
+ visual_info->swap_chain_presenter->swap_chain_scale_y());
+ final_transform.PreconcatTransform(scale_transform);
+ final_transform.Transpose();
+
+ dc_visual->SetOffsetX(bounds_rect.x());
+ dc_visual->SetOffsetY(bounds_rect.y());
+ base::win::ScopedComPtr<IDCompositionMatrixTransform> dcomp_transform;
+ dcomp_device_->CreateMatrixTransform(dcomp_transform.Receive());
+ D2D_MATRIX_3X2_F d2d_matrix = {{{final_transform.matrix().get(0, 0),
+ final_transform.matrix().get(0, 1),
+ final_transform.matrix().get(1, 0),
+ final_transform.matrix().get(1, 1),
+ final_transform.matrix().get(3, 0),
+ final_transform.matrix().get(3, 1)}}};
+ dcomp_transform->SetMatrix(d2d_matrix);
+ dc_visual->SetTransform(dcomp_transform.get());
+ }
+}
+
+void DCLayerTree::UpdateVisualForBackbuffer(
+ VisualInfo* visual_info,
+ const ui::DCRendererLayerParams& params) {
+ base::win::ScopedComPtr<IDCompositionVisual2> dc_visual =
+ visual_info->content_visual;
+
+ visual_info->swap_chain_presenter = nullptr;
+ if ((visual_info->surface != surface_->dcomp_surface()) ||
+ (visual_info->swap_chain != surface_->swap_chain())) {
+ visual_info->surface = surface_->dcomp_surface();
+ visual_info->swap_chain = surface_->swap_chain();
+ if (visual_info->surface) {
+ dc_visual->SetContent(visual_info->surface.get());
+ } else if (visual_info->swap_chain) {
+ dc_visual->SetContent(visual_info->swap_chain.get());
+ } else {
+ dc_visual->SetContent(nullptr);
+ }
+ }
+
+ gfx::Rect bounds_rect = params.rect;
+ if (visual_info->bounds != bounds_rect ||
+ !visual_info->transform.IsIdentity()) {
+ dc_visual->SetOffsetX(bounds_rect.x());
+ dc_visual->SetOffsetY(bounds_rect.y());
+ visual_info->bounds = bounds_rect;
+ dc_visual->SetTransform(nullptr);
+ visual_info->transform = gfx::Transform();
+ }
+}
+
+void DCLayerTree::UpdateVisualClip(VisualInfo* visual_info,
+ const ui::DCRendererLayerParams& params) {
+ if (params.is_clipped != visual_info->is_clipped ||
+ params.clip_rect != visual_info->clip_rect) {
+ // DirectComposition clips happen in the pre-transform visual
+ // space, while cc/ clips happen post-transform. So the clip needs
+ // to go on a separate parent visual that's untransformed.
+ visual_info->is_clipped = params.is_clipped;
+ visual_info->clip_rect = params.clip_rect;
+ if (params.is_clipped) {
+ base::win::ScopedComPtr<IDCompositionRectangleClip> clip;
+ dcomp_device_->CreateRectangleClip(clip.Receive());
+ gfx::Rect offset_clip = params.clip_rect;
+ clip->SetLeft(offset_clip.x());
+ clip->SetRight(offset_clip.right());
+ clip->SetBottom(offset_clip.bottom());
+ clip->SetTop(offset_clip.y());
+ visual_info->clip_visual->SetClip(clip.get());
+ } else {
+ visual_info->clip_visual->SetClip(nullptr);
+ }
+ }
+}
+
+bool DCLayerTree::CommitAndClearPendingOverlays() {
+ TRACE_EVENT1("gpu", "DCLayerTree::CommitAndClearPendingOverlays", "size",
+ pending_overlays_.size());
+ UMA_HISTOGRAM_BOOLEAN("GPU.DirectComposition.OverlaysUsed",
+ !pending_overlays_.empty());
+ // Add an overlay with z-order 0 representing the main plane.
+ gfx::Size surface_size = surface_->GetSize();
+ pending_overlays_.push_back(base::MakeUnique<ui::DCRendererLayerParams>(
+ false, gfx::Rect(), 0, gfx::Transform(), nullptr,
+ gfx::RectF(gfx::SizeF(surface_size)), gfx::Rect(surface_size), 0, 0, 1.0,
+ 0));
+
+ // TODO(jbauman): Reuse swapchains that are switched between overlays and
+ // underlays.
+ std::sort(pending_overlays_.begin(), pending_overlays_.end(),
+ [](const auto& a, const auto& b) -> bool {
+ return a->z_order < b->z_order;
+ });
+
+ while (visual_info_.size() > pending_overlays_.size()) {
+ visual_info_.back().clip_visual->RemoveAllVisuals();
+ root_visual_->RemoveVisual(visual_info_.back().clip_visual.get());
+ visual_info_.pop_back();
+ }
+
+ visual_info_.resize(pending_overlays_.size());
+
+ // The overall visual tree has one clip visual for every overlay (including
+ // the main plane). The clip visuals are in z_order and are all children of
+ // a root visual. Each clip visual has a child visual that has the actual
+ // plane content.
+
+ for (size_t i = 0; i < pending_overlays_.size(); i++) {
+ ui::DCRendererLayerParams& params = *pending_overlays_[i];
+ VisualInfo* visual_info = &visual_info_[i];
+
+ InitVisual(i);
+ if (params.image &&
+ params.image->GetType() == gl::GLImage::Type::DXGI_IMAGE) {
+ UpdateVisualForVideo(visual_info, params);
+ } else if (!params.image) {
+ UpdateVisualForBackbuffer(visual_info, params);
+ } else {
+ CHECK(false);
+ }
+ UpdateVisualClip(visual_info, params);
+ }
+
+ HRESULT hr = dcomp_device_->Commit();
+ CHECK(SUCCEEDED(hr));
+
+ pending_overlays_.clear();
+ return true;
+}
+
+bool DCLayerTree::ScheduleDCLayer(const ui::DCRendererLayerParams& params) {
+ pending_overlays_.push_back(
+ base::MakeUnique<ui::DCRendererLayerParams>(params));
+ return true;
+}
+
DirectCompositionSurfaceWin::DirectCompositionSurfaceWin(
+ std::unique_ptr<gfx::VSyncProvider> vsync_provider,
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
HWND parent_window)
- : gl::GLSurfaceEGL(), child_window_(delegate, parent_window) {}
+ : gl::GLSurfaceEGL(),
+ child_window_(delegate, parent_window),
+ workarounds_(delegate->GetFeatureInfo()->workarounds()),
+ vsync_provider_(std::move(vsync_provider)) {}
DirectCompositionSurfaceWin::~DirectCompositionSurfaceWin() {
Destroy();
}
+// static
+bool DirectCompositionSurfaceWin::AreOverlaysSupported() {
+ if (!HardwareSupportsOverlays())
+ return false;
+
+ return base::FeatureList::IsEnabled(switches::kDirectCompositionOverlays);
+}
+
bool DirectCompositionSurfaceWin::InitializeNativeWindow() {
if (window_)
return true;
@@ -70,12 +922,6 @@ bool DirectCompositionSurfaceWin::InitializeNativeWindow() {
return result;
}
-bool DirectCompositionSurfaceWin::Initialize(
- std::unique_ptr<gfx::VSyncProvider> vsync_provider) {
- vsync_provider_ = std::move(vsync_provider);
- return Initialize(gl::GLSurfaceFormat());
-}
-
bool DirectCompositionSurfaceWin::Initialize(gl::GLSurfaceFormat format) {
d3d11_device_ = gl::QueryD3D11DeviceObjectFromANGLE();
dcomp_device_ = gl::QueryDirectCompositionDevice(d3d11_device_);
@@ -90,20 +936,11 @@ bool DirectCompositionSurfaceWin::Initialize(gl::GLSurfaceFormat format) {
}
}
- base::win::ScopedComPtr<IDCompositionDesktopDevice> desktop_device;
- dcomp_device_.QueryInterface(desktop_device.Receive());
-
- HRESULT hr = desktop_device->CreateTargetForHwnd(window_, TRUE,
- dcomp_target_.Receive());
- if (FAILED(hr))
- return false;
-
- hr = dcomp_device_->CreateVisual(visual_.Receive());
- if (FAILED(hr))
+ layer_tree_ =
+ base::MakeUnique<DCLayerTree>(this, d3d11_device_, dcomp_device_);
+ if (!layer_tree_->Initialize(window_))
return false;
- dcomp_target_->SetRoot(visual_.get());
-
std::vector<EGLint> pbuffer_attribs;
pbuffer_attribs.push_back(EGL_WIDTH);
pbuffer_attribs.push_back(1);
@@ -115,32 +952,93 @@ bool DirectCompositionSurfaceWin::Initialize(gl::GLSurfaceFormat format) {
eglCreatePbufferSurface(display, GetConfig(), &pbuffer_attribs[0]);
CHECK(!!default_surface_);
- InitializeSurface();
-
return true;
}
+void DirectCompositionSurfaceWin::ReleaseCurrentSurface() {
+ ReleaseDrawTexture(true);
+ dcomp_surface_.Reset();
+ swap_chain_.Reset();
+}
+
void DirectCompositionSurfaceWin::InitializeSurface() {
- ScopedReleaseCurrent release_current(this);
- ReleaseDrawTexture();
- dcomp_surface_.Release();
- HRESULT hr = dcomp_device_->CreateSurface(
- size_.width(), size_.height(), DXGI_FORMAT_B8G8R8A8_UNORM,
- DXGI_ALPHA_MODE_PREMULTIPLIED, dcomp_surface_.Receive());
- has_been_rendered_to_ = false;
+ TRACE_EVENT1("gpu", "DirectCompositionSurfaceWin::InitializeSurface()",
+ "enable_dc_layers_", enable_dc_layers_);
+ DCHECK(!dcomp_surface_);
+ DCHECK(!swap_chain_);
+ DXGI_FORMAT output_format =
+ base::CommandLine::ForCurrentProcess()->HasSwitch(switches::kEnableHDR)
+ ? DXGI_FORMAT_R16G16B16A16_FLOAT
+ : DXGI_FORMAT_B8G8R8A8_UNORM;
+ if (enable_dc_layers_) {
+ // Always treat as premultiplied, because an underlay could cause it to
+ // become transparent.
+ HRESULT hr = dcomp_device_->CreateSurface(
+ size_.width(), size_.height(), output_format,
+ DXGI_ALPHA_MODE_PREMULTIPLIED, dcomp_surface_.Receive());
+ has_been_rendered_to_ = false;
+ CHECK(SUCCEEDED(hr));
+ } else {
+ DXGI_ALPHA_MODE alpha_mode =
+ has_alpha_ ? DXGI_ALPHA_MODE_PREMULTIPLIED : DXGI_ALPHA_MODE_IGNORE;
+ base::win::ScopedComPtr<IDXGIDevice> dxgi_device;
+ d3d11_device_.QueryInterface(dxgi_device.Receive());
+ base::win::ScopedComPtr<IDXGIAdapter> dxgi_adapter;
+ dxgi_device->GetAdapter(dxgi_adapter.Receive());
+ base::win::ScopedComPtr<IDXGIFactory2> dxgi_factory;
+ dxgi_adapter->GetParent(IID_PPV_ARGS(dxgi_factory.Receive()));
- CHECK(SUCCEEDED(hr));
+ DXGI_SWAP_CHAIN_DESC1 desc = {};
+ desc.Width = size_.width();
+ desc.Height = size_.height();
+ desc.Format = output_format;
+ desc.Stereo = FALSE;
+ desc.SampleDesc.Count = 1;
+ desc.BufferCount = 2;
+ desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
+ desc.Scaling = DXGI_SCALING_STRETCH;
+ desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
+ desc.AlphaMode = alpha_mode;
+ desc.Flags = 0;
+ HRESULT hr = dxgi_factory->CreateSwapChainForComposition(
+ d3d11_device_.get(), &desc, nullptr, swap_chain_.Receive());
+ has_been_rendered_to_ = false;
+ first_swap_ = true;
+ CHECK(SUCCEEDED(hr));
+ }
}
-void DirectCompositionSurfaceWin::ReleaseDrawTexture() {
+void DirectCompositionSurfaceWin::ReleaseDrawTexture(bool will_discard) {
if (real_surface_) {
eglDestroySurface(GetDisplay(), real_surface_);
real_surface_ = nullptr;
}
if (draw_texture_) {
- draw_texture_.Release();
- HRESULT hr = dcomp_surface_->EndDraw();
- CHECK(SUCCEEDED(hr));
+ draw_texture_.Reset();
+ if (dcomp_surface_) {
+ HRESULT hr = dcomp_surface_->EndDraw();
+ CHECK(SUCCEEDED(hr));
+ } else if (!will_discard) {
+ DXGI_PRESENT_PARAMETERS params = {};
+ RECT dirty_rect = swap_rect_.ToRECT();
+ params.DirtyRectsCount = 1;
+ params.pDirtyRects = &dirty_rect;
+ swap_chain_->Present1(first_swap_ ? 0 : 1, 0, &params);
+ if (first_swap_) {
+ // Wait for the GPU to finish executing its commands before
+ // committing the DirectComposition tree, or else the swapchain
+ // may flicker black when it's first presented.
+ base::win::ScopedComPtr<IDXGIDevice2> dxgi_device2;
+ HRESULT hr = d3d11_device_.QueryInterface(dxgi_device2.Receive());
+ DCHECK(SUCCEEDED(hr));
+ base::WaitableEvent event(
+ base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ dxgi_device2->EnqueueSetEvent(event.handle());
+ event.Wait();
+ first_swap_ = false;
+ }
+ }
}
if (dcomp_surface_ == g_current_surface)
g_current_surface = nullptr;
@@ -161,10 +1059,13 @@ void DirectCompositionSurfaceWin::Destroy() {
}
real_surface_ = nullptr;
}
- if (dcomp_surface_ == g_current_surface)
+ if (dcomp_surface_ && (dcomp_surface_ == g_current_surface)) {
+ HRESULT hr = dcomp_surface_->EndDraw();
+ CHECK(SUCCEEDED(hr));
g_current_surface = nullptr;
- draw_texture_.Release();
- dcomp_surface_.Release();
+ }
+ draw_texture_.Reset();
+ dcomp_surface_.Reset();
}
gfx::Size DirectCompositionSurfaceWin::GetSize() {
@@ -182,7 +1083,7 @@ void* DirectCompositionSurfaceWin::GetHandle() {
bool DirectCompositionSurfaceWin::Resize(const gfx::Size& size,
float scale_factor,
bool has_alpha) {
- if (size == GetSize())
+ if ((size == GetSize()) && (has_alpha == has_alpha_))
return true;
// Force a resize and redraw (but not a move, activate, etc.).
@@ -192,7 +1093,10 @@ bool DirectCompositionSurfaceWin::Resize(const gfx::Size& size,
return false;
}
size_ = size;
- InitializeSurface();
+ has_alpha_ = has_alpha;
+ ScopedReleaseCurrent release_current(this);
+ // New surface will be initialized in SetDrawRectangle.
+ ReleaseCurrentSurface();
return true;
}
@@ -200,18 +1104,9 @@ bool DirectCompositionSurfaceWin::Resize(const gfx::Size& size,
gfx::SwapResult DirectCompositionSurfaceWin::SwapBuffers() {
{
ScopedReleaseCurrent release_current(this);
- ReleaseDrawTexture();
- visual_->SetContent(dcomp_surface_.get());
+ ReleaseDrawTexture(false);
- CommitAndClearPendingOverlays();
- dcomp_device_->Commit();
- }
- // Force the driver to finish drawing before clearing the contents to
- // transparent, to reduce or eliminate the period of time where the contents
- // have flashed black.
- if (first_swap_) {
- glFinish();
- first_swap_ = false;
+ layer_tree_->CommitAndClearPendingOverlays();
}
child_window_.ClearInvalidContents();
return gfx::SwapResult::SWAP_ACK;
@@ -221,35 +1116,26 @@ gfx::SwapResult DirectCompositionSurfaceWin::PostSubBuffer(int x,
int y,
int width,
int height) {
- ScopedReleaseCurrent release_current(this);
- ReleaseDrawTexture();
- visual_->SetContent(dcomp_surface_.get());
- CommitAndClearPendingOverlays();
- dcomp_device_->Commit();
- child_window_.ClearInvalidContents();
- return gfx::SwapResult::SWAP_ACK;
+ // The arguments are ignored because SetDrawRectangle specifies the area to
+ // be swapped.
+ return SwapBuffers();
}
gfx::VSyncProvider* DirectCompositionSurfaceWin::GetVSyncProvider() {
return vsync_provider_.get();
}
-bool DirectCompositionSurfaceWin::ScheduleOverlayPlane(
- int z_order,
- gfx::OverlayTransform transform,
- gl::GLImage* image,
- const gfx::Rect& bounds_rect,
- const gfx::RectF& crop_rect) {
- pending_overlays_.push_back(
- Overlay(z_order, transform, image, bounds_rect, crop_rect));
- return true;
+bool DirectCompositionSurfaceWin::ScheduleDCLayer(
+ const ui::DCRendererLayerParams& params) {
+ return layer_tree_->ScheduleDCLayer(params);
}
-bool DirectCompositionSurfaceWin::CommitAndClearPendingOverlays() {
- pending_overlays_.clear();
+bool DirectCompositionSurfaceWin::SetEnableDCLayers(bool enable) {
+ enable_dc_layers_ = enable;
return true;
}
+
bool DirectCompositionSurfaceWin::FlipsVertically() const {
return true;
}
@@ -274,13 +1160,22 @@ bool DirectCompositionSurfaceWin::OnMakeCurrent(gl::GLContext* context) {
return true;
}
-bool DirectCompositionSurfaceWin::SupportsSetDrawRectangle() const {
+bool DirectCompositionSurfaceWin::SupportsDCLayers() const {
return true;
}
bool DirectCompositionSurfaceWin::SetDrawRectangle(const gfx::Rect& rectangle) {
if (draw_texture_)
return false;
+ DCHECK(!real_surface_);
+ ScopedReleaseCurrent release_current(this);
+
+ if ((enable_dc_layers_ && !dcomp_surface_) ||
+ (!enable_dc_layers_ && !swap_chain_)) {
+ ReleaseCurrentSurface();
+ InitializeSurface();
+ }
+
if (!gfx::Rect(size_).Contains(rectangle)) {
DLOG(ERROR) << "Draw rectangle must be contained within size of surface";
return false;
@@ -290,17 +1185,22 @@ bool DirectCompositionSurfaceWin::SetDrawRectangle(const gfx::Rect& rectangle) {
return false;
}
- DCHECK(!real_surface_);
CHECK(!g_current_surface);
- ScopedReleaseCurrent release_current(this);
RECT rect = rectangle.ToRECT();
- // TODO(jbauman): Use update_offset
- POINT update_offset;
-
- HRESULT hr = dcomp_surface_->BeginDraw(
- &rect, IID_PPV_ARGS(draw_texture_.Receive()), &update_offset);
- CHECK(SUCCEEDED(hr));
+ if (dcomp_surface_) {
+ POINT update_offset;
+ HRESULT hr = dcomp_surface_->BeginDraw(
+ &rect, IID_PPV_ARGS(draw_texture_.Receive()), &update_offset);
+ draw_offset_ = gfx::Point(update_offset) - gfx::Rect(rect).origin();
+ CHECK(SUCCEEDED(hr));
+ } else {
+ HRESULT hr =
+ swap_chain_->GetBuffer(0, IID_PPV_ARGS(draw_texture_.Receive()));
+ swap_rect_ = rectangle;
+ draw_offset_ = gfx::Vector2d();
+ CHECK(SUCCEEDED(hr));
+ }
has_been_rendered_to_ = true;
g_current_surface = dcomp_surface_.get();
@@ -323,19 +1223,18 @@ bool DirectCompositionSurfaceWin::SetDrawRectangle(const gfx::Rect& rectangle) {
return true;
}
-DirectCompositionSurfaceWin::Overlay::Overlay(int z_order,
- gfx::OverlayTransform transform,
- scoped_refptr<gl::GLImage> image,
- gfx::Rect bounds_rect,
- gfx::RectF crop_rect)
- : z_order(z_order),
- transform(transform),
- image(image),
- bounds_rect(bounds_rect),
- crop_rect(crop_rect) {}
+gfx::Vector2d DirectCompositionSurfaceWin::GetDrawOffset() const {
+ return draw_offset_;
+}
-DirectCompositionSurfaceWin::Overlay::Overlay(const Overlay& overlay) = default;
+scoped_refptr<base::TaskRunner>
+DirectCompositionSurfaceWin::GetWindowTaskRunnerForTesting() {
+ return child_window_.GetTaskRunnerForTesting();
+}
-DirectCompositionSurfaceWin::Overlay::~Overlay() {}
+base::win::ScopedComPtr<IDXGISwapChain1>
+DirectCompositionSurfaceWin::GetLayerSwapChainForTesting(size_t index) const {
+ return layer_tree_->GetLayerSwapChainForTesting(index);
+}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.h b/chromium/gpu/ipc/service/direct_composition_surface_win.h
index 6b7fec7e64f..6db44b7d0b1 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.h
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win.h
@@ -11,6 +11,7 @@
#include "base/memory/weak_ptr.h"
#include "base/win/scoped_comptr.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/gpu_export.h"
#include "gpu/ipc/service/child_window_win.h"
#include "gpu/ipc/service/image_transport_surface_delegate.h"
@@ -19,12 +20,19 @@
namespace gpu {
+class DCLayerTree;
+
class GPU_EXPORT DirectCompositionSurfaceWin : public gl::GLSurfaceEGL {
public:
DirectCompositionSurfaceWin(
+ std::unique_ptr<gfx::VSyncProvider> vsync_provider,
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
HWND parent_window);
+ // Returns true if there's an output on the current adapter that can
+ // use overlays.
+ static bool AreOverlaysSupported();
+
bool InitializeNativeWindow();
// GLSurfaceEGL implementation.
@@ -40,46 +48,50 @@ class GPU_EXPORT DirectCompositionSurfaceWin : public gl::GLSurfaceEGL {
gfx::SwapResult SwapBuffers() override;
gfx::SwapResult PostSubBuffer(int x, int y, int width, int height) override;
gfx::VSyncProvider* GetVSyncProvider() override;
- bool ScheduleOverlayPlane(int z_order,
- gfx::OverlayTransform transform,
- gl::GLImage* image,
- const gfx::Rect& bounds_rect,
- const gfx::RectF& crop_rect) override;
+ bool SetEnableDCLayers(bool enable) override;
bool FlipsVertically() const override;
bool SupportsPostSubBuffer() override;
bool OnMakeCurrent(gl::GLContext* context) override;
- bool SupportsSetDrawRectangle() const override;
+ bool SupportsDCLayers() const override;
bool SetDrawRectangle(const gfx::Rect& rect) override;
+ gfx::Vector2d GetDrawOffset() const override;
+
+ // This schedules an overlay plane to be displayed on the next SwapBuffers
+ // or PostSubBuffer call. Overlay planes must be scheduled before every swap
+ // to remain in the layer tree. This surface's backbuffer doesn't have to be
+ // scheduled with ScheduleDCLayer, as it's automatically placed in the layer
+ // tree at z-order 0.
+ bool ScheduleDCLayer(const ui::DCRendererLayerParams& params) override;
+
+ const base::win::ScopedComPtr<IDCompositionSurface>& dcomp_surface() const {
+ return dcomp_surface_;
+ }
- bool Initialize(std::unique_ptr<gfx::VSyncProvider> vsync_provider);
+ scoped_refptr<base::TaskRunner> GetWindowTaskRunnerForTesting();
+ const base::win::ScopedComPtr<IDXGISwapChain1>& swap_chain() const {
+ return swap_chain_;
+ }
+
+ base::win::ScopedComPtr<IDXGISwapChain1> GetLayerSwapChainForTesting(
+ size_t index) const;
+
+ const GpuDriverBugWorkarounds& workarounds() const { return workarounds_; }
protected:
~DirectCompositionSurfaceWin() override;
private:
- struct Overlay {
- Overlay(int z_order,
- gfx::OverlayTransform transform,
- scoped_refptr<gl::GLImage> image,
- gfx::Rect bounds_rect,
- gfx::RectF crop_rect);
- Overlay(const Overlay& overlay);
-
- ~Overlay();
-
- int z_order;
- gfx::OverlayTransform transform;
- scoped_refptr<gl::GLImage> image;
- gfx::Rect bounds_rect;
- gfx::RectF crop_rect;
- };
-
- bool CommitAndClearPendingOverlays();
+ void ReleaseCurrentSurface();
void InitializeSurface();
- void ReleaseDrawTexture();
+ // Release the texture that's currently being drawn to. If will_discard is
+ // true then the surface should be discarded without swapping any contents
+ // to it.
+ void ReleaseDrawTexture(bool will_discard);
ChildWindowWin child_window_;
+ GpuDriverBugWorkarounds workarounds_;
+
HWND window_ = nullptr;
// This is a placeholder surface used when not rendering to the
// DirectComposition surface.
@@ -90,14 +102,17 @@ class GPU_EXPORT DirectCompositionSurfaceWin : public gl::GLSurfaceEGL {
EGLSurface real_surface_ = 0;
gfx::Size size_ = gfx::Size(1, 1);
bool first_swap_ = true;
+ bool enable_dc_layers_ = false;
+ bool has_alpha_ = true;
std::unique_ptr<gfx::VSyncProvider> vsync_provider_;
- std::vector<Overlay> pending_overlays_;
+ gfx::Rect swap_rect_;
+ std::unique_ptr<DCLayerTree> layer_tree_;
+ gfx::Vector2d draw_offset_;
base::win::ScopedComPtr<ID3D11Device> d3d11_device_;
base::win::ScopedComPtr<IDCompositionDevice2> dcomp_device_;
- base::win::ScopedComPtr<IDCompositionTarget> dcomp_target_;
- base::win::ScopedComPtr<IDCompositionVisual2> visual_;
base::win::ScopedComPtr<IDCompositionSurface> dcomp_surface_;
+ base::win::ScopedComPtr<IDXGISwapChain1> swap_chain_;
base::win::ScopedComPtr<ID3D11Texture2D> draw_texture_;
// Keep track of whether the texture has been rendered to, as the first draw
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc b/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
index 3e43906403c..432251d9b0c 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
@@ -3,83 +3,182 @@
// found in the LICENSE file.
#include "gpu/ipc/service/direct_composition_surface_win.h"
+
#include "base/memory/weak_ptr.h"
#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/win/scoped_gdi_object.h"
+#include "base/win/scoped_hdc.h"
+#include "base/win/scoped_select_object.h"
+#include "gpu/command_buffer/service/feature_info.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/base/win/hidden_window.h"
+#include "ui/gfx/gdi_util.h"
+#include "ui/gfx/transform.h"
+#include "ui/gl/dc_renderer_layer_params.h"
#include "ui/gl/gl_angle_util_win.h"
#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_image_dxgi.h"
#include "ui/gl/init/gl_factory.h"
+#include "ui/platform_window/platform_window_delegate.h"
+#include "ui/platform_window/win/win_window.h"
namespace gpu {
namespace {
+bool CheckIfDCSupported() {
+ if (!gl::QueryDirectCompositionDevice(
+ gl::QueryD3D11DeviceObjectFromANGLE())) {
+ LOG(WARNING)
+ << "GL implementation not using DirectComposition, skipping test.";
+ return false;
+ }
+ return true;
+}
+
class TestImageTransportSurfaceDelegate
: public ImageTransportSurfaceDelegate,
public base::SupportsWeakPtr<TestImageTransportSurfaceDelegate> {
public:
+ TestImageTransportSurfaceDelegate()
+ : feature_info_(new gpu::gles2::FeatureInfo()) {}
+
~TestImageTransportSurfaceDelegate() override {}
// ImageTransportSurfaceDelegate implementation.
void DidCreateAcceleratedSurfaceChildWindow(
SurfaceHandle parent_window,
- SurfaceHandle child_window) override {}
+ SurfaceHandle child_window) override {
+ if (parent_window)
+ ::SetParent(child_window, parent_window);
+ }
void DidSwapBuffersComplete(SwapBuffersCompleteParams params) override {}
- const gles2::FeatureInfo* GetFeatureInfo() const override { return nullptr; }
+ const gles2::FeatureInfo* GetFeatureInfo() const override {
+ return feature_info_.get();
+ }
void SetLatencyInfoCallback(const LatencyInfoCallback& callback) override {}
void UpdateVSyncParameters(base::TimeTicks timebase,
base::TimeDelta interval) override {}
void AddFilter(IPC::MessageFilter* message_filter) override {}
int32_t GetRouteID() const override { return 0; }
+
+ private:
+ scoped_refptr<gpu::gles2::FeatureInfo> feature_info_;
};
+class TestPlatformDelegate : public ui::PlatformWindowDelegate {
+ public:
+ // ui::PlatformWindowDelegate implementation.
+ void OnBoundsChanged(const gfx::Rect& new_bounds) override {}
+ void OnDamageRect(const gfx::Rect& damaged_region) override {}
+ void DispatchEvent(ui::Event* event) override {}
+ void OnCloseRequest() override {}
+ void OnClosed() override {}
+ void OnWindowStateChanged(ui::PlatformWindowState new_state) override {}
+ void OnLostCapture() override {}
+ void OnAcceleratedWidgetAvailable(gfx::AcceleratedWidget widget,
+ float device_pixel_ratio) override {}
+ void OnAcceleratedWidgetDestroyed() override {}
+ void OnActivationChanged(bool active) override {}
+};
+
+void RunPendingTasks(scoped_refptr<base::TaskRunner> task_runner) {
+ base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner->PostTask(FROM_HERE,
+ Bind(&base::WaitableEvent::Signal, Unretained(&done)));
+ done.Wait();
+}
+
+void DestroySurface(scoped_refptr<DirectCompositionSurfaceWin> surface) {
+ scoped_refptr<base::TaskRunner> task_runner =
+ surface->GetWindowTaskRunnerForTesting();
+ DCHECK(surface->HasOneRef());
+
+ surface = nullptr;
+
+ // Ensure that the ChildWindowWin posts the task to delete the thread to the
+ // main loop before doing RunUntilIdle. Otherwise the child threads could
+ // outlive the main thread.
+ RunPendingTasks(task_runner);
+
+ base::RunLoop().RunUntilIdle();
+}
+
+base::win::ScopedComPtr<ID3D11Texture2D> CreateNV12Texture(
+ const base::win::ScopedComPtr<ID3D11Device>& d3d11_device,
+ const gfx::Size& size) {
+ D3D11_TEXTURE2D_DESC desc = {};
+ desc.Width = size.width();
+ desc.Height = size.height();
+ desc.MipLevels = 1;
+ desc.ArraySize = 1;
+ desc.Format = DXGI_FORMAT_NV12;
+ desc.Usage = D3D11_USAGE_DEFAULT;
+ desc.SampleDesc.Count = 1;
+ desc.BindFlags = 0;
+
+ std::vector<char> image_data(size.width() * size.height() * 3 / 2);
+ // Y, U, and V should all be 0xff. Output color should be pink.
+ memset(&image_data[0], 0xff, size.width() * size.height() * 3 / 2);
+
+ D3D11_SUBRESOURCE_DATA data = {};
+ data.pSysMem = (const void*)&image_data[0];
+ data.SysMemPitch = size.width();
+
+ base::win::ScopedComPtr<ID3D11Texture2D> texture;
+ HRESULT hr = d3d11_device->CreateTexture2D(&desc, &data, texture.Receive());
+ CHECK(SUCCEEDED(hr));
+ return texture;
+}
+
TEST(DirectCompositionSurfaceTest, TestMakeCurrent) {
- if (!gl::QueryDirectCompositionDevice(
- gl::QueryD3D11DeviceObjectFromANGLE())) {
- LOG(WARNING)
- << "GL implementation not using DirectComposition, skipping test.";
+ if (!CheckIfDCSupported())
return;
- }
TestImageTransportSurfaceDelegate delegate;
- scoped_refptr<DirectCompositionSurfaceWin> surface(
- new DirectCompositionSurfaceWin(delegate.AsWeakPtr(),
+ scoped_refptr<DirectCompositionSurfaceWin> surface1(
+ new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize());
+ EXPECT_TRUE(surface1->Initialize());
+ surface1->SetEnableDCLayers(true);
- scoped_refptr<gl::GLContext> context =
- gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
- EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0, true));
+ scoped_refptr<gl::GLContext> context1 = gl::init::CreateGLContext(
+ nullptr, surface1.get(), gl::GLContextAttribs());
+ EXPECT_TRUE(surface1->Resize(gfx::Size(100, 100), 1.0, true));
// First SetDrawRectangle must be full size of surface.
- EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
- EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
+ EXPECT_FALSE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
+ EXPECT_TRUE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
// SetDrawRectangle can't be called again until swap.
- EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
+ EXPECT_FALSE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
- EXPECT_TRUE(context->MakeCurrent(surface.get()));
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers());
+ EXPECT_TRUE(context1->MakeCurrent(surface1.get()));
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface1->SwapBuffers());
- EXPECT_TRUE(context->IsCurrent(surface.get()));
+ EXPECT_TRUE(context1->IsCurrent(surface1.get()));
// SetDrawRectangle must be contained within surface.
- EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 101, 101)));
- EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
- EXPECT_TRUE(context->IsCurrent(surface.get()));
+ EXPECT_FALSE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 101, 101)));
+ EXPECT_TRUE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
+ EXPECT_TRUE(context1->IsCurrent(surface1.get()));
- EXPECT_TRUE(surface->Resize(gfx::Size(50, 50), 1.0, true));
- EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
- EXPECT_TRUE(context->IsCurrent(surface.get()));
+ EXPECT_TRUE(surface1->Resize(gfx::Size(50, 50), 1.0, true));
+ EXPECT_TRUE(context1->IsCurrent(surface1.get()));
+ EXPECT_TRUE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
+ EXPECT_TRUE(context1->IsCurrent(surface1.get()));
scoped_refptr<DirectCompositionSurfaceWin> surface2(
- new DirectCompositionSurfaceWin(delegate.AsWeakPtr(),
+ new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
ui::GetHiddenWindow()));
EXPECT_TRUE(surface2->Initialize());
scoped_refptr<gl::GLContext> context2 = gl::init::CreateGLContext(
nullptr, surface2.get(), gl::GLContextAttribs());
+ surface2->SetEnableDCLayers(true);
EXPECT_TRUE(context2->MakeCurrent(surface2.get()));
EXPECT_TRUE(surface2->Resize(gfx::Size(100, 100), 1.0, true));
// The previous IDCompositionSurface should be suspended when another
@@ -89,13 +188,341 @@ TEST(DirectCompositionSurfaceTest, TestMakeCurrent) {
// It should be possible to switch back to the previous surface and
// unsuspend it.
+ EXPECT_TRUE(context1->MakeCurrent(surface1.get()));
+ context2 = nullptr;
+ context1 = nullptr;
+
+ DestroySurface(std::move(surface1));
+ DestroySurface(std::move(surface2));
+}
+
+// Tests that switching using EnableDCLayers works.
+TEST(DirectCompositionSurfaceTest, DXGIDCLayerSwitch) {
+ if (!CheckIfDCSupported())
+ return;
+
+ TestImageTransportSurfaceDelegate delegate;
+
+ scoped_refptr<DirectCompositionSurfaceWin> surface(
+ new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
+ ui::GetHiddenWindow()));
+ EXPECT_TRUE(surface->Initialize());
+
+ scoped_refptr<gl::GLContext> context =
+ gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
+ EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0, true));
+ EXPECT_FALSE(surface->swap_chain());
+
+ // First SetDrawRectangle must be full size of surface for DXGI
+ // swapchain.
+ EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
+ EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
+ EXPECT_TRUE(surface->swap_chain());
+
+ // SetDrawRectangle can't be called again until swap.
+ EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
+
EXPECT_TRUE(context->MakeCurrent(surface.get()));
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers());
+
+ EXPECT_TRUE(context->IsCurrent(surface.get()));
+
+ surface->SetEnableDCLayers(true);
+
+ // Surface switched to use IDCompositionSurface, so must draw to
+ // entire surface.
+ EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
+ EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
+ EXPECT_TRUE(context->IsCurrent(surface.get()));
+ EXPECT_FALSE(surface->swap_chain());
+
+ surface->SetEnableDCLayers(false);
+
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers());
+
+ // Surface switched to use IDXGISwapChain, so must draw to entire
+ // surface.
+ EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
+ EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
+ EXPECT_TRUE(surface->swap_chain());
- context2 = nullptr;
- surface2 = nullptr;
context = nullptr;
- surface = nullptr;
- base::RunLoop().RunUntilIdle();
+ DestroySurface(std::move(surface));
+}
+
+// Ensure that the swapchain's alpha is correct.
+TEST(DirectCompositionSurfaceTest, SwitchAlpha) {
+ if (!CheckIfDCSupported())
+ return;
+
+ TestImageTransportSurfaceDelegate delegate;
+
+ scoped_refptr<DirectCompositionSurfaceWin> surface(
+ new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
+ ui::GetHiddenWindow()));
+ EXPECT_TRUE(surface->Initialize());
+
+ scoped_refptr<gl::GLContext> context =
+ gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
+ EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0, true));
+ EXPECT_FALSE(surface->swap_chain());
+
+ EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
+ base::win::ScopedComPtr<IDXGISwapChain1> swap_chain = surface->swap_chain();
+ ASSERT_TRUE(swap_chain);
+ DXGI_SWAP_CHAIN_DESC1 desc;
+ swap_chain->GetDesc1(&desc);
+ EXPECT_EQ(DXGI_ALPHA_MODE_PREMULTIPLIED, desc.AlphaMode);
+
+ // Resize to the same parameters should have no effect.
+ EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0, true));
+ EXPECT_TRUE(surface->swap_chain());
+
+ EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0, false));
+ EXPECT_FALSE(surface->swap_chain());
+
+ EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
+
+ swap_chain = surface->swap_chain();
+ ASSERT_TRUE(swap_chain);
+ swap_chain->GetDesc1(&desc);
+ EXPECT_EQ(DXGI_ALPHA_MODE_IGNORE, desc.AlphaMode);
+
+ context = nullptr;
+ DestroySurface(std::move(surface));
+}
+
+// Ensure that the GLImage isn't presented again unless it changes.
+TEST(DirectCompositionSurfaceTest, NoPresentTwice) {
+ if (!CheckIfDCSupported())
+ return;
+
+ TestImageTransportSurfaceDelegate delegate;
+ scoped_refptr<DirectCompositionSurfaceWin> surface(
+ new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
+ ui::GetHiddenWindow()));
+ EXPECT_TRUE(surface->Initialize());
+ surface->SetEnableDCLayers(true);
+ gfx::Size window_size(100, 100);
+
+ scoped_refptr<gl::GLContext> context =
+ gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
+
+ base::win::ScopedComPtr<ID3D11Device> d3d11_device =
+ gl::QueryD3D11DeviceObjectFromANGLE();
+
+ gfx::Size texture_size(50, 50);
+ base::win::ScopedComPtr<ID3D11Texture2D> texture =
+ CreateNV12Texture(d3d11_device, texture_size);
+
+ scoped_refptr<gl::GLImageDXGI> image_dxgi(
+ new gl::GLImageDXGI(texture_size, nullptr));
+ image_dxgi->SetTexture(texture, 0);
+
+ ui::DCRendererLayerParams params(false, gfx::Rect(), 1, gfx::Transform(),
+ image_dxgi.get(),
+ gfx::RectF(gfx::Rect(texture_size)),
+ gfx::Rect(window_size), 0, 0, 1.0, 0);
+ surface->ScheduleDCLayer(params);
+
+ base::win::ScopedComPtr<IDXGISwapChain1> swap_chain =
+ surface->GetLayerSwapChainForTesting(1);
+ ASSERT_FALSE(swap_chain);
+
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers());
+
+ swap_chain = surface->GetLayerSwapChainForTesting(1);
+ ASSERT_TRUE(swap_chain);
+
+ UINT last_present_count = 0;
+ EXPECT_TRUE(SUCCEEDED(swap_chain->GetLastPresentCount(&last_present_count)));
+
+ // One present is normal, and a second present because it's the first frame
+ // and the other buffer needs to be drawn to.
+ EXPECT_EQ(2u, last_present_count);
+
+ surface->ScheduleDCLayer(params);
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers());
+
+ base::win::ScopedComPtr<IDXGISwapChain1> swap_chain2 =
+ surface->GetLayerSwapChainForTesting(1);
+ EXPECT_EQ(swap_chain2, swap_chain);
+
+ // It's the same image, so it should have the same swapchain.
+ EXPECT_TRUE(SUCCEEDED(swap_chain->GetLastPresentCount(&last_present_count)));
+ EXPECT_EQ(2u, last_present_count);
+
+ // The size of the swapchain changed, so it should be recreated.
+ ui::DCRendererLayerParams params2(false, gfx::Rect(), 1, gfx::Transform(),
+ image_dxgi.get(),
+ gfx::RectF(gfx::Rect(texture_size)),
+ gfx::Rect(0, 0, 25, 25), 0, 0, 1.0, 0);
+ surface->ScheduleDCLayer(params2);
+
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers());
+
+ base::win::ScopedComPtr<IDXGISwapChain1> swap_chain3 =
+ surface->GetLayerSwapChainForTesting(1);
+ EXPECT_NE(swap_chain2, swap_chain3);
+
+ context = nullptr;
+ DestroySurface(std::move(surface));
+}
+
+COLORREF ReadBackWindowPixel(HWND window, const gfx::Point& point) {
+ base::win::ScopedCreateDC mem_hdc(::CreateCompatibleDC(nullptr));
+ void* bits = nullptr;
+ BITMAPV4HEADER hdr;
+ gfx::CreateBitmapV4Header(point.x() + 1, point.y() + 1, &hdr);
+ DCHECK(mem_hdc.IsValid());
+ base::win::ScopedBitmap bitmap(
+ ::CreateDIBSection(mem_hdc.Get(), reinterpret_cast<BITMAPINFO*>(&hdr),
+ DIB_RGB_COLORS, &bits, nullptr, 0));
+ DCHECK(bitmap.is_valid());
+
+ base::win::ScopedSelectObject select_object(mem_hdc.Get(), bitmap.get());
+
+ // Grab a copy of the window. Use PrintWindow because it works even when the
+ // window's partially occluded. The PW_RENDERFULLCONTENT flag is undocumented,
+ // but works starting in Windows 8.1. It allows for capturing the contents of
+ // the window that are drawn using DirectComposition.
+ UINT flags = PW_CLIENTONLY | PW_RENDERFULLCONTENT;
+
+ BOOL result = PrintWindow(window, mem_hdc.Get(), flags);
+ if (!result)
+ PLOG(ERROR) << "Failed to print window";
+
+ GdiFlush();
+
+ uint32_t pixel_value =
+ static_cast<uint32_t*>(bits)[hdr.bV4Width * point.y() + point.x()];
+
+ return pixel_value;
+}
+
+class DirectCompositionPixelTest : public testing::Test {
+ public:
+ DirectCompositionPixelTest()
+ : window_(&platform_delegate_, gfx::Rect(0, 0, 100, 100)) {}
+
+ protected:
+ void InitializeSurface() {
+ static_cast<ui::PlatformWindow*>(&window_)->Show();
+
+ surface_ = new DirectCompositionSurfaceWin(nullptr, delegate_.AsWeakPtr(),
+ window_.hwnd());
+ EXPECT_TRUE(surface_->Initialize());
+ }
+
+ void PixelTestSwapChain(bool layers_enabled) {
+ if (!CheckIfDCSupported())
+ return;
+
+ InitializeSurface();
+
+ surface_->SetEnableDCLayers(layers_enabled);
+ gfx::Size window_size(100, 100);
+
+ scoped_refptr<gl::GLContext> context = gl::init::CreateGLContext(
+ nullptr, surface_.get(), gl::GLContextAttribs());
+ EXPECT_TRUE(surface_->Resize(window_size, 1.0, true));
+ EXPECT_TRUE(surface_->SetDrawRectangle(gfx::Rect(window_size)));
+ EXPECT_TRUE(context->MakeCurrent(surface_.get()));
+
+ glClearColor(1.0, 0.0, 0.0, 1.0);
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface_->SwapBuffers());
+
+ // Ensure DWM swap completed.
+ Sleep(1000);
+
+ SkColor actual_color =
+ ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
+ EXPECT_EQ(SK_ColorRED, actual_color);
+
+ EXPECT_TRUE(context->IsCurrent(surface_.get()));
+
+ context = nullptr;
+ DestroySurface(std::move(surface_));
+ }
+
+ TestPlatformDelegate platform_delegate_;
+ TestImageTransportSurfaceDelegate delegate_;
+ ui::WinWindow window_;
+ scoped_refptr<DirectCompositionSurfaceWin> surface_;
+};
+
+TEST_F(DirectCompositionPixelTest, DCLayersEnabled) {
+ PixelTestSwapChain(true);
+}
+
+TEST_F(DirectCompositionPixelTest, DCLayersDisabled) {
+ PixelTestSwapChain(false);
+}
+
+bool AreColorsSimilar(int a, int b) {
+ // The precise colors may differ depending on the video processor, so allow
+ // a margin for error.
+ const int kMargin = 10;
+ return abs(SkColorGetA(a) - SkColorGetA(b)) < kMargin &&
+ abs(SkColorGetR(a) - SkColorGetR(b)) < kMargin &&
+ abs(SkColorGetG(a) - SkColorGetG(b)) < kMargin &&
+ abs(SkColorGetB(a) - SkColorGetB(b)) < kMargin;
+}
+
+TEST_F(DirectCompositionPixelTest, VideoSwapchain) {
+ if (!CheckIfDCSupported())
+ return;
+ InitializeSurface();
+ surface_->SetEnableDCLayers(true);
+ gfx::Size window_size(100, 100);
+
+ scoped_refptr<gl::GLContext> context = gl::init::CreateGLContext(
+ nullptr, surface_.get(), gl::GLContextAttribs());
+ EXPECT_TRUE(surface_->Resize(window_size, 1.0, true));
+
+ base::win::ScopedComPtr<ID3D11Device> d3d11_device =
+ gl::QueryD3D11DeviceObjectFromANGLE();
+
+ gfx::Size texture_size(50, 50);
+ base::win::ScopedComPtr<ID3D11Texture2D> texture =
+ CreateNV12Texture(d3d11_device, texture_size);
+
+ scoped_refptr<gl::GLImageDXGI> image_dxgi(
+ new gl::GLImageDXGI(texture_size, nullptr));
+ image_dxgi->SetTexture(texture, 0);
+
+ ui::DCRendererLayerParams params(
+ false, gfx::Rect(), 1, gfx::Transform(),
+ image_dxgi.get(),
+ gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(texture_size), 0, 0, 1.0,
+ 0);
+ surface_->ScheduleDCLayer(params);
+
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface_->SwapBuffers());
+
+ // Scaling up the swapchain with the same image should cause it to be
+ // transformed again, but not presented again.
+ ui::DCRendererLayerParams params2(
+ false, gfx::Rect(), 1, gfx::Transform(),
+ image_dxgi.get(),
+ gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0,
+ 0);
+ surface_->ScheduleDCLayer(params2);
+
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface_->SwapBuffers());
+ Sleep(1000);
+
+ SkColor expected_color = SkColorSetRGB(0xff, 0xb7, 0xff);
+ SkColor actual_color =
+ ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
+ EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
+ << std::hex << "Expected " << expected_color << " Actual "
+ << actual_color;
+
+ context = nullptr;
+ DestroySurface(std::move(surface_));
}
} // namespace
diff --git a/chromium/gpu/ipc/service/gpu_channel.cc b/chromium/gpu/ipc/service/gpu_channel.cc
index 2d9ebefdf71..1d67b33ae2d 100644
--- a/chromium/gpu/ipc/service/gpu_channel.cc
+++ b/chromium/gpu/ipc/service/gpu_channel.cc
@@ -31,10 +31,9 @@
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/mailbox.h"
-#include "gpu/command_buffer/service/command_executor.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
-#include "gpu/command_buffer/service/sync_point_manager.h"
+#include "gpu/command_buffer/service/preemption_flag.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
@@ -65,68 +64,69 @@ const int64_t kMaxPreemptTimeMs = kVsyncIntervalMs;
// below this threshold.
const int64_t kStopPreemptThresholdMs = kVsyncIntervalMs;
+CommandBufferId GenerateCommandBufferId(int channel_id, int32_t route_id) {
+ return CommandBufferId::FromUnsafeValue(
+ (static_cast<uint64_t>(channel_id) << 32) | route_id);
+}
+
} // anonymous namespace
-scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create(
- int32_t stream_id,
- GpuStreamPriority stream_priority,
- GpuChannel* channel,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
- const scoped_refptr<PreemptionFlag>& preempting_flag,
- const scoped_refptr<PreemptionFlag>& preempted_flag,
- SyncPointManager* sync_point_manager) {
- return new GpuChannelMessageQueue(stream_id, stream_priority, channel,
- io_task_runner, preempting_flag,
- preempted_flag, sync_point_manager);
+SyncChannelFilteredSender::SyncChannelFilteredSender(
+ IPC::ChannelHandle channel_handle,
+ IPC::Listener* listener,
+ scoped_refptr<base::SingleThreadTaskRunner> ipc_task_runner,
+ base::WaitableEvent* shutdown_event)
+ : channel_(IPC::SyncChannel::Create(channel_handle,
+ IPC::Channel::MODE_SERVER,
+ listener,
+ ipc_task_runner,
+ false,
+ shutdown_event)) {}
+
+SyncChannelFilteredSender::~SyncChannelFilteredSender() = default;
+
+bool SyncChannelFilteredSender::Send(IPC::Message* msg) {
+ return channel_->Send(msg);
+}
+
+void SyncChannelFilteredSender::AddFilter(IPC::MessageFilter* filter) {
+ channel_->AddFilter(filter);
}
-scoped_refptr<SyncPointOrderData>
-GpuChannelMessageQueue::GetSyncPointOrderData() {
- return sync_point_order_data_;
+void SyncChannelFilteredSender::RemoveFilter(IPC::MessageFilter* filter) {
+ channel_->RemoveFilter(filter);
}
GpuChannelMessageQueue::GpuChannelMessageQueue(
- int32_t stream_id,
- GpuStreamPriority stream_priority,
GpuChannel* channel,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
- const scoped_refptr<PreemptionFlag>& preempting_flag,
- const scoped_refptr<PreemptionFlag>& preempted_flag,
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
+ scoped_refptr<PreemptionFlag> preempting_flag,
+ scoped_refptr<PreemptionFlag> preempted_flag,
SyncPointManager* sync_point_manager)
- : stream_id_(stream_id),
- stream_priority_(stream_priority),
- enabled_(true),
- scheduled_(true),
- channel_(channel),
- preemption_state_(IDLE),
+ : channel_(channel),
max_preemption_time_(
base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)),
timer_(new base::OneShotTimer),
- sync_point_order_data_(SyncPointOrderData::Create()),
- io_task_runner_(io_task_runner),
- preempting_flag_(preempting_flag),
- preempted_flag_(preempted_flag),
+ sync_point_order_data_(sync_point_manager->CreateSyncPointOrderData()),
+ main_task_runner_(std::move(main_task_runner)),
+ io_task_runner_(std::move(io_task_runner)),
+ preempting_flag_(std::move(preempting_flag)),
+ preempted_flag_(std::move(preempted_flag)),
sync_point_manager_(sync_point_manager) {
- timer_->SetTaskRunner(io_task_runner);
+ timer_->SetTaskRunner(io_task_runner_);
io_thread_checker_.DetachFromThread();
}
GpuChannelMessageQueue::~GpuChannelMessageQueue() {
- DCHECK(!enabled_);
DCHECK(channel_messages_.empty());
}
-void GpuChannelMessageQueue::Disable() {
- {
- base::AutoLock auto_lock(channel_lock_);
- DCHECK(enabled_);
- enabled_ = false;
- }
-
- // We guarantee that the queues will no longer be modified after enabled_
- // is set to false, it is now safe to modify the queue without the lock.
- // All public facing modifying functions check enabled_ while all
- // private modifying functions DCHECK(enabled_) to enforce this.
+void GpuChannelMessageQueue::Destroy() {
+ // We guarantee that the queue will no longer be modified after Destroy is
+ // called, it is now safe to modify the queue without the lock. All public
+ // facing modifying functions check enabled_ while all private modifying
+ // functions DCHECK(enabled_) to enforce this.
while (!channel_messages_.empty()) {
const IPC::Message& msg = channel_messages_.front()->message;
if (msg.is_sync()) {
@@ -138,15 +138,16 @@ void GpuChannelMessageQueue::Disable() {
}
sync_point_order_data_->Destroy();
- sync_point_order_data_ = nullptr;
+ if (preempting_flag_)
+ preempting_flag_->Reset();
+
+ // Destroy timer on io thread.
io_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuChannelMessageQueue::DisableIO, this));
-}
+ FROM_HERE, base::Bind([](std::unique_ptr<base::OneShotTimer>) {},
+ base::Passed(&timer_)));
-void GpuChannelMessageQueue::DisableIO() {
- DCHECK(io_thread_checker_.CalledOnValidThread());
- timer_ = nullptr;
+ channel_ = nullptr;
}
bool GpuChannelMessageQueue::IsScheduled() const {
@@ -154,14 +155,13 @@ bool GpuChannelMessageQueue::IsScheduled() const {
return scheduled_;
}
-void GpuChannelMessageQueue::OnRescheduled(bool scheduled) {
+void GpuChannelMessageQueue::SetScheduled(bool scheduled) {
base::AutoLock lock(channel_lock_);
- DCHECK(enabled_);
if (scheduled_ == scheduled)
return;
scheduled_ = scheduled;
if (scheduled)
- channel_->PostHandleMessage(this);
+ PostHandleMessageOnQueue();
if (preempting_flag_) {
io_task_runner_->PostTask(
FROM_HERE,
@@ -169,53 +169,46 @@ void GpuChannelMessageQueue::OnRescheduled(bool scheduled) {
}
}
-uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const {
- return sync_point_order_data_->unprocessed_order_num();
-}
-
-uint32_t GpuChannelMessageQueue::GetProcessedOrderNum() const {
- return sync_point_order_data_->processed_order_num();
-}
-
-bool GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) {
+void GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) {
base::AutoLock auto_lock(channel_lock_);
- if (enabled_) {
- if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
- message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
- channel_->PostHandleOutOfOrderMessage(message);
- return true;
- }
+ DCHECK(channel_);
+ uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber();
+ std::unique_ptr<GpuChannelMessage> msg(
+ new GpuChannelMessage(message, order_num, base::TimeTicks::Now()));
- uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber(
- sync_point_manager_);
- std::unique_ptr<GpuChannelMessage> msg(
- new GpuChannelMessage(message, order_num, base::TimeTicks::Now()));
+ channel_messages_.push_back(std::move(msg));
- if (channel_messages_.empty()) {
- DCHECK(scheduled_);
- channel_->PostHandleMessage(this);
- }
+ bool first_message = channel_messages_.size() == 1;
+ if (first_message)
+ PostHandleMessageOnQueue();
- channel_messages_.push_back(std::move(msg));
-
- if (preempting_flag_)
- UpdatePreemptionStateHelper();
+ if (preempting_flag_)
+ UpdatePreemptionStateHelper();
+}
- return true;
- }
- return false;
+void GpuChannelMessageQueue::PostHandleMessageOnQueue() {
+ channel_lock_.AssertAcquired();
+ DCHECK(channel_);
+ DCHECK(scheduled_);
+ DCHECK(!channel_messages_.empty());
+ DCHECK(!handle_message_post_task_pending_);
+ handle_message_post_task_pending_ = true;
+ main_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&GpuChannel::HandleMessageOnQueue, channel_->AsWeakPtr()));
}
const GpuChannelMessage* GpuChannelMessageQueue::BeginMessageProcessing() {
base::AutoLock auto_lock(channel_lock_);
- DCHECK(enabled_);
+ DCHECK(channel_);
+ DCHECK(scheduled_);
+ DCHECK(!channel_messages_.empty());
+ handle_message_post_task_pending_ = false;
// If we have been preempted by another channel, just post a task to wake up.
if (preempted_flag_ && preempted_flag_->IsSet()) {
- channel_->PostHandleMessage(this);
+ PostHandleMessageOnQueue();
return nullptr;
}
- if (channel_messages_.empty())
- return nullptr;
sync_point_order_data_->BeginProcessingOrderNumber(
channel_messages_.front()->order_number);
return channel_messages_.front().get();
@@ -227,7 +220,7 @@ void GpuChannelMessageQueue::PauseMessageProcessing() {
// If we have been preempted by another channel, just post a task to wake up.
if (scheduled_)
- channel_->PostHandleMessage(this);
+ PostHandleMessageOnQueue();
sync_point_order_data_->PauseProcessingOrderNumber(
channel_messages_.front()->order_number);
@@ -243,7 +236,7 @@ void GpuChannelMessageQueue::FinishMessageProcessing() {
channel_messages_.pop_front();
if (!channel_messages_.empty())
- channel_->PostHandleMessage(this);
+ PostHandleMessageOnQueue();
if (preempting_flag_) {
io_task_runner_->PostTask(
@@ -256,7 +249,8 @@ void GpuChannelMessageQueue::UpdatePreemptionState() {
DCHECK(io_thread_checker_.CalledOnValidThread());
DCHECK(preempting_flag_);
base::AutoLock lock(channel_lock_);
- UpdatePreemptionStateHelper();
+ if (channel_)
+ UpdatePreemptionStateHelper();
}
void GpuChannelMessageQueue::UpdatePreemptionStateHelper() {
@@ -443,82 +437,72 @@ void GpuChannelMessageQueue::TransitionToWouldPreemptDescheduled() {
TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
}
-GpuChannelMessageFilter::GpuChannelMessageFilter()
- : channel_(nullptr), peer_pid_(base::kNullProcessId) {}
+GpuChannelMessageFilter::GpuChannelMessageFilter(
+ GpuChannel* gpu_channel,
+ scoped_refptr<GpuChannelMessageQueue> message_queue,
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner)
+ : gpu_channel_(gpu_channel),
+ message_queue_(std::move(message_queue)),
+ main_task_runner_(std::move(main_task_runner)) {}
-GpuChannelMessageFilter::~GpuChannelMessageFilter() {}
+GpuChannelMessageFilter::~GpuChannelMessageFilter() {
+ DCHECK(!gpu_channel_);
+}
+
+void GpuChannelMessageFilter::Destroy() {
+ base::AutoLock auto_lock(gpu_channel_lock_);
+ gpu_channel_ = nullptr;
+}
void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) {
- DCHECK(!channel_);
- channel_ = channel;
- for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
- filter->OnFilterAdded(channel_);
- }
+ DCHECK(!ipc_channel_);
+ ipc_channel_ = channel;
+ for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_)
+ filter->OnFilterAdded(ipc_channel_);
}
void GpuChannelMessageFilter::OnFilterRemoved() {
- for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
+ for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_)
filter->OnFilterRemoved();
- }
- channel_ = nullptr;
+ ipc_channel_ = nullptr;
peer_pid_ = base::kNullProcessId;
}
void GpuChannelMessageFilter::OnChannelConnected(int32_t peer_pid) {
DCHECK(peer_pid_ == base::kNullProcessId);
peer_pid_ = peer_pid;
- for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
+ for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_)
filter->OnChannelConnected(peer_pid);
- }
}
void GpuChannelMessageFilter::OnChannelError() {
- for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
+ for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_)
filter->OnChannelError();
- }
}
void GpuChannelMessageFilter::OnChannelClosing() {
- for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
+ for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_)
filter->OnChannelClosing();
- }
}
void GpuChannelMessageFilter::AddChannelFilter(
scoped_refptr<IPC::MessageFilter> filter) {
channel_filters_.push_back(filter);
- if (channel_)
- filter->OnFilterAdded(channel_);
+ if (ipc_channel_)
+ filter->OnFilterAdded(ipc_channel_);
if (peer_pid_ != base::kNullProcessId)
filter->OnChannelConnected(peer_pid_);
}
void GpuChannelMessageFilter::RemoveChannelFilter(
scoped_refptr<IPC::MessageFilter> filter) {
- if (channel_)
+ if (ipc_channel_)
filter->OnFilterRemoved();
- channel_filters_.erase(
- std::find(channel_filters_.begin(), channel_filters_.end(), filter));
-}
-
-// This gets called from the main thread and assumes that all messages which
-// lead to creation of a new route are synchronous messages.
-// TODO(sunnyps): Create routes (and streams) on the IO thread so that we can
-// make the CreateCommandBuffer/VideoDecoder/VideoEncoder messages asynchronous.
-void GpuChannelMessageFilter::AddRoute(
- int32_t route_id,
- const scoped_refptr<GpuChannelMessageQueue>& queue) {
- base::AutoLock lock(routes_lock_);
- routes_.insert(std::make_pair(route_id, queue));
-}
-
-void GpuChannelMessageFilter::RemoveRoute(int32_t route_id) {
- base::AutoLock lock(routes_lock_);
- routes_.erase(route_id);
+ base::Erase(channel_filters_, filter);
}
bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
- DCHECK(channel_);
+ DCHECK(ipc_channel_);
if (message.should_unblock() || message.is_reply())
return MessageErrorHandler(message, "Unexpected message type");
@@ -534,29 +518,28 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
return true;
}
- scoped_refptr<GpuChannelMessageQueue> message_queue =
- LookupStreamByRoute(message.routing_id());
-
- if (!message_queue)
- return MessageErrorHandler(message, "Could not find message queue");
-
- if (!message_queue->PushBackMessage(message))
+ base::AutoLock auto_lock(gpu_channel_lock_);
+ if (!gpu_channel_)
return MessageErrorHandler(message, "Channel destroyed");
+ if (message.routing_id() == MSG_ROUTING_CONTROL ||
+ message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
+ message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
+ // It's OK to post task that may never run even for sync messages, because
+ // if the channel is destroyed, the client Send will fail.
+ main_task_runner_->PostTask(FROM_HERE,
+ base::Bind(&GpuChannel::HandleOutOfOrderMessage,
+ gpu_channel_->AsWeakPtr(), message));
+ } else {
+ // Message queue takes care of PostTask.
+ message_queue_->PushBackMessage(message);
+ }
+
return true;
}
bool GpuChannelMessageFilter::Send(IPC::Message* message) {
- return channel_->Send(message);
-}
-
-scoped_refptr<GpuChannelMessageQueue>
-GpuChannelMessageFilter::LookupStreamByRoute(int32_t route_id) {
- base::AutoLock lock(routes_lock_);
- auto it = routes_.find(route_id);
- if (it != routes_.end())
- return it->second;
- return nullptr;
+ return ipc_channel_->Send(message);
}
bool GpuChannelMessageFilter::MessageErrorHandler(const IPC::Message& message,
@@ -576,22 +559,21 @@ FilteredSender::FilteredSender() = default;
FilteredSender::~FilteredSender() = default;
-GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
- SyncPointManager* sync_point_manager,
- GpuWatchdogThread* watchdog,
- gl::GLShareGroup* share_group,
- gles2::MailboxManager* mailbox,
- PreemptionFlag* preempting_flag,
- PreemptionFlag* preempted_flag,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- int32_t client_id,
- uint64_t client_tracing_id,
- bool allow_view_command_buffers,
- bool allow_real_time_streams)
+GpuChannel::GpuChannel(
+ GpuChannelManager* gpu_channel_manager,
+ SyncPointManager* sync_point_manager,
+ GpuWatchdogThread* watchdog,
+ scoped_refptr<gl::GLShareGroup> share_group,
+ scoped_refptr<gles2::MailboxManager> mailbox_manager,
+ scoped_refptr<PreemptionFlag> preempting_flag,
+ scoped_refptr<PreemptionFlag> preempted_flag,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
+ int32_t client_id,
+ uint64_t client_tracing_id,
+ bool is_gpu_host)
: gpu_channel_manager_(gpu_channel_manager),
sync_point_manager_(sync_point_manager),
- unhandled_message_listener_(nullptr),
preempting_flag_(preempting_flag),
preempted_flag_(preempted_flag),
client_id_(client_id),
@@ -599,44 +581,35 @@ GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
task_runner_(task_runner),
io_task_runner_(io_task_runner),
share_group_(share_group),
- mailbox_manager_(mailbox),
+ mailbox_manager_(mailbox_manager),
watchdog_(watchdog),
- allow_view_command_buffers_(allow_view_command_buffers),
- allow_real_time_streams_(allow_real_time_streams),
+ is_gpu_host_(is_gpu_host),
weak_factory_(this) {
DCHECK(gpu_channel_manager);
DCHECK(client_id);
- filter_ = new GpuChannelMessageFilter();
+ message_queue_ = new GpuChannelMessageQueue(this, task_runner, io_task_runner,
+ preempting_flag, preempted_flag,
+ sync_point_manager);
- scoped_refptr<GpuChannelMessageQueue> control_queue =
- CreateStream(GPU_STREAM_DEFAULT, GpuStreamPriority::HIGH);
- AddRouteToStream(MSG_ROUTING_CONTROL, GPU_STREAM_DEFAULT);
+ filter_ = new GpuChannelMessageFilter(this, message_queue_, task_runner);
}
GpuChannel::~GpuChannel() {
// Clear stubs first because of dependencies.
stubs_.clear();
- for (auto& kv : streams_)
- kv.second->Disable();
-
- if (preempting_flag_.get())
- preempting_flag_->Reset();
-}
+ // Destroy filter first so that no message queue gets no more messages.
+ filter_->Destroy();
-IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event) {
- DCHECK(shutdown_event);
- DCHECK(!channel_);
+ message_queue_->Destroy();
- mojo::MessagePipe pipe;
- channel_ = IPC::SyncChannel::Create(pipe.handle0.release(),
- IPC::Channel::MODE_SERVER, this,
- io_task_runner_, false, shutdown_event);
+ DCHECK(!preempting_flag_ || !preempting_flag_->IsSet());
+}
+void GpuChannel::Init(std::unique_ptr<FilteredSender> channel) {
+ channel_ = std::move(channel);
channel_->AddFilter(filter_.get());
-
- return pipe.handle1.release();
}
void GpuChannel::SetUnhandledMessageListener(IPC::Listener* listener) {
@@ -652,24 +625,6 @@ base::ProcessId GpuChannel::GetClientPID() const {
return peer_pid_;
}
-uint32_t GpuChannel::GetProcessedOrderNum() const {
- uint32_t processed_order_num = 0;
- for (auto& kv : streams_) {
- processed_order_num =
- std::max(processed_order_num, kv.second->GetProcessedOrderNum());
- }
- return processed_order_num;
-}
-
-uint32_t GpuChannel::GetUnprocessedOrderNum() const {
- uint32_t unprocessed_order_num = 0;
- for (auto& kv : streams_) {
- unprocessed_order_num =
- std::max(unprocessed_order_num, kv.second->GetUnprocessedOrderNum());
- }
- return unprocessed_order_num;
-}
-
bool GpuChannel::OnMessageReceived(const IPC::Message& msg) {
// All messages should be pushed to channel_messages_ and handled separately.
NOTREACHED();
@@ -700,10 +655,14 @@ bool GpuChannel::Send(IPC::Message* message) {
return channel_->Send(message);
}
-void GpuChannel::OnStreamRescheduled(int32_t stream_id, bool scheduled) {
- scoped_refptr<GpuChannelMessageQueue> queue = LookupStream(stream_id);
- DCHECK(queue);
- queue->OnRescheduled(scheduled);
+void GpuChannel::OnCommandBufferScheduled(GpuCommandBufferStub* stub) {
+ message_queue_->SetScheduled(true);
+ // TODO(sunnyps): Enable gpu scheduler task queue for stub's sequence.
+}
+
+void GpuChannel::OnCommandBufferDescheduled(GpuCommandBufferStub* stub) {
+ message_queue_->SetScheduled(false);
+ // TODO(sunnyps): Disable gpu scheduler task queue for stub's sequence.
}
GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
@@ -724,18 +683,14 @@ void GpuChannel::MarkAllContextsLost() {
}
bool GpuChannel::AddRoute(int32_t route_id,
- int32_t stream_id,
+ SequenceId sequence_id,
IPC::Listener* listener) {
- if (router_.AddRoute(route_id, listener)) {
- AddRouteToStream(route_id, stream_id);
- return true;
- }
- return false;
+ // TODO(sunnyps): Add route id to sequence id mapping to filter.
+ return router_.AddRoute(route_id, listener);
}
void GpuChannel::RemoveRoute(int32_t route_id) {
router_.RemoveRoute(route_id);
- RemoveRouteFromStream(route_id);
}
bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
@@ -752,31 +707,9 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
return handled;
}
-scoped_refptr<SyncPointOrderData> GpuChannel::GetSyncPointOrderData(
- int32_t stream_id) {
- auto it = streams_.find(stream_id);
- DCHECK(it != streams_.end());
- DCHECK(it->second);
- return it->second->GetSyncPointOrderData();
-}
-
-void GpuChannel::PostHandleMessage(
- const scoped_refptr<GpuChannelMessageQueue>& queue) {
- task_runner_->PostTask(FROM_HERE,
- base::Bind(&GpuChannel::HandleMessage,
- weak_factory_.GetWeakPtr(), queue));
-}
-
-void GpuChannel::PostHandleOutOfOrderMessage(const IPC::Message& msg) {
- task_runner_->PostTask(FROM_HERE,
- base::Bind(&GpuChannel::HandleOutOfOrderMessage,
- weak_factory_.GetWeakPtr(), msg));
-}
-
-void GpuChannel::HandleMessage(
- const scoped_refptr<GpuChannelMessageQueue>& message_queue) {
+void GpuChannel::HandleMessageOnQueue() {
const GpuChannelMessage* channel_msg =
- message_queue->BeginMessageProcessing();
+ message_queue_->BeginMessageProcessing();
if (!channel_msg)
return;
@@ -792,13 +725,13 @@ void GpuChannel::HandleMessage(
HandleMessageHelper(msg);
// If we get descheduled or yield while processing a message.
- if ((stub && stub->HasUnprocessedCommands()) ||
- !message_queue->IsScheduled()) {
+ if (stub && (stub->HasUnprocessedCommands() || !stub->IsScheduled())) {
DCHECK((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID == msg.type() ||
(uint32_t)GpuCommandBufferMsg_WaitSyncToken::ID == msg.type());
- message_queue->PauseMessageProcessing();
+ DCHECK_EQ(stub->IsScheduled(), message_queue_->IsScheduled());
+ message_queue_->PauseMessageProcessing();
} else {
- message_queue->FinishMessageProcessing();
+ message_queue_->FinishMessageProcessing();
}
}
@@ -827,59 +760,6 @@ void GpuChannel::HandleOutOfOrderMessage(const IPC::Message& msg) {
HandleMessageHelper(msg);
}
-void GpuChannel::HandleMessageForTesting(const IPC::Message& msg) {
- HandleMessageHelper(msg);
-}
-
-scoped_refptr<GpuChannelMessageQueue> GpuChannel::CreateStream(
- int32_t stream_id,
- GpuStreamPriority stream_priority) {
- DCHECK(streams_.find(stream_id) == streams_.end());
- scoped_refptr<GpuChannelMessageQueue> queue = GpuChannelMessageQueue::Create(
- stream_id, stream_priority, this, io_task_runner_,
- (stream_id == GPU_STREAM_DEFAULT) ? preempting_flag_ : nullptr,
- preempted_flag_, sync_point_manager_);
- streams_.insert(std::make_pair(stream_id, queue));
- streams_to_num_routes_.insert(std::make_pair(stream_id, 0));
- return queue;
-}
-
-scoped_refptr<GpuChannelMessageQueue> GpuChannel::LookupStream(
- int32_t stream_id) {
- auto stream_it = streams_.find(stream_id);
- if (stream_it != streams_.end())
- return stream_it->second;
- return nullptr;
-}
-
-void GpuChannel::DestroyStreamIfNecessary(
- const scoped_refptr<GpuChannelMessageQueue>& queue) {
- int32_t stream_id = queue->stream_id();
- if (streams_to_num_routes_[stream_id] == 0) {
- queue->Disable();
- streams_to_num_routes_.erase(stream_id);
- streams_.erase(stream_id);
- }
-}
-
-void GpuChannel::AddRouteToStream(int32_t route_id, int32_t stream_id) {
- DCHECK(streams_.find(stream_id) != streams_.end());
- DCHECK(routes_to_streams_.find(route_id) == routes_to_streams_.end());
- streams_to_num_routes_[stream_id]++;
- routes_to_streams_.insert(std::make_pair(route_id, stream_id));
- filter_->AddRoute(route_id, streams_[stream_id]);
-}
-
-void GpuChannel::RemoveRouteFromStream(int32_t route_id) {
- DCHECK(routes_to_streams_.find(route_id) != routes_to_streams_.end());
- int32_t stream_id = routes_to_streams_[route_id];
- DCHECK(streams_.find(stream_id) != streams_.end());
- routes_to_streams_.erase(route_id);
- streams_to_num_routes_[stream_id]--;
- filter_->RemoveRoute(route_id);
- DestroyStreamIfNecessary(streams_[stream_id]);
-}
-
#if defined(OS_ANDROID)
const GpuCommandBufferStub* GpuChannel::GetOneStub() const {
for (const auto& kv : stubs_) {
@@ -917,8 +797,7 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
const GPUCreateCommandBufferConfig& init_params,
int32_t route_id,
std::unique_ptr<base::SharedMemory> shared_state_shm) {
- if (init_params.surface_handle != kNullSurfaceHandle &&
- !allow_view_command_buffers_) {
+ if (init_params.surface_handle != kNullSurfaceHandle && !is_gpu_host_) {
DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a "
"view context on a non-priviledged channel";
return nullptr;
@@ -928,8 +807,7 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
GpuCommandBufferStub* share_group = LookupCommandBuffer(share_group_id);
if (!share_group && share_group_id != MSG_ROUTING_NONE) {
- DLOG(ERROR)
- << "GpuChannel::CreateCommandBuffer(): invalid share group id";
+ DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): invalid share group id";
return nullptr;
}
@@ -941,8 +819,7 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
}
GpuStreamPriority stream_priority = init_params.stream_priority;
- if (!allow_real_time_streams_ &&
- stream_priority == GpuStreamPriority::REAL_TIME) {
+ if (stream_priority == GpuStreamPriority::REAL_TIME && !is_gpu_host_) {
DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): real time stream "
"priority not allowed";
return nullptr;
@@ -962,20 +839,17 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
return nullptr;
}
- scoped_refptr<GpuChannelMessageQueue> queue = LookupStream(stream_id);
- if (!queue)
- queue = CreateStream(stream_id, stream_priority);
+ CommandBufferId command_buffer_id =
+ GenerateCommandBufferId(client_id_, route_id);
- std::unique_ptr<GpuCommandBufferStub> stub(GpuCommandBufferStub::Create(
- this, share_group, init_params, route_id, std::move(shared_state_shm)));
+ // TODO(sunnyps): Lookup sequence id using stream id to sequence id map.
+ SequenceId sequence_id = message_queue_->sequence_id();
- if (!stub) {
- DestroyStreamIfNecessary(queue);
- return nullptr;
- }
+ std::unique_ptr<GpuCommandBufferStub> stub(GpuCommandBufferStub::Create(
+ this, share_group, init_params, command_buffer_id, sequence_id, stream_id,
+ route_id, std::move(shared_state_shm)));
- if (!AddRoute(route_id, stream_id, stub.get())) {
- DestroyStreamIfNecessary(queue);
+ if (!AddRoute(route_id, sequence_id, stub.get())) {
DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): failed to add route";
return nullptr;
}
@@ -984,8 +858,8 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
}
void GpuChannel::OnDestroyCommandBuffer(int32_t route_id) {
- TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
- "route_id", route_id);
+ TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer", "route_id",
+ route_id);
std::unique_ptr<GpuCommandBufferStub> stub;
auto it = stubs_.find(route_id);
@@ -996,8 +870,8 @@ void GpuChannel::OnDestroyCommandBuffer(int32_t route_id) {
// In case the renderer is currently blocked waiting for a sync reply from the
// stub, we need to make sure to reschedule the correct stream here.
if (stub && !stub->IsScheduled()) {
- // This stub won't get a chance to reschedule the stream so do that now.
- OnStreamRescheduled(stub->stream_id(), true);
+ // This stub won't get a chance to be scheduled so do that now.
+ OnCommandBufferScheduled(stub.get());
}
RemoveRoute(route_id);
@@ -1020,14 +894,14 @@ void GpuChannel::CacheShader(const std::string& key,
void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
io_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter,
- filter_, make_scoped_refptr(filter)));
+ FROM_HERE,
+ base::Bind(&GpuChannelMessageFilter::AddChannelFilter, filter_, filter));
}
void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
io_task_runner_->PostTask(
FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter,
- filter_, make_scoped_refptr(filter)));
+ filter_, filter));
}
uint64_t GpuChannel::GetMemoryUsage() {
diff --git a/chromium/gpu/ipc/service/gpu_channel.h b/chromium/gpu/ipc/service/gpu_channel.h
index e83b7d56496..bd583b2674e 100644
--- a/chromium/gpu/ipc/service/gpu_channel.h
+++ b/chromium/gpu/ipc/service/gpu_channel.h
@@ -20,6 +20,7 @@
#include "base/threading/thread_checker.h"
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
+#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/gpu_export.h"
#include "gpu/ipc/common/gpu_stream_constants.h"
#include "gpu/ipc/service/gpu_command_buffer_stub.h"
@@ -41,7 +42,6 @@ class WaitableEvent;
namespace gpu {
class PreemptionFlag;
-class SyncPointOrderData;
class SyncPointManager;
class GpuChannelManager;
class GpuChannelMessageFilter;
@@ -57,6 +57,25 @@ class GPU_EXPORT FilteredSender : public IPC::Sender {
virtual void RemoveFilter(IPC::MessageFilter* filter) = 0;
};
+class GPU_EXPORT SyncChannelFilteredSender : public FilteredSender {
+ public:
+ SyncChannelFilteredSender(
+ IPC::ChannelHandle channel_handle,
+ IPC::Listener* listener,
+ scoped_refptr<base::SingleThreadTaskRunner> ipc_task_runner,
+ base::WaitableEvent* shutdown_event);
+ ~SyncChannelFilteredSender() override;
+
+ bool Send(IPC::Message* msg) override;
+ void AddFilter(IPC::MessageFilter* filter) override;
+ void RemoveFilter(IPC::MessageFilter* filter) override;
+
+ private:
+ std::unique_ptr<IPC::SyncChannel> channel_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncChannelFilteredSender);
+};
+
// Encapsulates an IPC channel between the GPU process and one renderer
// process. On the renderer side there's a corresponding GpuChannelHost.
class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
@@ -65,21 +84,20 @@ class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
GpuChannel(GpuChannelManager* gpu_channel_manager,
SyncPointManager* sync_point_manager,
GpuWatchdogThread* watchdog,
- gl::GLShareGroup* share_group,
- gles2::MailboxManager* mailbox_manager,
- PreemptionFlag* preempting_flag,
- PreemptionFlag* preempted_flag,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
+ scoped_refptr<gl::GLShareGroup> share_group,
+ scoped_refptr<gles2::MailboxManager> mailbox_manager,
+ scoped_refptr<PreemptionFlag> preempting_flag,
+ scoped_refptr<PreemptionFlag> preempted_flag,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
int32_t client_id,
uint64_t client_tracing_id,
- bool allow_view_command_buffers,
- bool allow_real_time_streams);
+ bool is_gpu_host);
~GpuChannel() override;
- // Initializes the IPC channel. Caller takes ownership of the client FD in
- // the returned handle and is responsible for closing it.
- virtual IPC::ChannelHandle Init(base::WaitableEvent* shutdown_event);
+ // The IPC channel cannot be passed in the constructor because it needs a
+ // listener. The listener is the GpuChannel and must be constructed first.
+ void Init(std::unique_ptr<FilteredSender> channel);
void SetUnhandledMessageListener(IPC::Listener* listener);
@@ -92,6 +110,10 @@ class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
GpuWatchdogThread* watchdog() const { return watchdog_; }
+ const scoped_refptr<GpuChannelMessageFilter>& filter() const {
+ return filter_;
+ }
+
const scoped_refptr<gles2::MailboxManager>& mailbox_manager() const {
return mailbox_manager_;
}
@@ -104,7 +126,7 @@ class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
return preempted_flag_;
}
- virtual base::ProcessId GetClientPID() const;
+ base::ProcessId GetClientPID() const;
int client_id() const { return client_id_; }
@@ -112,10 +134,12 @@ class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
base::WeakPtr<GpuChannel> AsWeakPtr();
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner() const {
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner() const {
return io_task_runner_;
}
+ FilteredSender* channel_for_testing() const { return channel_.get(); }
+
// IPC::Listener implementation:
bool OnMessageReceived(const IPC::Message& msg) override;
void OnChannelConnected(int32_t peer_pid) override;
@@ -126,7 +150,8 @@ class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
void AddFilter(IPC::MessageFilter* filter) override;
void RemoveFilter(IPC::MessageFilter* filter) override;
- void OnStreamRescheduled(int32_t stream_id, bool scheduled);
+ void OnCommandBufferScheduled(GpuCommandBufferStub* stub);
+ void OnCommandBufferDescheduled(GpuCommandBufferStub* stub);
gl::GLShareGroup* share_group() const { return share_group_.get(); }
@@ -137,7 +162,9 @@ class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
// Called to add a listener for a particular message routing ID.
// Returns true if succeeded.
- bool AddRoute(int32_t route_id, int32_t stream_id, IPC::Listener* listener);
+ bool AddRoute(int32_t route_id,
+ SequenceId sequence_id,
+ IPC::Listener* listener);
// Called to remove a listener for a particular message routing ID.
void RemoveRoute(int32_t route_id);
@@ -153,61 +180,23 @@ class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
uint32_t internalformat,
SurfaceHandle surface_handle);
- GpuChannelMessageFilter* filter() const { return filter_.get(); }
-
- // Returns the global order number for the last processed IPC message.
- uint32_t GetProcessedOrderNum() const;
+ // Handle messages enqueued in |message_queue_|.
+ void HandleMessageOnQueue();
- // Returns the global order number for the last unprocessed IPC message.
- uint32_t GetUnprocessedOrderNum() const;
-
- // Returns the shared sync point global order data for the stream.
- scoped_refptr<SyncPointOrderData> GetSyncPointOrderData(
- int32_t stream_id);
-
- void PostHandleOutOfOrderMessage(const IPC::Message& message);
- void PostHandleMessage(const scoped_refptr<GpuChannelMessageQueue>& queue);
-
- // Synchronously handle the message to make testing convenient.
- void HandleMessageForTesting(const IPC::Message& msg);
+ // Some messages such as WaitForGetOffsetInRange and WaitForTokenInRange are
+ // processed as soon as possible because the client is blocked until they
+ // are completed.
+ void HandleOutOfOrderMessage(const IPC::Message& msg);
#if defined(OS_ANDROID)
const GpuCommandBufferStub* GetOneStub() const;
#endif
- protected:
- // The message filter on the io thread.
- scoped_refptr<GpuChannelMessageFilter> filter_;
-
- // Map of routing id to command buffer stub.
- std::unordered_map<int32_t, std::unique_ptr<GpuCommandBufferStub>> stubs_;
-
private:
- friend class TestGpuChannel;
-
bool OnControlMessageReceived(const IPC::Message& msg);
- void HandleMessage(const scoped_refptr<GpuChannelMessageQueue>& queue);
-
- // Some messages such as WaitForGetOffsetInRange and WaitForTokenInRange are
- // processed as soon as possible because the client is blocked until they
- // are completed.
- void HandleOutOfOrderMessage(const IPC::Message& msg);
-
void HandleMessageHelper(const IPC::Message& msg);
- scoped_refptr<GpuChannelMessageQueue> CreateStream(
- int32_t stream_id,
- GpuStreamPriority stream_priority);
-
- scoped_refptr<GpuChannelMessageQueue> LookupStream(int32_t stream_id);
-
- void DestroyStreamIfNecessary(
- const scoped_refptr<GpuChannelMessageQueue>& queue);
-
- void AddRouteToStream(int32_t route_id, int32_t stream_id);
- void RemoveRouteFromStream(int32_t route_id);
-
// Message handlers for control messages.
void OnCreateCommandBuffer(const GPUCreateCommandBufferConfig& init_params,
int32_t route_id,
@@ -223,6 +212,18 @@ class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
int32_t route_id,
std::unique_ptr<base::SharedMemory> shared_state_shm);
+ std::unique_ptr<FilteredSender> channel_;
+
+ base::ProcessId peer_pid_ = base::kNullProcessId;
+
+ scoped_refptr<GpuChannelMessageQueue> message_queue_;
+
+ // The message filter on the io thread.
+ scoped_refptr<GpuChannelMessageFilter> filter_;
+
+ // Map of routing id to command buffer stub.
+ std::unordered_map<int32_t, std::unique_ptr<GpuCommandBufferStub>> stubs_;
+
// The lifetime of objects of this class is managed by a GpuChannelManager.
// The GpuChannelManager destroy all the GpuChannels that they own when they
// are destroyed. So a raw pointer is safe.
@@ -232,9 +233,7 @@ class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
// message loop.
SyncPointManager* const sync_point_manager_;
- std::unique_ptr<IPC::SyncChannel> channel_;
-
- IPC::Listener* unhandled_message_listener_;
+ IPC::Listener* unhandled_message_listener_ = nullptr;
// Used to implement message routing functionality to CommandBuffer objects
IPC::MessageRouter router_;
@@ -265,26 +264,11 @@ class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
GpuWatchdogThread* const watchdog_;
- // Map of stream id to appropriate message queue.
- base::hash_map<int32_t, scoped_refptr<GpuChannelMessageQueue>> streams_;
-
- // Multimap of stream id to route ids.
- base::hash_map<int32_t, int> streams_to_num_routes_;
-
- // Map of route id to stream id;
- base::hash_map<int32_t, int32_t> routes_to_streams_;
-
- // Can view command buffers be created on this channel.
- const bool allow_view_command_buffers_;
+ const bool is_gpu_host_;
- // Can real time streams be created on this channel.
- const bool allow_real_time_streams_;
-
- base::ProcessId peer_pid_;
-
- // Member variables should appear before the WeakPtrFactory, to ensure
- // that any WeakPtrs to Controller are invalidated before its members
- // variable's destructors are executed, rendering them invalid.
+ // Member variables should appear before the WeakPtrFactory, to ensure that
+ // any WeakPtrs to Controller are invalidated before its members variable's
+ // destructors are executed, rendering them invalid.
base::WeakPtrFactory<GpuChannel> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(GpuChannel);
@@ -303,7 +287,12 @@ class GPU_EXPORT GpuChannel : public IPC::Listener, public FilteredSender {
// - it generates mailbox names for clients of the GPU process on the IO thread.
class GPU_EXPORT GpuChannelMessageFilter : public IPC::MessageFilter {
public:
- GpuChannelMessageFilter();
+ GpuChannelMessageFilter(
+ GpuChannel* gpu_channel,
+ scoped_refptr<GpuChannelMessageQueue> message_queue,
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner);
+
+ void Destroy();
// IPC::MessageFilter implementation.
void OnFilterAdded(IPC::Channel* channel) override;
@@ -316,28 +305,23 @@ class GPU_EXPORT GpuChannelMessageFilter : public IPC::MessageFilter {
void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
- void AddRoute(int32_t route_id,
- const scoped_refptr<GpuChannelMessageQueue>& queue);
- void RemoveRoute(int32_t route_id);
-
bool Send(IPC::Message* message);
- protected:
- ~GpuChannelMessageFilter() override;
-
private:
- scoped_refptr<GpuChannelMessageQueue> LookupStreamByRoute(int32_t route_id);
+ ~GpuChannelMessageFilter() override;
bool MessageErrorHandler(const IPC::Message& message, const char* error_msg);
- // Map of route id to message queue.
- base::hash_map<int32_t, scoped_refptr<GpuChannelMessageQueue>> routes_;
- base::Lock routes_lock_; // Protects |routes_|.
-
- IPC::Channel* channel_;
- base::ProcessId peer_pid_;
+ IPC::Channel* ipc_channel_ = nullptr;
+ base::ProcessId peer_pid_ = base::kNullProcessId;
std::vector<scoped_refptr<IPC::MessageFilter>> channel_filters_;
+ GpuChannel* gpu_channel_ = nullptr;
+ base::Lock gpu_channel_lock_;
+
+ scoped_refptr<GpuChannelMessageQueue> message_queue_;
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
+
DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageFilter);
};
@@ -358,36 +342,25 @@ struct GpuChannelMessage {
class GpuChannelMessageQueue
: public base::RefCountedThreadSafe<GpuChannelMessageQueue> {
public:
- static scoped_refptr<GpuChannelMessageQueue> Create(
- int32_t stream_id,
- GpuStreamPriority stream_priority,
+ GpuChannelMessageQueue(
GpuChannel* channel,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
- const scoped_refptr<PreemptionFlag>& preempting_flag,
- const scoped_refptr<PreemptionFlag>& preempted_flag,
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
+ scoped_refptr<PreemptionFlag> preempting_flag,
+ scoped_refptr<PreemptionFlag> preempted_flag,
SyncPointManager* sync_point_manager);
- void Disable();
- void DisableIO();
+ void Destroy();
- int32_t stream_id() const { return stream_id_; }
- GpuStreamPriority stream_priority() const { return stream_priority_; }
+ SequenceId sequence_id() const {
+ return sync_point_order_data_->sequence_id();
+ }
bool IsScheduled() const;
- void OnRescheduled(bool scheduled);
+ void SetScheduled(bool scheduled);
bool HasQueuedMessages() const;
- base::TimeTicks GetNextMessageTimeTick() const;
-
- scoped_refptr<SyncPointOrderData> GetSyncPointOrderData();
-
- // Returns the global order number for the last unprocessed IPC message.
- uint32_t GetUnprocessedOrderNum() const;
-
- // Returns the global order number for the last unprocessed IPC message.
- uint32_t GetProcessedOrderNum() const;
-
// Should be called before a message begins to be processed. Returns false if
// there are no messages to process.
const GpuChannelMessage* BeginMessageProcessing();
@@ -397,7 +370,7 @@ class GpuChannelMessageQueue
// there are more messages to process.
void FinishMessageProcessing();
- bool PushBackMessage(const IPC::Message& message);
+ void PushBackMessage(const IPC::Message& message);
private:
enum PreemptionState {
@@ -418,16 +391,10 @@ class GpuChannelMessageQueue
friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>;
- GpuChannelMessageQueue(
- int32_t stream_id,
- GpuStreamPriority stream_priority,
- GpuChannel* channel,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
- const scoped_refptr<PreemptionFlag>& preempting_flag,
- const scoped_refptr<PreemptionFlag>& preempted_flag,
- SyncPointManager* sync_point_manager);
~GpuChannelMessageQueue();
+ void PostHandleMessageOnQueue();
+
void UpdatePreemptionState();
void UpdatePreemptionStateHelper();
@@ -445,21 +412,18 @@ class GpuChannelMessageQueue
bool ShouldTransitionToIdle() const;
- const int32_t stream_id_;
- const GpuStreamPriority stream_priority_;
-
// These can be accessed from both IO and main threads and are protected by
// |channel_lock_|.
- bool enabled_;
- bool scheduled_;
- GpuChannel* const channel_;
+ bool scheduled_ = true;
+ GpuChannel* channel_ = nullptr; // set to nullptr on Destroy
std::deque<std::unique_ptr<GpuChannelMessage>> channel_messages_;
+ bool handle_message_post_task_pending_ = false;
mutable base::Lock channel_lock_;
// The following are accessed on the IO thread only.
// No lock is necessary for preemption state because it's only accessed on the
// IO thread.
- PreemptionState preemption_state_;
+ PreemptionState preemption_state_ = IDLE;
// Maximum amount of time that we can spend in PREEMPTING.
// It is reset when we transition to IDLE.
base::TimeDelta max_preemption_time_;
@@ -470,6 +434,7 @@ class GpuChannelMessageQueue
// Keeps track of sync point related state such as message order numbers.
scoped_refptr<SyncPointOrderData> sync_point_order_data_;
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
scoped_refptr<PreemptionFlag> preempting_flag_;
scoped_refptr<PreemptionFlag> preempted_flag_;
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc
index a6a099c6300..31dd336935d 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc
@@ -19,6 +19,7 @@
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_program_cache.h"
+#include "gpu/command_buffer/service/preemption_flag.h"
#include "gpu/command_buffer/service/shader_translator_cache.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/ipc/common/gpu_messages.h"
@@ -45,21 +46,21 @@ const int kMaxKeepAliveTimeMs = 200;
GpuChannelManager::GpuChannelManager(
const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& workarounds,
GpuChannelManagerDelegate* delegate,
GpuWatchdogThread* watchdog,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- base::WaitableEvent* shutdown_event,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
SyncPointManager* sync_point_manager,
GpuMemoryBufferFactory* gpu_memory_buffer_factory,
- const GpuFeatureInfo& gpu_feature_info)
+ const GpuFeatureInfo& gpu_feature_info,
+ GpuProcessActivityFlags activity_flags)
: task_runner_(task_runner),
io_task_runner_(io_task_runner),
gpu_preferences_(gpu_preferences),
- gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()),
+ gpu_driver_bug_workarounds_(workarounds),
delegate_(delegate),
watchdog_(watchdog),
- shutdown_event_(shutdown_event),
share_group_(new gl::GLShareGroup()),
mailbox_manager_(gles2::MailboxManager::Create(gpu_preferences)),
gpu_memory_manager_(this),
@@ -67,6 +68,7 @@ GpuChannelManager::GpuChannelManager(
gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
gpu_feature_info_(gpu_feature_info),
exiting_for_lost_context_(false),
+ activity_flags_(std::move(activity_flags)),
weak_factory_(this) {
DCHECK(task_runner);
DCHECK(io_task_runner);
@@ -91,9 +93,9 @@ gles2::ProgramCache* GpuChannelManager::program_cache() {
gpu_preferences_.disable_gpu_shader_disk_cache ||
workarounds.disable_program_disk_cache;
program_cache_.reset(new gles2::MemoryProgramCache(
- gpu_preferences_.gpu_program_cache_size,
- disable_disk_cache,
- workarounds.disable_program_caching_for_transform_feedback));
+ gpu_preferences_.gpu_program_cache_size, disable_disk_cache,
+ workarounds.disable_program_caching_for_transform_feedback,
+ &activity_flags_));
}
return program_cache_.get();
}
@@ -125,32 +127,18 @@ GpuChannel* GpuChannelManager::LookupChannel(int32_t client_id) const {
return it != gpu_channels_.end() ? it->second.get() : nullptr;
}
-std::unique_ptr<GpuChannel> GpuChannelManager::CreateGpuChannel(
- int client_id,
- uint64_t client_tracing_id,
- bool preempts,
- bool allow_view_command_buffers,
- bool allow_real_time_streams) {
- return base::MakeUnique<GpuChannel>(
- this, sync_point_manager(), watchdog_, share_group(), mailbox_manager(),
- preempts ? preemption_flag() : nullptr,
- preempts ? nullptr : preemption_flag(), task_runner_.get(),
- io_task_runner_.get(), client_id, client_tracing_id,
- allow_view_command_buffers, allow_real_time_streams);
-}
-
-IPC::ChannelHandle GpuChannelManager::EstablishChannel(
- int client_id,
- uint64_t client_tracing_id,
- bool preempts,
- bool allow_view_command_buffers,
- bool allow_real_time_streams) {
- std::unique_ptr<GpuChannel> channel(
- CreateGpuChannel(client_id, client_tracing_id, preempts,
- allow_view_command_buffers, allow_real_time_streams));
- IPC::ChannelHandle channel_handle = channel->Init(shutdown_event_);
- gpu_channels_[client_id] = std::move(channel);
- return channel_handle;
+GpuChannel* GpuChannelManager::EstablishChannel(int client_id,
+ uint64_t client_tracing_id,
+ bool is_gpu_host) {
+ std::unique_ptr<GpuChannel> gpu_channel = base::MakeUnique<GpuChannel>(
+ this, sync_point_manager_, watchdog_, share_group_, mailbox_manager_,
+ is_gpu_host ? preemption_flag_ : nullptr,
+ is_gpu_host ? nullptr : preemption_flag_, task_runner_, io_task_runner_,
+ client_id, client_tracing_id, is_gpu_host);
+
+ GpuChannel* gpu_channel_ptr = gpu_channel.get();
+ gpu_channels_[client_id] = std::move(gpu_channel);
+ return gpu_channel_ptr;
}
void GpuChannelManager::InternalDestroyGpuMemoryBuffer(
@@ -186,24 +174,6 @@ void GpuChannelManager::PopulateShaderCache(const std::string& program_proto) {
program_cache()->LoadProgram(program_proto);
}
-uint32_t GpuChannelManager::GetUnprocessedOrderNum() const {
- uint32_t unprocessed_order_num = 0;
- for (auto& kv : gpu_channels_) {
- unprocessed_order_num =
- std::max(unprocessed_order_num, kv.second->GetUnprocessedOrderNum());
- }
- return unprocessed_order_num;
-}
-
-uint32_t GpuChannelManager::GetProcessedOrderNum() const {
- uint32_t processed_order_num = 0;
- for (auto& kv : gpu_channels_) {
- processed_order_num =
- std::max(processed_order_num, kv.second->GetProcessedOrderNum());
- }
- return processed_order_num;
-}
-
void GpuChannelManager::LoseAllContexts() {
for (auto& kv : gpu_channels_) {
kv.second->MarkAllContextsLost();
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h
index 01301b2708c..b0a183acdec 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.h
@@ -17,6 +17,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "build/build_config.h"
+#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/service/gpu_preferences.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
@@ -28,15 +29,12 @@
#include "ui/gl/gl_surface.h"
#include "url/gurl.h"
-namespace base {
-class WaitableEvent;
-}
-
namespace gl {
class GLShareGroup;
}
namespace gpu {
+class GpuDriverBugWorkarounds;
struct GpuPreferences;
class PreemptionFlag;
class SyncPointManager;
@@ -49,10 +47,6 @@ class ShaderTranslatorCache;
}
}
-namespace IPC {
-struct ChannelHandle;
-}
-
namespace gpu {
class GpuChannel;
class GpuChannelManagerDelegate;
@@ -65,23 +59,22 @@ class GpuWatchdogThread;
class GPU_EXPORT GpuChannelManager {
public:
GpuChannelManager(const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& workarounds,
GpuChannelManagerDelegate* delegate,
GpuWatchdogThread* watchdog,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- base::WaitableEvent* shutdown_event,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
SyncPointManager* sync_point_manager,
GpuMemoryBufferFactory* gpu_memory_buffer_factory,
- const GpuFeatureInfo& gpu_feature_info);
- virtual ~GpuChannelManager();
+ const GpuFeatureInfo& gpu_feature_info,
+ GpuProcessActivityFlags activity_flags);
+ ~GpuChannelManager();
GpuChannelManagerDelegate* delegate() const { return delegate_; }
- IPC::ChannelHandle EstablishChannel(int client_id,
- uint64_t client_tracing_id,
- bool preempts,
- bool allow_view_command_buffers,
- bool allow_real_time_streams);
+ GpuChannel* EstablishChannel(int client_id,
+ uint64_t client_tracing_id,
+ bool is_gpu_host);
void PopulateShaderCache(const std::string& shader);
void DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
@@ -98,9 +91,7 @@ class GPU_EXPORT GpuChannelManager {
void LoseAllContexts();
void MaybeExitOnContextLost();
- const GpuPreferences& gpu_preferences() const {
- return gpu_preferences_;
- }
+ const GpuPreferences& gpu_preferences() const { return gpu_preferences_; }
const GpuDriverBugWorkarounds& gpu_driver_bug_workarounds() const {
return gpu_driver_bug_workarounds_;
}
@@ -119,21 +110,11 @@ class GPU_EXPORT GpuChannelManager {
return gpu_memory_buffer_factory_;
}
- // Returns the maximum order number for unprocessed IPC messages across all
- // channels.
- uint32_t GetUnprocessedOrderNum() const;
-
- // Returns the maximum order number for processed IPC messages across all
- // channels.
- uint32_t GetProcessedOrderNum() const;
-
#if defined(OS_ANDROID)
void DidAccessGpu();
#endif
- bool is_exiting_for_lost_context() {
- return exiting_for_lost_context_;
- }
+ bool is_exiting_for_lost_context() { return exiting_for_lost_context_; }
gles2::MailboxManager* mailbox_manager() const {
return mailbox_manager_.get();
@@ -141,28 +122,6 @@ class GPU_EXPORT GpuChannelManager {
gl::GLShareGroup* share_group() const { return share_group_.get(); }
- protected:
- virtual std::unique_ptr<GpuChannel> CreateGpuChannel(
- int client_id,
- uint64_t client_tracing_id,
- bool preempts,
- bool allow_view_command_buffers,
- bool allow_real_time_streams);
-
- SyncPointManager* sync_point_manager() const {
- return sync_point_manager_;
- }
-
- PreemptionFlag* preemption_flag() const { return preemption_flag_.get(); }
-
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
-
- // These objects manage channels to individual renderer processes. There is
- // one channel for each renderer process that has connected to this GPU
- // process.
- std::unordered_map<int32_t, std::unique_ptr<GpuChannel>> gpu_channels_;
-
private:
void InternalDestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id, int client_id);
void InternalDestroyGpuMemoryBufferOnIO(gfx::GpuMemoryBufferId id,
@@ -172,6 +131,14 @@ class GPU_EXPORT GpuChannelManager {
void DoWakeUpGpu();
#endif
+ // These objects manage channels to individual renderer processes. There is
+ // one channel for each renderer process that has connected to this GPU
+ // process.
+ std::unordered_map<int32_t, std::unique_ptr<GpuChannel>> gpu_channels_;
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+
const GpuPreferences gpu_preferences_;
GpuDriverBugWorkarounds gpu_driver_bug_workarounds_;
@@ -179,8 +146,6 @@ class GPU_EXPORT GpuChannelManager {
GpuWatchdogThread* watchdog_;
- base::WaitableEvent* shutdown_event_;
-
scoped_refptr<gl::GLShareGroup> share_group_;
scoped_refptr<gles2::MailboxManager> mailbox_manager_;
scoped_refptr<PreemptionFlag> preemption_flag_;
@@ -204,6 +169,10 @@ class GPU_EXPORT GpuChannelManager {
// Set during intentional GPU process shutdown.
bool exiting_for_lost_context_;
+ // Flags which indicate GPU process activity. Read by the browser process
+ // on GPU process crash.
+ GpuProcessActivityFlags activity_flags_;
+
// Member variables should appear before the WeakPtrFactory, to ensure
// that any WeakPtrs to Controller are invalidated before its members
// variable's destructors are executed, rendering them invalid.
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc b/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
index b5963d57bcb..28ba32115f4 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
@@ -23,15 +23,10 @@ TEST_F(GpuChannelManagerTest, EstablishChannel) {
uint64_t kClientTracingId = 1;
ASSERT_TRUE(channel_manager());
-
- IPC::ChannelHandle channel_handle = channel_manager()->EstablishChannel(
- kClientId, kClientTracingId, false /* preempts */,
- false /* allow_view_command_buffers */,
- false /* allow_real_time_streams */);
- EXPECT_TRUE(channel_handle.is_mojo_channel_handle());
-
- GpuChannel* channel = channel_manager()->LookupChannel(kClientId);
- ASSERT_TRUE(channel);
+ GpuChannel* channel =
+ channel_manager()->EstablishChannel(kClientId, kClientTracingId, false);
+ EXPECT_TRUE(channel);
+ EXPECT_EQ(channel_manager()->LookupChannel(kClientId), channel);
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.cc b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
index 85b207ee875..45a78966d65 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
@@ -5,152 +5,160 @@
#include "gpu/ipc/service/gpu_channel_test_common.h"
#include "base/memory/ptr_util.h"
+#include "base/memory/shared_memory.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
#include "ipc/ipc_test_sink.h"
+#include "ui/gl/init/gl_factory.h"
+#include "ui/gl/test/gl_surface_test_support.h"
#include "url/gurl.h"
namespace gpu {
-TestGpuChannelManagerDelegate::TestGpuChannelManagerDelegate() {}
-
-TestGpuChannelManagerDelegate::~TestGpuChannelManagerDelegate() {}
-
-void TestGpuChannelManagerDelegate::SetActiveURL(const GURL& url) {}
-
-void TestGpuChannelManagerDelegate::DidCreateOffscreenContext(
- const GURL& active_url) {}
-
-void TestGpuChannelManagerDelegate::DidDestroyChannel(int client_id) {}
-
-void TestGpuChannelManagerDelegate::DidDestroyOffscreenContext(
- const GURL& active_url) {}
-
-void TestGpuChannelManagerDelegate::DidLoseContext(
- bool offscreen,
- error::ContextLostReason reason,
- const GURL& active_url) {}
-
-void TestGpuChannelManagerDelegate::StoreShaderToDisk(
- int32_t client_id,
- const std::string& key,
- const std::string& shader) {}
-
+class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate {
+ public:
+ TestGpuChannelManagerDelegate() = default;
+ ~TestGpuChannelManagerDelegate() override = default;
+
+ // GpuChannelManagerDelegate implementation:
+ void SetActiveURL(const GURL& url) override {}
+ void DidCreateOffscreenContext(const GURL& active_url) override {}
+ void DidDestroyChannel(int client_id) override {}
+ void DidDestroyOffscreenContext(const GURL& active_url) override {}
+ void DidLoseContext(bool offscreen,
+ error::ContextLostReason reason,
+ const GURL& active_url) override {}
+ void StoreShaderToDisk(int32_t client_id,
+ const std::string& key,
+ const std::string& shader) override {}
#if defined(OS_WIN)
-void TestGpuChannelManagerDelegate::SendAcceleratedSurfaceCreatedChildWindow(
- SurfaceHandle parent_window,
- SurfaceHandle child_window) {}
+ void SendAcceleratedSurfaceCreatedChildWindow(
+ SurfaceHandle parent_window,
+ SurfaceHandle child_window) override {}
#endif
-TestGpuChannelManager::TestGpuChannelManager(
- const GpuPreferences& gpu_preferences,
- GpuChannelManagerDelegate* delegate,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- SyncPointManager* sync_point_manager,
- GpuMemoryBufferFactory* gpu_memory_buffer_factory)
- : GpuChannelManager(gpu_preferences,
- delegate,
- nullptr,
- task_runner,
- io_task_runner,
- nullptr,
- sync_point_manager,
- gpu_memory_buffer_factory,
- GpuFeatureInfo()) {}
-
-TestGpuChannelManager::~TestGpuChannelManager() {
- // Clear gpu channels here so that any IPC messages sent are handled using the
- // overridden Send method.
- gpu_channels_.clear();
-}
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestGpuChannelManagerDelegate);
+};
-std::unique_ptr<GpuChannel> TestGpuChannelManager::CreateGpuChannel(
- int client_id,
- uint64_t client_tracing_id,
- bool preempts,
- bool allow_view_command_buffers,
- bool allow_real_time_streams) {
- return base::MakeUnique<TestGpuChannel>(
- this, sync_point_manager(), share_group(), mailbox_manager(),
- preempts ? preemption_flag() : nullptr,
- preempts ? nullptr : preemption_flag(), task_runner_.get(),
- io_task_runner_.get(), client_id, client_tracing_id,
- allow_view_command_buffers, allow_real_time_streams);
-}
+class TestSinkFilteredSender : public FilteredSender {
+ public:
+ TestSinkFilteredSender() : sink_(base::MakeUnique<IPC::TestSink>()) {}
+ ~TestSinkFilteredSender() override = default;
-TestGpuChannel::TestGpuChannel(GpuChannelManager* gpu_channel_manager,
- SyncPointManager* sync_point_manager,
- gl::GLShareGroup* share_group,
- gles2::MailboxManager* mailbox_manager,
- PreemptionFlag* preempting_flag,
- PreemptionFlag* preempted_flag,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- int client_id,
- uint64_t client_tracing_id,
- bool allow_view_command_buffers,
- bool allow_real_time_streams)
- : GpuChannel(gpu_channel_manager,
- sync_point_manager,
- nullptr,
- share_group,
- mailbox_manager,
- preempting_flag,
- preempted_flag,
- task_runner,
- io_task_runner,
- client_id,
- client_tracing_id,
- allow_view_command_buffers,
- allow_real_time_streams) {}
-
-TestGpuChannel::~TestGpuChannel() {
- // Call stubs here so that any IPC messages sent are handled using the
- // overridden Send method.
- stubs_.clear();
-}
+ IPC::TestSink* sink() const { return sink_.get(); }
-base::ProcessId TestGpuChannel::GetClientPID() const {
- return base::kNullProcessId;
-}
+ bool Send(IPC::Message* msg) override { return sink_->Send(msg); }
-IPC::ChannelHandle TestGpuChannel::Init(base::WaitableEvent* shutdown_event) {
- filter_->OnFilterAdded(&sink_);
- mojo::MessagePipe pipe;
- return pipe.handle0.release();
-}
+ void AddFilter(IPC::MessageFilter* filter) override {
+ // Needed to appease DCHECKs.
+ filter->OnFilterAdded(sink_.get());
+ }
-bool TestGpuChannel::Send(IPC::Message* msg) {
- DCHECK(!msg->is_sync());
- return sink_.Send(msg);
-}
+ void RemoveFilter(IPC::MessageFilter* filter) override {
+ filter->OnFilterRemoved();
+ }
+
+ private:
+ std::unique_ptr<IPC::TestSink> sink_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestSinkFilteredSender);
+};
-// TODO(sunnyps): Use a mock memory buffer factory when necessary.
GpuChannelTestCommon::GpuChannelTestCommon()
: task_runner_(new base::TestSimpleTaskRunner),
io_task_runner_(new base::TestSimpleTaskRunner),
sync_point_manager_(new SyncPointManager()),
- channel_manager_delegate_(new TestGpuChannelManagerDelegate()) {}
+ channel_manager_delegate_(new TestGpuChannelManagerDelegate()),
+ channel_manager_(
+ new GpuChannelManager(GpuPreferences(),
+ GpuDriverBugWorkarounds(),
+ channel_manager_delegate_.get(),
+ nullptr /* watchdog */,
+ task_runner_.get(),
+ io_task_runner_.get(),
+ sync_point_manager_.get(),
+ nullptr /* gpu_memory_buffer_factory */,
+ GpuFeatureInfo(),
+ GpuProcessActivityFlags())) {
+ // We need GL bindings to actually initialize command buffers.
+ gl::GLSurfaceTestSupport::InitializeOneOffWithStubBindings();
+}
GpuChannelTestCommon::~GpuChannelTestCommon() {
+ // Command buffers can post tasks and run GL in destruction so do this first.
+ channel_manager_ = nullptr;
+
// Clear pending tasks to avoid refptr cycles that get flagged by ASAN.
task_runner_->ClearPendingTasks();
io_task_runner_->ClearPendingTasks();
+
+ gl::init::ShutdownGL();
}
-void GpuChannelTestCommon::SetUp() {
- channel_manager_.reset(new TestGpuChannelManager(
- gpu_preferences_, channel_manager_delegate_.get(), task_runner_.get(),
- io_task_runner_.get(), sync_point_manager_.get(), nullptr));
+GpuChannel* GpuChannelTestCommon::CreateChannel(int32_t client_id,
+ bool is_gpu_host) {
+ uint64_t kClientTracingId = 1;
+ GpuChannel* channel = channel_manager()->EstablishChannel(
+ client_id, kClientTracingId, is_gpu_host);
+ channel->Init(base::MakeUnique<TestSinkFilteredSender>());
+ base::ProcessId kProcessId = 1;
+ channel->OnChannelConnected(kProcessId);
+ return channel;
}
-void GpuChannelTestCommon::TearDown() {
- // Destroying channels causes tasks to run on the IO task runner.
- channel_manager_ = nullptr;
+void GpuChannelTestCommon::HandleMessage(GpuChannel* channel,
+ IPC::Message* msg) {
+ IPC::TestSink* sink =
+ static_cast<TestSinkFilteredSender*>(channel->channel_for_testing())
+ ->sink();
+
+ // Some IPCs (such as GpuCommandBufferMsg_Initialize) will generate more
+ // delayed responses, drop those if they exist.
+ sink->ClearMessages();
+
+ // Needed to appease DCHECKs.
+ msg->set_unblock(false);
+
+ // Message filter gets message first on IO thread.
+ channel->filter()->OnMessageReceived(*msg);
+
+ // Run the HandleMessage task posted to the main thread.
+ task_runner()->RunPendingTasks();
+
+ // Replies are sent to the sink.
+ if (msg->is_sync()) {
+ const IPC::Message* reply_msg = sink->GetMessageAt(0);
+ ASSERT_TRUE(reply_msg);
+ EXPECT_TRUE(!reply_msg->is_reply_error());
+
+ EXPECT_TRUE(IPC::SyncMessage::IsMessageReplyTo(
+ *reply_msg, IPC::SyncMessage::GetMessageId(*msg)));
+
+ IPC::MessageReplyDeserializer* deserializer =
+ static_cast<IPC::SyncMessage*>(msg)->GetReplyDeserializer();
+ ASSERT_TRUE(deserializer);
+ deserializer->SerializeOutputParameters(*reply_msg);
+
+ delete deserializer;
+ }
+
+ sink->ClearMessages();
+
+ delete msg;
}
+base::SharedMemoryHandle GpuChannelTestCommon::GetSharedHandle() {
+ base::SharedMemory shared_memory;
+ shared_memory.CreateAnonymous(10);
+ base::SharedMemoryHandle shmem_handle;
+ shared_memory.ShareToProcess(base::GetCurrentProcessHandle(), &shmem_handle);
+ return shmem_handle;
+}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.h b/chromium/gpu/ipc/service/gpu_channel_test_common.h
index f610cfaf11c..c574743a42c 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.h
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.h
@@ -2,126 +2,49 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <stdint.h>
-
#include <memory>
-#include "gpu/command_buffer/service/gpu_preferences.h"
-#include "gpu/ipc/service/gpu_channel.h"
-#include "gpu/ipc/service/gpu_channel_manager.h"
-#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
-#include "ipc/ipc_test_sink.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory_handle.h"
#include "testing/gtest/include/gtest/gtest.h"
-class GURL;
-
namespace base {
class TestSimpleTaskRunner;
} // namespace base
namespace IPC {
-class TestSink;
+class Message;
} // namespace IPC
namespace gpu {
-
+class GpuChannel;
+class GpuChannelManager;
class SyncPointManager;
-
-class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate {
- public:
- TestGpuChannelManagerDelegate();
- ~TestGpuChannelManagerDelegate() override;
-
- private:
- // GpuChannelManagerDelegate implementation:
- void SetActiveURL(const GURL& url) override;
- void DidCreateOffscreenContext(const GURL& active_url) override;
- void DidDestroyChannel(int client_id) override;
- void DidDestroyOffscreenContext(const GURL& active_url) override;
- void DidLoseContext(bool offscreen,
- error::ContextLostReason reason,
- const GURL& active_url) override;
- void StoreShaderToDisk(int32_t client_id,
- const std::string& key,
- const std::string& shader) override;
-#if defined(OS_WIN)
- void SendAcceleratedSurfaceCreatedChildWindow(
- SurfaceHandle parent_window,
- SurfaceHandle child_window) override;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(TestGpuChannelManagerDelegate);
-};
-
-class TestGpuChannelManager : public GpuChannelManager {
- public:
- TestGpuChannelManager(const GpuPreferences& gpu_preferences,
- GpuChannelManagerDelegate* delegate,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- SyncPointManager* sync_point_manager,
- GpuMemoryBufferFactory* gpu_memory_buffer_factory);
- ~TestGpuChannelManager() override;
-
- protected:
- std::unique_ptr<GpuChannel> CreateGpuChannel(
- int client_id,
- uint64_t client_tracing_id,
- bool preempts,
- bool allow_view_command_buffers,
- bool allow_real_time_streams) override;
-};
-
-class TestGpuChannel : public GpuChannel {
- public:
- TestGpuChannel(GpuChannelManager* gpu_channel_manager,
- SyncPointManager* sync_point_manager,
- gl::GLShareGroup* share_group,
- gles2::MailboxManager* mailbox_manager,
- PreemptionFlag* preempting_flag,
- PreemptionFlag* preempted_flag,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- int client_id,
- uint64_t client_tracing_id,
- bool allow_view_command_buffers,
- bool allow_real_time_streams);
- ~TestGpuChannel() override;
-
- IPC::TestSink* sink() { return &sink_; }
- base::ProcessId GetClientPID() const override;
-
- IPC::ChannelHandle Init(base::WaitableEvent* shutdown_event) override;
-
- // IPC::Sender implementation.
- bool Send(IPC::Message* msg) override;
-
- private:
- IPC::TestSink sink_;
-};
+class TestGpuChannelManagerDelegate;
class GpuChannelTestCommon : public testing::Test {
public:
GpuChannelTestCommon();
~GpuChannelTestCommon() override;
- void SetUp() override;
- void TearDown() override;
-
protected:
GpuChannelManager* channel_manager() { return channel_manager_.get(); }
- TestGpuChannelManagerDelegate* channel_manager_delegate() {
- return channel_manager_delegate_.get();
- }
base::TestSimpleTaskRunner* task_runner() { return task_runner_.get(); }
+ GpuChannel* CreateChannel(int32_t client_id, bool is_gpu_host);
+
+ void HandleMessage(GpuChannel* channel, IPC::Message* msg);
+
+ base::SharedMemoryHandle GetSharedHandle();
+
private:
- GpuPreferences gpu_preferences_;
scoped_refptr<base::TestSimpleTaskRunner> task_runner_;
scoped_refptr<base::TestSimpleTaskRunner> io_task_runner_;
std::unique_ptr<SyncPointManager> sync_point_manager_;
std::unique_ptr<TestGpuChannelManagerDelegate> channel_manager_delegate_;
std::unique_ptr<GpuChannelManager> channel_manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuChannelTestCommon);
};
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_channel_unittest.cc b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
index 8e913f5bd2e..e76f2dbf447 100644
--- a/chromium/gpu/ipc/service/gpu_channel_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
@@ -4,92 +4,13 @@
#include <stdint.h>
-#include "base/memory/shared_memory.h"
-#include "base/test/test_message_loop.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel.h"
-#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_test_common.h"
-#include "gpu/test_message_loop_type.h"
-#include "ipc/ipc_test_sink.h"
-#include "ui/gl/gl_surface_stub.h"
-#include "ui/gl/init/gl_factory.h"
-#include "ui/gl/test/gl_surface_test_support.h"
namespace gpu {
-class GpuChannelTest : public GpuChannelTestCommon {
- public:
- GpuChannelTest()
- : GpuChannelTestCommon(),
- message_loop_(test::GetMessageLoopTypeForGpu()) {}
- ~GpuChannelTest() override {}
-
- void SetUp() override {
- // We need GL bindings to actually initialize command buffers.
- gl::GLSurfaceTestSupport::InitializeOneOffWithStubBindings();
-
- GpuChannelTestCommon::SetUp();
- }
-
- void TearDown() override {
- GpuChannelTestCommon::TearDown();
-
- gl::init::ShutdownGL();
- }
-
- GpuChannel* CreateChannel(int32_t client_id,
- bool allow_view_command_buffers,
- bool allow_real_time_streams) {
- DCHECK(channel_manager());
- uint64_t kClientTracingId = 1;
- channel_manager()->EstablishChannel(
- client_id, kClientTracingId, false /* preempts */,
- allow_view_command_buffers, allow_real_time_streams);
- return channel_manager()->LookupChannel(client_id);
- }
-
- void HandleMessage(GpuChannel* channel, IPC::Message* msg) {
- TestGpuChannel* test_channel = static_cast<TestGpuChannel*>(channel);
-
- // Some IPCs (such as GpuCommandBufferMsg_Initialize) will generate more
- // delayed responses, drop those if they exist.
- test_channel->sink()->ClearMessages();
-
- test_channel->HandleMessageForTesting(*msg);
- if (msg->is_sync()) {
- const IPC::Message* reply_msg = test_channel->sink()->GetMessageAt(0);
- CHECK(reply_msg);
- CHECK(!reply_msg->is_reply_error());
-
- CHECK(IPC::SyncMessage::IsMessageReplyTo(
- *reply_msg, IPC::SyncMessage::GetMessageId(*msg)));
-
- IPC::MessageReplyDeserializer* deserializer =
- static_cast<IPC::SyncMessage*>(msg)->GetReplyDeserializer();
- CHECK(deserializer);
- deserializer->SerializeOutputParameters(*reply_msg);
-
- delete deserializer;
-
- test_channel->sink()->ClearMessages();
- }
-
- delete msg;
- }
-
- base::SharedMemoryHandle GetSharedHandle() {
- base::SharedMemory shared_memory;
- shared_memory.CreateAnonymous(10);
- base::SharedMemoryHandle shmem_handle;
- shared_memory.ShareToProcess(base::GetCurrentProcessHandle(),
- &shmem_handle);
- return shmem_handle;
- }
-
- private:
- base::TestMessageLoop message_loop_;
-};
+class GpuChannelTest : public GpuChannelTestCommon {};
#if defined(OS_WIN)
const SurfaceHandle kFakeSurfaceHandle = reinterpret_cast<SurfaceHandle>(1);
@@ -99,9 +20,8 @@ const SurfaceHandle kFakeSurfaceHandle = 1;
TEST_F(GpuChannelTest, CreateViewCommandBufferAllowed) {
int32_t kClientId = 1;
- bool allow_view_command_buffers = true;
- GpuChannel* channel =
- CreateChannel(kClientId, allow_view_command_buffers, false);
+ bool is_gpu_host = true;
+ GpuChannel* channel = CreateChannel(kClientId, is_gpu_host);
ASSERT_TRUE(channel);
SurfaceHandle surface_handle = kFakeSurfaceHandle;
@@ -128,9 +48,8 @@ TEST_F(GpuChannelTest, CreateViewCommandBufferAllowed) {
TEST_F(GpuChannelTest, CreateViewCommandBufferDisallowed) {
int32_t kClientId = 1;
- bool allow_view_command_buffers = false;
- GpuChannel* channel =
- CreateChannel(kClientId, allow_view_command_buffers, false);
+ bool is_gpu_host = false;
+ GpuChannel* channel = CreateChannel(kClientId, is_gpu_host);
ASSERT_TRUE(channel);
SurfaceHandle surface_handle = kFakeSurfaceHandle;
@@ -157,7 +76,7 @@ TEST_F(GpuChannelTest, CreateViewCommandBufferDisallowed) {
TEST_F(GpuChannelTest, CreateOffscreenCommandBuffer) {
int32_t kClientId = 1;
- GpuChannel* channel = CreateChannel(kClientId, true, false);
+ GpuChannel* channel = CreateChannel(kClientId, true);
ASSERT_TRUE(channel);
int32_t kRouteId = 1;
@@ -181,7 +100,7 @@ TEST_F(GpuChannelTest, CreateOffscreenCommandBuffer) {
TEST_F(GpuChannelTest, IncompatibleStreamIds) {
int32_t kClientId = 1;
- GpuChannel* channel = CreateChannel(kClientId, true, false);
+ GpuChannel* channel = CreateChannel(kClientId, true);
ASSERT_TRUE(channel);
// Create first context.
@@ -222,59 +141,10 @@ TEST_F(GpuChannelTest, IncompatibleStreamIds) {
EXPECT_FALSE(stub);
}
-TEST_F(GpuChannelTest, StreamLifetime) {
- int32_t kClientId = 1;
- GpuChannel* channel = CreateChannel(kClientId, true, false);
- ASSERT_TRUE(channel);
-
- // Create first context.
- int32_t kRouteId1 = 1;
- int32_t kStreamId1 = 1;
- GpuStreamPriority kStreamPriority1 = GpuStreamPriority::NORMAL;
- GPUCreateCommandBufferConfig init_params;
- init_params.surface_handle = kNullSurfaceHandle;
- init_params.share_group_id = MSG_ROUTING_NONE;
- init_params.stream_id = kStreamId1;
- init_params.stream_priority = kStreamPriority1;
- init_params.attribs = gles2::ContextCreationAttribHelper();
- init_params.active_url = GURL();
- bool result = false;
- gpu::Capabilities capabilities;
- HandleMessage(channel, new GpuChannelMsg_CreateCommandBuffer(
- init_params, kRouteId1, GetSharedHandle(), &result,
- &capabilities));
- EXPECT_TRUE(result);
-
- GpuCommandBufferStub* stub = channel->LookupCommandBuffer(kRouteId1);
- EXPECT_TRUE(stub);
-
- HandleMessage(channel, new GpuChannelMsg_DestroyCommandBuffer(kRouteId1));
- stub = channel->LookupCommandBuffer(kRouteId1);
- EXPECT_FALSE(stub);
-
- // Create second context in same share group but different stream.
- int32_t kRouteId2 = 2;
- int32_t kStreamId2 = 2;
- GpuStreamPriority kStreamPriority2 = GpuStreamPriority::LOW;
-
- init_params.share_group_id = MSG_ROUTING_NONE;
- init_params.stream_id = kStreamId2;
- init_params.stream_priority = kStreamPriority2;
- init_params.attribs = gles2::ContextCreationAttribHelper();
- init_params.active_url = GURL();
- HandleMessage(channel, new GpuChannelMsg_CreateCommandBuffer(
- init_params, kRouteId2, GetSharedHandle(), &result,
- &capabilities));
- EXPECT_TRUE(result);
-
- stub = channel->LookupCommandBuffer(kRouteId2);
- EXPECT_TRUE(stub);
-}
-
TEST_F(GpuChannelTest, RealTimeStreamsDisallowed) {
int32_t kClientId = 1;
- bool allow_real_time_streams = false;
- GpuChannel* channel = CreateChannel(kClientId, true, allow_real_time_streams);
+ bool is_gpu_host = false;
+ GpuChannel* channel = CreateChannel(kClientId, is_gpu_host);
ASSERT_TRUE(channel);
// Create first context.
@@ -301,8 +171,8 @@ TEST_F(GpuChannelTest, RealTimeStreamsDisallowed) {
TEST_F(GpuChannelTest, RealTimeStreamsAllowed) {
int32_t kClientId = 1;
- bool allow_real_time_streams = true;
- GpuChannel* channel = CreateChannel(kClientId, true, allow_real_time_streams);
+ bool is_gpu_host = true;
+ GpuChannel* channel = CreateChannel(kClientId, is_gpu_host);
ASSERT_TRUE(channel);
// Create first context.
@@ -329,7 +199,7 @@ TEST_F(GpuChannelTest, RealTimeStreamsAllowed) {
TEST_F(GpuChannelTest, CreateFailsIfSharedContextIsLost) {
int32_t kClientId = 1;
- GpuChannel* channel = CreateChannel(kClientId, false, false);
+ GpuChannel* channel = CreateChannel(kClientId, false);
ASSERT_TRUE(channel);
// Create first context, we will share this one.
diff --git a/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc b/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
index 95171c33230..c30df4b4c2c 100644
--- a/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
@@ -22,12 +22,14 @@
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/sync_token.h"
+#include "gpu/command_buffer/service/command_executor.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/preemption_flag.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
@@ -117,10 +119,8 @@ class GpuCommandBufferMemoryTracker : public gles2::MemoryTracker {
&GpuCommandBufferMemoryTracker::LogMemoryStatsPeriodic);
}
- void TrackMemoryAllocatedChange(
- size_t old_size, size_t new_size) override {
- tracking_group_->TrackMemoryAllocatedChange(
- old_size, new_size);
+ void TrackMemoryAllocatedChange(size_t old_size, size_t new_size) override {
+ tracking_group_->TrackMemoryAllocatedChange(old_size, new_size);
}
bool EnsureGPUMemoryAvailable(size_t size_needed) override {
@@ -213,21 +213,20 @@ DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
return base::WrapUnique(new DevToolsChannelData(res.release()));
}
-CommandBufferId GetCommandBufferID(int channel_id, int32_t route_id) {
- return CommandBufferId::FromUnsafeValue(
- (static_cast<uint64_t>(channel_id) << 32) | route_id);
-}
-
} // namespace
std::unique_ptr<GpuCommandBufferStub> GpuCommandBufferStub::Create(
GpuChannel* channel,
GpuCommandBufferStub* share_command_buffer_stub,
const GPUCreateCommandBufferConfig& init_params,
+ CommandBufferId command_buffer_id,
+ SequenceId sequence_id,
+ int32_t stream_id,
int32_t route_id,
std::unique_ptr<base::SharedMemory> shared_state_shm) {
std::unique_ptr<GpuCommandBufferStub> stub(
- new GpuCommandBufferStub(channel, init_params, route_id));
+ new GpuCommandBufferStub(channel, init_params, command_buffer_id,
+ sequence_id, stream_id, route_id));
if (!stub->Initialize(share_command_buffer_stub, init_params,
std::move(shared_state_shm)))
return nullptr;
@@ -237,13 +236,17 @@ std::unique_ptr<GpuCommandBufferStub> GpuCommandBufferStub::Create(
GpuCommandBufferStub::GpuCommandBufferStub(
GpuChannel* channel,
const GPUCreateCommandBufferConfig& init_params,
+ CommandBufferId command_buffer_id,
+ SequenceId sequence_id,
+ int32_t stream_id,
int32_t route_id)
: channel_(channel),
initialized_(false),
surface_handle_(init_params.surface_handle),
use_virtualized_gl_context_(false),
- command_buffer_id_(GetCommandBufferID(channel->client_id(), route_id)),
- stream_id_(init_params.stream_id),
+ command_buffer_id_(command_buffer_id),
+ sequence_id_(sequence_id),
+ stream_id_(stream_id),
route_id_(route_id),
last_flush_count_(0),
waiting_for_sync_point_(false),
@@ -256,14 +259,12 @@ GpuCommandBufferStub::~GpuCommandBufferStub() {
}
GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
- return channel()->gpu_channel_manager()->gpu_memory_manager();
+ return channel()->gpu_channel_manager()->gpu_memory_manager();
}
bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
- "GPUTask",
- "data",
- DevToolsChannelData::CreateForChannel(channel()));
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "GPUTask",
+ "data", DevToolsChannelData::CreateForChannel(channel()));
FastSetActiveURL(active_url_, active_url_hash_, channel_);
bool have_context = false;
@@ -303,12 +304,9 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
OnRegisterTransferBuffer);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
OnDestroyTransferBuffer);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_WaitSyncToken,
- OnWaitSyncToken)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncToken,
- OnSignalSyncToken)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
- OnSignalQuery)
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_WaitSyncToken, OnWaitSyncToken)
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncToken, OnSignalSyncToken)
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery, OnSignalQuery)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
@@ -414,7 +412,7 @@ void GpuCommandBufferStub::PerformWork() {
if (executor_) {
uint32_t current_unprocessed_num =
- channel()->gpu_channel_manager()->GetUnprocessedOrderNum();
+ channel()->sync_point_manager()->GetUnprocessedOrderNum();
// We're idle when no messages were processed or scheduled.
bool is_idle = (previous_processed_num_ == current_unprocessed_num);
if (!is_idle && !last_idle_time_.is_null()) {
@@ -445,7 +443,7 @@ bool GpuCommandBufferStub::HasUnprocessedCommands() {
if (command_buffer_) {
CommandBuffer::State state = command_buffer_->GetLastState();
return command_buffer_->GetPutOffset() != state.get_offset &&
- !error::IsError(state.error);
+ !error::IsError(state.error);
}
return false;
}
@@ -470,7 +468,7 @@ void GpuCommandBufferStub::ScheduleDelayedWork(base::TimeDelta delay) {
// Idle when no messages are processed between now and when
// PollWork is called.
previous_processed_num_ =
- channel()->gpu_channel_manager()->GetProcessedOrderNum();
+ channel()->sync_point_manager()->GetProcessedOrderNum();
if (last_idle_time_.is_null())
last_idle_time_ = current_time;
@@ -517,8 +515,9 @@ void GpuCommandBufferStub::Destroy() {
// (exit_on_context_lost workaround), then don't tell the browser about
// offscreen context destruction here since it's not client-invoked, and
// might bypass the 3D API blocking logic.
- if ((surface_handle_ == gpu::kNullSurfaceHandle) && !active_url_.is_empty()
- && !gpu_channel_manager->is_exiting_for_lost_context()) {
+ if ((surface_handle_ == gpu::kNullSurfaceHandle) &&
+ !active_url_.is_empty() &&
+ !gpu_channel_manager->is_exiting_for_lost_context()) {
gpu_channel_manager->delegate()->DidDestroyOffscreenContext(active_url_);
}
}
@@ -530,7 +529,10 @@ void GpuCommandBufferStub::Destroy() {
// destroy it before those.
executor_.reset();
- sync_point_client_.reset();
+ if (sync_point_client_state_) {
+ sync_point_client_state_->Destroy();
+ sync_point_client_state_ = nullptr;
+ }
bool have_context = false;
if (decoder_ && decoder_->GetGLContext()) {
@@ -639,18 +641,22 @@ bool GpuCommandBufferStub::Initialize(
use_virtualized_gl_context_ = false;
#endif
- command_buffer_.reset(new CommandBufferService(
- context_group_->transfer_buffer_manager()));
+ command_buffer_.reset(
+ new CommandBufferService(context_group_->transfer_buffer_manager()));
decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get()));
executor_.reset(new CommandExecutor(command_buffer_.get(), decoder_.get(),
decoder_.get()));
- sync_point_client_ = base::MakeUnique<SyncPointClient>(
- channel_->sync_point_manager(),
- channel_->GetSyncPointOrderData(stream_id_),
- CommandBufferNamespace::GPU_IO, command_buffer_id_);
- executor_->SetPreemptByFlag(channel_->preempted_flag());
+ sync_point_client_state_ =
+ channel_->sync_point_manager()->CreateSyncPointClientState(
+ CommandBufferNamespace::GPU_IO, command_buffer_id_, sequence_id_);
+
+ // TODO(sunnyps): Hook callback to gpu scheduler.
+ if (channel_->preempted_flag()) {
+ executor_->SetPauseExecutionCallback(
+ base::Bind(&PreemptionFlag::IsSet, channel_->preempted_flag()));
+ }
decoder_->set_engine(executor_.get());
@@ -767,8 +773,7 @@ bool GpuCommandBufferStub::Initialize(
}
if (!context->GetGLStateRestorer()) {
- context->SetGLStateRestorer(
- new GLStateRestorerImpl(decoder_->AsWeakPtr()));
+ context->SetGLStateRestorer(new GLStateRestorerImpl(decoder_->AsWeakPtr()));
}
if (!context_group_->has_program_cache() &&
@@ -788,12 +793,10 @@ bool GpuCommandBufferStub::Initialize(
decoder_->set_log_commands(true);
}
- decoder_->GetLogger()->SetMsgCallback(
- base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
- base::Unretained(this)));
- decoder_->SetShaderCacheCallback(
- base::Bind(&GpuCommandBufferStub::SendCachedShader,
- base::Unretained(this)));
+ decoder_->GetLogger()->SetMsgCallback(base::Bind(
+ &GpuCommandBufferStub::SendConsoleMessage, base::Unretained(this)));
+ decoder_->SetShaderCacheCallback(base::Bind(
+ &GpuCommandBufferStub::SendCachedShader, base::Unretained(this)));
decoder_->SetFenceSyncReleaseCallback(base::Bind(
&GpuCommandBufferStub::OnFenceSyncRelease, base::Unretained(this)));
decoder_->SetWaitSyncTokenCallback(base::Bind(
@@ -918,8 +921,8 @@ void GpuCommandBufferStub::CheckCompleteWaits() {
if (wait_for_token_ || wait_for_get_offset_) {
CommandBuffer::State state = command_buffer_->GetLastState();
if (wait_for_token_ &&
- (CommandBuffer::InRange(
- wait_for_token_->start, wait_for_token_->end, state.token) ||
+ (CommandBuffer::InRange(wait_for_token_->start, wait_for_token_->end,
+ state.token) ||
state.error != error::kNoError)) {
ReportState();
GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
@@ -929,8 +932,7 @@ void GpuCommandBufferStub::CheckCompleteWaits() {
}
if (wait_for_get_offset_ &&
(CommandBuffer::InRange(wait_for_get_offset_->start,
- wait_for_get_offset_->end,
- state.get_offset) ||
+ wait_for_get_offset_->end, state.get_offset) ||
state.error != error::kNoError)) {
ReportState();
GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
@@ -944,7 +946,8 @@ void GpuCommandBufferStub::CheckCompleteWaits() {
void GpuCommandBufferStub::OnAsyncFlush(
int32_t put_offset,
uint32_t flush_count,
- const std::vector<ui::LatencyInfo>& latency_info) {
+ const std::vector<ui::LatencyInfo>& latency_info,
+ const std::vector<SyncToken>& sync_token_fences) {
TRACE_EVENT1(
"gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
DCHECK(command_buffer_);
@@ -1008,7 +1011,9 @@ void GpuCommandBufferStub::OnCommandProcessed() {
channel_->watchdog()->CheckArmed();
}
-void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }
+void GpuCommandBufferStub::ReportState() {
+ command_buffer_->UpdateState();
+}
void GpuCommandBufferStub::PutChanged() {
FastSetActiveURL(active_url_, active_url_hash_, channel_);
@@ -1017,7 +1022,7 @@ void GpuCommandBufferStub::PutChanged() {
void GpuCommandBufferStub::OnSignalSyncToken(const SyncToken& sync_token,
uint32_t id) {
- if (!sync_point_client_->WaitNonThreadSafe(
+ if (!sync_point_client_state_->WaitNonThreadSafe(
sync_token, channel_->task_runner(),
base::Bind(&GpuCommandBufferStub::OnSignalAck, this->AsWeakPtr(),
id))) {
@@ -1033,13 +1038,10 @@ void GpuCommandBufferStub::OnSignalQuery(uint32_t query_id, uint32_t id) {
if (decoder_) {
gles2::QueryManager* query_manager = decoder_->GetQueryManager();
if (query_manager) {
- gles2::QueryManager::Query* query =
- query_manager->GetQuery(query_id);
+ gles2::QueryManager::Query* query = query_manager->GetQuery(query_id);
if (query) {
- query->AddCallback(
- base::Bind(&GpuCommandBufferStub::OnSignalAck,
- this->AsWeakPtr(),
- id));
+ query->AddCallback(base::Bind(&GpuCommandBufferStub::OnSignalAck,
+ this->AsWeakPtr(), id));
return;
}
}
@@ -1056,7 +1058,7 @@ void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) {
mailbox_manager->PushTextureUpdates(sync_token);
command_buffer_->SetReleaseCount(release);
- sync_point_client_->ReleaseFenceSync(release);
+ sync_point_client_state_->ReleaseFenceSync(release);
}
void GpuCommandBufferStub::OnDescheduleUntilFinished() {
@@ -1064,14 +1066,14 @@ void GpuCommandBufferStub::OnDescheduleUntilFinished() {
DCHECK(executor_->HasPollingWork());
executor_->SetScheduled(false);
- channel_->OnStreamRescheduled(stream_id_, false);
+ channel_->OnCommandBufferDescheduled(this);
}
void GpuCommandBufferStub::OnRescheduleAfterFinished() {
DCHECK(!executor_->scheduled());
executor_->SetScheduled(true);
- channel_->OnStreamRescheduled(stream_id_, true);
+ channel_->OnCommandBufferScheduled(this);
}
bool GpuCommandBufferStub::OnWaitSyncToken(const SyncToken& sync_token) {
@@ -1080,14 +1082,14 @@ bool GpuCommandBufferStub::OnWaitSyncToken(const SyncToken& sync_token) {
TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncToken", this, "GpuCommandBufferStub",
this);
- waiting_for_sync_point_ = sync_point_client_->WaitNonThreadSafe(
+ waiting_for_sync_point_ = sync_point_client_state_->WaitNonThreadSafe(
sync_token, channel_->task_runner(),
base::Bind(&GpuCommandBufferStub::OnWaitSyncTokenCompleted, AsWeakPtr(),
sync_token));
if (waiting_for_sync_point_) {
executor_->SetScheduled(false);
- channel_->OnStreamRescheduled(stream_id_, false);
+ channel_->OnCommandBufferDescheduled(this);
return true;
}
@@ -1102,14 +1104,12 @@ void GpuCommandBufferStub::OnWaitSyncTokenCompleted(
DCHECK(waiting_for_sync_point_);
TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncTokenCompleted", this,
"GpuCommandBufferStub", this);
+ // Don't call PullTextureUpdates here because we can't MakeCurrent if we're
+ // executing commands on another context. The WaitSyncToken command will run
+ // again and call PullTextureUpdates once this command buffer gets scheduled.
waiting_for_sync_point_ = false;
-
- gles2::MailboxManager* mailbox_manager = context_group_->mailbox_manager();
- if (mailbox_manager->UsesSync() && MakeCurrent())
- mailbox_manager->PullTextureUpdates(sync_token);
-
executor_->SetScheduled(true);
- channel_->OnStreamRescheduled(stream_id_, true);
+ channel_->OnCommandBufferScheduled(this);
}
void GpuCommandBufferStub::OnCreateImage(
@@ -1132,8 +1132,8 @@ void GpuCommandBufferStub::OnCreateImage(
return;
}
- if (!gpu::IsGpuMemoryBufferFormatSupported(format,
- decoder_->GetCapabilities())) {
+ if (!gpu::IsImageFromGpuMemoryBufferFormatSupported(
+ format, decoder_->GetCapabilities())) {
LOG(ERROR) << "Format is not supported.";
return;
}
@@ -1156,7 +1156,7 @@ void GpuCommandBufferStub::OnCreateImage(
image_manager->AddImage(image.get(), id);
if (image_release_count)
- sync_point_client_->ReleaseFenceSync(image_release_count);
+ sync_point_client_state_->ReleaseFenceSync(image_release_count);
}
void GpuCommandBufferStub::OnDestroyImage(int32_t id) {
diff --git a/chromium/gpu/ipc/service/gpu_command_buffer_stub.h b/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
index 3803172a1fa..d65ef938251 100644
--- a/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
@@ -21,22 +21,25 @@
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
-#include "gpu/command_buffer/service/command_executor.h"
#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/sequence_id.h"
#include "gpu/gpu_export.h"
#include "gpu/ipc/common/surface_handle.h"
#include "gpu/ipc/service/gpu_memory_manager.h"
#include "gpu/ipc/service/image_transport_surface_delegate.h"
#include "ipc/ipc_listener.h"
#include "ipc/ipc_sender.h"
-#include "ui/events/latency_info.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/swap_result.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_preference.h"
+#include "ui/latency/latency_info.h"
#include "url/gurl.h"
+struct GPUCreateCommandBufferConfig;
+struct GpuCommandBufferMsg_CreateImage_Params;
+
namespace gl {
class GLShareGroup;
}
@@ -44,16 +47,10 @@ class GLShareGroup;
namespace gpu {
struct Mailbox;
struct SyncToken;
-class SyncPointClient;
-}
-
-struct GPUCreateCommandBufferConfig;
-struct GpuCommandBufferMsg_CreateImage_Params;
-
-namespace gpu {
-
-class GpuChannel;
struct WaitForCommandState;
+class CommandExecutor;
+class GpuChannel;
+class SyncPointClientState;
class GPU_EXPORT GpuCommandBufferStub
: public IPC::Listener,
@@ -74,11 +71,14 @@ class GPU_EXPORT GpuCommandBufferStub
LatencyInfoCallback;
static std::unique_ptr<GpuCommandBufferStub> Create(
- GpuChannel* channel,
- GpuCommandBufferStub* share_group,
- const GPUCreateCommandBufferConfig& init_params,
- int32_t route_id,
- std::unique_ptr<base::SharedMemory> shared_state_shm);
+ GpuChannel* channel,
+ GpuCommandBufferStub* share_group,
+ const GPUCreateCommandBufferConfig& init_params,
+ CommandBufferId command_buffer_id,
+ SequenceId sequence_id,
+ int32_t stream_id,
+ int32_t route_id,
+ std::unique_ptr<base::SharedMemory> shared_state_shm);
~GpuCommandBufferStub() override;
@@ -118,7 +118,8 @@ class GPU_EXPORT GpuCommandBufferStub
// Unique command buffer ID for this command buffer stub.
CommandBufferId command_buffer_id() const { return command_buffer_id_; }
- // Identifies the stream for this command buffer.
+ SequenceId sequence_id() const { return sequence_id_; }
+
int32_t stream_id() const { return stream_id_; }
// Sends a message to the console.
@@ -136,6 +137,9 @@ class GPU_EXPORT GpuCommandBufferStub
private:
GpuCommandBufferStub(GpuChannel* channel,
const GPUCreateCommandBufferConfig& init_params,
+ CommandBufferId command_buffer_id,
+ SequenceId sequence_id,
+ int32_t stream_id,
int32_t route_id);
bool Initialize(GpuCommandBufferStub* share_group,
@@ -161,7 +165,8 @@ class GPU_EXPORT GpuCommandBufferStub
IPC::Message* reply_message);
void OnAsyncFlush(int32_t put_offset,
uint32_t flush_count,
- const std::vector<ui::LatencyInfo>& latency_info);
+ const std::vector<ui::LatencyInfo>& latency_info,
+ const std::vector<SyncToken>& sync_token_fences);
void OnRegisterTransferBuffer(int32_t id,
base::SharedMemoryHandle transfer_buffer,
uint32_t size);
@@ -219,6 +224,7 @@ class GPU_EXPORT GpuCommandBufferStub
const SurfaceHandle surface_handle_;
bool use_virtualized_gl_context_;
const CommandBufferId command_buffer_id_;
+ const SequenceId sequence_id_;
const int32_t stream_id_;
const int32_t route_id_;
uint32_t last_flush_count_;
@@ -226,7 +232,7 @@ class GPU_EXPORT GpuCommandBufferStub
std::unique_ptr<CommandBufferService> command_buffer_;
std::unique_ptr<gles2::GLES2Decoder> decoder_;
std::unique_ptr<CommandExecutor> executor_;
- std::unique_ptr<SyncPointClient> sync_point_client_;
+ scoped_refptr<SyncPointClientState> sync_point_client_state_;
scoped_refptr<gl::GLSurface> surface_;
scoped_refptr<gl::GLShareGroup> share_group_;
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index 8ed804264d8..de7fec9b424 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -22,6 +22,14 @@
#include "ui/gl/gl_switches.h"
#include "ui/gl/init/gl_factory.h"
+#if defined(USE_OZONE)
+#include "ui/ozone/public/ozone_platform.h"
+#endif
+
+#if defined(OS_WIN)
+#include "gpu/ipc/service/direct_composition_surface_win.h"
+#endif
+
namespace gpu {
namespace {
@@ -94,6 +102,14 @@ void CollectGraphicsInfo(gpu::GPUInfo& gpu_info) {
case gpu::kCollectInfoSuccess:
break;
}
+
+#if defined(OS_WIN)
+ if (gl::GetGLImplementation() == gl::kGLImplementationEGLGLES2 &&
+ gl::GLSurfaceEGL::IsDirectCompositionSupported() &&
+ DirectCompositionSurfaceWin::AreOverlaysSupported()) {
+ gpu_info.supports_overlays = true;
+ }
+#endif // defined(OS_WIN)
}
#endif // defined(OS_MACOSX)
@@ -175,6 +191,14 @@ bool GpuInit::InitializeAndStartSandbox(const base::CommandLine& command_line) {
base::TimeTicks before_initialize_one_off = base::TimeTicks::Now();
+#if defined(USE_OZONE)
+ // Initialize Ozone GPU after the watchdog in case it hangs. The sandbox
+ // may also have started at this point.
+ ui::OzonePlatform::InitParams params;
+ params.single_process = false;
+ ui::OzonePlatform::InitializeForGPU(params);
+#endif
+
// Load and initialize the GL implementation and locate the GL entry points if
// needed. This initialization may have already happened if running in the
// browser process, for example.
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory.cc
index d3dbb554a7e..8e179a4818d 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory.cc
@@ -12,8 +12,8 @@
#include "gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h"
#endif
-#if defined(USE_OZONE)
-#include "gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h"
+#if defined(OS_LINUX)
+#include "gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h"
#endif
namespace gpu {
@@ -24,10 +24,9 @@ GpuMemoryBufferFactory::CreateNativeType() {
#if defined(OS_MACOSX)
return base::WrapUnique(new GpuMemoryBufferFactoryIOSurface);
#endif
-#if defined(USE_OZONE)
- return base::WrapUnique(new GpuMemoryBufferFactoryOzoneNativePixmap);
+#if defined(OS_LINUX)
+ return base::WrapUnique(new GpuMemoryBufferFactoryNativePixmap);
#endif
- NOTREACHED();
return nullptr;
}
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory.h
index 539caa8b3a6..55c83dc14b7 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory.h
@@ -23,7 +23,8 @@ class GPU_EXPORT GpuMemoryBufferFactory {
public:
virtual ~GpuMemoryBufferFactory() {}
- // Creates a new factory instance for native GPU memory buffers.
+ // Creates a new factory instance for native GPU memory buffers. Returns null
+ // if native buffers are not supported.
static std::unique_ptr<GpuMemoryBufferFactory> CreateNativeType();
// Creates a new GPU memory buffer instance. A valid handle is returned on
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
index 8b266b8933b..cc08eb86e41 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
@@ -2,32 +2,33 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h"
+#include "gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h"
-#include "ui/ozone/gl/gl_image_ozone_native_pixmap.h"
-#include "ui/ozone/public/client_native_pixmap.h"
-#include "ui/ozone/public/client_native_pixmap_factory.h"
-#include "ui/ozone/public/native_pixmap.h"
+#include "ui/gfx/client_native_pixmap.h"
+#include "ui/gfx/native_pixmap.h"
+#include "ui/gl/gl_image_native_pixmap.h"
+
+#if defined(USE_OZONE)
#include "ui/ozone/public/ozone_platform.h"
#include "ui/ozone/public/surface_factory_ozone.h"
+#endif
namespace gpu {
-GpuMemoryBufferFactoryOzoneNativePixmap::
- GpuMemoryBufferFactoryOzoneNativePixmap() {}
+GpuMemoryBufferFactoryNativePixmap::GpuMemoryBufferFactoryNativePixmap() {}
-GpuMemoryBufferFactoryOzoneNativePixmap::
- ~GpuMemoryBufferFactoryOzoneNativePixmap() {}
+GpuMemoryBufferFactoryNativePixmap::~GpuMemoryBufferFactoryNativePixmap() {}
gfx::GpuMemoryBufferHandle
-GpuMemoryBufferFactoryOzoneNativePixmap::CreateGpuMemoryBuffer(
+GpuMemoryBufferFactoryNativePixmap::CreateGpuMemoryBuffer(
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
int client_id,
SurfaceHandle surface_handle) {
- scoped_refptr<ui::NativePixmap> pixmap =
+#if defined(USE_OZONE)
+ scoped_refptr<gfx::NativePixmap> pixmap =
ui::OzonePlatform::GetInstance()
->GetSurfaceFactoryOzone()
->CreateNativePixmap(surface_handle, size, format, usage);
@@ -39,7 +40,7 @@ GpuMemoryBufferFactoryOzoneNativePixmap::CreateGpuMemoryBuffer(
}
gfx::GpuMemoryBufferHandle new_handle;
- new_handle.type = gfx::OZONE_NATIVE_PIXMAP;
+ new_handle.type = gfx::NATIVE_PIXMAP;
new_handle.id = id;
new_handle.native_pixmap_handle = pixmap->ExportHandle();
@@ -52,9 +53,13 @@ GpuMemoryBufferFactoryOzoneNativePixmap::CreateGpuMemoryBuffer(
}
return new_handle;
+#else
+ NOTIMPLEMENTED();
+ return gfx::GpuMemoryBufferHandle();
+#endif
}
-void GpuMemoryBufferFactoryOzoneNativePixmap::DestroyGpuMemoryBuffer(
+void GpuMemoryBufferFactoryNativePixmap::DestroyGpuMemoryBuffer(
gfx::GpuMemoryBufferId id,
int client_id) {
base::AutoLock lock(native_pixmaps_lock_);
@@ -62,21 +67,21 @@ void GpuMemoryBufferFactoryOzoneNativePixmap::DestroyGpuMemoryBuffer(
native_pixmaps_.erase(key);
}
-ImageFactory* GpuMemoryBufferFactoryOzoneNativePixmap::AsImageFactory() {
+ImageFactory* GpuMemoryBufferFactoryNativePixmap::AsImageFactory() {
return this;
}
scoped_refptr<gl::GLImage>
-GpuMemoryBufferFactoryOzoneNativePixmap::CreateImageForGpuMemoryBuffer(
+GpuMemoryBufferFactoryNativePixmap::CreateImageForGpuMemoryBuffer(
const gfx::GpuMemoryBufferHandle& handle,
const gfx::Size& size,
gfx::BufferFormat format,
unsigned internalformat,
int client_id,
SurfaceHandle surface_handle) {
- DCHECK_EQ(handle.type, gfx::OZONE_NATIVE_PIXMAP);
+ DCHECK_EQ(handle.type, gfx::NATIVE_PIXMAP);
- scoped_refptr<ui::NativePixmap> pixmap;
+ scoped_refptr<gfx::NativePixmap> pixmap;
// If CreateGpuMemoryBuffer was used to allocate this buffer then avoid
// creating a new native pixmap for it.
@@ -90,10 +95,16 @@ GpuMemoryBufferFactoryOzoneNativePixmap::CreateImageForGpuMemoryBuffer(
// Create new pixmap from handle if one doesn't already exist.
if (!pixmap) {
+#if defined(USE_OZONE)
pixmap = ui::OzonePlatform::GetInstance()
->GetSurfaceFactoryOzone()
->CreateNativePixmapFromHandle(surface_handle, size, format,
handle.native_pixmap_handle);
+#else
+ // TODO(j.isorce): implement this to enable glCreateImageCHROMIUM on Linux.
+ // On going in http://codereview.chromium.org/2705213005, crbug.com/584248.
+ NOTIMPLEMENTED();
+#endif
if (!pixmap.get()) {
DLOG(ERROR) << "Failed to create pixmap from handle";
return nullptr;
@@ -106,8 +117,8 @@ GpuMemoryBufferFactoryOzoneNativePixmap::CreateImageForGpuMemoryBuffer(
}
}
- scoped_refptr<ui::GLImageOzoneNativePixmap> image(
- new ui::GLImageOzoneNativePixmap(size, internalformat));
+ scoped_refptr<gl::GLImageNativePixmap> image(
+ new gl::GLImageNativePixmap(size, internalformat));
if (!image->Initialize(pixmap.get(), format)) {
LOG(ERROR) << "Failed to create GLImage " << size.ToString() << " format "
<< static_cast<int>(format);
@@ -117,22 +128,26 @@ GpuMemoryBufferFactoryOzoneNativePixmap::CreateImageForGpuMemoryBuffer(
}
scoped_refptr<gl::GLImage>
-GpuMemoryBufferFactoryOzoneNativePixmap::CreateAnonymousImage(
+GpuMemoryBufferFactoryNativePixmap::CreateAnonymousImage(
const gfx::Size& size,
gfx::BufferFormat format,
unsigned internalformat) {
- scoped_refptr<ui::NativePixmap> pixmap =
- ui::OzonePlatform::GetInstance()
- ->GetSurfaceFactoryOzone()
- ->CreateNativePixmap(gpu::kNullSurfaceHandle, size, format,
- gfx::BufferUsage::SCANOUT);
+ scoped_refptr<gfx::NativePixmap> pixmap;
+#if defined(USE_OZONE)
+ pixmap = ui::OzonePlatform::GetInstance()
+ ->GetSurfaceFactoryOzone()
+ ->CreateNativePixmap(gpu::kNullSurfaceHandle, size, format,
+ gfx::BufferUsage::SCANOUT);
+#else
+ NOTIMPLEMENTED();
+#endif
if (!pixmap.get()) {
LOG(ERROR) << "Failed to create pixmap " << size.ToString() << " format "
<< static_cast<int>(format);
return nullptr;
}
- scoped_refptr<ui::GLImageOzoneNativePixmap> image(
- new ui::GLImageOzoneNativePixmap(size, internalformat));
+ scoped_refptr<gl::GLImageNativePixmap> image(
+ new gl::GLImageNativePixmap(size, internalformat));
if (!image->Initialize(pixmap.get(), format)) {
LOG(ERROR) << "Failed to create GLImage " << size.ToString() << " format "
<< static_cast<int>(format);
@@ -141,7 +156,7 @@ GpuMemoryBufferFactoryOzoneNativePixmap::CreateAnonymousImage(
return image;
}
-unsigned GpuMemoryBufferFactoryOzoneNativePixmap::RequiredTextureType() {
+unsigned GpuMemoryBufferFactoryNativePixmap::RequiredTextureType() {
return GL_TEXTURE_EXTERNAL_OES;
}
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h
index 45be7524496..463d0682cb4 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_OZONE_NATIVE_PIXMAP_H_
-#define GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_OZONE_NATIVE_PIXMAP_H_
+#ifndef GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_NATIVE_PIXMAP_H_
+#define GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_NATIVE_PIXMAP_H_
#include <unordered_map>
#include <utility>
@@ -14,7 +14,7 @@
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/gpu_export.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
-#include "ui/ozone/public/native_pixmap.h"
+#include "ui/gfx/native_pixmap.h"
namespace gl {
class GLImage;
@@ -22,12 +22,12 @@ class GLImage;
namespace gpu {
-class GPU_EXPORT GpuMemoryBufferFactoryOzoneNativePixmap
+class GPU_EXPORT GpuMemoryBufferFactoryNativePixmap
: public GpuMemoryBufferFactory,
public ImageFactory {
public:
- GpuMemoryBufferFactoryOzoneNativePixmap();
- ~GpuMemoryBufferFactoryOzoneNativePixmap() override;
+ GpuMemoryBufferFactoryNativePixmap();
+ ~GpuMemoryBufferFactoryNativePixmap() override;
// Overridden from GpuMemoryBufferFactory:
gfx::GpuMemoryBufferHandle CreateGpuMemoryBuffer(
@@ -58,16 +58,15 @@ class GPU_EXPORT GpuMemoryBufferFactoryOzoneNativePixmap
private:
using NativePixmapMapKey = std::pair<int, int>;
using NativePixmapMapKeyHash = base::IntPairHash<NativePixmapMapKey>;
- using NativePixmapMap =
- std::unordered_map<NativePixmapMapKey,
- scoped_refptr<ui::NativePixmap>,
- NativePixmapMapKeyHash>;
+ using NativePixmapMap = std::unordered_map<NativePixmapMapKey,
+ scoped_refptr<gfx::NativePixmap>,
+ NativePixmapMapKeyHash>;
NativePixmapMap native_pixmaps_;
base::Lock native_pixmaps_lock_;
- DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferFactoryOzoneNativePixmap);
+ DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferFactoryNativePixmap);
};
} // namespace gpu
-#endif // GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_OZONE_NATIVE_PIXMAP_H_
+#endif // GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_NATIVE_PIXMAP_H_
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap_unittest.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc
index 7fcf626e694..2c381184dd0 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h"
+#include "gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory_test_template.h"
namespace gpu {
namespace {
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferFactoryOzoneNativePixmap,
+INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferFactoryNativePixmap,
GpuMemoryBufferFactoryTest,
- GpuMemoryBufferFactoryOzoneNativePixmap);
+ GpuMemoryBufferFactoryNativePixmap);
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
index 70eb7db25cd..23b9d71d9a3 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
@@ -26,6 +26,11 @@
#include <windows.h>
#endif
+#if defined(USE_X11)
+#include <X11/Xatom.h>
+#include <X11/Xlib.h>
+#endif
+
namespace gpu {
namespace {
@@ -241,11 +246,47 @@ void GpuWatchdogThread::OnCheck(bool after_suspend) {
// Post a task to the watchdog thread to exit if the monitored thread does
// not respond in time.
- task_runner()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang,
- weak_factory_.GetWeakPtr()),
- timeout);
+ task_runner()->PostDelayedTask(FROM_HERE,
+ base::Bind(&GpuWatchdogThread::OnCheckTimeout,
+ weak_factory_.GetWeakPtr()),
+ timeout);
+}
+
+void GpuWatchdogThread::OnCheckTimeout() {
+ // Should not get here while the system is suspended.
+ DCHECK(!suspended_);
+
+ // If the watchdog woke up significantly behind schedule, disarm and reset
+ // the watchdog check. This is to prevent the watchdog thread from terminating
+ // when a machine wakes up from sleep or hibernation, which would otherwise
+ // appear to be a hang.
+ if (base::Time::Now() > suspension_timeout_) {
+ armed_ = false;
+ OnCheck(true);
+ return;
+ }
+
+ if (!base::subtle::NoBarrier_Load(&awaiting_acknowledge_)) {
+ // This should be possible only when CheckArmed() has been called but
+ // OnAcknowledge() hasn't.
+ // In this case the watched thread might need more time to finish posting
+ // OnAcknowledge task.
+
+ // Continue with the termination after an additional delay.
+ task_runner()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang,
+ weak_factory_.GetWeakPtr()),
+ 0.5 * timeout_);
+
+ // Post a task that does nothing on the watched thread to bump its priority
+ // and make it more likely to get scheduled.
+ watched_message_loop_->task_runner()->PostTask(
+ FROM_HERE, base::Bind(&base::DoNothing));
+ return;
+ }
+
+ DeliberatelyTerminateToRecoverFromHang();
}
// Use the --disable-gpu-watchdog command line switch to disable this.
@@ -268,16 +309,6 @@ void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
}
#endif
- // If the watchdog woke up significantly behind schedule, disarm and reset
- // the watchdog check. This is to prevent the watchdog thread from terminating
- // when a machine wakes up from sleep or hibernation, which would otherwise
- // appear to be a hang.
- if (base::Time::Now() > suspension_timeout_) {
- armed_ = false;
- OnCheck(true);
- return;
- }
-
#if defined(USE_X11)
XWindowAttributes attributes;
XGetWindowAttributes(display_, window_, &attributes);
@@ -370,6 +401,10 @@ void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
base::debug::Alias(&current_time);
base::debug::Alias(&current_timeticks);
+ int32_t awaiting_acknowledge =
+ base::subtle::NoBarrier_Load(&awaiting_acknowledge_);
+ base::debug::Alias(&awaiting_acknowledge);
+
LOG(ERROR) << "The GPU process hung. Terminating after "
<< timeout_.InMilliseconds() << " ms.";
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.h b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
index 99f42cffccc..09238c1e066 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.h
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
@@ -19,10 +19,6 @@
#include "ui/gfx/native_widget_types.h"
#if defined(USE_X11)
-extern "C" {
-#include <X11/Xlib.h>
-#include <X11/Xatom.h>
-}
#include <sys/poll.h>
#include "ui/base/x/x11_util.h" // nogncheck
#include "ui/gfx/x/x11_types.h" // nogncheck
@@ -73,6 +69,7 @@ class GPU_EXPORT GpuWatchdogThread : public base::Thread,
void OnAcknowledge();
void OnCheck(bool after_suspend);
+ void OnCheckTimeout();
void DeliberatelyTerminateToRecoverFromHang();
#if defined(USE_X11)
void SetupXServer();
diff --git a/chromium/gpu/ipc/service/image_transport_surface_android.cc b/chromium/gpu/ipc/service/image_transport_surface_android.cc
index a80e364ec72..d5be8e42f11 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_android.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_android.cc
@@ -30,14 +30,15 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
LOG(WARNING) << "Failed to acquire native widget.";
return nullptr;
}
- scoped_refptr<gl::GLSurface> surface = new gl::NativeViewGLSurfaceEGL(window);
+ scoped_refptr<gl::GLSurface> surface =
+ new gl::NativeViewGLSurfaceEGL(window, nullptr);
bool initialize_success = surface->Initialize(format);
ANativeWindow_release(window);
if (!initialize_success)
return scoped_refptr<gl::GLSurface>();
- return scoped_refptr<gl::GLSurface>(
- new PassThroughImageTransportSurface(delegate, surface.get()));
+ return scoped_refptr<gl::GLSurface>(new PassThroughImageTransportSurface(
+ delegate, surface.get(), kMultiWindowSwapIntervalDefault));
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/image_transport_surface_delegate.h b/chromium/gpu/ipc/service/image_transport_surface_delegate.h
index 8e421b41abe..44edc0f643d 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_delegate.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_delegate.h
@@ -9,8 +9,8 @@
#include "gpu/command_buffer/common/texture_in_use_response.h"
#include "gpu/gpu_export.h"
#include "gpu/ipc/common/surface_handle.h"
-#include "ui/events/latency_info.h"
#include "ui/gfx/swap_result.h"
+#include "ui/latency/latency_info.h"
#if defined(OS_MACOSX)
#include "ui/base/cocoa/remote_layer_api.h"
diff --git a/chromium/gpu/ipc/service/image_transport_surface_linux.cc b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
index 54cf4b2571a..6453e178c29 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_linux.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
@@ -5,6 +5,7 @@
#include "gpu/ipc/service/image_transport_surface.h"
#include "gpu/ipc/service/pass_through_image_transport_surface.h"
+#include "ui/gl/gl_surface_glx.h"
#include "ui/gl/init/gl_factory.h"
namespace gpu {
@@ -16,15 +17,20 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
gl::GLSurfaceFormat format) {
DCHECK_NE(surface_handle, kNullSurfaceHandle);
scoped_refptr<gl::GLSurface> surface;
+ MultiWindowSwapInterval multi_window_swap_interval =
+ kMultiWindowSwapIntervalDefault;
#if defined(USE_OZONE)
surface = gl::init::CreateSurfacelessViewGLSurface(surface_handle);
#endif
- if (!surface)
+ if (!surface) {
surface = gl::init::CreateViewGLSurface(surface_handle);
+ if (gl::GetGLImplementation() == gl::kGLImplementationDesktopGL)
+ multi_window_swap_interval = kMultiWindowSwapIntervalForceZero;
+ }
if (!surface)
return surface;
- return scoped_refptr<gl::GLSurface>(
- new PassThroughImageTransportSurface(delegate, surface.get()));
+ return scoped_refptr<gl::GLSurface>(new PassThroughImageTransportSurface(
+ delegate, surface.get(), multi_window_swap_interval));
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/image_transport_surface_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_mac.mm
index 2dd88d5a9c7..288e6ebf76c 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_mac.mm
@@ -69,7 +69,8 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
if (!surface.get() || !surface->Initialize(format))
return surface;
return make_scoped_refptr<gl::GLSurface>(
- new PassThroughImageTransportSurface(delegate, surface.get()));
+ new PassThroughImageTransportSurface(
+ delegate, surface.get(), kMultiWindowSwapIntervalDefault));
}
}
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
index 39a1435292e..8b1a0ac1ca8 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
@@ -14,9 +14,9 @@
#include "gpu/ipc/service/gpu_command_buffer_stub.h"
#include "gpu/ipc/service/image_transport_surface.h"
#include "ui/base/cocoa/remote_layer_api.h"
-#include "ui/events/latency_info.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_switching_observer.h"
+#include "ui/latency/latency_info.h"
@class CAContext;
@class CALayer;
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index a22a1d12fda..1c75718e5a7 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -190,7 +190,7 @@ gfx::SwapResult ImageTransportSurfaceOverlayMac::SwapBuffersInternal(
// If we have gotten more than one frame ahead of GL, wait for the previous
// frame to complete.
if (previous_frame_fence_) {
- TRACE_EVENT0("gpu", "ImageTransportSurfaceOverlayMac::ClientWait");
+ TRACE_EVENT0("gpu", "ClientWait");
// Ensure we are using the context with which the fence was created.
gl::ScopedCGLSetCurrentContext scoped_set_current(fence_context_obj_);
@@ -222,8 +222,11 @@ gfx::SwapResult ImageTransportSurfaceOverlayMac::SwapBuffersInternal(
base::scoped_policy::RETAIN);
// A glFlush is necessary to ensure correct content appears.
- glFlush();
- CheckGLErrors("After fence/flush");
+ {
+ TRACE_EVENT0("gpu", "glFlush");
+ glFlush();
+ CheckGLErrors("After fence/flush");
+ }
after_flush_before_commit_time = base::TimeTicks::Now();
UMA_HISTOGRAM_TIMES("GPU.IOSurface.GLFlushTime",
@@ -231,7 +234,7 @@ gfx::SwapResult ImageTransportSurfaceOverlayMac::SwapBuffersInternal(
} else {
// GLFence isn't supported - issue a glFinish on each frame to ensure
// there is backpressure from GL.
- TRACE_EVENT0("gpu", "ImageTransportSurfaceOverlayMac::glFinish");
+ TRACE_EVENT0("gpu", "glFinish");
CheckGLErrors("Before finish");
glFinish();
CheckGLErrors("After finish");
@@ -239,8 +242,11 @@ gfx::SwapResult ImageTransportSurfaceOverlayMac::SwapBuffersInternal(
}
bool fullscreen_low_power_layer_valid = false;
- ca_layer_tree_coordinator_->CommitPendingTreesToCA(
- pixel_damage_rect, &fullscreen_low_power_layer_valid);
+ {
+ TRACE_EVENT0("gpu", "CommitPendingTreesToCA");
+ ca_layer_tree_coordinator_->CommitPendingTreesToCA(
+ pixel_damage_rect, &fullscreen_low_power_layer_valid);
+ }
base::TimeTicks after_transaction_time = base::TimeTicks::Now();
UMA_HISTOGRAM_TIMES("GPU.IOSurface.CATransactionTime",
@@ -336,8 +342,19 @@ bool ImageTransportSurfaceOverlayMac::ScheduleOverlayPlane(
DLOG(ERROR) << "Not an IOSurface image.";
return false;
}
- return ca_layer_tree_coordinator_->SetPendingGLRendererBackbuffer(
- io_surface_image->io_surface());
+ const ui::CARendererLayerParams overlay_as_calayer_params(
+ false, // is_clipped
+ gfx::Rect(), // clip_rect
+ 0, // sorting_context_id
+ gfx::Transform(), image,
+ crop_rect, // contents_rect
+ pixel_frame_rect, // rect
+ SK_ColorTRANSPARENT, // background_color
+ 0, // edge_aa_mask
+ 1.f, // opacity
+ GL_LINEAR); // filter;
+ return ca_layer_tree_coordinator_->GetPendingCARendererLayerTree()
+ ->ScheduleCALayer(overlay_as_calayer_params);
}
bool ImageTransportSurfaceOverlayMac::ScheduleCALayer(
diff --git a/chromium/gpu/ipc/service/image_transport_surface_win.cc b/chromium/gpu/ipc/service/image_transport_surface_win.cc
index 236c42c38bc..a13a2d558ee 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_win.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_win.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "base/metrics/histogram_macros.h"
#include "base/win/windows_version.h"
#include "gpu/ipc/service/child_window_surface_win.h"
#include "gpu/ipc/service/direct_composition_surface_win.h"
@@ -40,6 +41,8 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
DCHECK_NE(surface_handle, kNullSurfaceHandle);
scoped_refptr<gl::GLSurface> surface;
+ MultiWindowSwapInterval multi_window_swap_interval =
+ kMultiWindowSwapIntervalDefault;
if (gl::GetGLImplementation() == gl::kGLImplementationEGLGLES2) {
std::unique_ptr<gfx::VSyncProvider> vsync_provider;
@@ -49,23 +52,32 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
vsync_provider.reset(new gl::VSyncProviderWin(surface_handle));
if (gl::GLSurfaceEGL::IsDirectCompositionSupported()) {
- if (base::FeatureList::IsEnabled(switches::kDirectCompositionOverlays)) {
+ bool overlays_supported =
+ DirectCompositionSurfaceWin::AreOverlaysSupported();
+ UMA_HISTOGRAM_BOOLEAN("GPU.DirectComposition.OverlaysSupported",
+ overlays_supported);
+ if (overlays_supported) {
scoped_refptr<DirectCompositionSurfaceWin> egl_surface =
- make_scoped_refptr(
- new DirectCompositionSurfaceWin(delegate, surface_handle));
- if (!egl_surface->Initialize(std::move(vsync_provider)))
+ make_scoped_refptr(new DirectCompositionSurfaceWin(
+ std::move(vsync_provider), delegate, surface_handle));
+ if (!egl_surface->Initialize())
return nullptr;
surface = egl_surface;
} else {
- scoped_refptr<ChildWindowSurfaceWin> egl_surface = make_scoped_refptr(
- new ChildWindowSurfaceWin(delegate, surface_handle));
- if (!egl_surface->Initialize(std::move(vsync_provider)))
+ scoped_refptr<ChildWindowSurfaceWin> egl_surface =
+ make_scoped_refptr(new ChildWindowSurfaceWin(
+ std::move(vsync_provider), delegate, surface_handle));
+ if (!egl_surface->Initialize())
return nullptr;
surface = egl_surface;
}
} else {
surface = gl::init::CreateNativeViewGLSurfaceEGL(
surface_handle, std::move(vsync_provider));
+ // This is unnecessary with DirectComposition because that doesn't block
+ // swaps, but instead blocks the first draw into a surface during the next
+ // frame.
+ multi_window_swap_interval = kMultiWindowSwapIntervalForceZero;
if (!surface)
return nullptr;
}
@@ -75,8 +87,8 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
return nullptr;
}
- return scoped_refptr<gl::GLSurface>(
- new PassThroughImageTransportSurface(delegate, surface.get()));
+ return scoped_refptr<gl::GLSurface>(new PassThroughImageTransportSurface(
+ delegate, surface.get(), multi_window_swap_interval));
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
index f12b15922a5..a35c9ee9dc4 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
@@ -14,19 +14,30 @@
namespace gpu {
+namespace {
+// Number of swap generations before vsync is reenabled after we've stopped
+// doing multiple swaps per frame.
+const int kMultiWindowSwapEnableVSyncDelay = 60;
+
+int g_current_swap_generation_ = 0;
+int g_num_swaps_in_current_swap_generation_ = 0;
+int g_last_multi_window_swap_generation_ = 0;
+} // anonymous namespace
+
PassThroughImageTransportSurface::PassThroughImageTransportSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
- gl::GLSurface* surface)
+ gl::GLSurface* surface,
+ MultiWindowSwapInterval multi_window_swap_interval)
: GLSurfaceAdapter(surface),
delegate_(delegate),
- did_set_swap_interval_(false),
+ multi_window_swap_interval_(multi_window_swap_interval),
weak_ptr_factory_(this) {}
bool PassThroughImageTransportSurface::Initialize(
gl::GLSurfaceFormat format) {
// The surface is assumed to have already been initialized.
delegate_->SetLatencyInfoCallback(
- base::Bind(&PassThroughImageTransportSurface::SetLatencyInfo,
+ base::Bind(&PassThroughImageTransportSurface::AddLatencyInfo,
base::Unretained(this)));
return true;
}
@@ -110,18 +121,6 @@ void PassThroughImageTransportSurface::CommitOverlayPlanesAsync(
weak_ptr_factory_.GetWeakPtr(), base::Passed(&latency_info), callback));
}
-bool PassThroughImageTransportSurface::OnMakeCurrent(gl::GLContext* context) {
- if (!did_set_swap_interval_) {
- if (base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableGpuVsync))
- context->ForceSwapIntervalZero(true);
- else
- context->SetSwapInterval(1);
- did_set_swap_interval_ = true;
- }
- return true;
-}
-
PassThroughImageTransportSurface::~PassThroughImageTransportSurface() {
if (delegate_) {
delegate_->SetLatencyInfoCallback(
@@ -129,7 +128,7 @@ PassThroughImageTransportSurface::~PassThroughImageTransportSurface() {
}
}
-void PassThroughImageTransportSurface::SetLatencyInfo(
+void PassThroughImageTransportSurface::AddLatencyInfo(
const std::vector<ui::LatencyInfo>& latency_info) {
latency_info_.insert(latency_info_.end(), latency_info.begin(),
latency_info.end());
@@ -143,12 +142,49 @@ void PassThroughImageTransportSurface::SendVSyncUpdateIfAvailable() {
}
}
+void PassThroughImageTransportSurface::UpdateSwapInterval() {
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableGpuVsync)) {
+ gl::GLContext::GetCurrent()->ForceSwapIntervalZero(true);
+ return;
+ }
+
+ gl::GLContext::GetCurrent()->SetSwapInterval(1);
+
+ if (multi_window_swap_interval_ == kMultiWindowSwapIntervalForceZero) {
+ // This code is a simple way of enforcing that we only vsync if one surface
+ // is swapping per frame. This provides single window cases a stable refresh
+ // while allowing multi-window cases to not slow down due to multiple syncs
+ // on a single thread. A better way to fix this problem would be to have
+ // each surface present on its own thread.
+
+ if (g_current_swap_generation_ == swap_generation_) {
+ // No other surface has swapped since we swapped last time.
+ if (g_num_swaps_in_current_swap_generation_ > 1)
+ g_last_multi_window_swap_generation_ = g_current_swap_generation_;
+ g_num_swaps_in_current_swap_generation_ = 0;
+ g_current_swap_generation_++;
+ }
+
+ swap_generation_ = g_current_swap_generation_;
+ g_num_swaps_in_current_swap_generation_++;
+
+ bool should_override_vsync =
+ (g_num_swaps_in_current_swap_generation_ > 1) &&
+ (g_current_swap_generation_ - g_last_multi_window_swap_generation_ <
+ kMultiWindowSwapEnableVSyncDelay);
+ gl::GLContext::GetCurrent()->ForceSwapIntervalZero(should_override_vsync);
+ }
+}
+
std::unique_ptr<std::vector<ui::LatencyInfo>>
PassThroughImageTransportSurface::StartSwapBuffers() {
// GetVsyncValues before SwapBuffers to work around Mali driver bug:
// crbug.com/223558.
SendVSyncUpdateIfAvailable();
+ UpdateSwapInterval();
+
base::TimeTicks swap_time = base::TimeTicks::Now();
for (auto& latency : latency_info_) {
latency.AddLatencyNumberWithTimestamp(
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
index dd7686e9542..44ea2437afa 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
@@ -14,18 +14,27 @@
#include "base/memory/weak_ptr.h"
#include "gpu/ipc/service/image_transport_surface.h"
#include "gpu/ipc/service/image_transport_surface_delegate.h"
-#include "ui/events/latency_info.h"
#include "ui/gl/gl_surface.h"
+#include "ui/latency/latency_info.h"
namespace gpu {
+enum MultiWindowSwapInterval {
+ // Use the default swap interval of 1 even if multiple windows are swapping.
+ // This can reduce frame rate if the swap buffers calls block.
+ kMultiWindowSwapIntervalDefault,
+ // Force swap interval to 0 when multiple windows are swapping.
+ kMultiWindowSwapIntervalForceZero
+};
+
// An implementation of ImageTransportSurface that implements GLSurface through
// GLSurfaceAdapter, thereby forwarding GLSurface methods through to it.
class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
public:
PassThroughImageTransportSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
- gl::GLSurface* surface);
+ gl::GLSurface* surface,
+ MultiWindowSwapInterval multi_window_swap_interval);
// GLSurface implementation.
bool Initialize(gl::GLSurfaceFormat format) override;
@@ -43,7 +52,6 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
gfx::SwapResult CommitOverlayPlanes() override;
void CommitOverlayPlanesAsync(
const SwapCompletionCallback& callback) override;
- bool OnMakeCurrent(gl::GLContext* context) override;
private:
~PassThroughImageTransportSurface() override;
@@ -52,7 +60,11 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
// the browser.
void SendVSyncUpdateIfAvailable();
- void SetLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info);
+ void UpdateSwapInterval();
+
+ // Add |latency_info| to be reported and augumented with GPU latency
+ // components next time there is a GPU buffer swap.
+ void AddLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info);
std::unique_ptr<std::vector<ui::LatencyInfo>> StartSwapBuffers();
void FinishSwapBuffers(
std::unique_ptr<std::vector<ui::LatencyInfo>> latency_info,
@@ -63,8 +75,11 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
gfx::SwapResult result);
base::WeakPtr<ImageTransportSurfaceDelegate> delegate_;
- bool did_set_swap_interval_;
std::vector<ui::LatencyInfo> latency_info_;
+ MultiWindowSwapInterval multi_window_swap_interval_ =
+ kMultiWindowSwapIntervalDefault;
+ int swap_generation_ = 0;
+
base::WeakPtrFactory<PassThroughImageTransportSurface> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(PassThroughImageTransportSurface);
diff --git a/chromium/gpu/ipc/service/stream_texture_android.cc b/chromium/gpu/ipc/service/stream_texture_android.cc
index 173dab8284f..dea1f36a401 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.cc
+++ b/chromium/gpu/ipc/service/stream_texture_android.cc
@@ -68,7 +68,7 @@ StreamTexture::StreamTexture(GpuCommandBufferStub* owner_stub,
weak_factory_(this) {
owner_stub->AddDestructionObserver(this);
memset(current_matrix_, 0, sizeof(current_matrix_));
- owner_stub->channel()->AddRoute(route_id, owner_stub->stream_id(), this);
+ owner_stub->channel()->AddRoute(route_id, owner_stub->sequence_id(), this);
surface_texture_->SetFrameAvailableCallback(base::Bind(
&StreamTexture::OnFrameAvailable, weak_factory_.GetWeakPtr()));
}
diff --git a/chromium/gpu/khronos_glcts_support/BUILD.gn b/chromium/gpu/khronos_glcts_support/BUILD.gn
index deb37fdb993..aa7b2c8dc7a 100644
--- a/chromium/gpu/khronos_glcts_support/BUILD.gn
+++ b/chromium/gpu/khronos_glcts_support/BUILD.gn
@@ -958,6 +958,7 @@ if (!is_android) {
test("khronos_glcts_test") {
sources = [
"khronos_glcts_test.cc",
+ "khronos_glcts_test.h",
]
deps = [
diff --git a/chromium/gpu/skia_bindings/BUILD.gn b/chromium/gpu/skia_bindings/BUILD.gn
index f9d96c38576..6ebdd9b38ef 100644
--- a/chromium/gpu/skia_bindings/BUILD.gn
+++ b/chromium/gpu/skia_bindings/BUILD.gn
@@ -12,6 +12,7 @@ source_set("skia_bindings") {
deps = [
"//base",
"//gpu/command_buffer/client:gles2_interface",
+ "//gpu/ipc/common:interfaces",
"//skia",
]
}
diff --git a/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.cc b/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.cc
index 1f161413640..a49a81a0c9d 100644
--- a/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.cc
+++ b/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.cc
@@ -10,32 +10,50 @@
#include "base/lazy_instance.h"
#include "base/macros.h"
+#include "base/sys_info.h"
#include "base/trace_event/trace_event.h"
#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu/command_buffer/common/capabilities.h"
#include "gpu/skia_bindings/gl_bindings_skia_cmd_buffer.h"
#include "third_party/skia/include/gpu/GrContext.h"
+#include "third_party/skia/include/gpu/GrContextOptions.h"
#include "third_party/skia/include/gpu/gl/GrGLInterface.h"
namespace skia_bindings {
GrContextForGLES2Interface::GrContextForGLES2Interface(
- gpu::gles2::GLES2Interface* gl) {
+ gpu::gles2::GLES2Interface* gl,
+ const gpu::Capabilities& capabilities) {
+ GrContextOptions options;
+ options.fAvoidStencilBuffers = capabilities.avoid_stencil_buffers;
sk_sp<GrGLInterface> interface(
skia_bindings::CreateGLES2InterfaceBindings(gl));
- gr_context_ = sk_sp<GrContext>(
- GrContext::Create(kOpenGL_GrBackend,
- // GrContext takes ownership of |interface|.
- reinterpret_cast<GrBackendContext>(interface.get())));
+ gr_context_ = sk_sp<GrContext>(GrContext::Create(
+ kOpenGL_GrBackend,
+ // GrContext takes ownership of |interface|.
+ reinterpret_cast<GrBackendContext>(interface.get()), options));
if (gr_context_) {
// The limit of the number of GPU resources we hold in the GrContext's
// GPU cache.
static const int kMaxGaneshResourceCacheCount = 16384;
// The limit of the bytes allocated toward GPU resources in the GrContext's
// GPU cache.
+ static const size_t kMaxLowEndGaneshResourceCacheBytes = 48 * 1024 * 1024;
static const size_t kMaxGaneshResourceCacheBytes = 96 * 1024 * 1024;
+ static const size_t kMaxHighEndGaneshResourceCacheBytes = 256 * 1024 * 1024;
+ static const int64_t kHighEndMemoryThreshold = (int64_t)4096 * 1024 * 1024;
+ static const int64_t kLowEndMemoryThreshold = (int64_t)512 * 1024 * 1024;
+
+ size_t max_ganesh_resource_cache_bytes = kMaxGaneshResourceCacheBytes;
+ if (base::SysInfo::AmountOfPhysicalMemory() <= kLowEndMemoryThreshold) {
+ max_ganesh_resource_cache_bytes = kMaxLowEndGaneshResourceCacheBytes;
+ } else if (base::SysInfo::AmountOfPhysicalMemory() >=
+ kHighEndMemoryThreshold) {
+ max_ganesh_resource_cache_bytes = kMaxHighEndGaneshResourceCacheBytes;
+ }
gr_context_->setResourceCacheLimits(kMaxGaneshResourceCacheCount,
- kMaxGaneshResourceCacheBytes);
+ max_ganesh_resource_cache_bytes);
}
}
diff --git a/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.h b/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.h
index 3813fb47bcd..3771126ce4c 100644
--- a/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.h
+++ b/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.h
@@ -11,6 +11,8 @@
class GrContext;
namespace gpu {
+struct Capabilities;
+
namespace gles2 {
class GLES2Interface;
}
@@ -23,7 +25,8 @@ namespace skia_bindings {
// is alive.
class GrContextForGLES2Interface {
public:
- explicit GrContextForGLES2Interface(gpu::gles2::GLES2Interface* gl);
+ explicit GrContextForGLES2Interface(gpu::gles2::GLES2Interface* gl,
+ const gpu::Capabilities& capabilities);
virtual ~GrContextForGLES2Interface();
GrContext* get() { return gr_context_.get(); }
diff --git a/chromium/gpu/test_message_loop_type.h b/chromium/gpu/test_message_loop_type.h
deleted file mode 100644
index b2d97ff82a8..00000000000
--- a/chromium/gpu/test_message_loop_type.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_TEST_MESSAGE_LOOP_TYPE_H_
-#define GPU_TEST_MESSAGE_LOOP_TYPE_H_
-
-#include "base/message_loop/message_loop.h"
-
-#if defined(USE_OZONE)
-#include "ui/ozone/public/ozone_platform.h"
-#endif
-
-namespace gpu {
-namespace test {
-
-// Returns the MessageLoop type needed for GPU tests. The Ozone platform may not
-// work with TYPE_DEFAULT and this needs to be checked at runtime.
-inline base::MessageLoop::Type GetMessageLoopTypeForGpu() {
-#if defined(USE_OZONE)
- return ui::OzonePlatform::EnsureInstance()->GetMessageLoopTypeForGpu();
-#else
- return base::MessageLoop::TYPE_DEFAULT;
-#endif
-}
-
-} // namespace test
-} // namespace gpu
-
-#endif // GPU_TEST_MESSAGE_LOOP_TYPE_H_
diff --git a/chromium/gpu/tools/compositor_model_bench/BUILD.gn b/chromium/gpu/tools/compositor_model_bench/BUILD.gn
index 94e57dadd73..e6c89f1ec0a 100644
--- a/chromium/gpu/tools/compositor_model_bench/BUILD.gn
+++ b/chromium/gpu/tools/compositor_model_bench/BUILD.gn
@@ -9,10 +9,15 @@ if (is_linux && !is_chromeos && current_cpu != "arm" && use_x11) {
sources = [
"compositor_model_bench.cc",
"forward_render_model.cc",
+ "forward_render_model.h",
"render_model_utils.cc",
+ "render_model_utils.h",
"render_models.cc",
+ "render_models.h",
"render_tree.cc",
+ "render_tree.h",
"shaders.cc",
+ "shaders.h",
]
libs = [ "GL" ]
diff --git a/chromium/gpu/vulkan/BUILD.gn b/chromium/gpu/vulkan/BUILD.gn
index 39ed1e06682..a901159e8c6 100644
--- a/chromium/gpu/vulkan/BUILD.gn
+++ b/chromium/gpu/vulkan/BUILD.gn
@@ -2,13 +2,16 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("features.gni")
import("//build/config/ui.gni")
+import("//build/buildflag_header.gni")
import("//testing/test.gni")
-config("vulkan_config") {
- defines = [ "ENABLE_VULKAN" ]
+# Generate a buildflag header for compile-time checking of Vulkan support.
+buildflag_header("features") {
+ header = "features.h"
+ flags = [ "ENABLE_VULKAN=$enable_vulkan" ]
}
-
if (enable_vulkan) {
vulkan_lib_dir = getenv("VULKAN_SDK") + "/lib"
component("vulkan") {
@@ -50,10 +53,7 @@ if (enable_vulkan) {
configs += [ "//build/config:precompiled_headers" ]
defines = [ "VULKAN_IMPLEMENTATION" ]
- all_dependent_configs = [
- ":vulkan_config",
- "//third_party/vulkan:vulkan_headers",
- ]
+ all_dependent_configs = [ "//third_party/vulkan:vulkan_headers" ]
libs = [ "vulkan" ]
if (current_cpu == "x64") {
diff --git a/chromium/gpu/vulkan/OWNERS b/chromium/gpu/vulkan/OWNERS
index 905e1f4ff9d..211e0383d05 100644
--- a/chromium/gpu/vulkan/OWNERS
+++ b/chromium/gpu/vulkan/OWNERS
@@ -1,2 +1,4 @@
piman@chromium.org
-dyen@chromium.org
+vmiura@chromium.org
+
+# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/vulkan/features.gni b/chromium/gpu/vulkan/features.gni
new file mode 100644
index 00000000000..f02ac88abff
--- /dev/null
+++ b/chromium/gpu/vulkan/features.gni
@@ -0,0 +1,10 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Features used by targets inside and outside of |gpu/vulkan|.
+# For details see declare_args() in build/config/BUILDCONFIG.gn.
+declare_args() {
+ # Enable experimental vulkan backend.
+ enable_vulkan = false
+}