Diffstat (limited to 'chromium/gpu/ipc')
-rw-r--r--  chromium/gpu/ipc/client/command_buffer_proxy_impl.cc  6
-rw-r--r--  chromium/gpu/ipc/client/command_buffer_proxy_impl.h  2
-rw-r--r--  chromium/gpu/ipc/client/gpu_channel_host.cc  13
-rw-r--r--  chromium/gpu/ipc/client/gpu_channel_host.h  2
-rw-r--r--  chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc  1
-rw-r--r--  chromium/gpu/ipc/client/image_decode_accelerator_proxy.h  5
-rw-r--r--  chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc  152
-rw-r--r--  chromium/gpu/ipc/common/BUILD.gn  50
-rw-r--r--  chromium/gpu/ipc/common/OWNERS  3
-rw-r--r--  chromium/gpu/ipc/common/PRESUBMIT.py  55
-rwxr-xr-x  chromium/gpu/ipc/common/generate_vulkan_types.py  598
-rw-r--r--  chromium/gpu/ipc/common/gpu_info.mojom  7
-rw-r--r--  chromium/gpu/ipc/common/gpu_info_mojom_traits.cc  11
-rw-r--r--  chromium/gpu/ipc/common/gpu_info_mojom_traits.h  11
-rw-r--r--  chromium/gpu/ipc/common/gpu_messages.h  5
-rw-r--r--  chromium/gpu/ipc/common/gpu_preferences.mojom  12
-rw-r--r--  chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h  52
-rw-r--r--  chromium/gpu/ipc/common/gpu_watchdog_timeout.h  2
-rw-r--r--  chromium/gpu/ipc/common/typemaps.gni  2
-rw-r--r--  chromium/gpu/ipc/common/vulkan_info.mojom  26
-rw-r--r--  chromium/gpu/ipc/common/vulkan_info.typemap  16
-rw-r--r--  chromium/gpu/ipc/common/vulkan_info_mojom_traits.h  118
-rw-r--r--  chromium/gpu/ipc/common/vulkan_types.h  10
-rw-r--r--  chromium/gpu/ipc/common/vulkan_types.mojom  232
-rw-r--r--  chromium/gpu/ipc/common/vulkan_types.typemap  28
-rw-r--r--  chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc  510
-rw-r--r--  chromium/gpu/ipc/common/vulkan_types_mojom_traits.h  951
-rw-r--r--  chromium/gpu/ipc/host/shader_disk_cache.cc  5
-rw-r--r--  chromium/gpu/ipc/in_process_command_buffer.cc  18
-rw-r--r--  chromium/gpu/ipc/in_process_command_buffer.h  1
-rw-r--r--  chromium/gpu/ipc/in_process_gpu_thread_holder.cc  4
-rw-r--r--  chromium/gpu/ipc/service/BUILD.gn  8
-rw-r--r--  chromium/gpu/ipc/service/command_buffer_stub.cc  2
-rw-r--r--  chromium/gpu/ipc/service/gles2_command_buffer_stub.cc  16
-rw-r--r--  chromium/gpu/ipc/service/gles2_command_buffer_stub.h  3
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager.cc  18
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager.h  10
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager_delegate.h  3
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_test_common.cc  14
-rw-r--r--  chromium/gpu/ipc/service/gpu_init.cc  165
-rw-r--r--  chromium/gpu/ipc/service/gpu_init.h  2
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc  1
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc  6
-rw-r--r--  chromium/gpu/ipc/service/gpu_watchdog_thread.cc  16
-rw-r--r--  chromium/gpu/ipc/service/gpu_watchdog_thread.h  15
-rw-r--r--  chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc  128
-rw-r--r--  chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc  517
-rw-r--r--  chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h  133
-rw-r--r--  chromium/gpu/ipc/service/image_decode_accelerator_stub.cc  93
-rw-r--r--  chromium/gpu/ipc/service/image_decode_accelerator_stub.h  21
-rw-r--r--  chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc  159
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_delegate.h  3
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_linux.cc  4
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h  2
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm  3
-rw-r--r--  chromium/gpu/ipc/service/pass_through_image_transport_surface.cc  73
-rw-r--r--  chromium/gpu/ipc/service/pass_through_image_transport_surface.h  2
-rw-r--r--  chromium/gpu/ipc/service/raster_command_buffer_stub.cc  7
-rw-r--r--  chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc  2
59 files changed, 3947 insertions, 387 deletions
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
index 1ea1ee14598..2ba4395ef04 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
@@ -38,6 +38,7 @@
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_fence.h"
#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gpu_preference.h"
namespace gpu {
@@ -190,9 +191,10 @@ void CommandBufferProxyImpl::OnConsoleMessage(
message.id);
}
-void CommandBufferProxyImpl::OnGpuSwitched() {
+void CommandBufferProxyImpl::OnGpuSwitched(
+ gl::GpuPreference active_gpu_heuristic) {
if (gpu_control_client_)
- gpu_control_client_->OnGpuSwitched();
+ gpu_control_client_->OnGpuSwitched(active_gpu_heuristic);
}
void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
index 67cecbdf8ee..b2be647ee62 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
@@ -178,7 +178,7 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
void OnDestroyed(gpu::error::ContextLostReason reason,
gpu::error::Error error);
void OnConsoleMessage(const GPUCommandBufferConsoleMessage& message);
- void OnGpuSwitched();
+ void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic);
void OnSignalAck(uint32_t id, const CommandBuffer::State& state);
void OnSwapBuffersCompleted(const SwapBuffersCompleteParams& params);
void OnBufferPresented(uint64_t swap_id,
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.cc b/chromium/gpu/ipc/client/gpu_channel_host.cc
index 4b8c1374bb0..505affa44fa 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.cc
+++ b/chromium/gpu/ipc/client/gpu_channel_host.cc
@@ -21,6 +21,7 @@
#include "gpu/ipc/common/gpu_watchdog_timeout.h"
#include "ipc/ipc_channel_mojo.h"
#include "ipc/ipc_sync_message.h"
+#include "mojo/public/cpp/bindings/lib/message_quota_checker.h"
#include "url/gurl.h"
using base::AutoLock;
@@ -274,11 +275,13 @@ operator=(OrderingBarrierInfo&&) = default;
GpuChannelHost::Listener::Listener(
mojo::ScopedMessagePipeHandle handle,
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner)
- : channel_(IPC::ChannelMojo::Create(std::move(handle),
- IPC::Channel::MODE_CLIENT,
- this,
- io_task_runner,
- base::ThreadTaskRunnerHandle::Get())) {
+ : channel_(IPC::ChannelMojo::Create(
+ std::move(handle),
+ IPC::Channel::MODE_CLIENT,
+ this,
+ io_task_runner,
+ base::ThreadTaskRunnerHandle::Get(),
+ mojo::internal::MessageQuotaChecker::MaybeCreate())) {
DCHECK(channel_);
DCHECK(io_task_runner->BelongsToCurrentThread());
bool result = channel_->Connect();
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.h b/chromium/gpu/ipc/client/gpu_channel_host.h
index c6e46a23fe3..1af60880511 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.h
+++ b/chromium/gpu/ipc/client/gpu_channel_host.h
@@ -100,7 +100,7 @@ class GPU_EXPORT GpuChannelHost
// Ensure that the all deferred messages prior upto |deferred_message_id| have
// been flushed. Pass UINT32_MAX to force all pending deferred messages to be
// flushed.
- void EnsureFlush(uint32_t deferred_message_id);
+ virtual void EnsureFlush(uint32_t deferred_message_id);
// Verify that the all deferred messages prior upto |deferred_message_id| have
// reached the service. Pass UINT32_MAX to force all pending deferred messages
diff --git a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
index 76029007347..630cd3ddb45 100644
--- a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
+++ b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
@@ -35,6 +35,7 @@ bool IsSupportedImageSize(
image_size = image_data->coded_size.value();
else
image_size = image_data->image_size;
+ DCHECK(!image_size.IsEmpty());
return image_size.width() >=
supported_profile.min_encoded_dimensions.width() &&
diff --git a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h
index a47df5d6957..1fd6f911f23 100644
--- a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h
+++ b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h
@@ -9,6 +9,7 @@
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "gpu/command_buffer/client/image_decode_accelerator_interface.h"
+#include "gpu/gpu_export.h"
namespace gpu {
class GpuChannelHost;
@@ -45,7 +46,8 @@ class GpuChannelHost;
// Objects of this class are thread-safe.
//
// TODO(andrescj): actually put the decoder's capabilities in GpuInfo.
-class ImageDecodeAcceleratorProxy : public ImageDecodeAcceleratorInterface {
+class GPU_EXPORT ImageDecodeAcceleratorProxy
+ : public ImageDecodeAcceleratorInterface {
public:
ImageDecodeAcceleratorProxy(GpuChannelHost* host, int32_t route_id);
~ImageDecodeAcceleratorProxy() override;
@@ -53,7 +55,6 @@ class ImageDecodeAcceleratorProxy : public ImageDecodeAcceleratorInterface {
// Determines if |image_metadata| corresponds to an image that can be decoded
// using hardware decode acceleration. The ScheduleImageDecode() method should
// only be called for images for which IsImageSupported() returns true.
- // Otherwise, the client faces a GPU channel teardown if the decode fails.
bool IsImageSupported(
const cc::ImageHeaderMetadata* image_metadata) const override;
diff --git a/chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc b/chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc
new file mode 100644
index 00000000000..e6bb414290a
--- /dev/null
+++ b/chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc
@@ -0,0 +1,152 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/stl_util.h"
+#include "base/test/task_environment.h"
+#include "gpu/ipc/client/gpu_channel_host.h"
+#include "gpu/ipc/client/image_decode_accelerator_proxy.h"
+#include "gpu/ipc/common/command_buffer_id.h"
+#include "gpu/ipc/common/gpu_messages.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gfx/color_space.h"
+
+using ::testing::DeleteArg;
+using ::testing::DoAll;
+using ::testing::Return;
+using ::testing::StrictMock;
+
+namespace gpu {
+
+namespace {
+constexpr int kChannelId = 5;
+constexpr int32_t kRasterCmdBufferRouteId = 3;
+constexpr gfx::Size kOutputSize(2, 2);
+
+MATCHER_P(IpcMessageEqualTo, expected, "") {
+ // Get params from actual IPC message.
+ GpuChannelMsg_ScheduleImageDecode::Param actual_param_tuple;
+ if (!GpuChannelMsg_ScheduleImageDecode::Read(arg, &actual_param_tuple))
+ return false;
+
+ GpuChannelMsg_ScheduleImageDecode_Params params =
+ std::get<0>(actual_param_tuple);
+ const uint64_t release_count = std::get<1>(actual_param_tuple);
+
+ // Get params from expected IPC Message.
+ GpuChannelMsg_ScheduleImageDecode::Param expected_param_tuple;
+ if (!GpuChannelMsg_ScheduleImageDecode::Read(expected, &expected_param_tuple))
+ return false;
+
+ GpuChannelMsg_ScheduleImageDecode_Params expected_params =
+ std::get<0>(expected_param_tuple);
+ const uint64_t expected_release_count = std::get<1>(expected_param_tuple);
+
+ // Compare all relevant fields.
+ return arg->routing_id() == expected->routing_id() &&
+ release_count == expected_release_count &&
+ params.encoded_data == expected_params.encoded_data &&
+ params.output_size == expected_params.output_size &&
+ params.raster_decoder_route_id ==
+ expected_params.raster_decoder_route_id &&
+ params.transfer_cache_entry_id ==
+ expected_params.transfer_cache_entry_id &&
+ params.discardable_handle_shm_id ==
+ expected_params.discardable_handle_shm_id &&
+ params.discardable_handle_shm_offset ==
+ expected_params.discardable_handle_shm_offset &&
+ params.discardable_handle_release_count ==
+ expected_params.discardable_handle_release_count &&
+ params.target_color_space == expected_params.target_color_space &&
+ params.needs_mips == expected_params.needs_mips;
+}
+
+} // namespace
+
+class MockGpuChannelHost : public GpuChannelHost {
+ public:
+ MockGpuChannelHost()
+ : GpuChannelHost(kChannelId,
+ GPUInfo(),
+ GpuFeatureInfo(),
+ mojo::ScopedMessagePipeHandle(mojo::MessagePipeHandle(
+ mojo::kInvalidHandleValue))) {}
+
+ MOCK_METHOD1(Send, bool(IPC::Message*));
+
+ protected:
+ ~MockGpuChannelHost() override {}
+};
+
+class ImageDecodeAcceleratorProxyTest : public ::testing::Test {
+ public:
+ ImageDecodeAcceleratorProxyTest()
+ : gpu_channel_host_(
+ base::MakeRefCounted<StrictMock<MockGpuChannelHost>>()),
+ proxy_(gpu_channel_host_.get(),
+ (int32_t)GpuChannelReservedRoutes::kImageDecodeAccelerator) {}
+
+ ~ImageDecodeAcceleratorProxyTest() override = default;
+
+ protected:
+ base::test::SingleThreadTaskEnvironment task_environment_;
+ scoped_refptr<StrictMock<MockGpuChannelHost>> gpu_channel_host_;
+ ImageDecodeAcceleratorProxy proxy_;
+};
+
+TEST_F(ImageDecodeAcceleratorProxyTest, ScheduleImageDecodeSendsMessage) {
+ const uint8_t image[4] = {1, 2, 3, 4};
+ base::span<const uint8_t> encoded_data =
+ base::span<const uint8_t>(image, base::size(image));
+
+ const gfx::ColorSpace color_space = gfx::ColorSpace::CreateSRGB();
+
+ GpuChannelMsg_ScheduleImageDecode_Params expected_params;
+ expected_params.encoded_data =
+ std::vector<uint8_t>(encoded_data.cbegin(), encoded_data.cend());
+ expected_params.output_size = kOutputSize;
+ expected_params.raster_decoder_route_id = kRasterCmdBufferRouteId;
+ expected_params.transfer_cache_entry_id = 1u;
+ expected_params.discardable_handle_shm_id = 2;
+ expected_params.discardable_handle_shm_offset = 3u;
+ expected_params.discardable_handle_release_count = 4u;
+ expected_params.target_color_space = color_space;
+ expected_params.needs_mips = false;
+
+ GpuChannelMsg_ScheduleImageDecode expected_message(
+ static_cast<int32_t>(GpuChannelReservedRoutes::kImageDecodeAccelerator),
+ std::move(expected_params), /*release_count=*/1u);
+
+ {
+ EXPECT_CALL(*gpu_channel_host_, Send(IpcMessageEqualTo(&expected_message)))
+ .Times(1)
+ .WillOnce(DoAll(DeleteArg<0>(),
+ Return(false))); // Delete object passed to Send.
+ }
+
+ SyncToken token = proxy_.ScheduleImageDecode(
+ encoded_data, kOutputSize,
+ CommandBufferIdFromChannelAndRoute(kChannelId, kRasterCmdBufferRouteId),
+ /*transfer_cache_entry_id=*/1u,
+ /*discardable_handle_shm_id=*/2,
+ /*discardable_handle_shm_offset=*/3u,
+ /*discardable_handle_release_count=*/4u, color_space,
+ /*needs_mips=*/false);
+
+ task_environment_.RunUntilIdle();
+ testing::Mock::VerifyAndClearExpectations(gpu_channel_host_.get());
+
+ EXPECT_EQ(ChannelIdFromCommandBufferId(token.command_buffer_id()),
+ kChannelId);
+ EXPECT_EQ(
+ RouteIdFromCommandBufferId(token.command_buffer_id()),
+ static_cast<int32_t>(GpuChannelReservedRoutes::kImageDecodeAccelerator));
+ EXPECT_EQ(token.release_count(), 1u);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn
index 9ff12f1a402..4f885a6d4eb 100644
--- a/chromium/gpu/ipc/common/BUILD.gn
+++ b/chromium/gpu/ipc/common/BUILD.gn
@@ -3,6 +3,7 @@
# found in the LICENSE file.
import("//build/config/ui.gni")
+import("//gpu/vulkan/features.gni")
import("//mojo/public/tools/bindings/mojom.gni")
import("//ui/ozone/ozone.gni")
@@ -199,7 +200,19 @@ component("vulkan_ycbcr_info") {
configs += [ "//gpu:gpu_implementation" ]
}
+source_set("vulkan_types") {
+ sources = [
+ "vulkan_types.h",
+ ]
+ public_deps = [
+ "//ui/gfx",
+ ]
+ all_dependent_configs = [ "//third_party/vulkan:vulkan_config" ]
+ configs += [ "//gpu:gpu_implementation" ]
+}
+
mojom("interfaces") {
+ generate_java = true
sources = [
"capabilities.mojom",
"context_result.mojom",
@@ -221,9 +234,14 @@ mojom("interfaces") {
"//ui/gfx/geometry/mojom",
"//ui/gfx/mojom",
]
+ if (enable_vulkan) {
+ public_deps += [ ":vulkan_interface" ]
+ enabled_features = [ "supports_vulkan" ]
+ }
}
mojom("gpu_preferences_interface") {
+ generate_java = true
sources = [
"gpu_preferences.mojom",
]
@@ -239,6 +257,20 @@ mojom("gpu_preferences_interface") {
}
}
+mojom("vulkan_interface") {
+ generate_java = true
+ sources = [
+ "vulkan_info.mojom",
+ "vulkan_types.mojom",
+ ]
+
+ public_deps = [
+ "//mojo/public/mojom/base",
+ ]
+
+ js_generate_struct_deserializers = true
+}
+
mojom("test_interfaces") {
testonly = true
sources = [
@@ -249,6 +281,21 @@ mojom("test_interfaces") {
":gpu_preferences_interface",
":interfaces",
]
+
+ if (enable_vulkan) {
+ public_deps += [ ":vulkan_interface" ]
+ }
+}
+
+source_set("vulkan_types_mojom_traits") {
+ sources = [
+ "vulkan_types_mojom_traits.h",
+ ]
+
+ deps = [
+ ":vulkan_interface_shared_cpp_sources",
+ ":vulkan_types",
+ ]
}
source_set("mojom_traits") {
@@ -271,4 +318,7 @@ source_set("mojom_traits") {
if (is_android) {
sources += [ "vulkan_ycbcr_info_mojom_traits.h" ]
}
+ if (enable_vulkan) {
+ deps += [ ":vulkan_types_mojom_traits" ]
+ }
}
diff --git a/chromium/gpu/ipc/common/OWNERS b/chromium/gpu/ipc/common/OWNERS
index 94f052bc57f..02933e17756 100644
--- a/chromium/gpu/ipc/common/OWNERS
+++ b/chromium/gpu/ipc/common/OWNERS
@@ -3,6 +3,9 @@
set noparent
file://ipc/SECURITY_OWNERS
+per-file generate_vulkan_types.py=file://gpu/OWNERS
+per-file gpu_watchdog_timeout.h=file://gpu/OWNERS
+
# The following lines are redundant, they're just to silence the presubmit
per-file *_messages*.h=set noparent
per-file *_messages*.h=file://ipc/SECURITY_OWNERS
diff --git a/chromium/gpu/ipc/common/PRESUBMIT.py b/chromium/gpu/ipc/common/PRESUBMIT.py
new file mode 100644
index 00000000000..d30db350f6f
--- /dev/null
+++ b/chromium/gpu/ipc/common/PRESUBMIT.py
@@ -0,0 +1,55 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Enforces Vulkan types autogen matches script output.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details on the presubmit API built into depot_tools.
+"""
+
+import os.path
+
+
+def CommonChecks(input_api, output_api):
+ generating_files = input_api.AffectedFiles(
+ file_filter=lambda x: os.path.basename(x.LocalPath()) in [
+ 'generate_vulkan_types.py'])
+ generated_files = input_api.AffectedFiles(
+ file_filter=lambda x: os.path.basename(x.LocalPath()) in [
+ 'vulkan_types.mojom', 'vulkan_types_mojom_traits.h',
+ 'vulkan_types_mojom_traits.cc', 'vulkan_types.typemap'
+ ])
+
+
+ messages = []
+
+ if generated_files and not generating_files:
+ long_text = 'Changed files:\n'
+ for file in generated_files:
+ long_text += file.LocalPath() + '\n'
+ long_text += '\n'
+ messages.append(output_api.PresubmitError(
+ 'Vulkan types generated files changed but the generator '
+ 'did not.', long_text=long_text))
+
+ with input_api.temporary_directory() as temp_dir:
+ commands = []
+ if generating_files:
+ commands.append(input_api.Command(name='generate_vulkan_types',
+ cmd=[input_api.python_executable,
+ 'generate_vulkan_types.py',
+ '--check',
+ '--output-dir=' + temp_dir],
+ kwargs={},
+ message=output_api.PresubmitError))
+ if commands:
+ messages.extend(input_api.RunTests(commands))
+
+ return messages
+
+def CheckChangeOnUpload(input_api, output_api):
+ return CommonChecks(input_api, output_api)
+
+def CheckChangeOnCommit(input_api, output_api):
+ return CommonChecks(input_api, output_api)
diff --git a/chromium/gpu/ipc/common/generate_vulkan_types.py b/chromium/gpu/ipc/common/generate_vulkan_types.py
new file mode 100755
index 00000000000..ef27fab9b6e
--- /dev/null
+++ b/chromium/gpu/ipc/common/generate_vulkan_types.py
@@ -0,0 +1,598 @@
+#!/usr/bin/env python
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import filecmp
+import optparse
+import os.path
+import platform
+import re
+import subprocess
+import sys
+
+_VULKAN_HEADER_FILE = "third_party/vulkan/include/vulkan/vulkan_core.h"
+
+_STRUCTS = [
+ "VkExtensionProperties",
+ "VkLayerProperties",
+ "VkPhysicalDeviceProperties",
+ "VkPhysicalDeviceFeatures",
+ "VkQueueFamilyProperties",
+]
+
+_SELF_LOCATION = os.path.dirname(os.path.abspath(__file__))
+
+_MOJO_TYPES = set([
+ "uint8",
+ "uint16",
+ "uint32",
+ "int8",
+ "int16",
+ "int32",
+ "float",
+ "string",
+])
+
+_VULKAN_BASIC_TYPE_MAP = set([
+ "uint8_t",
+ "uint16_t",
+ "uint32_t",
+ "uint64_t",
+ "int8_t",
+ "int16_t",
+ "int32_t",
+ "int64_t",
+ "size_t",
+ "VkBool32",
+ "float",
+ "char",
+])
+
+# types to mojo type
+_type_map = {
+ "uint8_t" : "uint8",
+ "uint16_t" : "uint16",
+ "uint32_t" : "uint32",
+ "uint64_t" : "uint64",
+ "int8_t" : "int8",
+ "int16_t" : "int16",
+ "int32_t" : "int32",
+ "int64_t" : "int64",
+ "size_t" : "uint64",
+ "VkBool32" : "bool",
+ "float" : "float",
+ "char" : "char",
+}
+
+_structs = {}
+_enums = {}
+_defines = {}
+_handles = set([])
+_generated_types = []
+
+
+def ValueNameToVALUE_NAME(name):
+ return re.sub(
+ r'(?<=[a-z])[A-Z]|(?<!^)[A-Z](?=[a-z])', r"_\g<0>", name).upper()
+
+
+def ParseHandle(line):
+ if line.startswith("VK_DEFINE_HANDLE("):
+ name = line[len("VK_DEFINE_HANDLE("):-1]
+ elif line.startswith("VK_DEFINE_NON_DISPATCHABLE_HANDLE("):
+ name = line[len("VK_DEFINE_NON_DISPATCHABLE_HANDLE("):-1]
+ elif line.startswith("VK_DEFINE_DISPATCHABLE_HANDLE("):
+ name = line[len("VK_DEFINE_DISPATCHABLE_HANDLE("):-1]
+ else:
+ return
+ _handles.add(name)
+
+
+def ParseTypedef(line):
+ # typedef Type1 Type1;
+ line = line.rstrip(';')
+ line = line.split()
+ if len(line) == 3:
+ typedef, t1, t2 = line
+ assert typedef == "typedef"
+ # We would like to use bool instead uint32 for VkBool32
+ if t2 == "VkBool32":
+ return
+ if t1 in _type_map:
+ _type_map[t2] = _type_map[t1]
+ else:
+ assert t1 in _structs or t1 in _enums or t1 in _handles, \
+ "Undefined type '%s'" % t1
+ else:
+ pass
+ # skip typdef for function pointer
+
+
+def ParseEnum(line, header_file):
+ # typedef enum kName {
+ # ...
+ # } kName;
+ name = line.split()[2]
+
+ # Skip VkResult and NameBits
+ if name == "VkResult":
+ value_name_prefix = "VK"
+ elif name.endswith("FlagBits"):
+ value_name_prefix = ValueNameToVALUE_NAME(name[:-len("FlagBits")])
+ elif name.endswith("FlagBitsKHR"):
+ value_name_prefix = ValueNameToVALUE_NAME(name[:-len("FlagBitsKHR")])
+ else:
+ value_name_prefix = ValueNameToVALUE_NAME(name)
+
+ values = []
+ while True:
+ line = header_file.readline().strip()
+ # } kName;
+ if line == "} %s;" % name:
+ break
+ # VK_NAME = value,
+ value_name, value = line.rstrip(',').split(" = ")
+ if not value.isdigit():
+ # Ignore VK_NAME_BEGIN_RANGE
+ # Ignore VK_NAME_END_RANGE
+ # Ignore VK_NAME_RANGE_SIZE
+ # Ignore VK_NAME_MAX_ENUM = 0x7FFFFFFF
+ continue
+ assert len(value_name_prefix) + 1 < len(value_name), \
+ "Wrong enum value name `%s`" % value_name
+ mojom_value_name = value_name[len(value_name_prefix) + 1:]
+ values.append((value_name, value, mojom_value_name))
+ assert name not in _enums, "enum '%s' has been defined." % name
+ _enums[name] = values
+
+
+def ParseStruct(line, header_file):
+ # typedef struct kName {
+ # ...
+ # } kName;
+ name = line.split()[2]
+
+ fields = []
+ while True:
+ line = header_file.readline().strip()
+ # } kName;
+ if line == "} %s;" % name:
+ break
+ # type name;
+ # const type name;
+ # type name[L];
+ line = line.rstrip(";")
+ field_type, field_name = line.rsplit(None, 1)
+ array_len = None
+ if '[' in field_name:
+ assert ']' in field_name
+ field_name, array_len = field_name.rstrip(']').split('[')
+ assert array_len.isdigit() or array_len in _defines
+ fields.append((field_name, field_type, array_len))
+ assert name not in _structs, "struct '%s' has been defined." % name
+ _structs[name] = fields
+
+
+def ParseDefine(line):
+ # not parse multi-line macros
+ if line.endswith('\\'):
+ return
+ # not parse #define NAME() ...
+ if '(' in line or ')' in line:
+ return
+
+ define, name, value = line.split()
+ assert define == "#define"
+ assert name not in _defines, "macro '%s' has been defined." % name
+ _defines[name] = value
+
+
+def ParseVulkanHeaderFile(path):
+ with open(path) as header_file:
+ while True:
+ line = header_file.readline()
+ if not line:
+ break
+ line = line.strip()
+
+ if line.startswith("#define"):
+ ParseDefine(line)
+ elif line.startswith("typedef enum "):
+ ParseEnum(line, header_file)
+ elif line.startswith("typedef struct "):
+ ParseStruct(line, header_file)
+ elif line.startswith("typedef "):
+ ParseTypedef(line)
+ elif line.startswith("VK_DEFINE_"):
+ ParseHandle(line)
+
+
+def WriteMojomEnum(name, mojom_file):
+ if name in _generated_types:
+ return
+ _generated_types.append(name)
+
+ values = _enums[name]
+ mojom_file.write("\n")
+ mojom_file.write("enum %s {\n" % name)
+ for _, value, mojom_value_name in values:
+ mojom_file.write(" %s = %s,\n" % (mojom_value_name, value))
+ mojom_file.write(" INVALID_VALUE = -1,\n")
+ mojom_file.write("};\n")
+
+
+def WriteMojomStruct(name, mojom_file):
+ if name in _generated_types:
+ return
+ _generated_types.append(name)
+
+ fields = _structs[name]
+ deps = []
+ for field_name, field_type, array_len in fields:
+ if field_type in _structs or field_type in _enums:
+ deps.append(field_type)
+ WriteMojomTypes(deps, mojom_file)
+
+ mojom_file.write("\n")
+ mojom_file.write("struct %s {\n" % name)
+ for field_name, field_type, array_len in fields:
+ if field_type in _type_map:
+ field_type = _type_map[field_type]
+ else:
+ assert field_type in _structs or field_type in _enums or \
+ field_type in _handles, "Undefine type: '%s'" % field_type
+ if field_type == "char":
+ assert array_len
+ array_len = _defines[array_len]
+ mojom_file.write(" string %s;\n" % field_name)
+ elif not array_len:
+ mojom_file.write(" %s %s;\n" % (field_type, field_name))
+ else:
+ if not array_len.isdigit():
+ array_len = _defines[array_len]
+ assert array_len.isdigit(), "%s is not a digit." % array_len
+ mojom_file.write(
+ " array<%s, %s> %s;\n" % (field_type, array_len, field_name))
+ mojom_file.write("};\n")
+
+
+def WriteMojomTypes(types, mojom_file):
+ for t in types:
+ if t in _structs:
+ WriteMojomStruct(t, mojom_file)
+ elif t in _enums:
+ WriteMojomEnum(t, mojom_file)
+ else:
+ pass
+
+
+def GenerateMojom(mojom_file):
+ mojom_file.write(
+'''// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/ipc/common/generate_vulkan_types.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+module gpu.mojom;
+''')
+ WriteMojomTypes(_STRUCTS, mojom_file)
+
+
+def WriteStructTraits(name, traits_header_file, traits_source_file):
+ traits_header_file.write(
+"""
+template <>
+struct StructTraits<gpu::mojom::%sDataView, %s> {
+""" % (name, name)
+ )
+
+ fields = _structs[name]
+ for field_name, field_type, array_len in fields:
+ if field_type == "VkBool32":
+ field_type = "bool"
+ elif field_type == "VkDeviceSize":
+ field_type = "bool"
+
+ if field_type == "char":
+ assert array_len
+ traits_header_file.write(
+"""
+ static base::StringPiece %s(const %s& input) {
+ return input.%s;
+ }
+""" % (field_name, name, field_name))
+ elif array_len:
+ traits_header_file.write(
+"""
+ static base::span<const %s> %s(const %s& input) {
+ return input.%s;
+ }
+""" % (field_type, field_name, name, field_name))
+ elif field_type in _structs:
+ traits_header_file.write(
+"""
+ static const %s& %s(const %s& input) {
+ return input.%s;
+ }
+""" % (field_type, field_name, name, field_name))
+ else:
+ traits_header_file.write(
+"""
+ static %s %s(const %s& input) {
+ return input.%s;
+ }
+""" % (field_type, field_name, name, field_name))
+
+ traits_header_file.write(
+"""
+ static bool Read(gpu::mojom::%sDataView data, %s* out);
+""" % (name, name))
+
+ traits_source_file.write(
+"""
+// static
+bool StructTraits<gpu::mojom::%sDataView, %s>::Read(
+ gpu::mojom::%sDataView data, %s* out) {
+""" % (name, name, name, name))
+
+ fields = _structs[name]
+ for field_name, field_type, array_len in fields:
+ if field_type == "VkBool32":
+ field_type = "bool"
+ elif field_type == "VkDeviceSize":
+ field_type = "bool"
+
+ if field_type == "char":
+ assert array_len
+ read_method = "Read%s%s" % (field_name[0].upper(), field_name[1:])
+ traits_source_file.write(
+"""
+ base::StringPiece %s;
+ if (!data.%s(&%s))
+ return false;
+ %s.copy(out->%s, sizeof(out->%s));
+""" % (field_name, read_method, field_name, field_name, field_name, field_name))
+ elif array_len:
+ read_method = "Read%s%s" % (field_name[0].upper(), field_name[1:])
+ traits_source_file.write(
+"""
+ base::span<%s> %s(out->%s);
+ if (!data.%s(&%s))
+ return false;
+""" % (field_type, field_name, field_name, read_method, field_name))
+ elif field_type in _structs or field_type in _enums:
+ traits_source_file.write(
+"""
+ if (!data.Read%s%s(&out->%s))
+ return false;
+""" % (field_name[0].upper(), field_name[1:], field_name))
+ else:
+ traits_source_file.write(
+"""
+ out->%s = data.%s();
+""" % (field_name, field_name))
+
+
+ traits_source_file.write(
+"""
+ return true;
+}
+""")
+
+
+ traits_header_file.write("};\n")
+
+
+def WriteEnumTraits(name, traits_header_file):
+ traits_header_file.write(
+"""
+template <>
+struct EnumTraits<gpu::mojom::%s, %s> {
+ static gpu::mojom::%s ToMojom(%s input) {
+ switch (input) {
+""" % (name, name, name, name))
+
+ for value_name, _, mojom_value_name in _enums[name]:
+ traits_header_file.write(
+"""
+ case %s::%s:
+ return gpu::mojom::%s::%s;"""
+ % (name, value_name, name, mojom_value_name))
+
+ traits_header_file.write(
+"""
+ default:
+ NOTREACHED();
+ return gpu::mojom::%s::INVALID_VALUE;
+ }
+ }
+
+ static bool FromMojom(gpu::mojom::%s input, %s* out) {
+ switch (input) {
+""" % (name, name, name))
+
+ for value_name, _, mojom_value_name in _enums[name]:
+ traits_header_file.write(
+"""
+ case gpu::mojom::%s::%s:
+ *out = %s::%s;
+ return true;""" % (name, mojom_value_name, name, value_name))
+
+ traits_header_file.write(
+"""
+ case gpu::mojom::%s::INVALID_VALUE:
+ NOTREACHED();
+ return false;
+
+ }
+ NOTREACHED();
+ return false;
+ }
+};""" % name)
+
+
+
+def GenerateTraitsFile(traits_header_file, traits_source_file):
+ traits_header_file.write(
+"""// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/ipc/common/generate_vulkan_types.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_
+#define GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_
+
+#include "base/containers/span.h"
+#include "base/strings/string_piece.h"
+#include "gpu/ipc/common/vulkan_types.h"
+#include "gpu/ipc/common/vulkan_types.mojom-shared.h"
+
+namespace mojo {
+""")
+
+ traits_source_file.write(
+"""// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/ipc/common/generate_vulkan_types.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#include "gpu/ipc/common/vulkan_info_mojom_traits.h"
+
+namespace mojo {
+""")
+
+ for t in _generated_types:
+ if t in _structs:
+ WriteStructTraits(t, traits_header_file, traits_source_file)
+ elif t in _enums:
+ WriteEnumTraits(t, traits_header_file)
+
+ traits_header_file.write(
+"""
+} // namespace mojo
+
+#endif // GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_""")
+
+ traits_source_file.write(
+"""
+} // namespace mojo""")
+
+
+def GenerateTypemapFile(typemap_file):
+ typemap_file.write(
+"""# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is auto-generated from
+# gpu/ipc/common/generate_vulkan_types.py
+# DO NOT EDIT!
+
+mojom = "//gpu/ipc/common/vulkan_types.mojom"
+public_headers = [ "//gpu/ipc/common/vulkan_types.h" ]
+traits_headers = [ "//gpu/ipc/common/vulkan_types_mojom_traits.h" ]
+sources = [
+ "//gpu/ipc/common/vulkan_types_mojom_traits.cc",
+]
+public_deps = [
+ "//gpu/ipc/common:vulkan_types",
+]
+type_mappings = [
+""")
+ for t in _generated_types:
+ typemap_file.write(" \"gpu.mojom.%s=::%s\",\n" % (t, t))
+ typemap_file.write("]\n")
+
+
+def main(argv):
+ """This is the main function."""
+
+ parser = optparse.OptionParser()
+ parser.add_option(
+ "--output-dir",
+ help="Output directory for generated files. Defaults to this script's "
+ "directory.")
+ parser.add_option(
+ "-c", "--check", action="store_true",
+ help="Check if output files match generated files in chromium root "
+ "directory. Use this in PRESUBMIT scripts with --output-dir.")
+
+ (options, _) = parser.parse_args(args=argv)
+
+ # Support generating files for PRESUBMIT.
+ if options.output_dir:
+ output_dir = options.output_dir
+ else:
+ output_dir = _SELF_LOCATION
+
+ def ClangFormat(filename):
+ formatter = "clang-format"
+ if platform.system() == "Windows":
+ formatter += ".bat"
+ subprocess.call([formatter, "-i", "-style=chromium", filename])
+
+ vulkan_header_file_path = os.path.join(
+ _SELF_LOCATION, "../../..", _VULKAN_HEADER_FILE)
+ ParseVulkanHeaderFile(vulkan_header_file_path)
+
+ mojom_file_name = "vulkan_types.mojom"
+ mojom_file = open(
+ os.path.join(output_dir, mojom_file_name), 'wb')
+ GenerateMojom(mojom_file)
+ mojom_file.close()
+ ClangFormat(mojom_file.name)
+
+ traits_header_file_name = "vulkan_types_mojom_traits.h"
+ traits_header_file = \
+ open(os.path.join(output_dir, traits_header_file_name), 'wb')
+ traits_source_file_name = "vulkan_types_mojom_traits.cc"
+ traits_source_file = \
+ open(os.path.join(output_dir, traits_source_file_name), 'wb')
+ GenerateTraitsFile(traits_header_file, traits_source_file)
+ traits_header_file.close()
+ ClangFormat(traits_header_file.name)
+ traits_source_file.close()
+ ClangFormat(traits_source_file.name)
+
+ typemap_file_name = "vulkan_types.typemap"
+ typemap_file = open(
+ os.path.join(output_dir, typemap_file_name), 'wb')
+ GenerateTypemapFile(typemap_file)
+ typemap_file.close()
+
+ check_failed_filenames = []
+ if options.check:
+ for filename in [mojom_file_name, traits_header_file_name,
+ traits_source_file_name, typemap_file_name]:
+ if not filecmp.cmp(os.path.join(output_dir, filename),
+ os.path.join(_SELF_LOCATION, filename)):
+ check_failed_filenames.append(filename)
+
+ if len(check_failed_filenames) > 0:
+ print 'Please run gpu/ipc/common/generate_vulkan_types.py'
+ print 'Failed check on generated files:'
+ for filename in check_failed_filenames:
+ print filename
+ return 1
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
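The mojom enum value names emitted by WriteMojomEnum (for example INTEGRATED_GPU in VkPhysicalDeviceType, seen in vulkan_types.mojom below) come from stripping the SCREAMING_CASE prefix that ValueNameToVALUE_NAME derives from the enum's type name. A minimal standalone sketch of that derivation, reusing the script's regex; the helper name here is illustrative and not part of the generator:

    import re

    def value_name_prefix(enum_name):
        # Same transform as ValueNameToVALUE_NAME above: insert "_" before
        # interior capitals, then upper-case the result.
        # "VkPhysicalDeviceType" -> "VK_PHYSICAL_DEVICE_TYPE"
        return re.sub(
            r'(?<=[a-z])[A-Z]|(?<!^)[A-Z](?=[a-z])', r"_\g<0>", enum_name).upper()

    prefix = value_name_prefix("VkPhysicalDeviceType")
    # Dropping the prefix plus its trailing underscore from the C enumerator
    # leaves the mojom value name: this prints "INTEGRATED_GPU".
    print("VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU"[len(prefix) + 1:])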
diff --git a/chromium/gpu/ipc/common/gpu_info.mojom b/chromium/gpu/ipc/common/gpu_info.mojom
index d0f8b34f23f..9942b5eb9da 100644
--- a/chromium/gpu/ipc/common/gpu_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_info.mojom
@@ -9,6 +9,9 @@ import "gpu/ipc/common/dx_diag_node.mojom";
import "mojo/public/mojom/base/time.mojom";
import "ui/gfx/geometry/mojom/geometry.mojom";
+[EnableIf=supports_vulkan]
+import "gpu/ipc/common/vulkan_info.mojom";
+
// gpu::GPUInfo::GPUDevice
struct GpuDevice {
uint32 vendor_id;
@@ -172,4 +175,8 @@ struct GpuInfo {
uint64 system_visual;
uint64 rgba_visual;
bool oop_rasterization_supported;
+ bool subpixel_font_rendering;
+
+ [EnableIf=supports_vulkan]
+ VulkanInfo? vulkan_info;
};
diff --git a/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc b/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
index f777a5195e6..248daaf61ce 100644
--- a/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
@@ -8,6 +8,10 @@
#include "base/logging.h"
#include "mojo/public/cpp/base/time_mojom_traits.h"
+#if BUILDFLAG(ENABLE_VULKAN)
+#include "gpu/ipc/common/vulkan_info_mojom_traits.h"
+#endif
+
namespace mojo {
// static
@@ -379,6 +383,7 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
out->rgba_visual = data.rgba_visual();
#endif
out->oop_rasterization_supported = data.oop_rasterization_supported();
+ out->subpixel_font_rendering = data.subpixel_font_rendering();
#if defined(OS_WIN)
out->direct_composition = data.direct_composition();
@@ -412,7 +417,11 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
data.ReadVideoEncodeAcceleratorSupportedProfiles(
&out->video_encode_accelerator_supported_profiles) &&
data.ReadImageDecodeAcceleratorSupportedProfiles(
- &out->image_decode_accelerator_supported_profiles);
+ &out->image_decode_accelerator_supported_profiles) &&
+#if BUILDFLAG(ENABLE_VULKAN)
+ data.ReadVulkanInfo(&out->vulkan_info) &&
+#endif
+ true;
}
} // namespace mojo
diff --git a/chromium/gpu/ipc/common/gpu_info_mojom_traits.h b/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
index b617a1781e6..b8b22696781 100644
--- a/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
+++ b/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
@@ -391,6 +391,17 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
static bool oop_rasterization_supported(const gpu::GPUInfo& input) {
return input.oop_rasterization_supported;
}
+
+ static bool subpixel_font_rendering(const gpu::GPUInfo& input) {
+ return input.subpixel_font_rendering;
+ }
+
+#if BUILDFLAG(ENABLE_VULKAN)
+ static const base::Optional<gpu::VulkanInfo> vulkan_info(
+ const gpu::GPUInfo& input) {
+ return input.vulkan_info;
+ }
+#endif
};
} // namespace mojo
diff --git a/chromium/gpu/ipc/common/gpu_messages.h b/chromium/gpu/ipc/common/gpu_messages.h
index 2b8e00f1350..ace22e5c450 100644
--- a/chromium/gpu/ipc/common/gpu_messages.h
+++ b/chromium/gpu/ipc/common/gpu_messages.h
@@ -13,7 +13,6 @@
#include <string>
#include <vector>
-#include "base/memory/shared_memory.h"
#include "base/optional.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
@@ -42,6 +41,7 @@
#include "ui/gfx/native_widget_types.h"
#include "ui/gfx/presentation_feedback.h"
#include "ui/gfx/swap_result.h"
+#include "ui/gl/gpu_preference.h"
#include "url/ipc/url_param_traits.h"
#if defined(OS_MACOSX)
@@ -296,7 +296,8 @@ IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_ConsoleMsg,
GPUCommandBufferConsoleMessage /* msg */)
// Sent by the GPU process to notify the renderer process of a GPU switch.
-IPC_MESSAGE_ROUTED0(GpuCommandBufferMsg_GpuSwitched)
+IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_GpuSwitched,
+ gl::GpuPreference /* active_gpu_heuristic */)
// Register an existing shared memory transfer buffer. The id that can be
// used to identify the transfer buffer from a command buffer.
diff --git a/chromium/gpu/ipc/common/gpu_preferences.mojom b/chromium/gpu/ipc/common/gpu_preferences.mojom
index 3fd75af7210..305d8a294c7 100644
--- a/chromium/gpu/ipc/common/gpu_preferences.mojom
+++ b/chromium/gpu/ipc/common/gpu_preferences.mojom
@@ -19,6 +19,15 @@ enum VulkanImplementationName {
kLast = kSwiftshader,
};
+// Corresponds to gpu::GrContextType.
+enum GrContextType {
+ kGL = 0,
+ kVulkan = 1,
+ kMetal = 2,
+ kDawn = 3,
+ kLast = kDawn,
+};
+
// gpu::GpuPreferences
struct GpuPreferences {
bool disable_accelerated_video_decode;
@@ -59,12 +68,12 @@ struct GpuPreferences {
bool disable_biplanar_gpu_memory_buffers_for_video_frames;
array<gfx.mojom.BufferUsageAndFormat> texture_target_exception_list;
- bool disable_gpu_driver_bug_workarounds;
bool ignore_gpu_blacklist;
bool enable_oop_rasterization;
bool disable_oop_rasterization;
bool enable_oop_rasterization_ddl;
bool watchdog_starts_backgrounded;
+ GrContextType gr_context_type;
VulkanImplementationName use_vulkan;
bool enforce_vulkan_protected_memory;
bool disable_vulkan_surface;
@@ -72,6 +81,7 @@ struct GpuPreferences {
bool enable_metal;
bool enable_gpu_benchmarking_extension;
bool enable_webgpu;
+ bool enable_gpu_blocked_time_metric;
[EnableIf=use_ozone]
mojo_base.mojom.MessagePumpType message_pump_type;
diff --git a/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h b/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h
index a481a354458..48441160c64 100644
--- a/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h
+++ b/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h
@@ -19,6 +19,42 @@
namespace mojo {
template <>
+struct EnumTraits<gpu::mojom::GrContextType, gpu::GrContextType> {
+ static gpu::mojom::GrContextType ToMojom(gpu::GrContextType input) {
+ switch (input) {
+ case gpu::GrContextType::kGL:
+ return gpu::mojom::GrContextType::kGL;
+ case gpu::GrContextType::kVulkan:
+ return gpu::mojom::GrContextType::kVulkan;
+ case gpu::GrContextType::kMetal:
+ return gpu::mojom::GrContextType::kMetal;
+ case gpu::GrContextType::kDawn:
+ return gpu::mojom::GrContextType::kDawn;
+ }
+ NOTREACHED();
+ return gpu::mojom::GrContextType::kGL;
+ }
+ static bool FromMojom(gpu::mojom::GrContextType input,
+ gpu::GrContextType* out) {
+ switch (input) {
+ case gpu::mojom::GrContextType::kGL:
+ *out = gpu::GrContextType::kGL;
+ return true;
+ case gpu::mojom::GrContextType::kVulkan:
+ *out = gpu::GrContextType::kVulkan;
+ return true;
+ case gpu::mojom::GrContextType::kMetal:
+ *out = gpu::GrContextType::kMetal;
+ return true;
+ case gpu::mojom::GrContextType::kDawn:
+ *out = gpu::GrContextType::kDawn;
+ return true;
+ }
+ return false;
+ }
+};
+
+template <>
struct EnumTraits<gpu::mojom::VulkanImplementationName,
gpu::VulkanImplementationName> {
static gpu::mojom::VulkanImplementationName ToMojom(
@@ -113,13 +149,13 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
out->texture_target_exception_list.push_back(usage_format);
}
- out->disable_gpu_driver_bug_workarounds =
- prefs.disable_gpu_driver_bug_workarounds();
out->ignore_gpu_blacklist = prefs.ignore_gpu_blacklist();
out->enable_oop_rasterization = prefs.enable_oop_rasterization();
out->disable_oop_rasterization = prefs.disable_oop_rasterization();
out->enable_oop_rasterization_ddl = prefs.enable_oop_rasterization_ddl();
out->watchdog_starts_backgrounded = prefs.watchdog_starts_backgrounded();
+ if (!prefs.ReadGrContextType(&out->gr_context_type))
+ return false;
if (!prefs.ReadUseVulkan(&out->use_vulkan))
return false;
out->enforce_vulkan_protected_memory =
@@ -131,6 +167,8 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
out->enable_gpu_benchmarking_extension =
prefs.enable_gpu_benchmarking_extension();
out->enable_webgpu = prefs.enable_webgpu();
+ out->enable_gpu_blocked_time_metric =
+ prefs.enable_gpu_blocked_time_metric();
#if defined(USE_OZONE)
if (!prefs.ReadMessagePumpType(&out->message_pump_type))
@@ -246,10 +284,6 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
texture_target_exception_list(const gpu::GpuPreferences& prefs) {
return prefs.texture_target_exception_list;
}
- static bool disable_gpu_driver_bug_workarounds(
- const gpu::GpuPreferences& prefs) {
- return prefs.disable_gpu_driver_bug_workarounds;
- }
static bool ignore_gpu_blacklist(const gpu::GpuPreferences& prefs) {
return prefs.ignore_gpu_blacklist;
}
@@ -265,6 +299,9 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool watchdog_starts_backgrounded(const gpu::GpuPreferences& prefs) {
return prefs.watchdog_starts_backgrounded;
}
+ static gpu::GrContextType gr_context_type(const gpu::GpuPreferences& prefs) {
+ return prefs.gr_context_type;
+ }
static gpu::VulkanImplementationName use_vulkan(
const gpu::GpuPreferences& prefs) {
return prefs.use_vulkan;
@@ -290,6 +327,9 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool enable_webgpu(const gpu::GpuPreferences& prefs) {
return prefs.enable_webgpu;
}
+ static bool enable_gpu_blocked_time_metric(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_blocked_time_metric;
+ }
#if defined(USE_OZONE)
static base::MessagePumpType message_pump_type(
const gpu::GpuPreferences& prefs) {
diff --git a/chromium/gpu/ipc/common/gpu_watchdog_timeout.h b/chromium/gpu/ipc/common/gpu_watchdog_timeout.h
index 07332c723ae..f67352aa7d2 100644
--- a/chromium/gpu/ipc/common/gpu_watchdog_timeout.h
+++ b/chromium/gpu/ipc/common/gpu_watchdog_timeout.h
@@ -33,7 +33,7 @@ constexpr int kRestartFactor = 2;
// It takes longer to initialize GPU process in Windows. See
// https://crbug.com/949839 for details.
#if defined(OS_WIN)
-constexpr int kInitFactor = 4;
+constexpr int kInitFactor = 2;
#else
constexpr int kInitFactor = 1;
#endif
diff --git a/chromium/gpu/ipc/common/typemaps.gni b/chromium/gpu/ipc/common/typemaps.gni
index 397b2b00c21..e27bfe23ec2 100644
--- a/chromium/gpu/ipc/common/typemaps.gni
+++ b/chromium/gpu/ipc/common/typemaps.gni
@@ -16,4 +16,6 @@ typemaps = [
"//gpu/ipc/common/surface_handle.typemap",
"//gpu/ipc/common/sync_token.typemap",
"//gpu/ipc/common/vulkan_ycbcr_info.typemap",
+ "//gpu/ipc/common/vulkan_info.typemap",
+ "//gpu/ipc/common/vulkan_types.typemap",
]
diff --git a/chromium/gpu/ipc/common/vulkan_info.mojom b/chromium/gpu/ipc/common/vulkan_info.mojom
new file mode 100644
index 00000000000..f80cc07f3e4
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_info.mojom
@@ -0,0 +1,26 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// gpu/config/vulkan_info.h
+module gpu.mojom;
+
+import "gpu/ipc/common/vulkan_types.mojom";
+
+struct VulkanPhysicalDeviceInfo {
+ VkPhysicalDeviceProperties properties;
+ array<VkLayerProperties> layers;
+ VkPhysicalDeviceFeatures features;
+ bool feature_sampler_ycbcr_conversion;
+ bool feature_protected_memory;
+ array<VkQueueFamilyProperties> queue_families;
+};
+
+struct VulkanInfo {
+ uint32 api_version;
+ uint32 used_api_version;
+ array<VkExtensionProperties> instance_extensions;
+ array<string> enabled_instance_extensions;
+ array<VkLayerProperties> instance_layers;
+ array<VulkanPhysicalDeviceInfo> physical_devices;
+};
diff --git a/chromium/gpu/ipc/common/vulkan_info.typemap b/chromium/gpu/ipc/common/vulkan_info.typemap
new file mode 100644
index 00000000000..d61d1095f12
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_info.typemap
@@ -0,0 +1,16 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+mojom = "//gpu/ipc/common/vulkan_info.mojom"
+public_headers = [ "//gpu/config/vulkan_info.h" ]
+traits_headers = [ "//gpu/ipc/common/vulkan_info_mojom_traits.h" ]
+public_deps = [
+ # "//gpu/config",
+ "//gpu/ipc/common:vulkan_types",
+ "//gpu/ipc/common:vulkan_types_mojom_traits",
+]
+type_mappings = [
+ "gpu.mojom.VulkanPhysicalDeviceInfo=::gpu::VulkanPhysicalDeviceInfo",
+ "gpu.mojom.VulkanInfo=::gpu::VulkanInfo",
+]
diff --git a/chromium/gpu/ipc/common/vulkan_info_mojom_traits.h b/chromium/gpu/ipc/common/vulkan_info_mojom_traits.h
new file mode 100644
index 00000000000..9b67d962139
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_info_mojom_traits.h
@@ -0,0 +1,118 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_VULKAN_INFO_MOJOM_TRAITS_H_
+#define GPU_IPC_COMMON_VULKAN_INFO_MOJOM_TRAITS_H_
+
+#include "base/containers/span.h"
+#include "base/strings/string_piece.h"
+#include "gpu/config/vulkan_info.h"
+#include "gpu/ipc/common/vulkan_info.mojom-shared.h"
+#include "gpu/ipc/common/vulkan_types_mojom_traits.h"
+
+namespace mojo {
+
+template <>
+struct StructTraits<gpu::mojom::VulkanPhysicalDeviceInfoDataView,
+ gpu::VulkanPhysicalDeviceInfo> {
+ static const VkPhysicalDeviceProperties& properties(
+ const gpu::VulkanPhysicalDeviceInfo& input) {
+ return input.properties;
+ }
+
+ static const std::vector<VkLayerProperties>& layers(
+ const gpu::VulkanPhysicalDeviceInfo& input) {
+ return input.layers;
+ }
+
+ static const VkPhysicalDeviceFeatures& features(
+ const gpu::VulkanPhysicalDeviceInfo& input) {
+ return input.features;
+ }
+
+ static bool feature_sampler_ycbcr_conversion(
+ const gpu::VulkanPhysicalDeviceInfo& input) {
+ return input.feature_sampler_ycbcr_conversion;
+ }
+
+ static bool feature_protected_memory(
+ const gpu::VulkanPhysicalDeviceInfo& input) {
+ return input.feature_protected_memory;
+ }
+
+ static const std::vector<VkQueueFamilyProperties>& queue_families(
+ const gpu::VulkanPhysicalDeviceInfo& input) {
+ return input.queue_families;
+ }
+
+ static bool Read(gpu::mojom::VulkanPhysicalDeviceInfoDataView data,
+ gpu::VulkanPhysicalDeviceInfo* out) {
+ if (!data.ReadProperties(&out->properties))
+ return false;
+ if (!data.ReadLayers(&out->layers))
+ return false;
+ if (!data.ReadFeatures(&out->features))
+ return false;
+ out->feature_sampler_ycbcr_conversion =
+ data.feature_sampler_ycbcr_conversion();
+ out->feature_protected_memory = data.feature_protected_memory();
+ if (!data.ReadQueueFamilies(&out->queue_families))
+ return false;
+ return true;
+ }
+};
+
+template <>
+struct StructTraits<gpu::mojom::VulkanInfoDataView, gpu::VulkanInfo> {
+ static uint32_t api_version(const gpu::VulkanInfo& input) {
+ return input.api_version;
+ }
+
+ static uint32_t used_api_version(const gpu::VulkanInfo& input) {
+ return input.used_api_version;
+ }
+
+ static const std::vector<VkExtensionProperties>& instance_extensions(
+ const gpu::VulkanInfo& input) {
+ return input.instance_extensions;
+ }
+
+ static std::vector<base::StringPiece> enabled_instance_extensions(
+ const gpu::VulkanInfo& input) {
+ std::vector<base::StringPiece> extensions;
+ extensions.reserve(input.enabled_instance_extensions.size());
+ for (const char* extension : input.enabled_instance_extensions)
+ extensions.emplace_back(extension);
+ return extensions;
+ }
+
+ static const std::vector<VkLayerProperties>& instance_layers(
+ const gpu::VulkanInfo& input) {
+ return input.instance_layers;
+ }
+
+ static const std::vector<gpu::VulkanPhysicalDeviceInfo>& physical_devices(
+ const gpu::VulkanInfo& input) {
+ return input.physical_devices;
+ }
+
+ static bool Read(gpu::mojom::VulkanInfoDataView data, gpu::VulkanInfo* out) {
+ out->api_version = data.api_version();
+ out->used_api_version = data.used_api_version();
+
+ if (!data.ReadInstanceExtensions(&out->instance_extensions))
+ return false;
+
+ std::vector<base::StringPiece> extensions;
+ if (!data.ReadEnabledInstanceExtensions(&extensions))
+ return false;
+ out->SetEnabledInstanceExtensions(extensions);
+ return data.ReadInstanceLayers(&out->instance_layers) &&
+ data.ReadPhysicalDevices(&out->physical_devices);
+ }
+};
+
+} // namespace mojo
+
+#endif // GPU_IPC_COMMON_VULKAN_INFO_MOJOM_TRAITS_H_
diff --git a/chromium/gpu/ipc/common/vulkan_types.h b/chromium/gpu/ipc/common/vulkan_types.h
new file mode 100644
index 00000000000..0d65cd9235f
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_types.h
@@ -0,0 +1,10 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_VULKAN_TYPES_H_
+#define GPU_IPC_COMMON_VULKAN_TYPES_H_
+
+#include <vulkan/vulkan.h>
+
+#endif // GPU_IPC_COMMON_VULKAN_TYPES_H_
diff --git a/chromium/gpu/ipc/common/vulkan_types.mojom b/chromium/gpu/ipc/common/vulkan_types.mojom
new file mode 100644
index 00000000000..8f13e182cd6
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_types.mojom
@@ -0,0 +1,232 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/ipc/common/generate_vulkan_types.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+module gpu.mojom;
+
+struct VkExtensionProperties {
+ string extensionName;
+ uint32 specVersion;
+};
+
+struct VkLayerProperties {
+ string layerName;
+ uint32 specVersion;
+ uint32 implementationVersion;
+ string description;
+};
+
+enum VkPhysicalDeviceType {
+ OTHER = 0,
+ INTEGRATED_GPU = 1,
+ DISCRETE_GPU = 2,
+ VIRTUAL_GPU = 3,
+ CPU = 4,
+ INVALID_VALUE = -1,
+};
+
+struct VkPhysicalDeviceLimits {
+ uint32 maxImageDimension1D;
+ uint32 maxImageDimension2D;
+ uint32 maxImageDimension3D;
+ uint32 maxImageDimensionCube;
+ uint32 maxImageArrayLayers;
+ uint32 maxTexelBufferElements;
+ uint32 maxUniformBufferRange;
+ uint32 maxStorageBufferRange;
+ uint32 maxPushConstantsSize;
+ uint32 maxMemoryAllocationCount;
+ uint32 maxSamplerAllocationCount;
+ uint64 bufferImageGranularity;
+ uint64 sparseAddressSpaceSize;
+ uint32 maxBoundDescriptorSets;
+ uint32 maxPerStageDescriptorSamplers;
+ uint32 maxPerStageDescriptorUniformBuffers;
+ uint32 maxPerStageDescriptorStorageBuffers;
+ uint32 maxPerStageDescriptorSampledImages;
+ uint32 maxPerStageDescriptorStorageImages;
+ uint32 maxPerStageDescriptorInputAttachments;
+ uint32 maxPerStageResources;
+ uint32 maxDescriptorSetSamplers;
+ uint32 maxDescriptorSetUniformBuffers;
+ uint32 maxDescriptorSetUniformBuffersDynamic;
+ uint32 maxDescriptorSetStorageBuffers;
+ uint32 maxDescriptorSetStorageBuffersDynamic;
+ uint32 maxDescriptorSetSampledImages;
+ uint32 maxDescriptorSetStorageImages;
+ uint32 maxDescriptorSetInputAttachments;
+ uint32 maxVertexInputAttributes;
+ uint32 maxVertexInputBindings;
+ uint32 maxVertexInputAttributeOffset;
+ uint32 maxVertexInputBindingStride;
+ uint32 maxVertexOutputComponents;
+ uint32 maxTessellationGenerationLevel;
+ uint32 maxTessellationPatchSize;
+ uint32 maxTessellationControlPerVertexInputComponents;
+ uint32 maxTessellationControlPerVertexOutputComponents;
+ uint32 maxTessellationControlPerPatchOutputComponents;
+ uint32 maxTessellationControlTotalOutputComponents;
+ uint32 maxTessellationEvaluationInputComponents;
+ uint32 maxTessellationEvaluationOutputComponents;
+ uint32 maxGeometryShaderInvocations;
+ uint32 maxGeometryInputComponents;
+ uint32 maxGeometryOutputComponents;
+ uint32 maxGeometryOutputVertices;
+ uint32 maxGeometryTotalOutputComponents;
+ uint32 maxFragmentInputComponents;
+ uint32 maxFragmentOutputAttachments;
+ uint32 maxFragmentDualSrcAttachments;
+ uint32 maxFragmentCombinedOutputResources;
+ uint32 maxComputeSharedMemorySize;
+ array<uint32, 3> maxComputeWorkGroupCount;
+ uint32 maxComputeWorkGroupInvocations;
+ array<uint32, 3> maxComputeWorkGroupSize;
+ uint32 subPixelPrecisionBits;
+ uint32 subTexelPrecisionBits;
+ uint32 mipmapPrecisionBits;
+ uint32 maxDrawIndexedIndexValue;
+ uint32 maxDrawIndirectCount;
+ float maxSamplerLodBias;
+ float maxSamplerAnisotropy;
+ uint32 maxViewports;
+ array<uint32, 2> maxViewportDimensions;
+ array<float, 2> viewportBoundsRange;
+ uint32 viewportSubPixelBits;
+ uint64 minMemoryMapAlignment;
+ uint64 minTexelBufferOffsetAlignment;
+ uint64 minUniformBufferOffsetAlignment;
+ uint64 minStorageBufferOffsetAlignment;
+ int32 minTexelOffset;
+ uint32 maxTexelOffset;
+ int32 minTexelGatherOffset;
+ uint32 maxTexelGatherOffset;
+ float minInterpolationOffset;
+ float maxInterpolationOffset;
+ uint32 subPixelInterpolationOffsetBits;
+ uint32 maxFramebufferWidth;
+ uint32 maxFramebufferHeight;
+ uint32 maxFramebufferLayers;
+ uint32 framebufferColorSampleCounts;
+ uint32 framebufferDepthSampleCounts;
+ uint32 framebufferStencilSampleCounts;
+ uint32 framebufferNoAttachmentsSampleCounts;
+ uint32 maxColorAttachments;
+ uint32 sampledImageColorSampleCounts;
+ uint32 sampledImageIntegerSampleCounts;
+ uint32 sampledImageDepthSampleCounts;
+ uint32 sampledImageStencilSampleCounts;
+ uint32 storageImageSampleCounts;
+ uint32 maxSampleMaskWords;
+ bool timestampComputeAndGraphics;
+ float timestampPeriod;
+ uint32 maxClipDistances;
+ uint32 maxCullDistances;
+ uint32 maxCombinedClipAndCullDistances;
+ uint32 discreteQueuePriorities;
+ array<float, 2> pointSizeRange;
+ array<float, 2> lineWidthRange;
+ float pointSizeGranularity;
+ float lineWidthGranularity;
+ bool strictLines;
+ bool standardSampleLocations;
+ uint64 optimalBufferCopyOffsetAlignment;
+ uint64 optimalBufferCopyRowPitchAlignment;
+ uint64 nonCoherentAtomSize;
+};
+
+struct VkPhysicalDeviceSparseProperties {
+ bool residencyStandard2DBlockShape;
+ bool residencyStandard2DMultisampleBlockShape;
+ bool residencyStandard3DBlockShape;
+ bool residencyAlignedMipSize;
+ bool residencyNonResidentStrict;
+};
+
+struct VkPhysicalDeviceProperties {
+ uint32 apiVersion;
+ uint32 driverVersion;
+ uint32 vendorID;
+ uint32 deviceID;
+ VkPhysicalDeviceType deviceType;
+ string deviceName;
+ array<uint8, 16> pipelineCacheUUID;
+ VkPhysicalDeviceLimits limits;
+ VkPhysicalDeviceSparseProperties sparseProperties;
+};
+
+struct VkPhysicalDeviceFeatures {
+ bool robustBufferAccess;
+ bool fullDrawIndexUint32;
+ bool imageCubeArray;
+ bool independentBlend;
+ bool geometryShader;
+ bool tessellationShader;
+ bool sampleRateShading;
+ bool dualSrcBlend;
+ bool logicOp;
+ bool multiDrawIndirect;
+ bool drawIndirectFirstInstance;
+ bool depthClamp;
+ bool depthBiasClamp;
+ bool fillModeNonSolid;
+ bool depthBounds;
+ bool wideLines;
+ bool largePoints;
+ bool alphaToOne;
+ bool multiViewport;
+ bool samplerAnisotropy;
+ bool textureCompressionETC2;
+ bool textureCompressionASTC_LDR;
+ bool textureCompressionBC;
+ bool occlusionQueryPrecise;
+ bool pipelineStatisticsQuery;
+ bool vertexPipelineStoresAndAtomics;
+ bool fragmentStoresAndAtomics;
+ bool shaderTessellationAndGeometryPointSize;
+ bool shaderImageGatherExtended;
+ bool shaderStorageImageExtendedFormats;
+ bool shaderStorageImageMultisample;
+ bool shaderStorageImageReadWithoutFormat;
+ bool shaderStorageImageWriteWithoutFormat;
+ bool shaderUniformBufferArrayDynamicIndexing;
+ bool shaderSampledImageArrayDynamicIndexing;
+ bool shaderStorageBufferArrayDynamicIndexing;
+ bool shaderStorageImageArrayDynamicIndexing;
+ bool shaderClipDistance;
+ bool shaderCullDistance;
+ bool shaderFloat64;
+ bool shaderInt64;
+ bool shaderInt16;
+ bool shaderResourceResidency;
+ bool shaderResourceMinLod;
+ bool sparseBinding;
+ bool sparseResidencyBuffer;
+ bool sparseResidencyImage2D;
+ bool sparseResidencyImage3D;
+ bool sparseResidency2Samples;
+ bool sparseResidency4Samples;
+ bool sparseResidency8Samples;
+ bool sparseResidency16Samples;
+ bool sparseResidencyAliased;
+ bool variableMultisampleRate;
+ bool inheritedQueries;
+};
+
+struct VkExtent3D {
+ uint32 width;
+ uint32 height;
+ uint32 depth;
+};
+
+struct VkQueueFamilyProperties {
+ uint32 queueFlags;
+ uint32 queueCount;
+ uint32 timestampValidBits;
+ VkExtent3D minImageTransferGranularity;
+};
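
Editorial note, not part of the patch: each mojom struct above mirrors its Vulkan C counterpart field for field, which is what lets the traits further down copy members one-to-one. For comparison, the Vulkan header defines the smallest of them as:

    typedef struct VkExtent3D {
      uint32_t width;
      uint32_t height;
      uint32_t depth;
    } VkExtent3D;
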
diff --git a/chromium/gpu/ipc/common/vulkan_types.typemap b/chromium/gpu/ipc/common/vulkan_types.typemap
new file mode 100644
index 00000000000..9506337e412
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_types.typemap
@@ -0,0 +1,28 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is auto-generated from
+# gpu/ipc/common/generate_vulkan_types.py
+# DO NOT EDIT!
+
+mojom = "//gpu/ipc/common/vulkan_types.mojom"
+public_headers = [ "//gpu/ipc/common/vulkan_types.h" ]
+traits_headers = [ "//gpu/ipc/common/vulkan_types_mojom_traits.h" ]
+sources = [
+ "//gpu/ipc/common/vulkan_types_mojom_traits.cc",
+]
+public_deps = [
+ "//gpu/ipc/common:vulkan_types",
+]
+type_mappings = [
+ "gpu.mojom.VkExtensionProperties=::VkExtensionProperties",
+ "gpu.mojom.VkLayerProperties=::VkLayerProperties",
+ "gpu.mojom.VkPhysicalDeviceProperties=::VkPhysicalDeviceProperties",
+ "gpu.mojom.VkPhysicalDeviceType=::VkPhysicalDeviceType",
+ "gpu.mojom.VkPhysicalDeviceLimits=::VkPhysicalDeviceLimits",
+ "gpu.mojom.VkPhysicalDeviceSparseProperties=::VkPhysicalDeviceSparseProperties",
+ "gpu.mojom.VkPhysicalDeviceFeatures=::VkPhysicalDeviceFeatures",
+ "gpu.mojom.VkQueueFamilyProperties=::VkQueueFamilyProperties",
+ "gpu.mojom.VkExtent3D=::VkExtent3D",
+]
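
Editorial illustration, not part of the patch: with this typemap registered, mojom declarations that name these gpu.mojom structs are generated against the native Vulkan types, so C++ callers pass them directly. A minimal sketch, assuming a hypothetical interface method:

    // Hypothetical mojom, for illustration only:
    //   interface VulkanInfoSink {
    //     AddQueueFamily(VkQueueFamilyProperties properties);
    //   };
    //
    // The generated C++ bindings then accept the Vulkan struct as-is;
    // serialization goes through the StructTraits declared below.
    void ReportQueueFamily(gpu::mojom::VulkanInfoSink* sink,
                           const VkQueueFamilyProperties& properties) {
      sink->AddQueueFamily(properties);
    }
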
diff --git a/chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc b/chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc
new file mode 100644
index 00000000000..9dc3878dcc8
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc
@@ -0,0 +1,510 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/ipc/common/generate_vulkan_types.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#include "gpu/ipc/common/vulkan_info_mojom_traits.h"
+
+namespace mojo {
+
+// static
+bool StructTraits<
+ gpu::mojom::VkExtensionPropertiesDataView,
+ VkExtensionProperties>::Read(gpu::mojom::VkExtensionPropertiesDataView data,
+ VkExtensionProperties* out) {
+ base::StringPiece extensionName;
+ if (!data.ReadExtensionName(&extensionName))
+ return false;
+ extensionName.copy(out->extensionName, sizeof(out->extensionName));
+
+ out->specVersion = data.specVersion();
+
+ return true;
+}
+
+// static
+bool StructTraits<gpu::mojom::VkLayerPropertiesDataView, VkLayerProperties>::
+ Read(gpu::mojom::VkLayerPropertiesDataView data, VkLayerProperties* out) {
+ base::StringPiece layerName;
+ if (!data.ReadLayerName(&layerName))
+ return false;
+ layerName.copy(out->layerName, sizeof(out->layerName));
+
+ out->specVersion = data.specVersion();
+
+ out->implementationVersion = data.implementationVersion();
+
+ base::StringPiece description;
+ if (!data.ReadDescription(&description))
+ return false;
+ description.copy(out->description, sizeof(out->description));
+
+ return true;
+}
+
+// static
+bool StructTraits<gpu::mojom::VkPhysicalDevicePropertiesDataView,
+ VkPhysicalDeviceProperties>::
+ Read(gpu::mojom::VkPhysicalDevicePropertiesDataView data,
+ VkPhysicalDeviceProperties* out) {
+ out->apiVersion = data.apiVersion();
+
+ out->driverVersion = data.driverVersion();
+
+ out->vendorID = data.vendorID();
+
+ out->deviceID = data.deviceID();
+
+ if (!data.ReadDeviceType(&out->deviceType))
+ return false;
+
+ base::StringPiece deviceName;
+ if (!data.ReadDeviceName(&deviceName))
+ return false;
+ deviceName.copy(out->deviceName, sizeof(out->deviceName));
+
+ base::span<uint8_t> pipelineCacheUUID(out->pipelineCacheUUID);
+ if (!data.ReadPipelineCacheUUID(&pipelineCacheUUID))
+ return false;
+
+ if (!data.ReadLimits(&out->limits))
+ return false;
+
+ if (!data.ReadSparseProperties(&out->sparseProperties))
+ return false;
+
+ return true;
+}
+
+// static
+bool StructTraits<gpu::mojom::VkPhysicalDeviceLimitsDataView,
+ VkPhysicalDeviceLimits>::
+ Read(gpu::mojom::VkPhysicalDeviceLimitsDataView data,
+ VkPhysicalDeviceLimits* out) {
+ out->maxImageDimension1D = data.maxImageDimension1D();
+
+ out->maxImageDimension2D = data.maxImageDimension2D();
+
+ out->maxImageDimension3D = data.maxImageDimension3D();
+
+ out->maxImageDimensionCube = data.maxImageDimensionCube();
+
+ out->maxImageArrayLayers = data.maxImageArrayLayers();
+
+ out->maxTexelBufferElements = data.maxTexelBufferElements();
+
+ out->maxUniformBufferRange = data.maxUniformBufferRange();
+
+ out->maxStorageBufferRange = data.maxStorageBufferRange();
+
+ out->maxPushConstantsSize = data.maxPushConstantsSize();
+
+ out->maxMemoryAllocationCount = data.maxMemoryAllocationCount();
+
+ out->maxSamplerAllocationCount = data.maxSamplerAllocationCount();
+
+ out->bufferImageGranularity = data.bufferImageGranularity();
+
+ out->sparseAddressSpaceSize = data.sparseAddressSpaceSize();
+
+ out->maxBoundDescriptorSets = data.maxBoundDescriptorSets();
+
+ out->maxPerStageDescriptorSamplers = data.maxPerStageDescriptorSamplers();
+
+ out->maxPerStageDescriptorUniformBuffers =
+ data.maxPerStageDescriptorUniformBuffers();
+
+ out->maxPerStageDescriptorStorageBuffers =
+ data.maxPerStageDescriptorStorageBuffers();
+
+ out->maxPerStageDescriptorSampledImages =
+ data.maxPerStageDescriptorSampledImages();
+
+ out->maxPerStageDescriptorStorageImages =
+ data.maxPerStageDescriptorStorageImages();
+
+ out->maxPerStageDescriptorInputAttachments =
+ data.maxPerStageDescriptorInputAttachments();
+
+ out->maxPerStageResources = data.maxPerStageResources();
+
+ out->maxDescriptorSetSamplers = data.maxDescriptorSetSamplers();
+
+ out->maxDescriptorSetUniformBuffers = data.maxDescriptorSetUniformBuffers();
+
+ out->maxDescriptorSetUniformBuffersDynamic =
+ data.maxDescriptorSetUniformBuffersDynamic();
+
+ out->maxDescriptorSetStorageBuffers = data.maxDescriptorSetStorageBuffers();
+
+ out->maxDescriptorSetStorageBuffersDynamic =
+ data.maxDescriptorSetStorageBuffersDynamic();
+
+ out->maxDescriptorSetSampledImages = data.maxDescriptorSetSampledImages();
+
+ out->maxDescriptorSetStorageImages = data.maxDescriptorSetStorageImages();
+
+ out->maxDescriptorSetInputAttachments =
+ data.maxDescriptorSetInputAttachments();
+
+ out->maxVertexInputAttributes = data.maxVertexInputAttributes();
+
+ out->maxVertexInputBindings = data.maxVertexInputBindings();
+
+ out->maxVertexInputAttributeOffset = data.maxVertexInputAttributeOffset();
+
+ out->maxVertexInputBindingStride = data.maxVertexInputBindingStride();
+
+ out->maxVertexOutputComponents = data.maxVertexOutputComponents();
+
+ out->maxTessellationGenerationLevel = data.maxTessellationGenerationLevel();
+
+ out->maxTessellationPatchSize = data.maxTessellationPatchSize();
+
+ out->maxTessellationControlPerVertexInputComponents =
+ data.maxTessellationControlPerVertexInputComponents();
+
+ out->maxTessellationControlPerVertexOutputComponents =
+ data.maxTessellationControlPerVertexOutputComponents();
+
+ out->maxTessellationControlPerPatchOutputComponents =
+ data.maxTessellationControlPerPatchOutputComponents();
+
+ out->maxTessellationControlTotalOutputComponents =
+ data.maxTessellationControlTotalOutputComponents();
+
+ out->maxTessellationEvaluationInputComponents =
+ data.maxTessellationEvaluationInputComponents();
+
+ out->maxTessellationEvaluationOutputComponents =
+ data.maxTessellationEvaluationOutputComponents();
+
+ out->maxGeometryShaderInvocations = data.maxGeometryShaderInvocations();
+
+ out->maxGeometryInputComponents = data.maxGeometryInputComponents();
+
+ out->maxGeometryOutputComponents = data.maxGeometryOutputComponents();
+
+ out->maxGeometryOutputVertices = data.maxGeometryOutputVertices();
+
+ out->maxGeometryTotalOutputComponents =
+ data.maxGeometryTotalOutputComponents();
+
+ out->maxFragmentInputComponents = data.maxFragmentInputComponents();
+
+ out->maxFragmentOutputAttachments = data.maxFragmentOutputAttachments();
+
+ out->maxFragmentDualSrcAttachments = data.maxFragmentDualSrcAttachments();
+
+ out->maxFragmentCombinedOutputResources =
+ data.maxFragmentCombinedOutputResources();
+
+ out->maxComputeSharedMemorySize = data.maxComputeSharedMemorySize();
+
+ base::span<uint32_t> maxComputeWorkGroupCount(out->maxComputeWorkGroupCount);
+ if (!data.ReadMaxComputeWorkGroupCount(&maxComputeWorkGroupCount))
+ return false;
+
+ out->maxComputeWorkGroupInvocations = data.maxComputeWorkGroupInvocations();
+
+ base::span<uint32_t> maxComputeWorkGroupSize(out->maxComputeWorkGroupSize);
+ if (!data.ReadMaxComputeWorkGroupSize(&maxComputeWorkGroupSize))
+ return false;
+
+ out->subPixelPrecisionBits = data.subPixelPrecisionBits();
+
+ out->subTexelPrecisionBits = data.subTexelPrecisionBits();
+
+ out->mipmapPrecisionBits = data.mipmapPrecisionBits();
+
+ out->maxDrawIndexedIndexValue = data.maxDrawIndexedIndexValue();
+
+ out->maxDrawIndirectCount = data.maxDrawIndirectCount();
+
+ out->maxSamplerLodBias = data.maxSamplerLodBias();
+
+ out->maxSamplerAnisotropy = data.maxSamplerAnisotropy();
+
+ out->maxViewports = data.maxViewports();
+
+ base::span<uint32_t> maxViewportDimensions(out->maxViewportDimensions);
+ if (!data.ReadMaxViewportDimensions(&maxViewportDimensions))
+ return false;
+
+ base::span<float> viewportBoundsRange(out->viewportBoundsRange);
+ if (!data.ReadViewportBoundsRange(&viewportBoundsRange))
+ return false;
+
+ out->viewportSubPixelBits = data.viewportSubPixelBits();
+
+ out->minMemoryMapAlignment = data.minMemoryMapAlignment();
+
+ out->minTexelBufferOffsetAlignment = data.minTexelBufferOffsetAlignment();
+
+ out->minUniformBufferOffsetAlignment = data.minUniformBufferOffsetAlignment();
+
+ out->minStorageBufferOffsetAlignment = data.minStorageBufferOffsetAlignment();
+
+ out->minTexelOffset = data.minTexelOffset();
+
+ out->maxTexelOffset = data.maxTexelOffset();
+
+ out->minTexelGatherOffset = data.minTexelGatherOffset();
+
+ out->maxTexelGatherOffset = data.maxTexelGatherOffset();
+
+ out->minInterpolationOffset = data.minInterpolationOffset();
+
+ out->maxInterpolationOffset = data.maxInterpolationOffset();
+
+ out->subPixelInterpolationOffsetBits = data.subPixelInterpolationOffsetBits();
+
+ out->maxFramebufferWidth = data.maxFramebufferWidth();
+
+ out->maxFramebufferHeight = data.maxFramebufferHeight();
+
+ out->maxFramebufferLayers = data.maxFramebufferLayers();
+
+ out->framebufferColorSampleCounts = data.framebufferColorSampleCounts();
+
+ out->framebufferDepthSampleCounts = data.framebufferDepthSampleCounts();
+
+ out->framebufferStencilSampleCounts = data.framebufferStencilSampleCounts();
+
+ out->framebufferNoAttachmentsSampleCounts =
+ data.framebufferNoAttachmentsSampleCounts();
+
+ out->maxColorAttachments = data.maxColorAttachments();
+
+ out->sampledImageColorSampleCounts = data.sampledImageColorSampleCounts();
+
+ out->sampledImageIntegerSampleCounts = data.sampledImageIntegerSampleCounts();
+
+ out->sampledImageDepthSampleCounts = data.sampledImageDepthSampleCounts();
+
+ out->sampledImageStencilSampleCounts = data.sampledImageStencilSampleCounts();
+
+ out->storageImageSampleCounts = data.storageImageSampleCounts();
+
+ out->maxSampleMaskWords = data.maxSampleMaskWords();
+
+ out->timestampComputeAndGraphics = data.timestampComputeAndGraphics();
+
+ out->timestampPeriod = data.timestampPeriod();
+
+ out->maxClipDistances = data.maxClipDistances();
+
+ out->maxCullDistances = data.maxCullDistances();
+
+ out->maxCombinedClipAndCullDistances = data.maxCombinedClipAndCullDistances();
+
+ out->discreteQueuePriorities = data.discreteQueuePriorities();
+
+ base::span<float> pointSizeRange(out->pointSizeRange);
+ if (!data.ReadPointSizeRange(&pointSizeRange))
+ return false;
+
+ base::span<float> lineWidthRange(out->lineWidthRange);
+ if (!data.ReadLineWidthRange(&lineWidthRange))
+ return false;
+
+ out->pointSizeGranularity = data.pointSizeGranularity();
+
+ out->lineWidthGranularity = data.lineWidthGranularity();
+
+ out->strictLines = data.strictLines();
+
+ out->standardSampleLocations = data.standardSampleLocations();
+
+ out->optimalBufferCopyOffsetAlignment =
+ data.optimalBufferCopyOffsetAlignment();
+
+ out->optimalBufferCopyRowPitchAlignment =
+ data.optimalBufferCopyRowPitchAlignment();
+
+ out->nonCoherentAtomSize = data.nonCoherentAtomSize();
+
+ return true;
+}
+
+// static
+bool StructTraits<gpu::mojom::VkPhysicalDeviceSparsePropertiesDataView,
+ VkPhysicalDeviceSparseProperties>::
+ Read(gpu::mojom::VkPhysicalDeviceSparsePropertiesDataView data,
+ VkPhysicalDeviceSparseProperties* out) {
+ out->residencyStandard2DBlockShape = data.residencyStandard2DBlockShape();
+
+ out->residencyStandard2DMultisampleBlockShape =
+ data.residencyStandard2DMultisampleBlockShape();
+
+ out->residencyStandard3DBlockShape = data.residencyStandard3DBlockShape();
+
+ out->residencyAlignedMipSize = data.residencyAlignedMipSize();
+
+ out->residencyNonResidentStrict = data.residencyNonResidentStrict();
+
+ return true;
+}
+
+// static
+bool StructTraits<gpu::mojom::VkPhysicalDeviceFeaturesDataView,
+ VkPhysicalDeviceFeatures>::
+ Read(gpu::mojom::VkPhysicalDeviceFeaturesDataView data,
+ VkPhysicalDeviceFeatures* out) {
+ out->robustBufferAccess = data.robustBufferAccess();
+
+ out->fullDrawIndexUint32 = data.fullDrawIndexUint32();
+
+ out->imageCubeArray = data.imageCubeArray();
+
+ out->independentBlend = data.independentBlend();
+
+ out->geometryShader = data.geometryShader();
+
+ out->tessellationShader = data.tessellationShader();
+
+ out->sampleRateShading = data.sampleRateShading();
+
+ out->dualSrcBlend = data.dualSrcBlend();
+
+ out->logicOp = data.logicOp();
+
+ out->multiDrawIndirect = data.multiDrawIndirect();
+
+ out->drawIndirectFirstInstance = data.drawIndirectFirstInstance();
+
+ out->depthClamp = data.depthClamp();
+
+ out->depthBiasClamp = data.depthBiasClamp();
+
+ out->fillModeNonSolid = data.fillModeNonSolid();
+
+ out->depthBounds = data.depthBounds();
+
+ out->wideLines = data.wideLines();
+
+ out->largePoints = data.largePoints();
+
+ out->alphaToOne = data.alphaToOne();
+
+ out->multiViewport = data.multiViewport();
+
+ out->samplerAnisotropy = data.samplerAnisotropy();
+
+ out->textureCompressionETC2 = data.textureCompressionETC2();
+
+ out->textureCompressionASTC_LDR = data.textureCompressionASTC_LDR();
+
+ out->textureCompressionBC = data.textureCompressionBC();
+
+ out->occlusionQueryPrecise = data.occlusionQueryPrecise();
+
+ out->pipelineStatisticsQuery = data.pipelineStatisticsQuery();
+
+ out->vertexPipelineStoresAndAtomics = data.vertexPipelineStoresAndAtomics();
+
+ out->fragmentStoresAndAtomics = data.fragmentStoresAndAtomics();
+
+ out->shaderTessellationAndGeometryPointSize =
+ data.shaderTessellationAndGeometryPointSize();
+
+ out->shaderImageGatherExtended = data.shaderImageGatherExtended();
+
+ out->shaderStorageImageExtendedFormats =
+ data.shaderStorageImageExtendedFormats();
+
+ out->shaderStorageImageMultisample = data.shaderStorageImageMultisample();
+
+ out->shaderStorageImageReadWithoutFormat =
+ data.shaderStorageImageReadWithoutFormat();
+
+ out->shaderStorageImageWriteWithoutFormat =
+ data.shaderStorageImageWriteWithoutFormat();
+
+ out->shaderUniformBufferArrayDynamicIndexing =
+ data.shaderUniformBufferArrayDynamicIndexing();
+
+ out->shaderSampledImageArrayDynamicIndexing =
+ data.shaderSampledImageArrayDynamicIndexing();
+
+ out->shaderStorageBufferArrayDynamicIndexing =
+ data.shaderStorageBufferArrayDynamicIndexing();
+
+ out->shaderStorageImageArrayDynamicIndexing =
+ data.shaderStorageImageArrayDynamicIndexing();
+
+ out->shaderClipDistance = data.shaderClipDistance();
+
+ out->shaderCullDistance = data.shaderCullDistance();
+
+ out->shaderFloat64 = data.shaderFloat64();
+
+ out->shaderInt64 = data.shaderInt64();
+
+ out->shaderInt16 = data.shaderInt16();
+
+ out->shaderResourceResidency = data.shaderResourceResidency();
+
+ out->shaderResourceMinLod = data.shaderResourceMinLod();
+
+ out->sparseBinding = data.sparseBinding();
+
+ out->sparseResidencyBuffer = data.sparseResidencyBuffer();
+
+ out->sparseResidencyImage2D = data.sparseResidencyImage2D();
+
+ out->sparseResidencyImage3D = data.sparseResidencyImage3D();
+
+ out->sparseResidency2Samples = data.sparseResidency2Samples();
+
+ out->sparseResidency4Samples = data.sparseResidency4Samples();
+
+ out->sparseResidency8Samples = data.sparseResidency8Samples();
+
+ out->sparseResidency16Samples = data.sparseResidency16Samples();
+
+ out->sparseResidencyAliased = data.sparseResidencyAliased();
+
+ out->variableMultisampleRate = data.variableMultisampleRate();
+
+ out->inheritedQueries = data.inheritedQueries();
+
+ return true;
+}
+
+// static
+bool StructTraits<gpu::mojom::VkQueueFamilyPropertiesDataView,
+ VkQueueFamilyProperties>::
+ Read(gpu::mojom::VkQueueFamilyPropertiesDataView data,
+ VkQueueFamilyProperties* out) {
+ out->queueFlags = data.queueFlags();
+
+ out->queueCount = data.queueCount();
+
+ out->timestampValidBits = data.timestampValidBits();
+
+ if (!data.ReadMinImageTransferGranularity(&out->minImageTransferGranularity))
+ return false;
+
+ return true;
+}
+
+// static
+bool StructTraits<gpu::mojom::VkExtent3DDataView, VkExtent3D>::Read(
+ gpu::mojom::VkExtent3DDataView data,
+ VkExtent3D* out) {
+ out->width = data.width();
+
+ out->height = data.height();
+
+ out->depth = data.depth();
+
+ return true;
+}
+
+}  // namespace mojo
\ No newline at end of file
diff --git a/chromium/gpu/ipc/common/vulkan_types_mojom_traits.h b/chromium/gpu/ipc/common/vulkan_types_mojom_traits.h
new file mode 100644
index 00000000000..fa4aef92498
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_types_mojom_traits.h
@@ -0,0 +1,951 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/ipc/common/generate_vulkan_types.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_
+#define GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_
+
+#include "base/containers/span.h"
+#include "base/strings/string_piece.h"
+#include "gpu/ipc/common/vulkan_types.h"
+#include "gpu/ipc/common/vulkan_types.mojom-shared.h"
+
+namespace mojo {
+
+template <>
+struct StructTraits<gpu::mojom::VkExtensionPropertiesDataView,
+ VkExtensionProperties> {
+ static base::StringPiece extensionName(const VkExtensionProperties& input) {
+ return input.extensionName;
+ }
+
+ static uint32_t specVersion(const VkExtensionProperties& input) {
+ return input.specVersion;
+ }
+
+ static bool Read(gpu::mojom::VkExtensionPropertiesDataView data,
+ VkExtensionProperties* out);
+};
+
+template <>
+struct StructTraits<gpu::mojom::VkLayerPropertiesDataView, VkLayerProperties> {
+ static base::StringPiece layerName(const VkLayerProperties& input) {
+ return input.layerName;
+ }
+
+ static uint32_t specVersion(const VkLayerProperties& input) {
+ return input.specVersion;
+ }
+
+ static uint32_t implementationVersion(const VkLayerProperties& input) {
+ return input.implementationVersion;
+ }
+
+ static base::StringPiece description(const VkLayerProperties& input) {
+ return input.description;
+ }
+
+ static bool Read(gpu::mojom::VkLayerPropertiesDataView data,
+ VkLayerProperties* out);
+};
+
+template <>
+struct StructTraits<gpu::mojom::VkPhysicalDevicePropertiesDataView,
+ VkPhysicalDeviceProperties> {
+ static uint32_t apiVersion(const VkPhysicalDeviceProperties& input) {
+ return input.apiVersion;
+ }
+
+ static uint32_t driverVersion(const VkPhysicalDeviceProperties& input) {
+ return input.driverVersion;
+ }
+
+ static uint32_t vendorID(const VkPhysicalDeviceProperties& input) {
+ return input.vendorID;
+ }
+
+ static uint32_t deviceID(const VkPhysicalDeviceProperties& input) {
+ return input.deviceID;
+ }
+
+ static VkPhysicalDeviceType deviceType(
+ const VkPhysicalDeviceProperties& input) {
+ return input.deviceType;
+ }
+
+ static base::StringPiece deviceName(const VkPhysicalDeviceProperties& input) {
+ return input.deviceName;
+ }
+
+ static base::span<const uint8_t> pipelineCacheUUID(
+ const VkPhysicalDeviceProperties& input) {
+ return input.pipelineCacheUUID;
+ }
+
+ static const VkPhysicalDeviceLimits& limits(
+ const VkPhysicalDeviceProperties& input) {
+ return input.limits;
+ }
+
+ static const VkPhysicalDeviceSparseProperties& sparseProperties(
+ const VkPhysicalDeviceProperties& input) {
+ return input.sparseProperties;
+ }
+
+ static bool Read(gpu::mojom::VkPhysicalDevicePropertiesDataView data,
+ VkPhysicalDeviceProperties* out);
+};
+
+template <>
+struct EnumTraits<gpu::mojom::VkPhysicalDeviceType, VkPhysicalDeviceType> {
+ static gpu::mojom::VkPhysicalDeviceType ToMojom(VkPhysicalDeviceType input) {
+ switch (input) {
+ case VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_OTHER:
+ return gpu::mojom::VkPhysicalDeviceType::OTHER;
+ case VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
+ return gpu::mojom::VkPhysicalDeviceType::INTEGRATED_GPU;
+ case VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
+ return gpu::mojom::VkPhysicalDeviceType::DISCRETE_GPU;
+ case VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
+ return gpu::mojom::VkPhysicalDeviceType::VIRTUAL_GPU;
+ case VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_CPU:
+ return gpu::mojom::VkPhysicalDeviceType::CPU;
+ default:
+ NOTREACHED();
+ return gpu::mojom::VkPhysicalDeviceType::INVALID_VALUE;
+ }
+ }
+
+ static bool FromMojom(gpu::mojom::VkPhysicalDeviceType input,
+ VkPhysicalDeviceType* out) {
+ switch (input) {
+ case gpu::mojom::VkPhysicalDeviceType::OTHER:
+ *out = VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_OTHER;
+ return true;
+ case gpu::mojom::VkPhysicalDeviceType::INTEGRATED_GPU:
+ *out = VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+ return true;
+ case gpu::mojom::VkPhysicalDeviceType::DISCRETE_GPU:
+ *out = VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU;
+ return true;
+ case gpu::mojom::VkPhysicalDeviceType::VIRTUAL_GPU:
+ *out = VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
+ return true;
+ case gpu::mojom::VkPhysicalDeviceType::CPU:
+ *out = VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_CPU;
+ return true;
+ case gpu::mojom::VkPhysicalDeviceType::INVALID_VALUE:
+ NOTREACHED();
+ return false;
+ }
+ NOTREACHED();
+ return false;
+ }
+};
+template <>
+struct StructTraits<gpu::mojom::VkPhysicalDeviceLimitsDataView,
+ VkPhysicalDeviceLimits> {
+ static uint32_t maxImageDimension1D(const VkPhysicalDeviceLimits& input) {
+ return input.maxImageDimension1D;
+ }
+
+ static uint32_t maxImageDimension2D(const VkPhysicalDeviceLimits& input) {
+ return input.maxImageDimension2D;
+ }
+
+ static uint32_t maxImageDimension3D(const VkPhysicalDeviceLimits& input) {
+ return input.maxImageDimension3D;
+ }
+
+ static uint32_t maxImageDimensionCube(const VkPhysicalDeviceLimits& input) {
+ return input.maxImageDimensionCube;
+ }
+
+ static uint32_t maxImageArrayLayers(const VkPhysicalDeviceLimits& input) {
+ return input.maxImageArrayLayers;
+ }
+
+ static uint32_t maxTexelBufferElements(const VkPhysicalDeviceLimits& input) {
+ return input.maxTexelBufferElements;
+ }
+
+ static uint32_t maxUniformBufferRange(const VkPhysicalDeviceLimits& input) {
+ return input.maxUniformBufferRange;
+ }
+
+ static uint32_t maxStorageBufferRange(const VkPhysicalDeviceLimits& input) {
+ return input.maxStorageBufferRange;
+ }
+
+ static uint32_t maxPushConstantsSize(const VkPhysicalDeviceLimits& input) {
+ return input.maxPushConstantsSize;
+ }
+
+ static uint32_t maxMemoryAllocationCount(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxMemoryAllocationCount;
+ }
+
+ static uint32_t maxSamplerAllocationCount(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxSamplerAllocationCount;
+ }
+
+ static bool bufferImageGranularity(const VkPhysicalDeviceLimits& input) {
+ return input.bufferImageGranularity;
+ }
+
+ static bool sparseAddressSpaceSize(const VkPhysicalDeviceLimits& input) {
+ return input.sparseAddressSpaceSize;
+ }
+
+ static uint32_t maxBoundDescriptorSets(const VkPhysicalDeviceLimits& input) {
+ return input.maxBoundDescriptorSets;
+ }
+
+ static uint32_t maxPerStageDescriptorSamplers(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxPerStageDescriptorSamplers;
+ }
+
+ static uint32_t maxPerStageDescriptorUniformBuffers(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxPerStageDescriptorUniformBuffers;
+ }
+
+ static uint32_t maxPerStageDescriptorStorageBuffers(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxPerStageDescriptorStorageBuffers;
+ }
+
+ static uint32_t maxPerStageDescriptorSampledImages(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxPerStageDescriptorSampledImages;
+ }
+
+ static uint32_t maxPerStageDescriptorStorageImages(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxPerStageDescriptorStorageImages;
+ }
+
+ static uint32_t maxPerStageDescriptorInputAttachments(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxPerStageDescriptorInputAttachments;
+ }
+
+ static uint32_t maxPerStageResources(const VkPhysicalDeviceLimits& input) {
+ return input.maxPerStageResources;
+ }
+
+ static uint32_t maxDescriptorSetSamplers(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxDescriptorSetSamplers;
+ }
+
+ static uint32_t maxDescriptorSetUniformBuffers(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxDescriptorSetUniformBuffers;
+ }
+
+ static uint32_t maxDescriptorSetUniformBuffersDynamic(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxDescriptorSetUniformBuffersDynamic;
+ }
+
+ static uint32_t maxDescriptorSetStorageBuffers(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxDescriptorSetStorageBuffers;
+ }
+
+ static uint32_t maxDescriptorSetStorageBuffersDynamic(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxDescriptorSetStorageBuffersDynamic;
+ }
+
+ static uint32_t maxDescriptorSetSampledImages(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxDescriptorSetSampledImages;
+ }
+
+ static uint32_t maxDescriptorSetStorageImages(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxDescriptorSetStorageImages;
+ }
+
+ static uint32_t maxDescriptorSetInputAttachments(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxDescriptorSetInputAttachments;
+ }
+
+ static uint32_t maxVertexInputAttributes(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxVertexInputAttributes;
+ }
+
+ static uint32_t maxVertexInputBindings(const VkPhysicalDeviceLimits& input) {
+ return input.maxVertexInputBindings;
+ }
+
+ static uint32_t maxVertexInputAttributeOffset(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxVertexInputAttributeOffset;
+ }
+
+ static uint32_t maxVertexInputBindingStride(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxVertexInputBindingStride;
+ }
+
+ static uint32_t maxVertexOutputComponents(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxVertexOutputComponents;
+ }
+
+ static uint32_t maxTessellationGenerationLevel(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxTessellationGenerationLevel;
+ }
+
+ static uint32_t maxTessellationPatchSize(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxTessellationPatchSize;
+ }
+
+ static uint32_t maxTessellationControlPerVertexInputComponents(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxTessellationControlPerVertexInputComponents;
+ }
+
+ static uint32_t maxTessellationControlPerVertexOutputComponents(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxTessellationControlPerVertexOutputComponents;
+ }
+
+ static uint32_t maxTessellationControlPerPatchOutputComponents(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxTessellationControlPerPatchOutputComponents;
+ }
+
+ static uint32_t maxTessellationControlTotalOutputComponents(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxTessellationControlTotalOutputComponents;
+ }
+
+ static uint32_t maxTessellationEvaluationInputComponents(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxTessellationEvaluationInputComponents;
+ }
+
+ static uint32_t maxTessellationEvaluationOutputComponents(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxTessellationEvaluationOutputComponents;
+ }
+
+ static uint32_t maxGeometryShaderInvocations(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxGeometryShaderInvocations;
+ }
+
+ static uint32_t maxGeometryInputComponents(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxGeometryInputComponents;
+ }
+
+ static uint32_t maxGeometryOutputComponents(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxGeometryOutputComponents;
+ }
+
+ static uint32_t maxGeometryOutputVertices(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxGeometryOutputVertices;
+ }
+
+ static uint32_t maxGeometryTotalOutputComponents(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxGeometryTotalOutputComponents;
+ }
+
+ static uint32_t maxFragmentInputComponents(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxFragmentInputComponents;
+ }
+
+ static uint32_t maxFragmentOutputAttachments(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxFragmentOutputAttachments;
+ }
+
+ static uint32_t maxFragmentDualSrcAttachments(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxFragmentDualSrcAttachments;
+ }
+
+ static uint32_t maxFragmentCombinedOutputResources(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxFragmentCombinedOutputResources;
+ }
+
+ static uint32_t maxComputeSharedMemorySize(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxComputeSharedMemorySize;
+ }
+
+ static base::span<const uint32_t> maxComputeWorkGroupCount(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxComputeWorkGroupCount;
+ }
+
+ static uint32_t maxComputeWorkGroupInvocations(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxComputeWorkGroupInvocations;
+ }
+
+ static base::span<const uint32_t> maxComputeWorkGroupSize(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxComputeWorkGroupSize;
+ }
+
+ static uint32_t subPixelPrecisionBits(const VkPhysicalDeviceLimits& input) {
+ return input.subPixelPrecisionBits;
+ }
+
+ static uint32_t subTexelPrecisionBits(const VkPhysicalDeviceLimits& input) {
+ return input.subTexelPrecisionBits;
+ }
+
+ static uint32_t mipmapPrecisionBits(const VkPhysicalDeviceLimits& input) {
+ return input.mipmapPrecisionBits;
+ }
+
+ static uint32_t maxDrawIndexedIndexValue(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxDrawIndexedIndexValue;
+ }
+
+ static uint32_t maxDrawIndirectCount(const VkPhysicalDeviceLimits& input) {
+ return input.maxDrawIndirectCount;
+ }
+
+ static float maxSamplerLodBias(const VkPhysicalDeviceLimits& input) {
+ return input.maxSamplerLodBias;
+ }
+
+ static float maxSamplerAnisotropy(const VkPhysicalDeviceLimits& input) {
+ return input.maxSamplerAnisotropy;
+ }
+
+ static uint32_t maxViewports(const VkPhysicalDeviceLimits& input) {
+ return input.maxViewports;
+ }
+
+ static base::span<const uint32_t> maxViewportDimensions(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxViewportDimensions;
+ }
+
+ static base::span<const float> viewportBoundsRange(
+ const VkPhysicalDeviceLimits& input) {
+ return input.viewportBoundsRange;
+ }
+
+ static uint32_t viewportSubPixelBits(const VkPhysicalDeviceLimits& input) {
+ return input.viewportSubPixelBits;
+ }
+
+ static size_t minMemoryMapAlignment(const VkPhysicalDeviceLimits& input) {
+ return input.minMemoryMapAlignment;
+ }
+
+ static bool minTexelBufferOffsetAlignment(
+ const VkPhysicalDeviceLimits& input) {
+ return input.minTexelBufferOffsetAlignment;
+ }
+
+ static bool minUniformBufferOffsetAlignment(
+ const VkPhysicalDeviceLimits& input) {
+ return input.minUniformBufferOffsetAlignment;
+ }
+
+ static bool minStorageBufferOffsetAlignment(
+ const VkPhysicalDeviceLimits& input) {
+ return input.minStorageBufferOffsetAlignment;
+ }
+
+ static int32_t minTexelOffset(const VkPhysicalDeviceLimits& input) {
+ return input.minTexelOffset;
+ }
+
+ static uint32_t maxTexelOffset(const VkPhysicalDeviceLimits& input) {
+ return input.maxTexelOffset;
+ }
+
+ static int32_t minTexelGatherOffset(const VkPhysicalDeviceLimits& input) {
+ return input.minTexelGatherOffset;
+ }
+
+ static uint32_t maxTexelGatherOffset(const VkPhysicalDeviceLimits& input) {
+ return input.maxTexelGatherOffset;
+ }
+
+ static float minInterpolationOffset(const VkPhysicalDeviceLimits& input) {
+ return input.minInterpolationOffset;
+ }
+
+ static float maxInterpolationOffset(const VkPhysicalDeviceLimits& input) {
+ return input.maxInterpolationOffset;
+ }
+
+ static uint32_t subPixelInterpolationOffsetBits(
+ const VkPhysicalDeviceLimits& input) {
+ return input.subPixelInterpolationOffsetBits;
+ }
+
+ static uint32_t maxFramebufferWidth(const VkPhysicalDeviceLimits& input) {
+ return input.maxFramebufferWidth;
+ }
+
+ static uint32_t maxFramebufferHeight(const VkPhysicalDeviceLimits& input) {
+ return input.maxFramebufferHeight;
+ }
+
+ static uint32_t maxFramebufferLayers(const VkPhysicalDeviceLimits& input) {
+ return input.maxFramebufferLayers;
+ }
+
+ static VkSampleCountFlags framebufferColorSampleCounts(
+ const VkPhysicalDeviceLimits& input) {
+ return input.framebufferColorSampleCounts;
+ }
+
+ static VkSampleCountFlags framebufferDepthSampleCounts(
+ const VkPhysicalDeviceLimits& input) {
+ return input.framebufferDepthSampleCounts;
+ }
+
+ static VkSampleCountFlags framebufferStencilSampleCounts(
+ const VkPhysicalDeviceLimits& input) {
+ return input.framebufferStencilSampleCounts;
+ }
+
+ static VkSampleCountFlags framebufferNoAttachmentsSampleCounts(
+ const VkPhysicalDeviceLimits& input) {
+ return input.framebufferNoAttachmentsSampleCounts;
+ }
+
+ static uint32_t maxColorAttachments(const VkPhysicalDeviceLimits& input) {
+ return input.maxColorAttachments;
+ }
+
+ static VkSampleCountFlags sampledImageColorSampleCounts(
+ const VkPhysicalDeviceLimits& input) {
+ return input.sampledImageColorSampleCounts;
+ }
+
+ static VkSampleCountFlags sampledImageIntegerSampleCounts(
+ const VkPhysicalDeviceLimits& input) {
+ return input.sampledImageIntegerSampleCounts;
+ }
+
+ static VkSampleCountFlags sampledImageDepthSampleCounts(
+ const VkPhysicalDeviceLimits& input) {
+ return input.sampledImageDepthSampleCounts;
+ }
+
+ static VkSampleCountFlags sampledImageStencilSampleCounts(
+ const VkPhysicalDeviceLimits& input) {
+ return input.sampledImageStencilSampleCounts;
+ }
+
+ static VkSampleCountFlags storageImageSampleCounts(
+ const VkPhysicalDeviceLimits& input) {
+ return input.storageImageSampleCounts;
+ }
+
+ static uint32_t maxSampleMaskWords(const VkPhysicalDeviceLimits& input) {
+ return input.maxSampleMaskWords;
+ }
+
+ static bool timestampComputeAndGraphics(const VkPhysicalDeviceLimits& input) {
+ return input.timestampComputeAndGraphics;
+ }
+
+ static float timestampPeriod(const VkPhysicalDeviceLimits& input) {
+ return input.timestampPeriod;
+ }
+
+ static uint32_t maxClipDistances(const VkPhysicalDeviceLimits& input) {
+ return input.maxClipDistances;
+ }
+
+ static uint32_t maxCullDistances(const VkPhysicalDeviceLimits& input) {
+ return input.maxCullDistances;
+ }
+
+ static uint32_t maxCombinedClipAndCullDistances(
+ const VkPhysicalDeviceLimits& input) {
+ return input.maxCombinedClipAndCullDistances;
+ }
+
+ static uint32_t discreteQueuePriorities(const VkPhysicalDeviceLimits& input) {
+ return input.discreteQueuePriorities;
+ }
+
+ static base::span<const float> pointSizeRange(
+ const VkPhysicalDeviceLimits& input) {
+ return input.pointSizeRange;
+ }
+
+ static base::span<const float> lineWidthRange(
+ const VkPhysicalDeviceLimits& input) {
+ return input.lineWidthRange;
+ }
+
+ static float pointSizeGranularity(const VkPhysicalDeviceLimits& input) {
+ return input.pointSizeGranularity;
+ }
+
+ static float lineWidthGranularity(const VkPhysicalDeviceLimits& input) {
+ return input.lineWidthGranularity;
+ }
+
+ static bool strictLines(const VkPhysicalDeviceLimits& input) {
+ return input.strictLines;
+ }
+
+ static bool standardSampleLocations(const VkPhysicalDeviceLimits& input) {
+ return input.standardSampleLocations;
+ }
+
+ static bool optimalBufferCopyOffsetAlignment(
+ const VkPhysicalDeviceLimits& input) {
+ return input.optimalBufferCopyOffsetAlignment;
+ }
+
+ static bool optimalBufferCopyRowPitchAlignment(
+ const VkPhysicalDeviceLimits& input) {
+ return input.optimalBufferCopyRowPitchAlignment;
+ }
+
+ static bool nonCoherentAtomSize(const VkPhysicalDeviceLimits& input) {
+ return input.nonCoherentAtomSize;
+ }
+
+ static bool Read(gpu::mojom::VkPhysicalDeviceLimitsDataView data,
+ VkPhysicalDeviceLimits* out);
+};
+
+template <>
+struct StructTraits<gpu::mojom::VkPhysicalDeviceSparsePropertiesDataView,
+ VkPhysicalDeviceSparseProperties> {
+ static bool residencyStandard2DBlockShape(
+ const VkPhysicalDeviceSparseProperties& input) {
+ return input.residencyStandard2DBlockShape;
+ }
+
+ static bool residencyStandard2DMultisampleBlockShape(
+ const VkPhysicalDeviceSparseProperties& input) {
+ return input.residencyStandard2DMultisampleBlockShape;
+ }
+
+ static bool residencyStandard3DBlockShape(
+ const VkPhysicalDeviceSparseProperties& input) {
+ return input.residencyStandard3DBlockShape;
+ }
+
+ static bool residencyAlignedMipSize(
+ const VkPhysicalDeviceSparseProperties& input) {
+ return input.residencyAlignedMipSize;
+ }
+
+ static bool residencyNonResidentStrict(
+ const VkPhysicalDeviceSparseProperties& input) {
+ return input.residencyNonResidentStrict;
+ }
+
+ static bool Read(gpu::mojom::VkPhysicalDeviceSparsePropertiesDataView data,
+ VkPhysicalDeviceSparseProperties* out);
+};
+
+template <>
+struct StructTraits<gpu::mojom::VkPhysicalDeviceFeaturesDataView,
+ VkPhysicalDeviceFeatures> {
+ static bool robustBufferAccess(const VkPhysicalDeviceFeatures& input) {
+ return input.robustBufferAccess;
+ }
+
+ static bool fullDrawIndexUint32(const VkPhysicalDeviceFeatures& input) {
+ return input.fullDrawIndexUint32;
+ }
+
+ static bool imageCubeArray(const VkPhysicalDeviceFeatures& input) {
+ return input.imageCubeArray;
+ }
+
+ static bool independentBlend(const VkPhysicalDeviceFeatures& input) {
+ return input.independentBlend;
+ }
+
+ static bool geometryShader(const VkPhysicalDeviceFeatures& input) {
+ return input.geometryShader;
+ }
+
+ static bool tessellationShader(const VkPhysicalDeviceFeatures& input) {
+ return input.tessellationShader;
+ }
+
+ static bool sampleRateShading(const VkPhysicalDeviceFeatures& input) {
+ return input.sampleRateShading;
+ }
+
+ static bool dualSrcBlend(const VkPhysicalDeviceFeatures& input) {
+ return input.dualSrcBlend;
+ }
+
+ static bool logicOp(const VkPhysicalDeviceFeatures& input) {
+ return input.logicOp;
+ }
+
+ static bool multiDrawIndirect(const VkPhysicalDeviceFeatures& input) {
+ return input.multiDrawIndirect;
+ }
+
+ static bool drawIndirectFirstInstance(const VkPhysicalDeviceFeatures& input) {
+ return input.drawIndirectFirstInstance;
+ }
+
+ static bool depthClamp(const VkPhysicalDeviceFeatures& input) {
+ return input.depthClamp;
+ }
+
+ static bool depthBiasClamp(const VkPhysicalDeviceFeatures& input) {
+ return input.depthBiasClamp;
+ }
+
+ static bool fillModeNonSolid(const VkPhysicalDeviceFeatures& input) {
+ return input.fillModeNonSolid;
+ }
+
+ static bool depthBounds(const VkPhysicalDeviceFeatures& input) {
+ return input.depthBounds;
+ }
+
+ static bool wideLines(const VkPhysicalDeviceFeatures& input) {
+ return input.wideLines;
+ }
+
+ static bool largePoints(const VkPhysicalDeviceFeatures& input) {
+ return input.largePoints;
+ }
+
+ static bool alphaToOne(const VkPhysicalDeviceFeatures& input) {
+ return input.alphaToOne;
+ }
+
+ static bool multiViewport(const VkPhysicalDeviceFeatures& input) {
+ return input.multiViewport;
+ }
+
+ static bool samplerAnisotropy(const VkPhysicalDeviceFeatures& input) {
+ return input.samplerAnisotropy;
+ }
+
+ static bool textureCompressionETC2(const VkPhysicalDeviceFeatures& input) {
+ return input.textureCompressionETC2;
+ }
+
+ static bool textureCompressionASTC_LDR(
+ const VkPhysicalDeviceFeatures& input) {
+ return input.textureCompressionASTC_LDR;
+ }
+
+ static bool textureCompressionBC(const VkPhysicalDeviceFeatures& input) {
+ return input.textureCompressionBC;
+ }
+
+ static bool occlusionQueryPrecise(const VkPhysicalDeviceFeatures& input) {
+ return input.occlusionQueryPrecise;
+ }
+
+ static bool pipelineStatisticsQuery(const VkPhysicalDeviceFeatures& input) {
+ return input.pipelineStatisticsQuery;
+ }
+
+ static bool vertexPipelineStoresAndAtomics(
+ const VkPhysicalDeviceFeatures& input) {
+ return input.vertexPipelineStoresAndAtomics;
+ }
+
+ static bool fragmentStoresAndAtomics(const VkPhysicalDeviceFeatures& input) {
+ return input.fragmentStoresAndAtomics;
+ }
+
+ static bool shaderTessellationAndGeometryPointSize(
+ const VkPhysicalDeviceFeatures& input) {
+ return input.shaderTessellationAndGeometryPointSize;
+ }
+
+ static bool shaderImageGatherExtended(const VkPhysicalDeviceFeatures& input) {
+ return input.shaderImageGatherExtended;
+ }
+
+ static bool shaderStorageImageExtendedFormats(
+ const VkPhysicalDeviceFeatures& input) {
+ return input.shaderStorageImageExtendedFormats;
+ }
+
+ static bool shaderStorageImageMultisample(
+ const VkPhysicalDeviceFeatures& input) {
+ return input.shaderStorageImageMultisample;
+ }
+
+ static bool shaderStorageImageReadWithoutFormat(
+ const VkPhysicalDeviceFeatures& input) {
+ return input.shaderStorageImageReadWithoutFormat;
+ }
+
+ static bool shaderStorageImageWriteWithoutFormat(
+ const VkPhysicalDeviceFeatures& input) {
+ return input.shaderStorageImageWriteWithoutFormat;
+ }
+
+ static bool shaderUniformBufferArrayDynamicIndexing(
+ const VkPhysicalDeviceFeatures& input) {
+ return input.shaderUniformBufferArrayDynamicIndexing;
+ }
+
+ static bool shaderSampledImageArrayDynamicIndexing(
+ const VkPhysicalDeviceFeatures& input) {
+ return input.shaderSampledImageArrayDynamicIndexing;
+ }
+
+ static bool shaderStorageBufferArrayDynamicIndexing(
+ const VkPhysicalDeviceFeatures& input) {
+ return input.shaderStorageBufferArrayDynamicIndexing;
+ }
+
+ static bool shaderStorageImageArrayDynamicIndexing(
+ const VkPhysicalDeviceFeatures& input) {
+ return input.shaderStorageImageArrayDynamicIndexing;
+ }
+
+ static bool shaderClipDistance(const VkPhysicalDeviceFeatures& input) {
+ return input.shaderClipDistance;
+ }
+
+ static bool shaderCullDistance(const VkPhysicalDeviceFeatures& input) {
+ return input.shaderCullDistance;
+ }
+
+ static bool shaderFloat64(const VkPhysicalDeviceFeatures& input) {
+ return input.shaderFloat64;
+ }
+
+ static bool shaderInt64(const VkPhysicalDeviceFeatures& input) {
+ return input.shaderInt64;
+ }
+
+ static bool shaderInt16(const VkPhysicalDeviceFeatures& input) {
+ return input.shaderInt16;
+ }
+
+ static bool shaderResourceResidency(const VkPhysicalDeviceFeatures& input) {
+ return input.shaderResourceResidency;
+ }
+
+ static bool shaderResourceMinLod(const VkPhysicalDeviceFeatures& input) {
+ return input.shaderResourceMinLod;
+ }
+
+ static bool sparseBinding(const VkPhysicalDeviceFeatures& input) {
+ return input.sparseBinding;
+ }
+
+ static bool sparseResidencyBuffer(const VkPhysicalDeviceFeatures& input) {
+ return input.sparseResidencyBuffer;
+ }
+
+ static bool sparseResidencyImage2D(const VkPhysicalDeviceFeatures& input) {
+ return input.sparseResidencyImage2D;
+ }
+
+ static bool sparseResidencyImage3D(const VkPhysicalDeviceFeatures& input) {
+ return input.sparseResidencyImage3D;
+ }
+
+ static bool sparseResidency2Samples(const VkPhysicalDeviceFeatures& input) {
+ return input.sparseResidency2Samples;
+ }
+
+ static bool sparseResidency4Samples(const VkPhysicalDeviceFeatures& input) {
+ return input.sparseResidency4Samples;
+ }
+
+ static bool sparseResidency8Samples(const VkPhysicalDeviceFeatures& input) {
+ return input.sparseResidency8Samples;
+ }
+
+ static bool sparseResidency16Samples(const VkPhysicalDeviceFeatures& input) {
+ return input.sparseResidency16Samples;
+ }
+
+ static bool sparseResidencyAliased(const VkPhysicalDeviceFeatures& input) {
+ return input.sparseResidencyAliased;
+ }
+
+ static bool variableMultisampleRate(const VkPhysicalDeviceFeatures& input) {
+ return input.variableMultisampleRate;
+ }
+
+ static bool inheritedQueries(const VkPhysicalDeviceFeatures& input) {
+ return input.inheritedQueries;
+ }
+
+ static bool Read(gpu::mojom::VkPhysicalDeviceFeaturesDataView data,
+ VkPhysicalDeviceFeatures* out);
+};
+
+template <>
+struct StructTraits<gpu::mojom::VkQueueFamilyPropertiesDataView,
+ VkQueueFamilyProperties> {
+ static VkQueueFlags queueFlags(const VkQueueFamilyProperties& input) {
+ return input.queueFlags;
+ }
+
+ static uint32_t queueCount(const VkQueueFamilyProperties& input) {
+ return input.queueCount;
+ }
+
+ static uint32_t timestampValidBits(const VkQueueFamilyProperties& input) {
+ return input.timestampValidBits;
+ }
+
+ static const VkExtent3D& minImageTransferGranularity(
+ const VkQueueFamilyProperties& input) {
+ return input.minImageTransferGranularity;
+ }
+
+ static bool Read(gpu::mojom::VkQueueFamilyPropertiesDataView data,
+ VkQueueFamilyProperties* out);
+};
+
+template <>
+struct StructTraits<gpu::mojom::VkExtent3DDataView, VkExtent3D> {
+ static uint32_t width(const VkExtent3D& input) { return input.width; }
+
+ static uint32_t height(const VkExtent3D& input) { return input.height; }
+
+ static uint32_t depth(const VkExtent3D& input) { return input.depth; }
+
+ static bool Read(gpu::mojom::VkExtent3DDataView data, VkExtent3D* out);
+};
+
+} // namespace mojo
+
+#endif  // GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_
\ No newline at end of file
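
Editorial illustration, not part of the patch: these traits are what Mojo consults whenever one of the gpu.mojom Vulkan structs crosses an interface boundary. A minimal round-trip sketch, assuming mojo's SerializeAndDeserialize test helper from mojo/public/cpp/test_support/test_utils.h:

    #include "gpu/ipc/common/vulkan_types_mojom_traits.h"
    #include "mojo/public/cpp/test_support/test_utils.h"

    bool RoundTripExtent() {
      VkExtent3D in = {};
      in.width = 256;
      in.height = 128;
      in.depth = 1;
      VkExtent3D out = {};
      // Serializes |in| through StructTraits<VkExtent3DDataView, VkExtent3D>
      // and reads it back via the same traits' Read().
      return mojo::test::SerializeAndDeserialize<gpu::mojom::VkExtent3D>(
                 &in, &out) &&
             out.width == 256 && out.height == 128 && out.depth == 1;
    }
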
diff --git a/chromium/gpu/ipc/host/shader_disk_cache.cc b/chromium/gpu/ipc/host/shader_disk_cache.cc
index 73b90030a5c..52dbcde835d 100644
--- a/chromium/gpu/ipc/host/shader_disk_cache.cc
+++ b/chromium/gpu/ipc/host/shader_disk_cache.cc
@@ -556,8 +556,9 @@ void ShaderDiskCache::Init() {
int rv = disk_cache::CreateCacheBackend(
net::SHADER_CACHE, net::CACHE_BACKEND_DEFAULT,
- cache_path_.Append(kGpuCachePath), CacheSizeBytes(), true, nullptr,
- &backend_, base::BindOnce(&ShaderDiskCache::CacheCreatedCallback, this));
+ cache_path_.Append(kGpuCachePath), CacheSizeBytes(),
+ disk_cache::ResetHandling::kResetOnError, nullptr, &backend_,
+ base::BindOnce(&ShaderDiskCache::CacheCreatedCallback, this));
if (rv == net::OK)
cache_available_ = true;
diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc
index 4b98aab3ca3..08162a8e826 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.cc
+++ b/chromium/gpu/ipc/in_process_command_buffer.cc
@@ -645,6 +645,11 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
use_virtualized_gl_context_
? gl_share_group_->GetSharedContext(surface_.get())
: nullptr;
+ if (real_context &&
+ (!real_context->MakeCurrent(surface_.get()) ||
+ real_context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) {
+ real_context = nullptr;
+ }
if (!real_context) {
real_context = gl::init::CreateGLContext(
gl_share_group_.get(), surface_.get(),
@@ -680,7 +685,8 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
if (!context_state_) {
context_state_ = base::MakeRefCounted<SharedContextState>(
gl_share_group_, surface_, real_context,
- use_virtualized_gl_context_, base::DoNothing());
+ use_virtualized_gl_context_, base::DoNothing(),
+ task_executor_->gpu_preferences().gr_context_type);
context_state_->InitializeGL(task_executor_->gpu_preferences(),
context_group_->feature_info());
context_state_->InitializeGrContext(workarounds, params.gr_shader_cache,
@@ -1771,6 +1777,16 @@ viz::GpuVSyncCallback InProcessCommandBuffer::GetGpuVSyncCallback() {
std::move(handle_gpu_vsync_callback));
}
+base::TimeDelta InProcessCommandBuffer::GetGpuBlockedTimeSinceLastSwap() {
+ // Some examples and tests create InProcessCommandBuffer without
+ // GpuChannelManagerDelegate.
+ if (!gpu_channel_manager_delegate_)
+ return base::TimeDelta::Min();
+
+ return gpu_channel_manager_delegate_->GetGpuScheduler()
+ ->TakeTotalBlockingTime();
+}
+
void InProcessCommandBuffer::HandleGpuVSyncOnOriginThread(
base::TimeTicks vsync_time,
base::TimeDelta vsync_interval) {
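
Editorial note, not part of the patch: GetGpuBlockedTimeSinceLastSwap() returns base::TimeDelta::Min() as a "not available" sentinel when there is no GpuChannelManagerDelegate, so callers are expected to check for it before consuming the value. A minimal sketch under that assumption (histogram name is illustrative only; needs base/metrics/histogram_macros.h):

    void RecordGpuBlockedTime(gpu::InProcessCommandBuffer* command_buffer) {
      const base::TimeDelta blocked =
          command_buffer->GetGpuBlockedTimeSinceLastSwap();
      // TimeDelta::Min() means "no delegate, nothing to report".
      if (blocked.is_min())
        return;
      UMA_HISTOGRAM_TIMES("GPU.Example.BlockedTimeSinceLastSwap", blocked);
    }
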
diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h
index 6c842b835da..939e083cf05 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.h
+++ b/chromium/gpu/ipc/in_process_command_buffer.h
@@ -178,6 +178,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
const GpuPreferences& GetGpuPreferences() const override;
void BufferPresented(const gfx::PresentationFeedback& feedback) override;
viz::GpuVSyncCallback GetGpuVSyncCallback() override;
+ base::TimeDelta GetGpuBlockedTimeSinceLastSwap() override;
// Upstream this function to GpuControl if needs arise. Can be called on any
// thread.
diff --git a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
index 3b9c6aeaf55..21ad0705ac8 100644
--- a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
+++ b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
@@ -66,8 +66,8 @@ CommandBufferTaskExecutor* InProcessGpuThreadHolder::GetTaskExecutor() {
void InProcessGpuThreadHolder::InitializeOnGpuThread(
base::WaitableEvent* completion) {
sync_point_manager_ = std::make_unique<SyncPointManager>();
- scheduler_ =
- std::make_unique<Scheduler>(task_runner(), sync_point_manager_.get());
+ scheduler_ = std::make_unique<Scheduler>(
+ task_runner(), sync_point_manager_.get(), gpu_preferences_);
mailbox_manager_ = gles2::CreateMailboxManager(gpu_preferences_);
shared_image_manager_ = std::make_unique<SharedImageManager>();
task_executor_ = std::make_unique<GpuInProcessThreadService>(
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index 082ac932cfa..c3ca00c900f 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -10,6 +10,10 @@ if (is_mac) {
import("//build/config/mac/mac_sdk.gni")
}
+declare_args() {
+ subpixel_font_rendering_disabled = false
+}
+
jumbo_component("service") {
output_name = "gpu_ipc_service"
sources = [
@@ -52,9 +56,13 @@ jumbo_component("service") {
if (is_chromecast) {
defines += [ "IS_CHROMECAST" ]
}
+ if (subpixel_font_rendering_disabled) {
+ defines += [ "SUBPIXEL_FONT_RENDERING_DISABLED" ]
+ }
public_deps = [
"//base",
"//components/viz/common",
+ "//gpu/config",
"//ipc",
"//ui/base",
"//ui/display",
diff --git a/chromium/gpu/ipc/service/command_buffer_stub.cc b/chromium/gpu/ipc/service/command_buffer_stub.cc
index e430c664365..dea796c946d 100644
--- a/chromium/gpu/ipc/service/command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/command_buffer_stub.cc
@@ -12,7 +12,7 @@
#include "base/json/json_writer.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
-#include "base/memory/shared_memory.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/no_destructor.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
index 98a27387fe5..28a7a9dd845 100644
--- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
@@ -13,7 +13,7 @@
#include "base/json/json_writer.h"
#include "base/macros.h"
#include "base/memory/memory_pressure_listener.h"
-#include "base/memory/shared_memory.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
@@ -31,6 +31,7 @@
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
@@ -261,6 +262,10 @@ gpu::ContextResult GLES2CommandBufferStub::Initialize(
scoped_refptr<gl::GLContext> context;
if (use_virtualized_gl_context_ && share_group_) {
context = share_group_->GetSharedContext(surface_.get());
+ if (context && (!context->MakeCurrent(surface_.get()) ||
+ context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) {
+ context = nullptr;
+ }
if (!context) {
context = gl::init::CreateGLContext(
share_group_.get(), surface_.get(),
@@ -423,12 +428,17 @@ viz::GpuVSyncCallback GLES2CommandBufferStub::GetGpuVSyncCallback() {
return viz::GpuVSyncCallback();
}
+base::TimeDelta GLES2CommandBufferStub::GetGpuBlockedTimeSinceLastSwap() {
+ return channel_->scheduler()->TakeTotalBlockingTime();
+}
+
MemoryTracker* GLES2CommandBufferStub::GetMemoryTracker() const {
return context_group_->memory_tracker();
}
-void GLES2CommandBufferStub::OnGpuSwitched() {
- Send(new GpuCommandBufferMsg_GpuSwitched(route_id_));
+void GLES2CommandBufferStub::OnGpuSwitched(
+ gl::GpuPreference active_gpu_heuristic) {
+ Send(new GpuCommandBufferMsg_GpuSwitched(route_id_, active_gpu_heuristic));
}
bool GLES2CommandBufferStub::HandleMessage(const IPC::Message& message) {
diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.h b/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
index 9b3f695c233..953919de878 100644
--- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
@@ -40,7 +40,7 @@ class GPU_IPC_SERVICE_EXPORT GLES2CommandBufferStub
MemoryTracker* GetMemoryTracker() const override;
// DecoderClient implementation.
- void OnGpuSwitched() override;
+ void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) override;
// ImageTransportSurfaceDelegate implementation:
#if defined(OS_WIN)
@@ -53,6 +53,7 @@ class GPU_IPC_SERVICE_EXPORT GLES2CommandBufferStub
const GpuPreferences& GetGpuPreferences() const override;
void BufferPresented(const gfx::PresentationFeedback& feedback) override;
viz::GpuVSyncCallback GetGpuVSyncCallback() override;
+ base::TimeDelta GetGpuBlockedTimeSinceLastSwap() override;
private:
bool HandleMessage(const IPC::Message& message) override;
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc
index 1cacaa4c14c..39544aca67e 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc
@@ -110,7 +110,8 @@ GpuChannelManager::GpuChannelManager(
scoped_refptr<gl::GLSurface> default_offscreen_surface,
ImageDecodeAcceleratorWorker* image_decode_accelerator_worker,
viz::VulkanContextProvider* vulkan_context_provider,
- viz::MetalContextProvider* metal_context_provider)
+ viz::MetalContextProvider* metal_context_provider,
+ viz::DawnContextProvider* dawn_context_provider)
: task_runner_(task_runner),
io_task_runner_(io_task_runner),
gpu_preferences_(gpu_preferences),
@@ -133,7 +134,8 @@ GpuChannelManager::GpuChannelManager(
base::BindRepeating(&GpuChannelManager::HandleMemoryPressure,
base::Unretained(this))),
vulkan_context_provider_(vulkan_context_provider),
- metal_context_provider_(metal_context_provider) {
+ metal_context_provider_(metal_context_provider),
+ dawn_context_provider_(dawn_context_provider) {
DCHECK(task_runner->BelongsToCurrentThread());
DCHECK(io_task_runner);
DCHECK(scheduler);
@@ -284,6 +286,9 @@ void GpuChannelManager::GetVideoMemoryUsageStats(
.video_memory += size;
}
+ if (shared_context_state_ && !shared_context_state_->context_lost())
+ total_size += shared_context_state_->GetMemoryUsage();
+
// Assign the total across all processes in the GPU process
video_memory_usage_stats->process_map[base::GetCurrentProcId()].video_memory =
total_size;
@@ -438,6 +443,10 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
scoped_refptr<gl::GLContext> context =
use_virtualized_gl_contexts ? share_group->GetSharedContext(surface.get())
: nullptr;
+ if (context && (!context->MakeCurrent(surface.get()) ||
+ context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) {
+ context = nullptr;
+ }
if (!context) {
gl::GLContextAttribs attribs = gles2::GenerateGLContextAttribs(
ContextCreationAttribs(), use_passthrough_decoder);
@@ -481,7 +490,8 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
use_virtualized_gl_contexts,
base::BindOnce(&GpuChannelManager::OnContextLost, base::Unretained(this),
/*synthetic_loss=*/false),
- vulkan_context_provider_, metal_context_provider_);
+ gpu_preferences_.gr_context_type, vulkan_context_provider_,
+ metal_context_provider_, dawn_context_provider_, peak_memory_monitor());
// OOP-R needs GrContext for raster tiles.
bool need_gr_context =
@@ -492,7 +502,7 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
need_gr_context |= features::IsUsingSkiaRenderer();
if (need_gr_context) {
- if (!vulkan_context_provider_ && !metal_context_provider_) {
+ if (gpu_preferences_.gr_context_type == gpu::GrContextType::kGL) {
auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
gpu_driver_bug_workarounds(), gpu_feature_info());
if (!shared_context_state_->InitializeGL(gpu_preferences_,
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h
index a8abfe5f4b0..217adb652cb 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.h
@@ -83,7 +83,8 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
scoped_refptr<gl::GLSurface> default_offscreen_surface,
ImageDecodeAcceleratorWorker* image_decode_accelerator_worker,
viz::VulkanContextProvider* vulkan_context_provider = nullptr,
- viz::MetalContextProvider* metal_context_provider = nullptr);
+ viz::MetalContextProvider* metal_context_provider = nullptr,
+ viz::DawnContextProvider* dawn_context_provider = nullptr);
~GpuChannelManager() override;
GpuChannelManagerDelegate* delegate() const { return delegate_; }
@@ -288,13 +289,18 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
scoped_refptr<SharedContextState> shared_context_state_;
// With --enable-vulkan, |vulkan_context_provider_| will be set from
- // viz::GpuServiceImpl. The raster decoders will use it for rasterization.
+ // viz::GpuServiceImpl. The raster decoders will use it for rasterization if
+ // --gr-context-type is also set to Vulkan.
viz::VulkanContextProvider* vulkan_context_provider_ = nullptr;
  // If features::SkiaOnMetal is enabled, |metal_context_provider_| will be set from
// viz::GpuServiceImpl. The raster decoders will use it for rasterization.
viz::MetalContextProvider* metal_context_provider_ = nullptr;
+ // With --gr-context-type=dawn, |dawn_context_provider_| will be set from
+ // viz::GpuServiceImpl. The raster decoders will use it for rasterization.
+ viz::DawnContextProvider* dawn_context_provider_ = nullptr;
+
GpuPeakMemoryMonitor peak_memory_monitor_;
// Member variables should appear before the WeakPtrFactory, to ensure
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
index 9209fa995ea..9e6809b7779 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
@@ -60,6 +60,9 @@ class GpuChannelManagerDelegate {
// thread.
virtual bool IsExiting() const = 0;
+ // Returns the GPU scheduler.
+ virtual gpu::Scheduler* GetGpuScheduler() = 0;
+
#if defined(OS_WIN)
// Tells the delegate that |child_window| was created in the GPU process and
// to send an IPC to make SetParent() syscall. This syscall is blocked by the
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.cc b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
index dd1894b2a06..22966507b37 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
@@ -4,7 +4,7 @@
#include "gpu/ipc/service/gpu_channel_test_common.h"
-#include "base/memory/shared_memory.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "gpu/command_buffer/common/activity_flags.h"
@@ -23,7 +23,7 @@ namespace gpu {
class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate {
public:
- TestGpuChannelManagerDelegate() = default;
+ TestGpuChannelManagerDelegate(Scheduler* scheduler) : scheduler_(scheduler) {}
~TestGpuChannelManagerDelegate() override = default;
// GpuChannelManagerDelegate implementation:
@@ -47,8 +47,11 @@ class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate {
SurfaceHandle child_window) override {}
#endif
+ Scheduler* GetGpuScheduler() override { return scheduler_; }
+
private:
bool is_exiting_ = false;
+ Scheduler* const scheduler_;
DISALLOW_COPY_AND_ASSIGN(TestGpuChannelManagerDelegate);
};
@@ -63,8 +66,11 @@ GpuChannelTestCommon::GpuChannelTestCommon(
io_task_runner_(new base::TestSimpleTaskRunner),
sync_point_manager_(new SyncPointManager()),
shared_image_manager_(new SharedImageManager(false /* thread_safe */)),
- scheduler_(new Scheduler(task_runner_, sync_point_manager_.get())),
- channel_manager_delegate_(new TestGpuChannelManagerDelegate()) {
+ scheduler_(new Scheduler(task_runner_,
+ sync_point_manager_.get(),
+ GpuPreferences())),
+ channel_manager_delegate_(
+ new TestGpuChannelManagerDelegate(scheduler_.get())) {
// We need GL bindings to actually initialize command buffers.
if (use_stub_bindings)
gl::GLSurfaceTestSupport::InitializeOneOffWithStubBindings();
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index 0aa6832893e..1aeba67edd4 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -51,13 +51,13 @@
#if BUILDFLAG(ENABLE_VULKAN)
#include "gpu/vulkan/init/vulkan_factory.h"
#include "gpu/vulkan/vulkan_implementation.h"
+#include "gpu/vulkan/vulkan_instance.h"
#endif
namespace gpu {
namespace {
-bool CollectGraphicsInfo(GPUInfo* gpu_info,
- const GpuPreferences& gpu_preferences) {
+bool CollectGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
TRACE_EVENT0("gpu,startup", "Collect Graphics Info");
base::TimeTicks before_collect_context_graphics_info = base::TimeTicks::Now();
@@ -164,6 +164,11 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// Set keys for crash logging based on preliminary gpu info, in case we
// crash during feature collection.
gpu::SetKeysForCrashLogging(gpu_info_);
+#if defined(SUBPIXEL_FONT_RENDERING_DISABLED)
+ gpu_info_.subpixel_font_rendering = false;
+#else
+ gpu_info_.subpixel_font_rendering = true;
+#endif
#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
if (gpu_info_.gpu.vendor_id == 0x10de && // NVIDIA
@@ -178,7 +183,6 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
}
#endif // !OS_ANDROID && !IS_CHROMECAST
gpu_info_.in_process_gpu = false;
-
bool use_swiftshader = false;
// GL bindings may have already been initialized, specifically on MacOSX.
@@ -196,7 +200,8 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
}
bool enable_watchdog = !gpu_preferences_.disable_gpu_watchdog &&
- !command_line->HasSwitch(switches::kHeadless);
+ !command_line->HasSwitch(switches::kHeadless) &&
+ !use_swiftshader;
// Disable the watchdog in debug builds because they tend to only be run by
// developers who will not appreciate the watchdog killing the GPU process.
@@ -216,6 +221,11 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
delayed_watchdog_enable = true;
#endif
+ // PreSandboxStartup() is mainly for resource handling and is not related to
+ // the GPU driver, so it doesn't need the GPU watchdog. Loading libraries here
+ // may take so long that killing and restarting the GPU process would not help.
+ sandbox_helper_->PreSandboxStartup();
+
// Start the GPU watchdog only after anything that is expected to be time
// consuming has completed, otherwise the process is liable to be aborted.
if (enable_watchdog && !delayed_watchdog_enable) {
@@ -243,8 +253,6 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
#endif // OS_WIN
}
- sandbox_helper_->PreSandboxStartup();
-
bool attempted_startsandbox = false;
#if defined(OS_LINUX)
// On Chrome OS ARM Mali, GPU driver userspace creates threads when
@@ -289,12 +297,30 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
gl_initialized = false;
#endif // OS_LINUX
}
- if (!gl_initialized)
- gl_initialized = gl::init::InitializeGLNoExtensionsOneOff();
+
if (!gl_initialized) {
- VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed";
- return false;
+ // Pause the watchdog; LoadLibrary in GL bindings may take a long time.
+ if (watchdog_thread_)
+ watchdog_thread_->PauseWatchdog();
+ gl_initialized = gl::init::InitializeStaticGLBindingsOneOff();
+
+ if (!gl_initialized) {
+ VLOG(1) << "gl::init::InitializeStaticGLBindingsOneOff failed";
+ return false;
+ }
+
+ if (watchdog_thread_)
+ watchdog_thread_->ResumeWatchdog();
+ if (gl::GetGLImplementation() != gl::kGLImplementationDisabled) {
+ gl_initialized =
+ gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ false);
+ if (!gl_initialized) {
+ VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed";
+ return false;
+ }
+ }
}
+
bool gl_disabled = gl::GetGLImplementation() == gl::kGLImplementationDisabled;
// Compute passthrough decoder status before ComputeGpuFeatureInfo below.
@@ -305,7 +331,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// We need to collect GL strings (VENDOR, RENDERER) for blacklisting purposes.
if (!gl_disabled) {
if (!use_swiftshader) {
- if (!CollectGraphicsInfo(&gpu_info_, gpu_preferences_))
+ if (!CollectGraphicsInfo(&gpu_info_))
return false;
gpu::SetKeysForCrashLogging(gpu_info_);
gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(
@@ -320,7 +346,9 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
return false;
#else
gl::init::ShutdownGL(true);
- if (!gl::init::InitializeGLNoExtensionsOneOff()) {
+ watchdog_thread_ = nullptr;
+ watchdog_init.SetGpuWatchdogPtr(nullptr);
+ if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) {
VLOG(1)
<< "gl::init::InitializeGLNoExtensionsOneOff with SwiftShader "
<< "failed";
@@ -331,10 +359,10 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
} else { // use_swiftshader == true
switch (gpu_preferences_.use_vulkan) {
case gpu::VulkanImplementationName::kNative: {
- // Collect GPU info, so we can use backlist to disable vulkan if it is
- // needed.
+ // Collect GPU info, so we can use blacklist to disable vulkan if it
+ // is needed.
gpu::GPUInfo gpu_info;
- if (!CollectGraphicsInfo(&gpu_info, gpu_preferences_))
+ if (!CollectGraphicsInfo(&gpu_info))
return false;
auto gpu_feature_info = gpu::ComputeGpuFeatureInfo(
gpu_info, gpu_preferences_, command_line, nullptr);
@@ -355,38 +383,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
}
}
-#if BUILDFLAG(ENABLE_VULKAN)
- if (gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] ==
- gpu::kGpuFeatureStatusEnabled) {
- DCHECK_NE(gpu_preferences_.use_vulkan,
- gpu::VulkanImplementationName::kNone);
- bool vulkan_use_swiftshader = gpu_preferences_.use_vulkan ==
- gpu::VulkanImplementationName::kSwiftshader;
- const bool enforce_protected_memory =
- gpu_preferences_.enforce_vulkan_protected_memory;
- vulkan_implementation_ = gpu::CreateVulkanImplementation(
- vulkan_use_swiftshader,
- enforce_protected_memory ? true : false /* allow_protected_memory */,
- enforce_protected_memory);
- if (!vulkan_implementation_ ||
- !vulkan_implementation_->InitializeVulkanInstance(
- !gpu_preferences_.disable_vulkan_surface)) {
- DLOG(ERROR) << "Failed to create and initialize Vulkan implementation.";
- vulkan_implementation_ = nullptr;
- CHECK(!gpu_preferences_.disable_vulkan_fallback_to_gl_for_testing);
- }
- }
- if (!vulkan_implementation_) {
- gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone;
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu::kGpuFeatureStatusDisabled;
- }
-
-#else
- gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone;
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu::kGpuFeatureStatusDisabled;
-#endif
+ InitializeVulkan();
// Collect GPU process info
if (!gl_disabled) {
@@ -418,7 +415,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// information on Linux platform. Try to collect graphics information
// based on core profile context after disabling platform extensions.
if (!gl_disabled && !use_swiftshader) {
- if (!CollectGraphicsInfo(&gpu_info_, gpu_preferences_))
+ if (!CollectGraphicsInfo(&gpu_info_))
return false;
gpu::SetKeysForCrashLogging(gpu_info_);
gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
@@ -462,8 +459,9 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
if (use_swiftshader ||
gl::GetGLImplementation() == gl::GetSoftwareGLImplementation()) {
gpu_info_.software_rendering = true;
- if (watchdog_thread_)
- watchdog_thread_->Stop();
+ watchdog_thread_ = nullptr;
+ watchdog_init.SetGpuWatchdogPtr(nullptr);
+ } else if (gl_disabled) {
watchdog_thread_ = nullptr;
watchdog_init.SetGpuWatchdogPtr(nullptr);
} else if (enable_watchdog && delayed_watchdog_enable) {
@@ -519,6 +517,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
InitializeGLThreadSafe(command_line, gpu_preferences_, &gpu_info_,
&gpu_feature_info_);
+ InitializeVulkan();
default_offscreen_surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
@@ -551,6 +550,11 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
if (!PopGPUInfoCache(&gpu_info_)) {
CollectBasicGraphicsInfo(command_line, &gpu_info_);
}
+#if defined(SUBPIXEL_FONT_RENDERING_DISABLED)
+ gpu_info_.subpixel_font_rendering = false;
+#else
+ gpu_info_.subpixel_font_rendering = true;
+#endif
if (!PopGpuFeatureInfoCache(&gpu_feature_info_)) {
gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
command_line, &needs_more_info);
@@ -564,7 +568,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
bool use_swiftshader = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, needs_more_info);
- if (!gl::init::InitializeGLNoExtensionsOneOff()) {
+ if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) {
VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed";
return;
}
@@ -579,7 +583,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
gpu_preferences_.disable_software_rasterizer, false);
if (use_swiftshader) {
gl::init::ShutdownGL(true);
- if (!gl::init::InitializeGLNoExtensionsOneOff()) {
+ if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) {
VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed "
<< "with SwiftShader";
return;
@@ -617,7 +621,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
gpu_preferences_.disable_software_rasterizer, false);
if (use_swiftshader) {
gl::init::ShutdownGL(true);
- if (!gl::init::InitializeGLNoExtensionsOneOff()) {
+ if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) {
VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed "
<< "with SwiftShader";
return;
@@ -651,4 +655,59 @@ scoped_refptr<gl::GLSurface> GpuInit::TakeDefaultOffscreenSurface() {
return std::move(default_offscreen_surface_);
}
+void GpuInit::InitializeVulkan() {
+#if BUILDFLAG(ENABLE_VULKAN)
+ if (gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] ==
+ gpu::kGpuFeatureStatusEnabled) {
+ DCHECK_NE(gpu_preferences_.use_vulkan,
+ gpu::VulkanImplementationName::kNone);
+ bool vulkan_use_swiftshader = gpu_preferences_.use_vulkan ==
+ gpu::VulkanImplementationName::kSwiftshader;
+ const bool enforce_protected_memory =
+ gpu_preferences_.enforce_vulkan_protected_memory;
+ vulkan_implementation_ = gpu::CreateVulkanImplementation(
+ vulkan_use_swiftshader,
+ enforce_protected_memory ? true : false /* allow_protected_memory */,
+ enforce_protected_memory);
+ if (!vulkan_implementation_ ||
+ !vulkan_implementation_->InitializeVulkanInstance(
+ !gpu_preferences_.disable_vulkan_surface)) {
+ DLOG(ERROR) << "Failed to create and initialize Vulkan implementation.";
+ vulkan_implementation_ = nullptr;
+ CHECK(!gpu_preferences_.disable_vulkan_fallback_to_gl_for_testing);
+ }
+ // TODO(penghuang): Remove GPU.SupportsVulkan and GPU.VulkanVersion from
+ // //gpu/config/gpu_info_collector_win.cc when Vulkan is launched via Finch
+ // on Windows.
+ if (!vulkan_use_swiftshader) {
+ const bool supports_vulkan = !!vulkan_implementation_;
+ UMA_HISTOGRAM_BOOLEAN("GPU.SupportsVulkan", supports_vulkan);
+ uint32_t vulkan_version = 0;
+ if (supports_vulkan) {
+ const auto& vulkan_info =
+ vulkan_implementation_->GetVulkanInstance()->vulkan_info();
+ vulkan_version = vulkan_info.used_api_version;
+ }
+ UMA_HISTOGRAM_ENUMERATION(
+ "GPU.VulkanVersion", ConvertToHistogramVulkanVersion(vulkan_version));
+ }
+ }
+ if (!vulkan_implementation_) {
+ if (gpu_preferences_.gr_context_type == gpu::GrContextType::kVulkan) {
+ gpu_preferences_.gr_context_type = gpu::GrContextType::kGL;
+ }
+ gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone;
+ gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
+ gpu::kGpuFeatureStatusDisabled;
+ } else {
+ gpu_info_.vulkan_info =
+ vulkan_implementation_->GetVulkanInstance()->vulkan_info();
+ }
+#else
+ gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone;
+ gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
+ gpu::kGpuFeatureStatusDisabled;
+#endif // BUILDFLAG(ENABLE_VULKAN)
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_init.h b/chromium/gpu/ipc/service/gpu_init.h
index 0545b9dfb73..5263436f3dc 100644
--- a/chromium/gpu/ipc/service/gpu_init.h
+++ b/chromium/gpu/ipc/service/gpu_init.h
@@ -79,6 +79,8 @@ class GPU_IPC_SERVICE_EXPORT GpuInit {
#endif
private:
+ void InitializeVulkan();
+
GpuSandboxHelper* sandbox_helper_ = nullptr;
std::unique_ptr<GpuWatchdogThread> watchdog_thread_;
GPUInfo gpu_info_;
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc
index 50cd9b9bcba..b7811234c24 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc
@@ -6,7 +6,6 @@
#include "base/android/android_hardware_buffer_compat.h"
#include "base/logging.h"
-#include "base/memory/shared_memory_handle.h"
#include "base/stl_util.h"
#include "build/build_config.h"
#include "gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.h"
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
index d10e5e21cc1..dd1e489ad49 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
@@ -165,13 +165,15 @@ GpuMemoryBufferFactoryNativePixmap::CreateAnonymousImage(
#endif
if (!pixmap.get()) {
LOG(ERROR) << "Failed to create pixmap " << size.ToString() << ", "
- << gfx::BufferFormatToString(format);
+ << gfx::BufferFormatToString(format) << ", usage "
+ << gfx::BufferUsageToString(usage);
return nullptr;
}
auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(size, format);
if (!image->Initialize(std::move(pixmap))) {
LOG(ERROR) << "Failed to create GLImage " << size.ToString() << ", "
- << gfx::BufferFormatToString(format);
+ << gfx::BufferFormatToString(format) << ", usage "
+ << gfx::BufferUsageToString(usage);
return nullptr;
}
*is_cleared = true;
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
index b4b8b36c805..873527c6fcf 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
@@ -10,6 +10,7 @@
#include "base/files/file_util.h"
#include "base/format_macros.h"
#include "base/message_loop/message_loop_current.h"
+#include "base/metrics/histogram_functions.h"
#include "base/power_monitor/power_monitor.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
@@ -81,7 +82,6 @@ GpuWatchdogThreadImplV1::GpuWatchdogThreadImplV1()
host_tty_ = GetActiveTTY();
#endif
base::MessageLoopCurrent::Get()->AddTaskObserver(&task_observer_);
- GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart);
}
// static
@@ -124,7 +124,7 @@ void GpuWatchdogThreadImplV1::OnForegrounded() {
void GpuWatchdogThreadImplV1::GpuWatchdogHistogram(
GpuWatchdogThreadEvent thread_event) {
- UMA_HISTOGRAM_ENUMERATION("GPU.WatchdogThread.Event", thread_event);
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Event", thread_event);
}
bool GpuWatchdogThreadImplV1::IsGpuHangDetectedForTesting() {
@@ -149,7 +149,8 @@ GpuWatchdogThreadImplV1::GpuWatchdogTaskObserver::~GpuWatchdogTaskObserver() =
default;
void GpuWatchdogThreadImplV1::GpuWatchdogTaskObserver::WillProcessTask(
- const base::PendingTask& pending_task) {
+ const base::PendingTask& pending_task,
+ bool was_blocked_or_low_priority) {
watchdog_->CheckArmed();
}
@@ -330,6 +331,14 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
// Should not get here while the system is suspended.
DCHECK(!suspension_counter_.HasRefs());
+ // If this metric is added too early (e.g. at watchdog creation time), it
+ // cannot be persistent; the histogram data will be lost after a crash or
+ // browser exit. Delay the recording of kGpuWatchdogStart until the first
+ // OnCheckTimeout().
+ if (!is_watchdog_start_histogram_recorded) {
+ is_watchdog_start_histogram_recorded = true;
+ GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart);
+ }
+
// If the watchdog woke up significantly behind schedule, disarm and reset
// the watchdog check. This is to prevent the watchdog thread from terminating
// when a machine wakes up from sleep or hibernation, which would otherwise
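For illustration only, not part of the change above: the comment on persistent histograms describes a record-once-but-lazily pattern. A minimal sketch, assuming the same is_watchdog_start_histogram_recorded flag introduced here; the helper name OnFirstTimeoutCheck() is hypothetical.

// Sketch: defer the one-time "start" sample until the first timeout check so
// it is written after persistent histogram storage exists and survives a
// crash-induced process exit.
void OnFirstTimeoutCheck() {
  if (!is_watchdog_start_histogram_recorded) {
    is_watchdog_start_histogram_recorded = true;
    base::UmaHistogramEnumeration("GPU.WatchdogThread.Event",
                                  GpuWatchdogThreadEvent::kGpuWatchdogStart);
  }
}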
@@ -375,6 +384,7 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
// Don't crash if we're not on the TTY of our host X11 server.
int active_tty = GetActiveTTY();
if (host_tty_ != -1 && active_tty != -1 && host_tty_ != active_tty) {
+ OnAcknowledge();
return;
}
#endif
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.h b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
index 3112cef2008..7d128d5d362 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.h
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
@@ -58,6 +58,13 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThread : public base::Thread,
// viz::GpuServiceImpl::~GpuServiceImpl()
virtual void OnGpuProcessTearDown() = 0;
+ // Pause the GPU watchdog to stop the timeout task. If the current heavy task
+ // is not running on the GPU driver, the watchdog can be paused to avoid an
+ // unneeded crash.
+ virtual void PauseWatchdog() = 0;
+ // Continue the watchdog after a pause.
+ virtual void ResumeWatchdog() = 0;
+
virtual void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) = 0;
// For gpu testing only. Return status for the watchdog tests
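For illustration only, not part of the change above: PauseWatchdog()/ResumeWatchdog() are meant to bracket slow work that does not exercise the GPU driver, as gpu_init.cc does around GL bindings loading. A minimal usage sketch; LoadLargeLibrary() is a hypothetical stand-in for such work.

// Sketch: keep the watchdog from treating a long, non-driver operation as a
// GPU hang. |watchdog_thread_| may be null when the watchdog is disabled.
if (watchdog_thread_)
  watchdog_thread_->PauseWatchdog();
LoadLargeLibrary();  // hypothetical slow work that does not touch the driver
if (watchdog_thread_)
  watchdog_thread_->ResumeWatchdog();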
@@ -86,6 +93,8 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1
void OnForegrounded() override;
void OnInitComplete() override {}
void OnGpuProcessTearDown() override {}
+ void ResumeWatchdog() override {}
+ void PauseWatchdog() override {}
void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) override;
bool IsGpuHangDetectedForTesting() override;
@@ -105,7 +114,8 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1
~GpuWatchdogTaskObserver() override;
// Implements TaskObserver.
- void WillProcessTask(const base::PendingTask& pending_task) override;
+ void WillProcessTask(const base::PendingTask& pending_task,
+ bool was_blocked_or_low_priority) override;
void DidProcessTask(const base::PendingTask& pending_task) override;
private:
@@ -219,6 +229,9 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1
base::Time check_time_;
base::TimeTicks check_timeticks_;
+ // Whether GpuWatchdogThreadEvent::kGpuWatchdogStart has been recorded.
+ bool is_watchdog_start_histogram_recorded = false;
+
#if defined(USE_X11)
FILE* tty_file_;
int host_tty_;
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc
index 6beb6aad8d4..925457ef637 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc
@@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/test/task_environment.h"
#include "gpu/ipc/service/gpu_watchdog_thread_v2.h"
-#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_current.h"
#include "base/power_monitor/power_monitor.h"
#include "base/power_monitor/power_monitor_source.h"
#include "base/test/power_monitor_test_base.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
+#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gpu {
@@ -19,6 +20,9 @@ namespace {
constexpr auto kGpuWatchdogTimeoutForTesting =
base::TimeDelta::FromMilliseconds(1000);
+constexpr base::TimeDelta kMaxWaitTimeForTesting =
+ base::TimeDelta::FromMilliseconds(4000);
+
// This task will run for duration_ms milliseconds.
void SimpleTask(base::TimeDelta duration) {
base::PlatformThread::Sleep(duration);
@@ -41,7 +45,7 @@ class GpuWatchdogTest : public testing::Test {
protected:
~GpuWatchdogTest() override = default;
- base::MessageLoop main_loop;
+ base::test::SingleThreadTaskEnvironment task_environment_;
base::RunLoop run_loop;
std::unique_ptr<gpu::GpuWatchdogThread> watchdog_thread_;
};
@@ -70,6 +74,7 @@ void GpuWatchdogTest::SetUp() {
watchdog_thread_ = gpu::GpuWatchdogThreadImplV2::Create(
/*start_backgrounded*/ false,
/*timeout*/ kGpuWatchdogTimeoutForTesting,
+ /*max_wait_time*/ kMaxWaitTimeForTesting,
/*test_mode*/ true);
}
@@ -136,9 +141,16 @@ void GpuWatchdogPowerTest::LongTaskOnResume(
// GPU Hang In Initialization
TEST_F(GpuWatchdogTest, GpuInitializationHang) {
- // Gpu init (5000 ms) takes longer than timeout (2000 ms).
+ // GPU init takes longer than timeout.
+#if defined(OS_WIN)
+ SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor +
+ kGpuWatchdogTimeoutForTesting *
+ kMaxCountOfMoreGpuThreadTimeAllowed +
+ kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(3000));
+#else
SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor +
- base::TimeDelta::FromMilliseconds(3000));
+ kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(3000));
+#endif
// Gpu hangs. OnInitComplete() is not called
@@ -154,23 +166,24 @@ TEST_F(GpuWatchdogTest, GpuInitializationAndRunningTasks) {
// Start running GPU tasks. Watchdog function WillProcessTask(),
// DidProcessTask() and ReportProgress() are tested.
- main_loop.task_runner()->PostTask(
+ task_environment_.GetMainThreadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&SimpleTask, base::TimeDelta::FromMilliseconds(500)));
- main_loop.task_runner()->PostTask(
+ task_environment_.GetMainThreadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&SimpleTask, base::TimeDelta::FromMilliseconds(500)));
// This long task takes 3000 milliseconds to finish, longer than timeout.
// But it reports progress every 500 milliseconds
- main_loop.task_runner()->PostTask(
+ task_environment_.GetMainThreadTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&GpuWatchdogTest::LongTaskWithReportProgress,
base::Unretained(this),
kGpuWatchdogTimeoutForTesting +
base::TimeDelta::FromMilliseconds(2000),
base::TimeDelta::FromMilliseconds(500)));
- main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure());
+ task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE,
+ run_loop.QuitClosure());
run_loop.Run();
// Everything should be fine. No GPU hang detected.
@@ -183,13 +196,25 @@ TEST_F(GpuWatchdogTest, GpuRunningATaskHang) {
// Report gpu init complete
watchdog_thread_->OnInitComplete();
- // Start running a GPU task. This long task takes 6000 milliseconds to finish.
- main_loop.task_runner()->PostTask(
+ // Start running a GPU task.
+#if defined(OS_WIN)
+ task_environment_.GetMainThreadTaskRunner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&SimpleTask, kGpuWatchdogTimeoutForTesting * 2 +
+ kGpuWatchdogTimeoutForTesting *
+ kMaxCountOfMoreGpuThreadTimeAllowed +
+ kMaxWaitTimeForTesting +
+ base::TimeDelta::FromMilliseconds(4000)));
+#else
+ task_environment_.GetMainThreadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&SimpleTask, kGpuWatchdogTimeoutForTesting * 2 +
+ kMaxWaitTimeForTesting +
base::TimeDelta::FromMilliseconds(4000)));
+#endif
- main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure());
+ task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE,
+ run_loop.QuitClosure());
run_loop.Run();
// This GPU task takes too long. A GPU hang should be detected.
@@ -209,11 +234,12 @@ TEST_F(GpuWatchdogTest, ChromeInBackground) {
watchdog_thread_->OnInitComplete();
// Run a task that takes longer (3000 milliseconds) than timeout.
- main_loop.task_runner()->PostTask(
+ task_environment_.GetMainThreadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&SimpleTask, kGpuWatchdogTimeoutForTesting * 2 +
base::TimeDelta::FromMilliseconds(1000)));
- main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure());
+ task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE,
+ run_loop.QuitClosure());
run_loop.Run();
// The gpu might be slow when running in the background. This is ok.
@@ -228,16 +254,32 @@ TEST_F(GpuWatchdogTest, GpuSwitchingToForegroundHang) {
// A task stays in the background for 200 milliseconds, and then
// switches to the foreground and runs for 6000 milliseconds. This is longer
// than the first-time foreground watchdog timeout (2000 ms).
- main_loop.task_runner()->PostTask(
+#if defined(OS_WIN)
+ task_environment_.GetMainThreadTaskRunner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&GpuWatchdogTest::LongTaskFromBackgroundToForeground,
+ base::Unretained(this),
+ /*duration*/ kGpuWatchdogTimeoutForTesting * 2 +
+ kGpuWatchdogTimeoutForTesting *
+ kMaxCountOfMoreGpuThreadTimeAllowed +
+ kMaxWaitTimeForTesting +
+ base::TimeDelta::FromMilliseconds(4200),
+ /*time_to_switch_to_foreground*/
+ base::TimeDelta::FromMilliseconds(200)));
+#else
+ task_environment_.GetMainThreadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&GpuWatchdogTest::LongTaskFromBackgroundToForeground,
base::Unretained(this),
/*duration*/ kGpuWatchdogTimeoutForTesting * 2 +
+ kMaxWaitTimeForTesting +
base::TimeDelta::FromMilliseconds(4200),
/*time_to_switch_to_foreground*/
base::TimeDelta::FromMilliseconds(200)));
+#endif
- main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure());
+ task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE,
+ run_loop.QuitClosure());
run_loop.Run();
// It takes too long to finish a task after switching to the foreground.
@@ -246,6 +288,37 @@ TEST_F(GpuWatchdogTest, GpuSwitchingToForegroundHang) {
EXPECT_TRUE(result);
}
+TEST_F(GpuWatchdogTest, GpuInitializationPause) {
+ // Run for 100 ms at the beginning of GPU init.
+ SimpleTask(base::TimeDelta::FromMilliseconds(100));
+ watchdog_thread_->PauseWatchdog();
+
+ // The GPU init continues for another (init timeout + 1000) ms after the pause.
+ SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor +
+ base::TimeDelta::FromMilliseconds(1000));
+
+ // No GPU hang is detected when the watchdog is paused.
+ bool result = watchdog_thread_->IsGpuHangDetectedForTesting();
+ EXPECT_FALSE(result);
+
+ // Continue the watchdog now.
+ watchdog_thread_->ResumeWatchdog();
+ // The GPU init continues for (init timeout + 4000) ms.
+#if defined(OS_WIN)
+ SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor +
+ kGpuWatchdogTimeoutForTesting *
+ kMaxCountOfMoreGpuThreadTimeAllowed +
+ kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4000));
+#else
+ SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor +
+ kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4000));
+#endif
+
+ // A GPU hang should be detected.
+ result = watchdog_thread_->IsGpuHangDetectedForTesting();
+ EXPECT_TRUE(result);
+}
+
TEST_F(GpuWatchdogPowerTest, GpuOnSuspend) {
// watchdog_thread_->OnInitComplete() is called in SetUp
@@ -253,11 +326,12 @@ TEST_F(GpuWatchdogPowerTest, GpuOnSuspend) {
power_monitor_source_->GenerateSuspendEvent();
// Run a task that takes longer (5000 milliseconds) than timeout.
- main_loop.task_runner()->PostTask(
+ task_environment_.GetMainThreadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&SimpleTask, kGpuWatchdogTimeoutForTesting * 2 +
base::TimeDelta::FromMilliseconds(3000)));
- main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure());
+ task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE,
+ run_loop.QuitClosure());
run_loop.Run();
// A task might take long time to finish after entering suspension mode.
@@ -272,16 +346,30 @@ TEST_F(GpuWatchdogPowerTest, GpuOnResumeHang) {
// This task stays in the suspension mode for 200 milliseconds, and it
// wakes up on power resume and then runs for 6000 milliseconds. This is
// longer than the watchdog resume timeout (2000 ms).
- main_loop.task_runner()->PostTask(
+#if defined(OS_WIN)
+ task_environment_.GetMainThreadTaskRunner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(
+ &GpuWatchdogPowerTest::LongTaskOnResume, base::Unretained(this),
+ /*duration*/ kGpuWatchdogTimeoutForTesting * kRestartFactor +
+ kGpuWatchdogTimeoutForTesting *
+ kMaxCountOfMoreGpuThreadTimeAllowed +
+ kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4200),
+ /*time_to_power_resume*/
+ base::TimeDelta::FromMilliseconds(200)));
+#else
+ task_environment_.GetMainThreadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(
&GpuWatchdogPowerTest::LongTaskOnResume, base::Unretained(this),
/*duration*/ kGpuWatchdogTimeoutForTesting * kRestartFactor +
- base::TimeDelta::FromMilliseconds(4200),
+ kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4200),
/*time_to_power_resume*/
base::TimeDelta::FromMilliseconds(200)));
+#endif
- main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure());
+ task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE,
+ run_loop.QuitClosure());
run_loop.Run();
// It takes too long to finish this task after power resume. A GPU hang should
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
index 9677124367a..970e6e56022 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
@@ -7,9 +7,16 @@
#include "base/atomicops.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/bit_cast.h"
#include "base/debug/alias.h"
+#include "base/files/file_path.h"
#include "base/message_loop/message_loop_current.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/native_library.h"
#include "base/power_monitor/power_monitor.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -18,11 +25,25 @@
namespace gpu {
GpuWatchdogThreadImplV2::GpuWatchdogThreadImplV2(base::TimeDelta timeout,
+ base::TimeDelta max_wait_time,
bool is_test_mode)
: watchdog_timeout_(timeout),
+ in_gpu_initialization_(true),
+ max_wait_time_(max_wait_time),
is_test_mode_(is_test_mode),
watched_gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()) {
base::MessageLoopCurrent::Get()->AddTaskObserver(this);
+#if defined(OS_WIN)
+ // GetCurrentThread returns a pseudo-handle that cannot be used by one thread
+ // to identify another. DuplicateHandle creates a "real" handle that can be
+ // used for this purpose.
+ if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
+ GetCurrentProcess(), &watched_thread_handle_,
+ THREAD_QUERY_INFORMATION, FALSE, 0)) {
+ watched_thread_handle_ = nullptr;
+ }
+#endif
+
Arm();
}
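For illustration only, not part of the change above: the constructor comment explains why a pseudo-handle is insufficient. GetCurrentThread() returns a constant pseudo-handle that always refers to whichever thread uses it, so the watchdog thread would end up querying itself; DuplicateHandle() produces a real handle that another thread can use. A compact sketch of the same Windows calls, with the consumer side shown as well:

// Sketch: a real handle obtained via DuplicateHandle() can be used later from
// the watchdog thread to query the watched (GPU main) thread's CPU time.
HANDLE real_handle = nullptr;
if (::DuplicateHandle(::GetCurrentProcess(), ::GetCurrentThread(),
                      ::GetCurrentProcess(), &real_handle,
                      THREAD_QUERY_INFORMATION, /*bInheritHandle=*/FALSE,
                      /*dwOptions=*/0)) {
  FILETIME creation_time, exit_time, kernel_time, user_time;
  // Later, on the watchdog thread:
  ::GetThreadTimes(real_handle, &creation_time, &exit_time, &kernel_time,
                   &user_time);
  ::CloseHandle(real_handle);
}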
@@ -33,15 +54,20 @@ GpuWatchdogThreadImplV2::~GpuWatchdogThreadImplV2() {
base::MessageLoopCurrent::Get()->RemoveTaskObserver(this);
base::PowerMonitor::RemoveObserver(this);
GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogEnd);
+#if defined(OS_WIN)
+ if (watched_thread_handle_)
+ CloseHandle(watched_thread_handle_);
+#endif
}
// static
std::unique_ptr<GpuWatchdogThreadImplV2> GpuWatchdogThreadImplV2::Create(
bool start_backgrounded,
base::TimeDelta timeout,
+ base::TimeDelta max_wait_time,
bool is_test_mode) {
- auto watchdog_thread =
- base::WrapUnique(new GpuWatchdogThreadImplV2(timeout, is_test_mode));
+ auto watchdog_thread = base::WrapUnique(
+ new GpuWatchdogThreadImplV2(timeout, max_wait_time, is_test_mode));
base::Thread::Options options;
options.timer_slack = base::TIMER_SLACK_MAXIMUM;
watchdog_thread->StartWithOptions(options);
@@ -53,7 +79,7 @@ std::unique_ptr<GpuWatchdogThreadImplV2> GpuWatchdogThreadImplV2::Create(
// static
std::unique_ptr<GpuWatchdogThreadImplV2> GpuWatchdogThreadImplV2::Create(
bool start_backgrounded) {
- return Create(start_backgrounded, kGpuWatchdogTimeout, false);
+ return Create(start_backgrounded, kGpuWatchdogTimeout, kMaxWaitTime, false);
}
// Do not add power observer during watchdog init, PowerMonitor might not be up
@@ -70,25 +96,30 @@ void GpuWatchdogThreadImplV2::AddPowerObserver() {
base::Unretained(this)));
}
-// Called from the gpu thread.
+// Android Chrome goes to the background. Called from the gpu thread.
void GpuWatchdogThreadImplV2::OnBackgrounded() {
task_runner()->PostTask(
FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogBackgrounded,
- base::Unretained(this)));
+ base::BindOnce(&GpuWatchdogThreadImplV2::StopWatchdogTimeoutTask,
+ base::Unretained(this), kAndroidBackgroundForeground));
}
-// Called from the gpu thread.
+// Android Chrome goes to the foreground. Called from the gpu thread.
void GpuWatchdogThreadImplV2::OnForegrounded() {
task_runner()->PostTask(
FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogForegrounded,
- base::Unretained(this)));
+ base::BindOnce(&GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask,
+ base::Unretained(this), kAndroidBackgroundForeground));
}
// Called from the gpu thread when gpu init has completed.
void GpuWatchdogThreadImplV2::OnInitComplete() {
DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
+
+ task_runner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&GpuWatchdogThreadImplV2::UpdateInitializationFlag,
+ base::Unretained(this)));
Disarm();
}
@@ -104,6 +135,26 @@ void GpuWatchdogThreadImplV2::OnGpuProcessTearDown() {
Arm();
}
+// Called from the gpu main thread.
+void GpuWatchdogThreadImplV2::PauseWatchdog() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
+
+ task_runner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&GpuWatchdogThreadImplV2::StopWatchdogTimeoutTask,
+ base::Unretained(this), kGeneralGpuFlow));
+}
+
+// Called from the gpu main thread.
+void GpuWatchdogThreadImplV2::ResumeWatchdog() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
+
+ task_runner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask,
+ base::Unretained(this), kGeneralGpuFlow));
+}
+
// Running on the watchdog thread.
// On Linux, Init() will be called twice for Sandbox Initialization. The
// watchdog is stopped and then restarted in StartSandboxLinux(). Everything
@@ -113,17 +164,24 @@ void GpuWatchdogThreadImplV2::Init() {
// Get and Invalidate weak_ptr should be done on the watchdog thread only.
weak_ptr_ = weak_factory_.GetWeakPtr();
+ base::TimeDelta timeout = watchdog_timeout_ * kInitFactor;
task_runner()->PostDelayedTask(
FROM_HERE,
base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_),
- watchdog_timeout_ * kInitFactor);
+ timeout);
last_arm_disarm_counter_ = base::subtle::NoBarrier_Load(&arm_disarm_counter_);
watchdog_start_timeticks_ = base::TimeTicks::Now();
last_on_watchdog_timeout_timeticks_ = watchdog_start_timeticks_;
- last_on_watchdog_timeout_time_ = base::Time::Now();
- GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart);
+#if defined(OS_WIN)
+ if (watched_thread_handle_) {
+ if (base::ThreadTicks::IsSupported())
+ base::ThreadTicks::WaitUntilInitialized();
+ last_on_watchdog_timeout_thread_ticks_ = GetWatchedThreadTime();
+ remaining_watched_thread_ticks_ = timeout;
+ }
+#endif
}
// Running on the watchdog thread.
@@ -138,7 +196,8 @@ void GpuWatchdogThreadImplV2::ReportProgress() {
}
void GpuWatchdogThreadImplV2::WillProcessTask(
- const base::PendingTask& pending_task) {
+ const base::PendingTask& pending_task,
+ bool was_blocked_or_low_priority) {
DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
// The watchdog is armed at the beginning of the gpu process teardown.
@@ -160,23 +219,14 @@ void GpuWatchdogThreadImplV2::DidProcessTask(
Disarm();
}
-// Running on the watchdog thread.
+// Power Suspends. Running on the watchdog thread.
void GpuWatchdogThreadImplV2::OnSuspend() {
- DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
- in_power_suspension_ = true;
- // Revoke any pending watchdog timeout task
- weak_factory_.InvalidateWeakPtrs();
- suspend_timeticks_ = base::TimeTicks::Now();
+ StopWatchdogTimeoutTask(kPowerSuspendResume);
}
-// Running on the watchdog thread.
+// Power Resumes. Running on the watchdog thread.
void GpuWatchdogThreadImplV2::OnResume() {
- DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
-
- in_power_suspension_ = false;
- RestartWatchdogTimeoutTask();
- resume_timeticks_ = base::TimeTicks::Now();
- is_first_timeout_after_power_resume = true;
+ RestartWatchdogTimeoutTask(kPowerSuspendResume);
}
// Running on the watchdog thread.
@@ -188,41 +238,92 @@ void GpuWatchdogThreadImplV2::OnAddPowerObserver() {
}
// Running on the watchdog thread.
-void GpuWatchdogThreadImplV2::OnWatchdogBackgrounded() {
- DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
-
- is_backgrounded_ = true;
- // Revoke any pending watchdog timeout task
- weak_factory_.InvalidateWeakPtrs();
- backgrounded_timeticks_ = base::TimeTicks::Now();
-}
-
-// Running on the watchdog thread.
-void GpuWatchdogThreadImplV2::OnWatchdogForegrounded() {
- DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
-
- is_backgrounded_ = false;
- RestartWatchdogTimeoutTask();
- foregrounded_timeticks_ = base::TimeTicks::Now();
-}
-
-// Running on the watchdog thread.
-void GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask() {
+void GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask(
+ PauseResumeSource source_of_request) {
DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
+ base::TimeDelta timeout;
+
+ switch (source_of_request) {
+ case kAndroidBackgroundForeground:
+ if (!is_backgrounded_)
+ return;
+ is_backgrounded_ = false;
+ timeout = watchdog_timeout_ * kRestartFactor;
+ foregrounded_timeticks_ = base::TimeTicks::Now();
+ foregrounded_event_ = true;
+ num_of_timeout_after_foregrounded_ = 0;
+ break;
+ case kPowerSuspendResume:
+ if (!in_power_suspension_)
+ return;
+ in_power_suspension_ = false;
+ timeout = watchdog_timeout_ * kRestartFactor;
+ power_resume_timeticks_ = base::TimeTicks::Now();
+ power_resumed_event_ = true;
+ num_of_timeout_after_power_resume_ = 0;
+ break;
+ case kGeneralGpuFlow:
+ if (!is_paused_)
+ return;
+ is_paused_ = false;
+ timeout = watchdog_timeout_ * kInitFactor;
+ watchdog_resume_timeticks_ = base::TimeTicks::Now();
+ break;
+ }
- if (!is_backgrounded_ && !in_power_suspension_) {
- // Make the timeout twice long. The system/gpu might be very slow right
- // after resume or foregrounded.
+ if (!is_backgrounded_ && !in_power_suspension_ && !is_paused_) {
weak_ptr_ = weak_factory_.GetWeakPtr();
task_runner()->PostDelayedTask(
FROM_HERE,
base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_),
- watchdog_timeout_ * kRestartFactor);
+ timeout);
last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now();
- last_on_watchdog_timeout_time_ = base::Time::Now();
+ last_arm_disarm_counter_ =
+ base::subtle::NoBarrier_Load(&arm_disarm_counter_);
+#if defined(OS_WIN)
+ if (watched_thread_handle_) {
+ last_on_watchdog_timeout_thread_ticks_ = GetWatchedThreadTime();
+ remaining_watched_thread_ticks_ = timeout;
+ }
+#endif
}
}
+void GpuWatchdogThreadImplV2::StopWatchdogTimeoutTask(
+ PauseResumeSource source_of_request) {
+ DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
+
+ switch (source_of_request) {
+ case kAndroidBackgroundForeground:
+ if (is_backgrounded_)
+ return;
+ is_backgrounded_ = true;
+ backgrounded_timeticks_ = base::TimeTicks::Now();
+ foregrounded_event_ = false;
+ break;
+ case kPowerSuspendResume:
+ if (in_power_suspension_)
+ return;
+ in_power_suspension_ = true;
+ power_suspend_timeticks_ = base::TimeTicks::Now();
+ power_resumed_event_ = false;
+ break;
+ case kGeneralGpuFlow:
+ if (is_paused_)
+ return;
+ is_paused_ = true;
+ watchdog_pause_timeticks_ = base::TimeTicks::Now();
+ break;
+ }
+
+ // Revoke any pending watchdog timeout task
+ weak_factory_.InvalidateWeakPtrs();
+}
+
+void GpuWatchdogThreadImplV2::UpdateInitializationFlag() {
+ in_gpu_initialization_ = false;
+}
+
// Called from the gpu main thread.
// The watchdog is armed only in these three functions -
// GpuWatchdogThreadImplV2(), WillProcessTask(), and OnGpuProcessTearDown()
@@ -264,19 +365,52 @@ void GpuWatchdogThreadImplV2::OnWatchdogTimeout() {
DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
DCHECK(!is_backgrounded_);
DCHECK(!in_power_suspension_);
+ DCHECK(!is_paused_);
+
+ // If this metric is added too early (e.g. at watchdog creation time), it
+ // cannot be persistent; the histogram data will be lost after a crash or
+ // browser exit. Delay the recording of kGpuWatchdogStart until the first
+ // OnWatchdogTimeout() to ensure this metric is created in persistent
+ // memory.
+ if (!is_watchdog_start_histogram_recorded) {
+ is_watchdog_start_histogram_recorded = true;
+ GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart);
+ }
+
base::subtle::Atomic32 arm_disarm_counter =
base::subtle::NoBarrier_Load(&arm_disarm_counter_);
-
- // disarmed is true if it's an even number.
- bool disarmed = arm_disarm_counter % 2 == 0;
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeout);
+ if (power_resumed_event_)
+ num_of_timeout_after_power_resume_++;
+ if (foregrounded_event_)
+ num_of_timeout_after_foregrounded_++;
+
+ // Collect all needed info for gpu hang detection.
+ bool disarmed = arm_disarm_counter % 2 == 0; // even number
bool gpu_makes_progress = arm_disarm_counter != last_arm_disarm_counter_;
- last_arm_disarm_counter_ = arm_disarm_counter;
+ bool watched_thread_needs_more_time =
+ WatchedThreadNeedsMoreTime(disarmed || gpu_makes_progress);
+
+ // No GPU hang is detected. Continue with another OnWatchdogTimeout() task.
+ if (disarmed || gpu_makes_progress || watched_thread_needs_more_time) {
+ last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now();
+ last_arm_disarm_counter_ =
+ base::subtle::NoBarrier_Load(&arm_disarm_counter_);
+
+ task_runner()->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_),
+ watchdog_timeout_);
+ return;
+ }
- // No gpu hang is detected. Continue with another OnWatchdogTimeout
- if (disarmed || gpu_makes_progress) {
+ // An experiment for all platforms: wait for max_wait_time_ and see if the
+ // GPU will respond.
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeoutWait);
+ if (GpuRespondsAfterWaiting()) {
last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now();
- last_on_watchdog_timeout_time_ = base::Time::Now();
- is_first_timeout_after_power_resume = false;
+ last_arm_disarm_counter_ =
+ base::subtle::NoBarrier_Load(&arm_disarm_counter_);
task_runner()->PostDelayedTask(
FROM_HERE,
@@ -286,9 +420,117 @@ void GpuWatchdogThreadImplV2::OnWatchdogTimeout() {
}
// Still armed without any progress. GPU possibly hangs.
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kKill);
DeliberatelyTerminateToRecoverFromHang();
}
+bool GpuWatchdogThreadImplV2::GpuIsAlive() {
+ base::subtle::Atomic32 arm_disarm_counter =
+ base::subtle::NoBarrier_Load(&arm_disarm_counter_);
+ bool gpu_makes_progress = arm_disarm_counter != last_arm_disarm_counter_;
+
+ return gpu_makes_progress;
+}
+
+bool GpuWatchdogThreadImplV2::WatchedThreadNeedsMoreTime(
+ bool no_gpu_hang_detected) {
+#if defined(OS_WIN)
+ if (!watched_thread_handle_)
+ return false;
+
+ // For metrics only - If count_of_more_gpu_thread_time_allowed_ > 0, we know
+ // extra time was granted in the previous OnWatchdogTimeout(). Now we find the
+ // GPU makes progress. Record this case.
+ if (no_gpu_hang_detected && count_of_more_gpu_thread_time_allowed_ > 0) {
+ GpuWatchdogTimeoutHistogram(
+ GpuWatchdogTimeoutEvent::kProgressAfterMoreThreadTime);
+ WindowsNumOfExtraTimeoutsHistogram();
+ }
+ // For metrics only - The extra time that was given, in units of whole timeouts.
+ time_in_extra_timeouts_ =
+ count_of_more_gpu_thread_time_allowed_ * watchdog_timeout_;
+
+ // Calculate how many thread ticks the watched thread spent doing the work.
+ base::ThreadTicks now = GetWatchedThreadTime();
+ base::TimeDelta thread_time_elapsed =
+ now - last_on_watchdog_timeout_thread_ticks_;
+ last_on_watchdog_timeout_thread_ticks_ = now;
+ remaining_watched_thread_ticks_ -= thread_time_elapsed;
+
+ if (no_gpu_hang_detected ||
+ count_of_more_gpu_thread_time_allowed_ >=
+ kMaxCountOfMoreGpuThreadTimeAllowed ||
+ thread_time_elapsed < base::TimeDelta() /* bogus data */ ||
+ remaining_watched_thread_ticks_ <= base::TimeDelta()) {
+ // Reset the remaining thread ticks.
+ remaining_watched_thread_ticks_ = watchdog_timeout_;
+ count_of_more_gpu_thread_time_allowed_ = 0;
+ return false;
+ } else {
+ count_of_more_gpu_thread_time_allowed_++;
+ // Only record it once for all extended timeouts on the same detected GPU
+ // hang, so we know this is equivalent to one crash in our crash reports.
+ if (count_of_more_gpu_thread_time_allowed_ == 1)
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kMoreThreadTime);
+
+ return true;
+ }
+#else
+ return false;
+#endif
+}
+
+#if defined(OS_WIN)
+base::ThreadTicks GpuWatchdogThreadImplV2::GetWatchedThreadTime() {
+ DCHECK(watched_thread_handle_);
+
+ if (base::ThreadTicks::IsSupported()) {
+ // Note: GetForThread() might return bogus results if the thread ran on
+ // different CPUs between two calls.
+ return base::ThreadTicks::GetForThread(
+ base::PlatformThreadHandle(watched_thread_handle_));
+ } else {
+ FILETIME creation_time;
+ FILETIME exit_time;
+ FILETIME kernel_time;
+ FILETIME user_time;
+ BOOL result = GetThreadTimes(watched_thread_handle_, &creation_time,
+ &exit_time, &kernel_time, &user_time);
+ if (!result)
+ return base::ThreadTicks();
+
+ // Need to bit_cast to fix alignment, then divide by 10 to convert
+ // 100-nanosecond units to microseconds.
+ int64_t user_time_us = bit_cast<int64_t, FILETIME>(user_time) / 10;
+ int64_t kernel_time_us = bit_cast<int64_t, FILETIME>(kernel_time) / 10;
+
+ return base::ThreadTicks() +
+ base::TimeDelta::FromMicroseconds(user_time_us + kernel_time_us);
+ }
+}
+#endif
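For illustration only, not part of the change above: the FILETIME fallback path packs a 64-bit count of 100-nanosecond intervals into two 32-bit words, which is why the value is bit_cast to int64_t and divided by 10. A short worked sketch of the conversion, with a hypothetical raw value:

// Sketch of the unit conversion used in GetWatchedThreadTime():
// FILETIME counts 100 ns intervals, so value / 10 gives microseconds.
FILETIME ft = {};  // assume GetThreadTimes() filled this in
int64_t hundred_ns = bit_cast<int64_t, FILETIME>(ft);
// Example: a raw count of 150000 -> 150000 / 10 = 15000 us = 15 ms of CPU time.
base::TimeDelta cpu_time = base::TimeDelta::FromMicroseconds(hundred_ns / 10);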
+
+// This is an experiment on all platforms to see whether the GPU will respond
+// after waiting longer.
+bool GpuWatchdogThreadImplV2::GpuRespondsAfterWaiting() {
+ base::TimeDelta duration;
+ base::TimeTicks start_timeticks = base::TimeTicks::Now();
+
+ while (duration < max_wait_time_) {
+ // Sleep for 1 second each time and check whether the GPU makes progress.
+ base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(1));
+ duration = base::TimeTicks::Now() - start_timeticks;
+
+ if (GpuIsAlive()) {
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kProgressAfterWait);
+ GpuWatchdogWaitTimeHistogram(duration);
+ return true;
+ }
+ }
+
+ return false;
+}
+
void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() {
DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
// If this is for gpu testing, do not terminate the gpu process.
@@ -304,13 +546,18 @@ void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() {
// Store variables so they're available in crash dumps to help determine the
// cause of any hang.
- base::TimeTicks current_timeticks = base::TimeTicks::Now();
- base::debug::Alias(&current_timeticks);
+ base::TimeTicks function_begin_timeticks = base::TimeTicks::Now();
+ base::debug::Alias(&in_gpu_initialization_);
+ base::debug::Alias(&num_of_timeout_after_power_resume_);
+ base::debug::Alias(&num_of_timeout_after_foregrounded_);
+ base::debug::Alias(&function_begin_timeticks);
base::debug::Alias(&watchdog_start_timeticks_);
- base::debug::Alias(&suspend_timeticks_);
- base::debug::Alias(&resume_timeticks_);
+ base::debug::Alias(&power_suspend_timeticks_);
+ base::debug::Alias(&power_resume_timeticks_);
base::debug::Alias(&backgrounded_timeticks_);
base::debug::Alias(&foregrounded_timeticks_);
+ base::debug::Alias(&watchdog_pause_timeticks_);
+ base::debug::Alias(&watchdog_resume_timeticks_);
base::debug::Alias(&in_power_suspension_);
base::debug::Alias(&in_gpu_process_teardown_);
base::debug::Alias(&is_backgrounded_);
@@ -318,21 +565,19 @@ void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() {
base::debug::Alias(&is_power_observer_added_);
base::debug::Alias(&last_on_watchdog_timeout_timeticks_);
base::TimeDelta timeticks_elapses =
- current_timeticks - last_on_watchdog_timeout_timeticks_;
+ function_begin_timeticks - last_on_watchdog_timeout_timeticks_;
base::debug::Alias(&timeticks_elapses);
-
- // If clock_time_elapses is much longer than time_elapses, it might be a sign
- // of a busy system.
- base::Time current_time = base::Time::Now();
- base::TimeDelta time_elapses = current_time - last_on_watchdog_timeout_time_;
- base::debug::Alias(&current_time);
- base::debug::Alias(&last_on_watchdog_timeout_time_);
- base::debug::Alias(&time_elapses);
+#if defined(OS_WIN)
+ base::debug::Alias(&remaining_watched_thread_ticks_);
+#endif
GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogKill);
+ crash_keys::gpu_watchdog_crashed_in_gpu_init.Set(
+ in_gpu_initialization_ ? "1" : "0");
+
crash_keys::gpu_watchdog_kill_after_power_resume.Set(
- is_first_timeout_after_power_resume ? "1" : "0");
+ WithinOneMinFromPowerResumed() ? "1" : "0");
// Deliberately crash the process to create a crash dump.
*((volatile int*)0) = 0xdeadface;
@@ -340,8 +585,130 @@ void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() {
void GpuWatchdogThreadImplV2::GpuWatchdogHistogram(
GpuWatchdogThreadEvent thread_event) {
- UMA_HISTOGRAM_ENUMERATION("GPU.WatchdogThread.Event.V2", thread_event);
- UMA_HISTOGRAM_ENUMERATION("GPU.WatchdogThread.Event", thread_event);
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Event.V2", thread_event);
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Event", thread_event);
+}
+
+void GpuWatchdogThreadImplV2::GpuWatchdogTimeoutHistogram(
+ GpuWatchdogTimeoutEvent timeout_event) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout", timeout_event);
+
+ bool recorded = false;
+ if (in_gpu_initialization_) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Init",
+ timeout_event);
+ recorded = true;
+ }
+
+ if (WithinOneMinFromPowerResumed()) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.PowerResume",
+ timeout_event);
+ recorded = true;
+ }
+
+ if (WithinOneMinFromForegrounded()) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Foregrounded",
+ timeout_event);
+ recorded = true;
+ }
+
+ if (!recorded) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Normal",
+ timeout_event);
+ }
+}
+
+#if defined(OS_WIN)
+void GpuWatchdogThreadImplV2::WindowsNumOfExtraTimeoutsHistogram() {
+  // Record the number of timeouts the GPU main thread needs to make progress
+  // after OnWatchdogTimeout() is triggered. The maximum count is 6, which is
+  // more than kMaxCountOfMoreGpuThreadTimeAllowed (4).
+ constexpr int kMin = 1;
+ constexpr int kMax = 6;
+ constexpr int kBuckets = 6;
+ int count = count_of_more_gpu_thread_time_allowed_;
+ bool recorded = false;
+
+ base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime", count,
+ kMin, kMax, kBuckets);
+
+ if (in_gpu_initialization_) {
+ base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime.Init",
+ count, kMin, kMax, kBuckets);
+ recorded = true;
+ }
+
+ if (WithinOneMinFromPowerResumed()) {
+ base::UmaHistogramCustomCounts(
+ "GPU.WatchdogThread.ExtraThreadTime.PowerResume", count, kMin, kMax,
+ kBuckets);
+ recorded = true;
+ }
+
+ if (WithinOneMinFromForegrounded()) {
+ base::UmaHistogramCustomCounts(
+ "GPU.WatchdogThread.ExtraThreadTime.Foregrounded", count, kMin, kMax,
+ kBuckets);
+ recorded = true;
+ }
+
+ if (!recorded) {
+ base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime.Normal",
+ count, kMin, kMax, kBuckets);
+ }
+}
+#endif
+
+void GpuWatchdogThreadImplV2::GpuWatchdogWaitTimeHistogram(
+ base::TimeDelta wait_time) {
+#if defined(OS_WIN)
+  // Add the extra timeout time the GPU main thread was already given.
+ wait_time += time_in_extra_timeouts_;
+#endif
+
+  // Record the wait time in OnWatchdogTimeout() for the GPU main thread to
+  // make progress. The maximum recorded time is 150 seconds because Windows
+  // needs to add the time spent before reaching here (max 60 sec).
+ constexpr base::TimeDelta kMin = base::TimeDelta::FromSeconds(1);
+ constexpr base::TimeDelta kMax = base::TimeDelta::FromSeconds(150);
+ constexpr int kBuckets = 50;
+ bool recorded = false;
+
+ base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime", wait_time, kMin,
+ kMax, kBuckets);
+
+ if (in_gpu_initialization_) {
+ base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.Init", wait_time,
+ kMin, kMax, kBuckets);
+ recorded = true;
+ }
+
+ if (WithinOneMinFromPowerResumed()) {
+ base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.PowerResume",
+ wait_time, kMin, kMax, kBuckets);
+ recorded = true;
+ }
+
+ if (WithinOneMinFromForegrounded()) {
+ base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.Foregrounded",
+ wait_time, kMin, kMax, kBuckets);
+ recorded = true;
+ }
+
+ if (!recorded) {
+ base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.Normal",
+ wait_time, kMin, kMax, kBuckets);
+ }
+}
+
+bool GpuWatchdogThreadImplV2::WithinOneMinFromPowerResumed() {
+ size_t count = base::TimeDelta::FromSeconds(60) / watchdog_timeout_;
+ return power_resumed_event_ && num_of_timeout_after_power_resume_ <= count;
+}
+
+bool GpuWatchdogThreadImplV2::WithinOneMinFromForegrounded() {
+ size_t count = base::TimeDelta::FromSeconds(60) / watchdog_timeout_;
+ return foregrounded_event_ && num_of_timeout_after_foregrounded_ <= count;
}
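
For illustration, the two helpers above simply count watchdog timeouts: with a hypothetical 15-second watchdog_timeout_, 60 / 15 gives a cutoff of 4, so only the first four OnWatchdogTimeout() calls after the event fall into the PowerResume/Foregrounded buckets. A minimal sketch of that arithmetic (hypothetical values, not the actual constants):

#include <cassert>
#include <cstddef>

// Mirrors the cutoff computation: how many watchdog timeouts fit in a minute.
bool WithinOneMinuteOfEvent(size_t timeouts_since_event, int timeout_seconds) {
  size_t cutoff = 60 / timeout_seconds;  // e.g. 60 / 15 == 4.
  return timeouts_since_event <= cutoff;
}

int main() {
  assert(WithinOneMinuteOfEvent(4, 15));   // The fourth timeout still counts.
  assert(!WithinOneMinuteOfEvent(5, 15));  // The fifth one no longer does.
  return 0;
}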
// For gpu testing only. Return whether a GPU hang was detected or not.
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
index fe5d4d94521..f9a63c7d953 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
@@ -8,6 +8,36 @@
#include "gpu/ipc/service/gpu_watchdog_thread.h"
namespace gpu {
+#if defined(OS_WIN)
+// If the time the watched GPU thread spent doing actual work is less than the
+// watchdog timeout, the GPU thread can continue running through
+// OnWatchdogTimeout() at most 4 more times before the GPU thread is killed.
+constexpr int kMaxCountOfMoreGpuThreadTimeAllowed = 4;
+#endif
+constexpr base::TimeDelta kMaxWaitTime = base::TimeDelta::FromSeconds(60);
+
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused.
+enum class GpuWatchdogTimeoutEvent {
+ // Recorded each time OnWatchdogTimeout() is called.
+ kTimeout,
+ // Recorded when a GPU main thread is killed for a detected hang.
+ kKill,
+  // Windows only: Recorded when a hang is detected but the GPU main thread is
+  // allowed to continue until it has spent its full thread time doing the
+  // work.
+  kMoreThreadTime,
+  // Windows only: The GPU makes progress after being given more thread time.
+  // The GPU main thread is not killed.
+  kProgressAfterMoreThreadTime,
+  // A GPU hang is detected but the watchdog waits for 60 seconds before taking
+  // action.
+ kTimeoutWait,
+ // The GPU makes progress within 60 sec in OnWatchdogTimeout(). The GPU main
+ // thread is not killed.
+ kProgressAfterWait,
+ kMaxValue = kProgressAfterWait,
+};
class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
: public GpuWatchdogThread,
@@ -16,8 +46,11 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
static std::unique_ptr<GpuWatchdogThreadImplV2> Create(
bool start_backgrounded);
- static std::unique_ptr<GpuWatchdogThreadImplV2>
- Create(bool start_backgrounded, base::TimeDelta timeout, bool test_mode);
+ static std::unique_ptr<GpuWatchdogThreadImplV2> Create(
+ bool start_backgrounded,
+ base::TimeDelta timeout,
+ base::TimeDelta max_wait_time,
+ bool test_mode);
~GpuWatchdogThreadImplV2() override;
@@ -27,6 +60,8 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
void OnForegrounded() override;
void OnInitComplete() override;
void OnGpuProcessTearDown() override;
+ void ResumeWatchdog() override;
+ void PauseWatchdog() override;
void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) override;
bool IsGpuHangDetectedForTesting() override;
void WaitForPowerObserverAddedForTesting() override;
@@ -39,7 +74,8 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
void ReportProgress() override;
// Implements TaskObserver.
- void WillProcessTask(const base::PendingTask& pending_task) override;
+ void WillProcessTask(const base::PendingTask& pending_task,
+ bool was_blocked_or_low_priority) override;
void DidProcessTask(const base::PendingTask& pending_task) override;
// Implements base::PowerObserver.
@@ -47,47 +83,98 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
void OnResume() override;
private:
- GpuWatchdogThreadImplV2(base::TimeDelta timeout, bool test_mode);
+ enum PauseResumeSource {
+ kAndroidBackgroundForeground = 0,
+ kPowerSuspendResume = 1,
+ kGeneralGpuFlow = 2,
+ };
+
+ GpuWatchdogThreadImplV2(base::TimeDelta timeout,
+ base::TimeDelta max_wait_time,
+ bool test_mode);
void OnAddPowerObserver();
- void OnWatchdogBackgrounded();
- void OnWatchdogForegrounded();
- void RestartWatchdogTimeoutTask();
+ void RestartWatchdogTimeoutTask(PauseResumeSource source_of_request);
+ void StopWatchdogTimeoutTask(PauseResumeSource source_of_request);
+ void UpdateInitializationFlag();
void Arm();
void Disarm();
void InProgress();
bool IsArmed();
void OnWatchdogTimeout();
+ bool GpuIsAlive();
+ bool WatchedThreadNeedsMoreTime(bool no_gpu_hang_detected);
+#if defined(OS_WIN)
+ base::ThreadTicks GetWatchedThreadTime();
+#endif
+ bool GpuRespondsAfterWaiting();
  // Do not change the function name. It is used for [GPU HANG] crash reports.
void DeliberatelyTerminateToRecoverFromHang();
+ // Histogram recorded in OnWatchdogTimeout()
+ void GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent timeout_event);
+
+#if defined(OS_WIN)
+  // The number of extra timeouts the GPU main thread needed to make progress.
+ void WindowsNumOfExtraTimeoutsHistogram();
+#endif
+
+  // The wait time in OnWatchdogTimeout() for the GPU main thread to make
+  // progress.
+ void GpuWatchdogWaitTimeHistogram(base::TimeDelta wait_time);
+
+  // Used for metrics. True if within one minute of the event.
+ bool WithinOneMinFromPowerResumed();
+ bool WithinOneMinFromForegrounded();
+
// This counter is only written on the gpu thread, and read on both threads.
base::subtle::Atomic32 arm_disarm_counter_ = 0;
// The counter number read in the last OnWatchdogTimeout() on the watchdog
// thread.
int32_t last_arm_disarm_counter_ = 0;
- // Timeout on the watchdog thread to check if gpu hangs
+ // Timeout on the watchdog thread to check if gpu hangs.
base::TimeDelta watchdog_timeout_;
- // The time the gpu watchdog was created
+ // The time the gpu watchdog was created.
base::TimeTicks watchdog_start_timeticks_;
// The time the last OnSuspend and OnResume was called.
- base::TimeTicks suspend_timeticks_;
- base::TimeTicks resume_timeticks_;
+ base::TimeTicks power_suspend_timeticks_;
+ base::TimeTicks power_resume_timeticks_;
// The time the last OnBackgrounded and OnForegrounded was called.
base::TimeTicks backgrounded_timeticks_;
base::TimeTicks foregrounded_timeticks_;
- // Time: Interpreting the wall-clock time provided by a remote system.
+  // The time PauseWatchdog and ResumeWatchdog were last called.
+ base::TimeTicks watchdog_pause_timeticks_;
+ base::TimeTicks watchdog_resume_timeticks_;
+
// TimeTicks: Tracking the amount of time a task runs. Executing delayed
// tasks at the right time.
+ // ThreadTicks: Use this timer to (approximately) measure how much time the
+ // calling thread spent doing actual work vs. being de-scheduled.
// The time the last OnWatchdogTimeout() was called.
base::TimeTicks last_on_watchdog_timeout_timeticks_;
- base::Time last_on_watchdog_timeout_time_;
+#if defined(OS_WIN)
+ base::ThreadTicks last_on_watchdog_timeout_thread_ticks_;
+
+ // The difference between the timeout and the actual time the watched thread
+ // spent doing actual work.
+ base::TimeDelta remaining_watched_thread_ticks_;
+
+  // The Windows thread handle of the watched GPU main thread.
+ void* watched_thread_handle_ = nullptr;
+
+  // After a GPU hang is detected, the number of times the GPU thread has been
+  // allowed to continue because it had not used its full thread time.
+ int count_of_more_gpu_thread_time_allowed_ = 0;
+
+ // The accumulated timeout time the GPU main thread was given.
+ base::TimeDelta time_in_extra_timeouts_;
+#endif
// The system has entered the power suspension mode.
bool in_power_suspension_ = false;
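
The TimeTicks/ThreadTicks distinction noted above is what lets the Windows path tell a genuinely busy (or hung) thread from one that was merely de-scheduled. A rough sketch of the difference, assuming Chromium's base time APIs and a platform where ThreadTicks is supported:

#include "base/threading/platform_thread.h"
#include "base/time/time.h"

// TimeTicks advances while the thread sleeps or is de-scheduled; ThreadTicks
// only advances while the thread is actually running on a CPU.
void CompareClocks() {
  base::TimeTicks wall_start = base::TimeTicks::Now();
  base::ThreadTicks cpu_start = base::ThreadTicks::Now();

  base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(1));

  base::TimeDelta wall_elapsed = base::TimeTicks::Now() - wall_start;  // ~1s.
  base::TimeDelta cpu_elapsed = base::ThreadTicks::Now() - cpu_start;  // ~0s.
  // A de-scheduled GPU main thread looks similar: little ThreadTicks progress
  // even though a full watchdog timeout of wall-clock time has elapsed, which
  // is why the watchdog grants extra timeouts in that case.
  (void)wall_elapsed;
  (void)cpu_elapsed;
}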
@@ -95,18 +182,32 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
// The GPU process has started tearing down. Accessed only in the gpu process.
bool in_gpu_process_teardown_ = false;
- // OnWatchdogTimeout() is called for the first time after power resume.
- bool is_first_timeout_after_power_resume = false;
-
  // Chrome is running in the background on Android. The GPU is probably very
  // slow or stalled.
bool is_backgrounded_ = false;
+ // The GPU watchdog is paused. The timeout task is temporarily stopped.
+ bool is_paused_ = false;
+
// Whether the watchdog thread has been called and added to the power monitor
// observer.
bool is_add_power_observer_called_ = false;
bool is_power_observer_added_ = false;
+  // Whether GpuWatchdogThreadEvent::kGpuWatchdogStart has been recorded.
+ bool is_watchdog_start_histogram_recorded = false;
+
+ // Read/Write by the watchdog thread only after initialized in the
+ // constructor.
+ bool in_gpu_initialization_ = false;
+
+  // For the experiment and for debugging purposes.
+ size_t num_of_timeout_after_power_resume_ = 0;
+ size_t num_of_timeout_after_foregrounded_ = 0;
+ bool foregrounded_event_ = false;
+ bool power_resumed_event_ = false;
+ base::TimeDelta max_wait_time_;
+
// For gpu testing only.
const bool is_test_mode_;
// Set by the watchdog thread and Read by the test thread.
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
index 21aa72df856..f61a9798e97 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
@@ -153,27 +153,11 @@ void ImageDecodeAcceleratorStub::OnScheduleImageDecode(
uint64_t release_count) {
DCHECK(io_task_runner_->BelongsToCurrentThread());
base::AutoLock lock(lock_);
- if (!channel_ || destroying_channel_) {
+ if (!channel_) {
// The channel is no longer available, so don't do anything.
return;
}
- // Make sure the decode sync token is ordered with respect to the last decode
- // request.
- if (release_count <= last_release_count_) {
- DLOG(ERROR) << "Out-of-order decode sync token";
- OnError();
- return;
- }
- last_release_count_ = release_count;
-
- // Make sure the output dimensions are not too small.
- if (decode_params.output_size.IsEmpty()) {
- DLOG(ERROR) << "Output dimensions are too small";
- OnError();
- return;
- }
-
// Start the actual decode.
worker_->Decode(
std::move(decode_params.encoded_data), decode_params.output_size,
@@ -200,7 +184,7 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
uint64_t decode_release_count) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
base::AutoLock lock(lock_);
- if (!channel_ || destroying_channel_) {
+ if (!channel_) {
// The channel is no longer available, so don't do anything.
return;
}
@@ -208,6 +192,29 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
DCHECK(!pending_completed_decodes_.empty());
std::unique_ptr<ImageDecodeAcceleratorWorker::DecodeResult> completed_decode =
std::move(pending_completed_decodes_.front());
+ pending_completed_decodes_.pop();
+
+ // Regardless of what happens next, make sure the sync token gets released and
+ // the sequence gets disabled if there are no more completed decodes after
+ // this. base::Unretained(this) is safe because *this outlives the
+ // ScopedClosureRunner.
+ base::ScopedClosureRunner finalizer(
+ base::BindOnce(&ImageDecodeAcceleratorStub::FinishCompletedDecode,
+ base::Unretained(this), decode_release_count));
+
+ if (!completed_decode) {
+ DLOG(ERROR) << "The image could not be decoded";
+ return;
+ }
+
+ // TODO(crbug.com/995883): the output_size parameter is going away, so this
+ // validation is not needed. Checking if the size is too small should happen
+ // at the level of the decoder (since that's the component that's aware of its
+ // own capabilities).
+ if (params.output_size.IsEmpty()) {
+ DLOG(ERROR) << "Output dimensions are too small";
+ return;
+ }
// Gain access to the transfer cache through the GpuChannelManager's
// SharedContextState. We will also use that to get a GrContext that will be
@@ -217,7 +224,6 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
channel_->gpu_channel_manager()->GetSharedContextState(&context_result);
if (context_result != ContextResult::kSuccess) {
DLOG(ERROR) << "Unable to obtain the SharedContextState";
- OnError();
return;
}
DCHECK(shared_context_state);
@@ -227,17 +233,14 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
// other graphics APIs).
if (!shared_context_state->IsGLInitialized()) {
DLOG(ERROR) << "GL has not been initialized";
- OnError();
return;
}
if (!shared_context_state->gr_context()) {
DLOG(ERROR) << "Could not get the GrContext";
- OnError();
return;
}
if (!shared_context_state->MakeCurrent(nullptr /* surface */)) {
DLOG(ERROR) << "Could not MakeCurrent the shared context";
- OnError();
return;
}
@@ -269,7 +272,6 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
if (!safe_uv_width.AssignIfValid(&uv_width) ||
!safe_uv_height.AssignIfValid(&uv_height)) {
DLOG(ERROR) << "Could not calculate subsampled dimensions";
- OnError();
return;
}
gfx::Size uv_plane_size = gfx::Size(uv_width, uv_height);
@@ -343,13 +345,11 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
}
if (!plane_image) {
DLOG(ERROR) << "Could not create GL image";
- OnError();
return;
}
resource->gl_image = std::move(plane_image);
if (!resource->gl_image->BindTexImage(GL_TEXTURE_EXTERNAL_OES)) {
DLOG(ERROR) << "Could not bind GL image to texture";
- OnError();
return;
}
@@ -372,7 +372,6 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
resource);
if (!plane_sk_images[plane]) {
DLOG(ERROR) << "Could not create planar SkImage";
- OnError();
return;
}
// No need for us to call the resource cleaner. Skia should do that.
@@ -383,7 +382,6 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
// |native_pixmap_handle| member of a GpuMemoryBufferHandle.
NOTIMPLEMENTED()
<< "Image decode acceleration is unsupported for this platform";
- OnError();
return;
#endif
@@ -395,7 +393,6 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
channel_->LookupCommandBuffer(params.raster_decoder_route_id);
if (!command_buffer) {
DLOG(ERROR) << "Could not find the command buffer";
- OnError();
return;
}
scoped_refptr<Buffer> handle_buffer =
@@ -403,13 +400,11 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
if (!DiscardableHandleBase::ValidateParameters(
handle_buffer.get(), params.discardable_handle_shm_offset)) {
DLOG(ERROR) << "Could not validate the discardable handle parameters";
- OnError();
return;
}
DCHECK(command_buffer->decoder_context());
if (command_buffer->decoder_context()->GetRasterDecoderId() < 0) {
DLOG(ERROR) << "Could not get the raster decoder ID";
- OnError();
return;
}
@@ -441,21 +436,18 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
completed_decode->yuv_color_space,
completed_decode->buffer_byte_size, params.needs_mips)) {
DLOG(ERROR) << "Could not create and insert the transfer cache entry";
- OnError();
return;
}
}
DCHECK(notify_gl_state_changed);
notify_gl_state_changed->RunAndReset();
+}
- // All done! The decoded image can now be used for rasterization, so we can
- // release the decode sync token.
+void ImageDecodeAcceleratorStub::FinishCompletedDecode(
+ uint64_t decode_release_count) {
+ DCHECK(main_task_runner_->BelongsToCurrentThread());
+ lock_.AssertAcquired();
sync_point_client_state_->ReleaseFenceSync(decode_release_count);
-
- // If there are no more completed decodes to be processed, we can disable the
- // sequence: when the next decode is completed, the sequence will be
- // re-enabled.
- pending_completed_decodes_.pop();
if (pending_completed_decodes_.empty())
channel_->scheduler()->DisableSequence(sequence_);
}
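
The refactor above hinges on base::ScopedClosureRunner: FinishCompletedDecode() is bound once near the top of ProcessCompletedDecode() and therefore runs on every early-return path. A minimal sketch of that pattern (illustrative only; ReleaseToken is a stand-in for the real sync-token release):

#include <cstdint>

#include "base/bind.h"
#include "base/callback_helpers.h"

void ReleaseToken(uint64_t release_count);  // Stand-in for the real release.

void ProcessOneDecode(bool decode_ok, uint64_t release_count) {
  // |finalizer| runs ReleaseToken(release_count) when it goes out of scope,
  // no matter which return statement below is taken.
  base::ScopedClosureRunner finalizer(
      base::BindOnce(&ReleaseToken, release_count));

  if (!decode_ok)
    return;  // The token is still released.

  // ... create the transfer cache entry; the token is released afterwards too.
}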
@@ -464,19 +456,13 @@ void ImageDecodeAcceleratorStub::OnDecodeCompleted(
gfx::Size expected_output_size,
std::unique_ptr<ImageDecodeAcceleratorWorker::DecodeResult> result) {
base::AutoLock lock(lock_);
- if (!channel_ || destroying_channel_) {
+ if (!channel_) {
// The channel is no longer available, so don't do anything.
return;
}
- if (!result) {
- DLOG(ERROR) << "The decode failed";
- OnError();
- return;
- }
-
// A sanity check on the output of the decoder.
- DCHECK(expected_output_size == result->visible_size);
+ DCHECK(!result || expected_output_size == result->visible_size);
// The decode is ready to be processed: add it to |pending_completed_decodes_|
// so that ProcessCompletedDecode() can pick it up.
@@ -488,19 +474,4 @@ void ImageDecodeAcceleratorStub::OnDecodeCompleted(
channel_->scheduler()->EnableSequence(sequence_);
}
-void ImageDecodeAcceleratorStub::OnError() {
- lock_.AssertAcquired();
- DCHECK(channel_);
-
- // Trigger the destruction of the channel and stop processing further
- // completed decodes, even if they're successful. We can't call
- // GpuChannel::OnChannelError() directly because that will end up calling
- // ImageDecodeAcceleratorStub::Shutdown() while |lock_| is still acquired. So,
- // we post a task to the main thread instead.
- destroying_channel_ = true;
- channel_->task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(&GpuChannel::OnChannelError, channel_->AsWeakPtr()));
-}
-
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.h b/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
index 41256a3067e..b3552f98573 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
@@ -76,23 +76,22 @@ class GPU_IPC_SERVICE_EXPORT ImageDecodeAcceleratorStub
uint64_t release_count);
// Creates the service-side cache entry for a completed decode and releases
- // the decode sync token.
+ // the decode sync token. If the decode was unsuccessful, no cache entry is
+ // created but the decode sync token is still released.
void ProcessCompletedDecode(GpuChannelMsg_ScheduleImageDecode_Params params,
uint64_t decode_release_count);
- // The |worker_| calls this when a decode is completed. If the decode is
- // successful, |sequence_| will be enabled so that ProcessCompletedDecode() is
- // called. If the decode is not successful, we destroy the channel (see
- // OnError()).
+ // Releases the decode sync token corresponding to |decode_release_count| and
+ // disables |sequence_| if there are no more decodes to process for now.
+ void FinishCompletedDecode(uint64_t decode_release_count)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // The |worker_| calls this when a decode is completed. |result| is enqueued
+ // and |sequence_| is enabled so that ProcessCompletedDecode() picks it up.
void OnDecodeCompleted(
gfx::Size expected_output_size,
std::unique_ptr<ImageDecodeAcceleratorWorker::DecodeResult> result);
- // Triggers the destruction of the channel asynchronously and makes it so that
- // we stop accepting completed decodes. On entry, |channel_| must not be
- // nullptr.
- void OnError() EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
// The object to which the actual decoding can be delegated.
ImageDecodeAcceleratorWorker* worker_ = nullptr;
@@ -103,8 +102,6 @@ class GPU_IPC_SERVICE_EXPORT ImageDecodeAcceleratorStub
GUARDED_BY(lock_);
base::queue<std::unique_ptr<ImageDecodeAcceleratorWorker::DecodeResult>>
pending_completed_decodes_ GUARDED_BY(lock_);
- bool destroying_channel_ GUARDED_BY(lock_) = false;
- uint64_t last_release_count_ GUARDED_BY(lock_) = 0;
ImageFactory* external_image_factory_for_testing_ = nullptr;
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
index 23830d15ddd..b190cfcc37d 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
@@ -73,6 +73,11 @@ class MemoryTracker;
namespace {
+struct ExpectedCacheEntry {
+ uint32_t id = 0u;
+ SkISize dimensions;
+};
+
std::unique_ptr<MemoryTracker> CreateMockMemoryTracker(
const GPUCreateCommandBufferConfig& init_params) {
return std::make_unique<gles2::MockMemoryTracker>();
@@ -204,8 +209,7 @@ class ImageDecodeAcceleratorStubTest
int GetRasterDecoderId() {
GpuChannel* channel = channel_manager()->LookupChannel(kChannelId);
- if (!channel)
- return -1;
+ DCHECK(channel);
CommandBufferStub* command_buffer =
channel->LookupCommandBuffer(kCommandBufferRouteId);
if (!command_buffer || !command_buffer->decoder_context())
@@ -283,7 +287,7 @@ class ImageDecodeAcceleratorStubTest
scoped_refptr<Buffer> buffer,
uint64_t handle_release_count) {
GpuChannel* channel = channel_manager()->LookupChannel(kChannelId);
- CHECK(channel);
+ DCHECK(channel);
CommandBufferStub* command_buffer =
channel->LookupCommandBuffer(kCommandBufferRouteId);
CHECK(command_buffer);
@@ -295,12 +299,11 @@ class ImageDecodeAcceleratorStubTest
// the raster sequence) to register the handle's buffer and release the sync
// token corresponding to |handle_release_count| (see the
// RegisterDiscardableHandleBuffer() method). Returns an invalid handle if the
- // GPU channel or the command buffer doesn't exist.
+ // command buffer doesn't exist.
ClientDiscardableHandle CreateDiscardableHandle(
uint64_t handle_release_count) {
GpuChannel* channel = channel_manager()->LookupChannel(kChannelId);
- if (!channel)
- return ClientDiscardableHandle();
+ DCHECK(channel);
CommandBufferStub* command_buffer =
channel->LookupCommandBuffer(kCommandBufferRouteId);
if (!command_buffer)
@@ -324,20 +327,14 @@ class ImageDecodeAcceleratorStubTest
// (|decode_release_count|), the transfer cache entry ID
// (|transfer_cache_entry_id|), and the release count of the sync token that
// is signaled after the discardable handle's buffer has been registered in
- // the TransferBufferManager. If the channel does not exist or the discardable
- // handle can't be created, this function returns an empty sync token.
+ // the TransferBufferManager. If the discardable handle can't be created, this
+ // function returns an empty sync token.
SyncToken SendDecodeRequest(const gfx::Size& output_size,
uint64_t decode_release_count,
uint32_t transfer_cache_entry_id,
uint64_t handle_release_count) {
GpuChannel* channel = channel_manager()->LookupChannel(kChannelId);
- if (!channel) {
- // It's possible that the channel was destroyed as part of an earlier
- // SendDecodeRequest() call. This would happen if
- // ImageDecodeAcceleratorStub::OnScheduleImageDecode decides to destroy
- // the channel.
- return SyncToken();
- }
+ DCHECK(channel);
// Create the decode sync token for the decode request so that we can test
// that it's actually released.
@@ -383,7 +380,8 @@ class ImageDecodeAcceleratorStubTest
}
}
- void CheckTransferCacheEntries(std::vector<SkISize> expected_sizes) {
+ void CheckTransferCacheEntries(
+ const std::vector<ExpectedCacheEntry>& expected_entries) {
ServiceTransferCache* transfer_cache = GetServiceTransferCache();
ASSERT_TRUE(transfer_cache);
@@ -391,8 +389,8 @@ class ImageDecodeAcceleratorStubTest
// expected.
const size_t num_actual_cache_entries =
transfer_cache->entries_count_for_testing();
- ASSERT_EQ(expected_sizes.size(), num_actual_cache_entries);
- if (expected_sizes.empty())
+ ASSERT_EQ(expected_entries.size(), num_actual_cache_entries);
+ if (expected_entries.empty())
return;
// Then, check the dimensions of the entries to make sure they are as
@@ -402,7 +400,8 @@ class ImageDecodeAcceleratorStubTest
for (size_t i = 0; i < num_actual_cache_entries; i++) {
auto* decode_entry = static_cast<cc::ServiceImageTransferCacheEntry*>(
transfer_cache->GetEntry(ServiceTransferCache::EntryKey(
- raster_decoder_id, cc::TransferCacheEntryType::kImage, i + 1)));
+ raster_decoder_id, cc::TransferCacheEntryType::kImage,
+ expected_entries[i].id)));
ASSERT_TRUE(decode_entry);
ASSERT_EQ(gfx::NumberOfPlanesForLinearBufferFormat(GetParam()),
decode_entry->plane_images().size());
@@ -412,9 +411,9 @@ class ImageDecodeAcceleratorStubTest
EXPECT_TRUE(decode_entry->plane_images()[plane]->isTextureBacked());
}
ASSERT_TRUE(decode_entry->image());
- EXPECT_EQ(expected_sizes[i].width(),
+ EXPECT_EQ(expected_entries[i].dimensions.width(),
decode_entry->image()->dimensions().width());
- EXPECT_EQ(expected_sizes[i].height(),
+ EXPECT_EQ(expected_entries[i].dimensions.height(),
decode_entry->image()->dimensions().height());
}
}
@@ -471,11 +470,9 @@ TEST_P(ImageDecodeAcceleratorStubTest,
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
- // The channel should still exist at the end.
- EXPECT_TRUE(channel_manager()->LookupChannel(kChannelId));
-
// Check that the decoded images are in the transfer cache.
- CheckTransferCacheEntries({SkISize::Make(100, 100), SkISize::Make(200, 200)});
+ CheckTransferCacheEntries(
+ {{1u, SkISize::Make(100, 100)}, {2u, SkISize::Make(200, 200)}});
}
// Tests the following flow: three decode requests are sent. The first decode
@@ -521,18 +518,14 @@ TEST_P(ImageDecodeAcceleratorStubTest,
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode3_sync_token));
- // The channel should still exist at the end.
- EXPECT_TRUE(channel_manager()->LookupChannel(kChannelId));
-
// Check that the decoded images are in the transfer cache.
- CheckTransferCacheEntries({SkISize::Make(100, 100), SkISize::Make(200, 200),
- SkISize::Make(300, 300)});
+ CheckTransferCacheEntries({{1u, SkISize::Make(100, 100)},
+ {2u, SkISize::Make(200, 200)},
+ {3u, SkISize::Make(300, 300)}});
}
// Tests the following flow: three decode requests are sent. The first decode
-// fails which should trigger the destruction of the channel. The second
-// succeeds and the third one fails. Regardless, the channel should still be
-// destroyed and all sync tokens should be released.
+// fails, the second succeeds, and the third one fails.
TEST_P(ImageDecodeAcceleratorStubTest, FailedDecodes) {
{
InSequence call_sequence;
@@ -561,25 +554,29 @@ TEST_P(ImageDecodeAcceleratorStubTest, FailedDecodes) {
EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode3_sync_token));
+
+ // All decode sync tokens should be released after completing all the decodes.
image_decode_accelerator_worker_.FinishOneDecode(false);
image_decode_accelerator_worker_.FinishOneDecode(true);
image_decode_accelerator_worker_.FinishOneDecode(false);
-
- // We expect the destruction of the ImageDecodeAcceleratorStub, which also
- // implies that all decode sync tokens should be released.
RunTasksUntilIdle();
- EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode3_sync_token));
- // We expect no entries in the transfer cache.
- CheckTransferCacheEntries({});
+ // There should only be one image in the transfer cache (the one that
+ // succeeded).
+ CheckTransferCacheEntries({{2u, SkISize::Make(200, 200)}});
}
TEST_P(ImageDecodeAcceleratorStubTest, OutOfOrderDecodeSyncTokens) {
- EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100)))
- .Times(1);
+ {
+ InSequence call_sequence;
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100)))
+ .Times(1);
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(200, 200)))
+ .Times(1);
+ }
const SyncToken decode1_sync_token = SendDecodeRequest(
gfx::Size(100, 100) /* output_size */, 2u /* decode_release_count */,
1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
@@ -590,62 +587,87 @@ TEST_P(ImageDecodeAcceleratorStubTest, OutOfOrderDecodeSyncTokens) {
2u /* transfer_cache_entry_id */, 2u /* handle_release_count */);
ASSERT_TRUE(decode2_sync_token.HasData());
- // We expect the destruction of the ImageDecodeAcceleratorStub, which also
- // implies that all decode sync tokens should be released.
+ // A decode sync token should not be released before a decode is finished.
+ RunTasksUntilIdle();
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
+
+ // Since the sync tokens are out of order, releasing the first one should also
+ // release the second one.
+ image_decode_accelerator_worker_.FinishOneDecode(true);
RunTasksUntilIdle();
- EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
- // We expect no entries in the transfer cache.
- CheckTransferCacheEntries({});
+ // We only expect the first image in the transfer cache.
+ CheckTransferCacheEntries({{1u, SkISize::Make(100, 100)}});
+
+ // Finishing the second decode should not "unrelease" the first sync token.
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ RunTasksUntilIdle();
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
+ CheckTransferCacheEntries(
+ {{1u, SkISize::Make(100, 100)}, {2u, SkISize::Make(200, 200)}});
}
TEST_P(ImageDecodeAcceleratorStubTest, ZeroReleaseCountDecodeSyncToken) {
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100)))
+ .Times(1);
const SyncToken decode_sync_token = SendDecodeRequest(
gfx::Size(100, 100) /* output_size */, 0u /* decode_release_count */,
1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
ASSERT_TRUE(decode_sync_token.HasData());
- // We expect the destruction of the ImageDecodeAcceleratorStub, which also
- // implies that all decode sync tokens should be released.
+ // A zero-release count sync token is always considered released.
RunTasksUntilIdle();
- EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
- // We expect no entries in the transfer cache.
- CheckTransferCacheEntries({});
+ // Even though the release count is not really valid, we can still finish the
+ // decode.
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ RunTasksUntilIdle();
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+ CheckTransferCacheEntries({{1u, SkISize::Make(100, 100)}});
}
TEST_P(ImageDecodeAcceleratorStubTest, ZeroWidthOutputSize) {
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(0, 100)))
+ .Times(1);
const SyncToken decode_sync_token = SendDecodeRequest(
gfx::Size(0, 100) /* output_size */, 1u /* decode_release_count */,
1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
ASSERT_TRUE(decode_sync_token.HasData());
- // We expect the destruction of the ImageDecodeAcceleratorStub, which also
- // implies that all decode sync tokens should be released.
+ // A decode sync token should not be released before a decode is finished.
RunTasksUntilIdle();
- EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
- EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
- // We expect no entries in the transfer cache.
+ // Even though the output size is not valid, we can still finish the decode.
+ // We just shouldn't get any entries in the transfer cache.
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ RunTasksUntilIdle();
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
CheckTransferCacheEntries({});
}
TEST_P(ImageDecodeAcceleratorStubTest, ZeroHeightOutputSize) {
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 0)))
+ .Times(1);
const SyncToken decode_sync_token = SendDecodeRequest(
gfx::Size(100, 0) /* output_size */, 1u /* decode_release_count */,
1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
ASSERT_TRUE(decode_sync_token.HasData());
- // We expect the destruction of the ImageDecodeAcceleratorStub, which also
- // implies that all decode sync tokens should be released.
+ // A decode sync token should not be released before a decode is finished.
RunTasksUntilIdle();
- EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
- EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
- // We expect no entries in the transfer cache.
+ // Even though the output size is not valid, we can still finish the decode.
+ // We just shouldn't get any entries in the transfer cache.
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ RunTasksUntilIdle();
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
CheckTransferCacheEntries({});
}
@@ -683,14 +705,6 @@ TEST_P(ImageDecodeAcceleratorStubTest, WaitForDiscardableHandleRegistration) {
RunTasksUntilIdle();
EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
- // Let's make sure that the channel and the command buffer are still alive
- // because if we didn't wait for the discardable handle's buffer to be
- // registered, we could have caused a channel teardown.
- ASSERT_TRUE(channel_manager()->LookupChannel(kChannelId));
- ASSERT_TRUE(channel_manager()
- ->LookupChannel(kChannelId)
- ->LookupCommandBuffer(kCommandBufferRouteId));
-
// Now let's register the discardable handle's buffer by re-enabling the
// raster sequence. This should trigger the processing of the completed decode
// and the subsequent release of the decode sync token.
@@ -698,17 +712,14 @@ TEST_P(ImageDecodeAcceleratorStubTest, WaitForDiscardableHandleRegistration) {
RunTasksUntilIdle();
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
- // The channel should still exist at the end.
- EXPECT_TRUE(channel_manager()->LookupChannel(kChannelId));
-
// Check that the decoded images are in the transfer cache.
- CheckTransferCacheEntries({SkISize::Make(100, 100)});
+ CheckTransferCacheEntries({{1u, SkISize::Make(100, 100)}});
}
// TODO(andrescj): test the deletion of transfer cache entries.
INSTANTIATE_TEST_SUITE_P(
- ,
+ All,
ImageDecodeAcceleratorStubTest,
::testing::Values(gfx::BufferFormat::YVU_420,
gfx::BufferFormat::YUV_420_BIPLANAR));
diff --git a/chromium/gpu/ipc/service/image_transport_surface_delegate.h b/chromium/gpu/ipc/service/image_transport_surface_delegate.h
index 44430c0ed95..1e1319d7535 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_delegate.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_delegate.h
@@ -47,6 +47,9 @@ class GPU_IPC_SERVICE_EXPORT ImageTransportSurfaceDelegate {
// Callback for GPU vsync signal. May be called on a different thread.
virtual viz::GpuVSyncCallback GetGpuVSyncCallback() = 0;
+  // Returns how long the GPU thread was blocked since the last swap. Used for
+  // metrics.
+ virtual base::TimeDelta GetGpuBlockedTimeSinceLastSwap() = 0;
+
protected:
virtual ~ImageTransportSurfaceDelegate() = default;
};
diff --git a/chromium/gpu/ipc/service/image_transport_surface_linux.cc b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
index 41a2d297482..c5c4d6ce7ed 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_linux.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
@@ -23,8 +23,10 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
#endif
if (!surface) {
surface = gl::init::CreateViewGLSurface(surface_handle);
- if (gl::GetGLImplementation() == gl::kGLImplementationDesktopGL)
+ if (gl::GetGLImplementation() == gl::kGLImplementationDesktopGL ||
+ gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE) {
override_vsync_for_multi_window_swap = true;
+ }
}
if (!surface)
return surface;
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
index f343b7e0e04..f65ad035e90 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
@@ -82,7 +82,7 @@ class ImageTransportSurfaceOverlayMacBase : public BaseClass,
bool IsSurfaceless() const override;
// ui::GpuSwitchingObserver implementation.
- void OnGpuSwitched() override;
+ void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) override;
private:
~ImageTransportSurfaceOverlayMacBase() override;
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index fa1ef9c99b2..c1af03a268d 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -316,7 +316,8 @@ bool ImageTransportSurfaceOverlayMacBase<BaseClass>::Resize(
}
template <typename BaseClass>
-void ImageTransportSurfaceOverlayMacBase<BaseClass>::OnGpuSwitched() {
+void ImageTransportSurfaceOverlayMacBase<BaseClass>::OnGpuSwitched(
+ gl::GpuPreference active_gpu_heuristic) {
// Create a new context, and use the GL renderer ID that the new context gets.
scoped_refptr<ui::IOSurfaceContext> context_on_new_gpu =
ui::IOSurfaceContext::Get(ui::IOSurfaceContext::kCALayerContext);
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
index 0230484ec25..fa58d426738 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
@@ -9,6 +9,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
+#include "base/metrics/histogram_macros.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/swap_buffers_complete_params.h"
#include "ui/gfx/vsync_provider.h"
@@ -169,6 +170,29 @@ void PassThroughImageTransportSurface::SetVSyncEnabled(bool enabled) {
GLSurfaceAdapter::SetVSyncEnabled(enabled);
}
+void PassThroughImageTransportSurface::TrackMultiSurfaceSwap() {
+ // This code is a simple way of enforcing that we only vsync if one surface
+ // is swapping per frame. This provides single window cases a stable refresh
+ // while allowing multi-window cases to not slow down due to multiple syncs
+ // on a single thread. A better way to fix this problem would be to have
+ // each surface present on its own thread.
+ if (g_current_swap_generation_ == swap_generation_) {
+ // No other surface has swapped since we swapped last time.
+ if (g_num_swaps_in_current_swap_generation_ > 1)
+ g_last_multi_window_swap_generation_ = g_current_swap_generation_;
+ g_num_swaps_in_current_swap_generation_ = 0;
+ g_current_swap_generation_++;
+ }
+
+ swap_generation_ = g_current_swap_generation_;
+ g_num_swaps_in_current_swap_generation_++;
+
+ multiple_surfaces_swapped_ =
+ (g_num_swaps_in_current_swap_generation_ > 1) ||
+ (g_current_swap_generation_ - g_last_multi_window_swap_generation_ <
+ kMultiWindowSwapEnableVSyncDelay);
+}
+
void PassThroughImageTransportSurface::UpdateVSyncEnabled() {
if (is_gpu_vsync_disabled_) {
SetVSyncEnabled(false);
@@ -177,33 +201,14 @@ void PassThroughImageTransportSurface::UpdateVSyncEnabled() {
bool should_override_vsync = false;
if (is_multi_window_swap_vsync_override_enabled_) {
- // This code is a simple way of enforcing that we only vsync if one surface
- // is swapping per frame. This provides single window cases a stable refresh
- // while allowing multi-window cases to not slow down due to multiple syncs
- // on a single thread. A better way to fix this problem would be to have
- // each surface present on its own thread.
-
- if (g_current_swap_generation_ == swap_generation_) {
- // No other surface has swapped since we swapped last time.
- if (g_num_swaps_in_current_swap_generation_ > 1)
- g_last_multi_window_swap_generation_ = g_current_swap_generation_;
- g_num_swaps_in_current_swap_generation_ = 0;
- g_current_swap_generation_++;
- }
-
- swap_generation_ = g_current_swap_generation_;
- g_num_swaps_in_current_swap_generation_++;
-
- should_override_vsync =
- (g_num_swaps_in_current_swap_generation_ > 1) ||
- (g_current_swap_generation_ - g_last_multi_window_swap_generation_ <
- kMultiWindowSwapEnableVSyncDelay);
+ should_override_vsync = multiple_surfaces_swapped_;
}
SetVSyncEnabled(!should_override_vsync);
}
void PassThroughImageTransportSurface::StartSwapBuffers(
gfx::SwapResponse* response) {
+ TrackMultiSurfaceSwap();
UpdateVSyncEnabled();
#if DCHECK_IS_ON()
@@ -231,6 +236,32 @@ void PassThroughImageTransportSurface::FinishSwapBuffers(
#endif
if (delegate_) {
+ auto blocked_time_since_last_swap =
+ delegate_->GetGpuBlockedTimeSinceLastSwap();
+
+ if (!multiple_surfaces_swapped_) {
+ static constexpr base::TimeDelta kTimingMetricsHistogramMin =
+ base::TimeDelta::FromMicroseconds(5);
+ static constexpr base::TimeDelta kTimingMetricsHistogramMax =
+ base::TimeDelta::FromMilliseconds(500);
+ static constexpr uint32_t kTimingMetricsHistogramBuckets = 50;
+
+ base::TimeDelta delta =
+ response.timings.swap_end - response.timings.swap_start;
+ UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
+ "GPU.SwapTimeUs", delta, kTimingMetricsHistogramMin,
+ kTimingMetricsHistogramMax, kTimingMetricsHistogramBuckets);
+
+      // Report only if collection is enabled and supported on the current
+      // platform. See gpu::Scheduler::TakeTotalBlockingTime for details.
+ if (!blocked_time_since_last_swap.is_min()) {
+ UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
+ "GPU.GpuBlockedBetweenSwapsUs2", blocked_time_since_last_swap,
+ kTimingMetricsHistogramMin, kTimingMetricsHistogramMax,
+ kTimingMetricsHistogramBuckets);
+ }
+ }
+
SwapBuffersCompleteParams params;
params.swap_response = std::move(response);
delegate_->DidSwapBuffersComplete(std::move(params));
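
The !is_min() guard above uses base::TimeDelta::Min() as the "collection unavailable" sentinel. A hypothetical delegate-side sketch of how such a value could be produced (not the actual gpu::Scheduler implementation):

#include "base/time/time.h"

// Hypothetical sketch: report the accumulated blocked time only when
// collection is enabled and supported; otherwise return TimeDelta::Min() so
// the caller's !is_min() check skips the histogram.
base::TimeDelta GetBlockedTimeSinceLastSwapSketch(bool collection_supported,
                                                  base::TimeDelta blocked) {
  if (!collection_supported)
    return base::TimeDelta::Min();
  return blocked;
}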
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
index b11596dac75..e463dc1e95b 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
@@ -55,6 +55,7 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
private:
~PassThroughImageTransportSurface() override;
+ void TrackMultiSurfaceSwap();
void UpdateVSyncEnabled();
void StartSwapBuffers(gfx::SwapResponse* response);
@@ -74,6 +75,7 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
base::WeakPtr<ImageTransportSurfaceDelegate> delegate_;
int swap_generation_ = 0;
bool vsync_enabled_ = true;
+ bool multiple_surfaces_swapped_ = false;
// Local swap ids, which are used to make sure the swap order is correct and
// the presentation callbacks are not called earlier than the swap ack of the
diff --git a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
index d2d4bb303bc..e9c23ac950b 100644
--- a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
@@ -8,7 +8,7 @@
#include <utility>
#include "base/macros.h"
-#include "base/memory/shared_memory.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"
@@ -138,7 +138,10 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
: "0");
scoped_refptr<gl::GLContext> context = shared_context_state->context();
- if (!shared_context_state->MakeCurrent(nullptr)) {
+ // Raster decoder needs gl context for GPUTracing.
+  // TODO(penghuang): get rid of the GL dependency when GL is not used for
+ // raster. https://crbug.com/c/1018725
+ if (!shared_context_state->MakeCurrent(nullptr, true /* needs_gl */)) {
LOG(ERROR) << "ContextResult::kTransientFailure: "
"Failed to make context current.";
return gpu::ContextResult::kTransientFailure;
diff --git a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
index ad83c5bf8eb..11d71cc56db 100644
--- a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
@@ -8,7 +8,7 @@
#include <utility>
#include "base/macros.h"
-#include "base/memory/shared_memory.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"