summaryrefslogtreecommitdiff
path: root/chromium/gpu/ipc
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/gpu/ipc')
-rw-r--r--chromium/gpu/ipc/client/client_shared_image_interface.cc17
-rw-r--r--chromium/gpu/ipc/client/client_shared_image_interface.h6
-rw-r--r--chromium/gpu/ipc/client/shared_image_interface_proxy.cc36
-rw-r--r--chromium/gpu/ipc/client/shared_image_interface_proxy.h6
-rw-r--r--chromium/gpu/ipc/common/BUILD.gn29
-rw-r--r--chromium/gpu/ipc/common/PRESUBMIT.py4
-rw-r--r--chromium/gpu/ipc/common/gpu_info.mojom19
-rw-r--r--chromium/gpu/ipc/common/gpu_info_mojom_traits.cc25
-rw-r--r--chromium/gpu/ipc/common/gpu_info_mojom_traits.h47
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc1
-rw-r--r--chromium/gpu/ipc/common/gpu_messages.h6
-rw-r--r--chromium/gpu/ipc/common/gpu_param_traits_macros.h5
-rw-r--r--chromium/gpu/ipc/common/luid.mojom12
-rw-r--r--chromium/gpu/ipc/common/luid_mojom_traits.h27
-rw-r--r--chromium/gpu/ipc/in_process_gpu_thread_holder.cc10
-rw-r--r--chromium/gpu/ipc/scheduler_sequence.h2
-rw-r--r--chromium/gpu/ipc/service/BUILD.gn2
-rw-r--r--chromium/gpu/ipc/service/context_url.cc7
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_manager.cc45
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_manager.h4
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_manager_delegate.h3
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_test_common.cc11
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_test_common.h7
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_unittest.cc2
-rw-r--r--chromium/gpu/ipc/service/gpu_init.cc270
-rw-r--r--chromium/gpu/ipc/service/gpu_init.h3
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_ablation_experiment.cc227
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_ablation_experiment.h133
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread.cc71
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread.h13
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc4
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h7
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_linux.cc1
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h23
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm93
-rw-r--r--chromium/gpu/ipc/service/pass_through_image_transport_surface.cc13
-rw-r--r--chromium/gpu/ipc/service/pass_through_image_transport_surface.h3
-rw-r--r--chromium/gpu/ipc/service/shared_image_stub.cc23
-rw-r--r--chromium/gpu/ipc/service/shared_image_stub.h6
-rw-r--r--chromium/gpu/ipc/service/stream_texture_android.cc19
-rw-r--r--chromium/gpu/ipc/service/stream_texture_android.h1
-rw-r--r--chromium/gpu/ipc/shared_image_interface_in_process.cc40
-rw-r--r--chromium/gpu/ipc/shared_image_interface_in_process.h9
43 files changed, 938 insertions, 354 deletions
diff --git a/chromium/gpu/ipc/client/client_shared_image_interface.cc b/chromium/gpu/ipc/client/client_shared_image_interface.cc
index f9a9acbd066..dbfb3874876 100644
--- a/chromium/gpu/ipc/client/client_shared_image_interface.cc
+++ b/chromium/gpu/ipc/client/client_shared_image_interface.cc
@@ -40,8 +40,10 @@ void ClientSharedImageInterface::PresentSwapChain(const SyncToken& sync_token,
#if defined(OS_FUCHSIA)
void ClientSharedImageInterface::RegisterSysmemBufferCollection(
gfx::SysmemBufferCollectionId id,
- zx::channel token) {
- proxy_->RegisterSysmemBufferCollection(id, std::move(token));
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
+ proxy_->RegisterSysmemBufferCollection(id, std::move(token), format, usage);
}
void ClientSharedImageInterface::ReleaseSysmemBufferCollection(
@@ -58,6 +60,11 @@ SyncToken ClientSharedImageInterface::GenVerifiedSyncToken() {
return proxy_->GenVerifiedSyncToken();
}
+void ClientSharedImageInterface::WaitSyncToken(
+ const gpu::SyncToken& sync_token) {
+ proxy_->WaitSyncToken(sync_token);
+}
+
void ClientSharedImageInterface::Flush() {
proxy_->Flush();
}
@@ -124,6 +131,12 @@ uint32_t ClientSharedImageInterface::UsageForMailbox(const Mailbox& mailbox) {
return proxy_->UsageForMailbox(mailbox);
}
+void ClientSharedImageInterface::NotifyMailboxAdded(const Mailbox& mailbox,
+ uint32_t usage) {
+ AddMailbox(mailbox);
+ proxy_->NotifyMailboxAdded(mailbox, usage);
+}
+
Mailbox ClientSharedImageInterface::AddMailbox(const gpu::Mailbox& mailbox) {
if (mailbox.IsZero())
return mailbox;
diff --git a/chromium/gpu/ipc/client/client_shared_image_interface.h b/chromium/gpu/ipc/client/client_shared_image_interface.h
index 78771d64ef4..64e6edca80a 100644
--- a/chromium/gpu/ipc/client/client_shared_image_interface.h
+++ b/chromium/gpu/ipc/client/client_shared_image_interface.h
@@ -32,11 +32,14 @@ class GPU_EXPORT ClientSharedImageInterface : public SharedImageInterface {
const Mailbox& mailbox) override;
#if defined(OS_FUCHSIA)
void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token) override;
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) override;
void ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id) override;
#endif // defined(OS_FUCHSIA)
SyncToken GenUnverifiedSyncToken() override;
SyncToken GenVerifiedSyncToken() override;
+ void WaitSyncToken(const gpu::SyncToken& sync_token) override;
void Flush() override;
scoped_refptr<gfx::NativePixmap> GetNativePixmap(
const Mailbox& mailbox) override;
@@ -62,6 +65,7 @@ class GPU_EXPORT ClientSharedImageInterface : public SharedImageInterface {
void DestroySharedImage(const SyncToken& sync_token,
const Mailbox& mailbox) override;
uint32_t UsageForMailbox(const Mailbox& mailbox) override;
+ void NotifyMailboxAdded(const Mailbox& mailbox, uint32_t usage) override;
private:
Mailbox AddMailbox(const Mailbox& mailbox);
diff --git a/chromium/gpu/ipc/client/shared_image_interface_proxy.cc b/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
index 64b88276295..3085268e547 100644
--- a/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
+++ b/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
@@ -260,6 +260,28 @@ SyncToken SharedImageInterfaceProxy::GenUnverifiedSyncToken() {
next_release_id_);
}
+void SharedImageInterfaceProxy::WaitSyncToken(const SyncToken& sync_token) {
+ if (!sync_token.HasData())
+ return;
+
+ std::vector<SyncToken> dependencies;
+ dependencies.push_back(sync_token);
+ SyncToken& new_token = dependencies.back();
+ if (!new_token.verified_flush()) {
+ // Only allow unverified sync tokens for the same channel.
+ DCHECK_EQ(sync_token.namespace_id(), gpu::CommandBufferNamespace::GPU_IO);
+ int sync_token_channel_id =
+ ChannelIdFromCommandBufferId(sync_token.command_buffer_id());
+ DCHECK_EQ(sync_token_channel_id, host_->channel_id());
+ new_token.SetVerifyFlush();
+ }
+ {
+ base::AutoLock lock(lock_);
+ last_flush_id_ = host_->EnqueueDeferredMessage(GpuChannelMsg_Nop(),
+ std::move(dependencies));
+ }
+}
+
void SharedImageInterfaceProxy::Flush() {
base::AutoLock lock(lock_);
host_->EnsureFlush(last_flush_id_);
@@ -390,9 +412,11 @@ void SharedImageInterfaceProxy::PresentSwapChain(const SyncToken& sync_token,
#if defined(OS_FUCHSIA)
void SharedImageInterfaceProxy::RegisterSysmemBufferCollection(
gfx::SysmemBufferCollectionId id,
- zx::channel token) {
- host_->Send(
- new GpuChannelMsg_RegisterSysmemBufferCollection(route_id_, id, token));
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
+ host_->Send(new GpuChannelMsg_RegisterSysmemBufferCollection(
+ route_id_, id, token, format, usage));
}
void SharedImageInterfaceProxy::ReleaseSysmemBufferCollection(
@@ -427,4 +451,10 @@ uint32_t SharedImageInterfaceProxy::UsageForMailbox(const Mailbox& mailbox) {
return it->second;
}
+void SharedImageInterfaceProxy::NotifyMailboxAdded(const Mailbox& mailbox,
+ uint32_t usage) {
+ base::AutoLock lock(lock_);
+ AddMailbox(mailbox, usage);
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/client/shared_image_interface_proxy.h b/chromium/gpu/ipc/client/shared_image_interface_proxy.h
index b22b61d3237..0ad687fde81 100644
--- a/chromium/gpu/ipc/client/shared_image_interface_proxy.h
+++ b/chromium/gpu/ipc/client/shared_image_interface_proxy.h
@@ -41,6 +41,7 @@ class SharedImageInterfaceProxy {
void DestroySharedImage(const SyncToken& sync_token, const Mailbox& mailbox);
SyncToken GenVerifiedSyncToken();
SyncToken GenUnverifiedSyncToken();
+ void WaitSyncToken(const SyncToken& sync_token);
void Flush();
SharedImageInterface::SwapChainMailboxes CreateSwapChain(
@@ -52,13 +53,16 @@ class SharedImageInterfaceProxy {
#if defined(OS_FUCHSIA)
void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token);
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage);
void ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id);
#endif // defined(OS_FUCHSIA)
scoped_refptr<gfx::NativePixmap> GetNativePixmap(const gpu::Mailbox& mailbox);
uint32_t UsageForMailbox(const Mailbox& mailbox);
+ void NotifyMailboxAdded(const Mailbox& mailbox, uint32_t usage);
private:
bool GetSHMForPixelData(base::span<const uint8_t> pixel_data,
diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn
index a8f3c602db5..51c88a3817c 100644
--- a/chromium/gpu/ipc/common/BUILD.gn
+++ b/chromium/gpu/ipc/common/BUILD.gn
@@ -214,6 +214,10 @@ mojom("interfaces") {
"vulkan_ycbcr_info.mojom",
]
+ if (is_win) {
+ sources += [ "luid.mojom" ]
+ }
+
public_deps = [
":gpu_preferences_interface",
"//mojo/public/mojom/base",
@@ -269,6 +273,24 @@ mojom("interfaces") {
},
]
+ if (is_win) {
+ shared_cpp_typemaps += [
+ {
+ types = [
+ {
+ mojom = "gpu.mojom.Luid"
+ cpp = "::LUID"
+ },
+ ]
+ traits_headers = [ "luid_mojom_traits.h" ]
+ traits_public_deps = [
+ ":mojom_traits",
+ "//gpu/config",
+ ]
+ },
+ ]
+ }
+
cpp_typemaps = shared_cpp_typemaps
blink_cpp_typemaps = shared_cpp_typemaps
@@ -364,10 +386,6 @@ mojom("interfaces") {
cpp = "::gpu::CollectInfoResult"
},
{
- mojom = "gpu.mojom.Dx12VulkanVersionInfo"
- cpp = "::gpu::Dx12VulkanVersionInfo"
- },
- {
mojom = "gpu.mojom.OverlayInfo"
cpp = "::gpu::OverlayInfo"
},
@@ -580,6 +598,9 @@ source_set("mojom_traits") {
if (is_android) {
sources += [ "vulkan_ycbcr_info_mojom_traits.h" ]
}
+ if (is_win) {
+ sources += [ "luid_mojom_traits.h" ]
+ }
if (enable_vulkan) {
deps += [ ":vulkan_types_mojom_traits" ]
}
diff --git a/chromium/gpu/ipc/common/PRESUBMIT.py b/chromium/gpu/ipc/common/PRESUBMIT.py
index fa602d4a0ad..98ecf4b0a9c 100644
--- a/chromium/gpu/ipc/common/PRESUBMIT.py
+++ b/chromium/gpu/ipc/common/PRESUBMIT.py
@@ -26,8 +26,8 @@ def CommonChecks(input_api, output_api):
if generated_files and not generating_files:
long_text = 'Changed files:\n'
- for file in generated_files:
- long_text += file.LocalPath() + '\n'
+ for generated_file in generated_files:
+ long_text += generated_file.LocalPath() + '\n'
long_text += '\n'
messages.append(output_api.PresubmitError(
'Vulkan types generated files changed but the generator '
diff --git a/chromium/gpu/ipc/common/gpu_info.mojom b/chromium/gpu/ipc/common/gpu_info.mojom
index 7dc59093d2b..ad90a1ca7e9 100644
--- a/chromium/gpu/ipc/common/gpu_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_info.mojom
@@ -6,6 +6,8 @@
module gpu.mojom;
import "gpu/ipc/common/dx_diag_node.mojom";
+[EnableIf=is_win]
+import "gpu/ipc/common/luid.mojom";
import "mojo/public/mojom/base/time.mojom";
import "ui/gfx/geometry/mojom/geometry.mojom";
@@ -26,6 +28,8 @@ struct GpuDevice {
string driver_vendor;
string driver_version;
int32 cuda_compute_capability_major;
+ [EnableIf=is_win]
+ Luid luid;
};
// gpu::VideoCodecProfile
@@ -116,15 +120,6 @@ enum OverlaySupport {
SOFTWARE,
};
-// gpu::Dx12VulkanVersionInfo
-[EnableIf=is_win]
-struct Dx12VulkanVersionInfo {
- bool supports_dx12;
- bool supports_vulkan;
- uint32 d3d12_feature_level;
- uint32 vulkan_version;
-};
-
// gpu::OverlayInfo
[EnableIf=is_win]
struct OverlayInfo {
@@ -132,6 +127,8 @@ struct OverlayInfo {
bool supports_overlays;
OverlaySupport yuy2_overlay_support;
OverlaySupport nv12_overlay_support;
+ OverlaySupport bgra8_overlay_support;
+ OverlaySupport rgb10a2_overlay_support;
};
// Corresponds to |gpu::GPUInfo| in gpu/config/gpu_info.h
@@ -167,7 +164,9 @@ struct GpuInfo {
[EnableIf=is_win]
DxDiagNode dx_diagnostics;
[EnableIf=is_win]
- Dx12VulkanVersionInfo dx12_vulkan_version_info;
+ uint32 d3d12_feature_level;
+ [EnableIf=is_win]
+ uint32 vulkan_version;
[EnableIf=is_win]
OverlayInfo overlay_info;
diff --git a/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc b/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
index 3d7e314ba8c..1d967041854 100644
--- a/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
@@ -28,6 +28,9 @@ bool StructTraits<gpu::mojom::GpuDeviceDataView, gpu::GPUInfo::GPUDevice>::Read(
out->cuda_compute_capability_major = data.cuda_compute_capability_major();
return data.ReadVendorString(&out->vendor_string) &&
data.ReadDeviceString(&out->device_string) &&
+#if defined(OS_WIN)
+ data.ReadLuid(&out->luid) &&
+#endif // OS_WIN
data.ReadDriverVendor(&out->driver_vendor) &&
data.ReadDriverVersion(&out->driver_version);
}
@@ -355,25 +358,15 @@ bool EnumTraits<gpu::mojom::OverlaySupport, gpu::OverlaySupport>::FromMojom(
return true;
}
-// static
-bool StructTraits<gpu::mojom::Dx12VulkanVersionInfoDataView,
- gpu::Dx12VulkanVersionInfo>::
- Read(gpu::mojom::Dx12VulkanVersionInfoDataView data,
- gpu::Dx12VulkanVersionInfo* out) {
- out->supports_dx12 = data.supports_dx12();
- out->supports_vulkan = data.supports_vulkan();
- out->d3d12_feature_level = data.d3d12_feature_level();
- out->vulkan_version = data.vulkan_version();
- return true;
-}
-
bool StructTraits<gpu::mojom::OverlayInfoDataView, gpu::OverlayInfo>::Read(
gpu::mojom::OverlayInfoDataView data,
gpu::OverlayInfo* out) {
out->direct_composition = data.direct_composition();
out->supports_overlays = data.supports_overlays();
return data.ReadYuy2OverlaySupport(&out->yuy2_overlay_support) &&
- data.ReadNv12OverlaySupport(&out->nv12_overlay_support);
+ data.ReadNv12OverlaySupport(&out->nv12_overlay_support) &&
+ data.ReadBgra8OverlaySupport(&out->bgra8_overlay_support) &&
+ data.ReadRgb10a2OverlaySupport(&out->rgb10a2_overlay_support);
}
#endif
@@ -402,6 +395,11 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
out->oop_rasterization_supported = data.oop_rasterization_supported();
out->subpixel_font_rendering = data.subpixel_font_rendering();
+#if defined(OS_WIN)
+ out->d3d12_feature_level = data.d3d12_feature_level();
+ out->vulkan_version = data.vulkan_version();
+#endif
+
return data.ReadInitializationTime(&out->initialization_time) &&
data.ReadGpu(&out->gpu) &&
data.ReadSecondaryGpus(&out->secondary_gpus) &&
@@ -421,7 +419,6 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
#if defined(OS_WIN)
data.ReadOverlayInfo(&out->overlay_info) &&
data.ReadDxDiagnostics(&out->dx_diagnostics) &&
- data.ReadDx12VulkanVersionInfo(&out->dx12_vulkan_version_info) &&
#endif
data.ReadVideoDecodeAcceleratorCapabilities(
&out->video_decode_accelerator_capabilities) &&
diff --git a/chromium/gpu/ipc/common/gpu_info_mojom_traits.h b/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
index 5fc0b439a94..09778fffb31 100644
--- a/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
+++ b/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
@@ -36,6 +36,10 @@ struct StructTraits<gpu::mojom::GpuDeviceDataView, gpu::GPUInfo::GPUDevice> {
static uint32_t revision(const gpu::GPUInfo::GPUDevice& input) {
return input.revision;
}
+
+ static const LUID luid(const gpu::GPUInfo::GPUDevice& input) {
+ return input.luid;
+ }
#endif // OS_WIN
static bool active(const gpu::GPUInfo::GPUDevice& input) {
@@ -208,29 +212,6 @@ struct EnumTraits<gpu::mojom::OverlaySupport, gpu::OverlaySupport> {
};
template <>
-struct StructTraits<gpu::mojom::Dx12VulkanVersionInfoDataView,
- gpu::Dx12VulkanVersionInfo> {
- static bool Read(gpu::mojom::Dx12VulkanVersionInfoDataView data,
- gpu::Dx12VulkanVersionInfo* out);
-
- static bool supports_dx12(const gpu::Dx12VulkanVersionInfo& input) {
- return input.supports_dx12;
- }
-
- static bool supports_vulkan(const gpu::Dx12VulkanVersionInfo& input) {
- return input.supports_vulkan;
- }
-
- static uint32_t d3d12_feature_level(const gpu::Dx12VulkanVersionInfo& input) {
- return input.d3d12_feature_level;
- }
-
- static uint32_t vulkan_version(const gpu::Dx12VulkanVersionInfo& input) {
- return input.vulkan_version;
- }
-};
-
-template <>
struct StructTraits<gpu::mojom::OverlayInfoDataView, gpu::OverlayInfo> {
static bool Read(gpu::mojom::OverlayInfoDataView data, gpu::OverlayInfo* out);
@@ -251,6 +232,16 @@ struct StructTraits<gpu::mojom::OverlayInfoDataView, gpu::OverlayInfo> {
const gpu::OverlayInfo& input) {
return input.nv12_overlay_support;
}
+
+ static gpu::OverlaySupport bgra8_overlay_support(
+ const gpu::OverlayInfo& input) {
+ return input.bgra8_overlay_support;
+ }
+
+ static gpu::OverlaySupport rgb10a2_overlay_support(
+ const gpu::OverlayInfo& input) {
+ return input.rgb10a2_overlay_support;
+ }
};
#endif
@@ -360,14 +351,16 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
#endif // OS_MACOSX
#if defined(OS_WIN)
-
static const gpu::DxDiagNode& dx_diagnostics(const gpu::GPUInfo& input) {
return input.dx_diagnostics;
}
- static const gpu::Dx12VulkanVersionInfo& dx12_vulkan_version_info(
- const gpu::GPUInfo& input) {
- return input.dx12_vulkan_version_info;
+ static uint32_t d3d12_feature_level(const gpu::GPUInfo& input) {
+ return input.d3d12_feature_level;
+ }
+
+ static uint32_t vulkan_version(const gpu::GPUInfo& input) {
+ return input.vulkan_version;
}
static const gpu::OverlayInfo& overlay_info(const gpu::GPUInfo& input) {
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
index 500ff3cb9e4..051e8a95b75 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
diff --git a/chromium/gpu/ipc/common/gpu_messages.h b/chromium/gpu/ipc/common/gpu_messages.h
index ca201770ab5..db1bb624ef7 100644
--- a/chromium/gpu/ipc/common/gpu_messages.h
+++ b/chromium/gpu/ipc/common/gpu_messages.h
@@ -189,9 +189,11 @@ IPC_MESSAGE_ROUTED2(GpuChannelMsg_PresentSwapChain,
uint32_t /* release_id */)
#endif // OS_WIN
#if defined(OS_FUCHSIA)
-IPC_MESSAGE_ROUTED2(GpuChannelMsg_RegisterSysmemBufferCollection,
+IPC_MESSAGE_ROUTED4(GpuChannelMsg_RegisterSysmemBufferCollection,
gfx::SysmemBufferCollectionId /* id */,
- zx::channel /* token */)
+ zx::channel /* token */,
+ gfx::BufferFormat /* format */,
+ gfx::BufferUsage /* usage */)
IPC_MESSAGE_ROUTED1(GpuChannelMsg_ReleaseSysmemBufferCollection,
gfx::SysmemBufferCollectionId /* id */)
#endif // OS_FUCHSIA
diff --git a/chromium/gpu/ipc/common/gpu_param_traits_macros.h b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
index 9fd93a4b637..02296d13bde 100644
--- a/chromium/gpu/ipc/common/gpu_param_traits_macros.h
+++ b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
@@ -12,6 +12,7 @@
#include "gpu/command_buffer/common/swap_buffers_complete_params.h"
#include "gpu/gpu_export.h"
#include "gpu/ipc/common/gpu_command_buffer_traits.h"
+#include "gpu/ipc/common/surface_handle.h"
#include "ipc/ipc_message_macros.h"
#include "ui/gfx/ipc/gfx_param_traits.h"
#include "url/ipc/url_param_traits.h"
@@ -32,4 +33,8 @@ IPC_STRUCT_TRAITS_END()
IPC_ENUM_TRAITS_MAX_VALUE(viz::ResourceFormat, viz::RESOURCE_FORMAT_MAX)
+#if defined(USE_X11)
+IPC_ENUM_TRAITS(gpu::SurfaceHandle)
+#endif
+
#endif // GPU_IPC_COMMON_GPU_PARAM_TRAITS_MACROS_H_
diff --git a/chromium/gpu/ipc/common/luid.mojom b/chromium/gpu/ipc/common/luid.mojom
new file mode 100644
index 00000000000..68da5dbda46
--- /dev/null
+++ b/chromium/gpu/ipc/common/luid.mojom
@@ -0,0 +1,12 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module gpu.mojom;
+
+// Corresponds to LUID in dxgi.h
+[EnableIf=is_win]
+struct Luid {
+ int32 high;
+ uint32 low;
+};
diff --git a/chromium/gpu/ipc/common/luid_mojom_traits.h b/chromium/gpu/ipc/common/luid_mojom_traits.h
new file mode 100644
index 00000000000..e736c53c455
--- /dev/null
+++ b/chromium/gpu/ipc/common/luid_mojom_traits.h
@@ -0,0 +1,27 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_LUID_MOJOM_TRAITS_H_
+#define GPU_IPC_COMMON_LUID_MOJOM_TRAITS_H_
+
+#include "gpu/ipc/common/luid.mojom-shared.h"
+
+namespace mojo {
+
+template <>
+struct StructTraits<gpu::mojom::LuidDataView, LUID> {
+ static bool Read(gpu::mojom::LuidDataView data, LUID* out) {
+ out->HighPart = data.high();
+ out->LowPart = data.low();
+ return true;
+ }
+
+ static int32_t high(const LUID& input) { return input.HighPart; }
+
+ static uint32_t low(const LUID& input) { return input.LowPart; }
+};
+
+} // namespace mojo
+
+#endif // GPU_IPC_COMMON_LUID_MOJOM_TRAITS_H_
diff --git a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
index 5a027baac2c..796466533fb 100644
--- a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
+++ b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
@@ -83,17 +83,19 @@ void InProcessGpuThreadHolder::InitializeOnGpuThread(
GpuDriverBugWorkarounds gpu_driver_bug_workarounds(
gpu_feature_info_.enabled_gpu_driver_bug_workarounds);
- bool use_virtualized_gl_context_ = false;
+ bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
// Virtualize GpuPreference:::kLowPower contexts by default on OS X to prevent
// performance regressions when enabling FCM. https://crbug.com/180463
- use_virtualized_gl_context_ = true;
+ use_virtualized_gl_context = true;
#endif
- use_virtualized_gl_context_ |=
+ use_virtualized_gl_context |=
gpu_driver_bug_workarounds.use_virtualized_gl_contexts;
+ if (use_virtualized_gl_context)
+ share_group_->SetSharedContext(context_.get());
context_state_ = base::MakeRefCounted<SharedContextState>(
- share_group_, surface_, context_, use_virtualized_gl_context_,
+ share_group_, surface_, context_, use_virtualized_gl_context,
base::DoNothing(), gpu_preferences_.gr_context_type);
auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
gpu_driver_bug_workarounds, gpu_feature_info_);
diff --git a/chromium/gpu/ipc/scheduler_sequence.h b/chromium/gpu/ipc/scheduler_sequence.h
index d099492564c..15ae245cfe1 100644
--- a/chromium/gpu/ipc/scheduler_sequence.h
+++ b/chromium/gpu/ipc/scheduler_sequence.h
@@ -9,7 +9,7 @@
#include <vector>
#include "base/callback.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/sequence_id.h"
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index a6cdd1737d6..4450d8337f0 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -32,6 +32,8 @@ jumbo_component("service") {
"gpu_config.h",
"gpu_init.cc",
"gpu_init.h",
+ "gpu_memory_ablation_experiment.cc",
+ "gpu_memory_ablation_experiment.h",
"gpu_memory_buffer_factory.cc",
"gpu_memory_buffer_factory.h",
"gpu_watchdog_thread.cc",
diff --git a/chromium/gpu/ipc/service/context_url.cc b/chromium/gpu/ipc/service/context_url.cc
index a02b18257f5..4fd16df8412 100644
--- a/chromium/gpu/ipc/service/context_url.cc
+++ b/chromium/gpu/ipc/service/context_url.cc
@@ -20,9 +20,10 @@ void ContextUrl::SetActiveUrl(const gpu::ContextUrl& active_url) {
last_url_hash = active_url.hash();
- // Note that the url is intentionally excluded from webview crash dumps
- // using a whitelist for privacy reasons. See kWebViewCrashKeyWhiteList.
- static crash_reporter::CrashKeyString<1024> crash_key("url-chunk");
+ // Note that the url is intentionally excluded from WebView and WebLayer
+ // crash dumps using an allowlist for privacy reasons. See
+ // kWebViewCrashKeyAllowList and kWebLayerCrashKeyAllowList.
+ static crash_reporter::CrashKeyString<1024> crash_key("gpu-url-chunk");
crash_key.Set(active_url.url().possibly_invalid_spec());
}
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc
index 52d8cb82c4d..98f44c24c28 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc
@@ -33,6 +33,7 @@
#include "gpu/ipc/common/memory_stats.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
+#include "gpu/ipc/service/gpu_memory_ablation_experiment.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "third_party/skia/include/core/SkGraphics.h"
@@ -99,8 +100,11 @@ void FormatAllocationSourcesForTracing(
} // namespace
-GpuChannelManager::GpuPeakMemoryMonitor::GpuPeakMemoryMonitor()
- : weak_factory_(this) {}
+GpuChannelManager::GpuPeakMemoryMonitor::GpuPeakMemoryMonitor(
+ GpuChannelManager* channel_manager)
+ : ablation_experiment_(
+ std::make_unique<GpuMemoryAblationExperiment>(channel_manager)),
+ weak_factory_(this) {}
GpuChannelManager::GpuPeakMemoryMonitor::~GpuPeakMemoryMonitor() = default;
@@ -114,6 +118,12 @@ GpuChannelManager::GpuPeakMemoryMonitor::GetPeakMemoryUsage(
if (sequence != sequence_trackers_.end()) {
*out_peak_memory = sequence->second.total_memory_;
allocation_per_source = sequence->second.peak_memory_per_source_;
+
+ uint64_t ablation_memory =
+ ablation_experiment_->GetPeakMemory(sequence_num);
+ *out_peak_memory += ablation_memory;
+ allocation_per_source[GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB] +=
+ ablation_memory;
}
return allocation_per_source;
}
@@ -123,6 +133,7 @@ void GpuChannelManager::GpuPeakMemoryMonitor::StartGpuMemoryTracking(
sequence_trackers_.emplace(
sequence_num,
SequenceTracker(current_memory_, current_memory_per_source_));
+ ablation_experiment_->StartSequence(sequence_num);
TRACE_EVENT_ASYNC_BEGIN2("gpu", "PeakMemoryTracking", sequence_num, "start",
current_memory_, "start_sources",
StartTrackingTracedValue());
@@ -136,6 +147,7 @@ void GpuChannelManager::GpuPeakMemoryMonitor::StopGpuMemoryTracking(
sequence->second.total_memory_, "end_sources",
StopTrackingTracedValue(sequence->second));
sequence_trackers_.erase(sequence);
+ ablation_experiment_->StopSequence(sequence_num);
}
}
@@ -217,6 +229,8 @@ void GpuChannelManager::GpuPeakMemoryMonitor::OnMemoryAllocatedChange(
uint64_t diff = new_size - old_size;
current_memory_ += diff;
current_memory_per_source_[source] += diff;
+
+ ablation_experiment_->OnMemoryAllocated(old_size, new_size);
if (old_size < new_size) {
// When memory has increased, iterate over the sequences to update their
// peak.
@@ -279,11 +293,13 @@ GpuChannelManager::GpuChannelManager(
image_decode_accelerator_worker_(image_decode_accelerator_worker),
activity_flags_(std::move(activity_flags)),
memory_pressure_listener_(
+ FROM_HERE,
base::BindRepeating(&GpuChannelManager::HandleMemoryPressure,
base::Unretained(this))),
vulkan_context_provider_(vulkan_context_provider),
metal_context_provider_(metal_context_provider),
- dawn_context_provider_(dawn_context_provider) {
+ dawn_context_provider_(dawn_context_provider),
+ peak_memory_monitor_(this) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(task_runner->BelongsToCurrentThread());
DCHECK(io_task_runner);
@@ -721,7 +737,7 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
}
// TODO(penghuang): https://crbug.com/899735 Handle device lost for Vulkan.
- shared_context_state_ = base::MakeRefCounted<SharedContextState>(
+ auto shared_context_state = base::MakeRefCounted<SharedContextState>(
std::move(share_group), std::move(surface), std::move(context),
use_virtualized_gl_contexts,
base::BindOnce(&GpuChannelManager::OnContextLost, base::Unretained(this),
@@ -738,24 +754,33 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
// SkiaRenderer needs GrContext to composite output surface.
need_gr_context |= features::IsUsingSkiaRenderer();
+ // GpuMemoryAblationExperiment needs a context to use Skia for Gpu
+ // allocations.
+ need_gr_context |= base::FeatureList::IsEnabled(kGPUMemoryAblationFeature);
+
if (need_gr_context) {
if (gpu_preferences_.gr_context_type == gpu::GrContextType::kGL) {
auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
gpu_driver_bug_workarounds(), gpu_feature_info());
- if (!shared_context_state_->InitializeGL(gpu_preferences_,
- feature_info.get())) {
- shared_context_state_ = nullptr;
+ if (!shared_context_state->InitializeGL(gpu_preferences_,
+ feature_info.get())) {
LOG(ERROR) << "ContextResult::kFatalFailure: Failed to Initialize GL "
"for SharedContextState";
*result = ContextResult::kFatalFailure;
return nullptr;
}
}
- shared_context_state_->InitializeGrContext(
- gpu_preferences_, gpu_driver_bug_workarounds_, gr_shader_cache(),
- &activity_flags_, watchdog_);
+ if (!shared_context_state->InitializeGrContext(
+ gpu_preferences_, gpu_driver_bug_workarounds_, gr_shader_cache(),
+ &activity_flags_, watchdog_)) {
+ LOG(ERROR) << "ContextResult::kFatalFailure: Failed to Initialize"
+ "GrContext for SharedContextState";
+ *result = ContextResult::kFatalFailure;
+ return nullptr;
+ }
}
+ shared_context_state_ = std::move(shared_context_state);
gr_cache_controller_.emplace(shared_context_state_.get(), task_runner_);
*result = ContextResult::kSuccess;
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h
index 4846800441e..6413a3ed829 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.h
@@ -56,6 +56,7 @@ struct GpuPreferences;
struct SyncToken;
class GpuChannel;
class GpuChannelManagerDelegate;
+class GpuMemoryAblationExperiment;
class GpuMemoryBufferFactory;
class GpuWatchdogThread;
class ImageDecodeAcceleratorWorker;
@@ -205,7 +206,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
class GPU_IPC_SERVICE_EXPORT GpuPeakMemoryMonitor
: public MemoryTracker::Observer {
public:
- GpuPeakMemoryMonitor();
+ explicit GpuPeakMemoryMonitor(GpuChannelManager* channel_manager);
~GpuPeakMemoryMonitor() override;
base::flat_map<GpuPeakMemoryAllocationSource, uint64_t> GetPeakMemoryUsage(
@@ -253,6 +254,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
current_memory_per_source_;
+ std::unique_ptr<GpuMemoryAblationExperiment> ablation_experiment_;
base::WeakPtrFactory<GpuPeakMemoryMonitor> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(GpuPeakMemoryMonitor);
};
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
index 98897f9ad8a..3ddc375f308 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
@@ -74,6 +74,9 @@ class GpuChannelManagerDelegate {
// Tells the delegate that overlay info was updated.
virtual void DidUpdateOverlayInfo(const gpu::OverlayInfo& overlay_info) = 0;
+ // Tells the delegate that HDR status was updated.
+ virtual void DidUpdateHDRStatus(bool hdr_enabled) = 0;
+
// Tells the delegate that |child_window| was created in the GPU process and
// to send an IPC to make SetParent() syscall. This syscall is blocked by the
// GPU sandbox and must be made in the browser process.
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.cc b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
index a52769db1dd..6e1c4bebd1a 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
@@ -5,9 +5,11 @@
#include "gpu/ipc/service/gpu_channel_test_common.h"
#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/test/scoped_feature_list.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
+#include "components/viz/common/features.h"
#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
@@ -46,6 +48,7 @@ class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate {
bool IsExiting() const override { return is_exiting_; }
#if defined(OS_WIN)
void DidUpdateOverlayInfo(const gpu::OverlayInfo& overlay_info) override {}
+ void DidUpdateHDRStatus(bool hdr_enabled) override {}
void SendCreatedChildWindow(SurfaceHandle parent_window,
SurfaceHandle child_window) override {}
#endif
@@ -77,10 +80,14 @@ GpuChannelTestCommon::GpuChannelTestCommon(
channel_manager_delegate_(
new TestGpuChannelManagerDelegate(scheduler_.get())) {
// We need GL bindings to actually initialize command buffers.
- if (use_stub_bindings)
+ if (use_stub_bindings) {
gl::GLSurfaceTestSupport::InitializeOneOffWithStubBindings();
- else
+ // GrContext cannot be created with stub bindings.
+ scoped_feature_list_ = std::make_unique<base::test::ScopedFeatureList>();
+ scoped_feature_list_->InitAndDisableFeature(features::kUseSkiaRenderer);
+ } else {
gl::GLSurfaceTestSupport::InitializeOneOff();
+ }
GpuFeatureInfo feature_info;
feature_info.enabled_gpu_driver_bug_workarounds =
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.h b/chromium/gpu/ipc/service/gpu_channel_test_common.h
index 1a15276f605..504a4720457 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.h
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.h
@@ -15,9 +15,15 @@
namespace base {
class TestSimpleTaskRunner;
+
+namespace test {
+class ScopedFeatureList;
+} // namespace test
+
namespace trace_event {
class MemoryDumpManager;
} // namespace trace_event
+
} // namespace base
namespace IPC {
@@ -63,6 +69,7 @@ class GpuChannelTestCommon : public testing::Test {
std::unique_ptr<SharedImageManager> shared_image_manager_;
std::unique_ptr<Scheduler> scheduler_;
std::unique_ptr<TestGpuChannelManagerDelegate> channel_manager_delegate_;
+ std::unique_ptr<base::test::ScopedFeatureList> scoped_feature_list_;
std::unique_ptr<GpuChannelManager> channel_manager_;
DISALLOW_COPY_AND_ASSIGN(GpuChannelTestCommon);
diff --git a/chromium/gpu/ipc/service/gpu_channel_unittest.cc b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
index d9eb13b7c07..4475b8c8503 100644
--- a/chromium/gpu/ipc/service/gpu_channel_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
@@ -21,6 +21,8 @@ class GpuChannelTest : public GpuChannelTestCommon {
#if defined(OS_WIN)
const SurfaceHandle kFakeSurfaceHandle = reinterpret_cast<SurfaceHandle>(1);
+#elif defined(USE_X11)
+const SurfaceHandle kFakeSurfaceHandle = static_cast<SurfaceHandle>(1);
#else
const SurfaceHandle kFakeSurfaceHandle = 1;
#endif
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index da3cadecbcd..fa0b192cc67 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -7,6 +7,7 @@
#include <string>
#include "base/command_line.h"
+#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/string_number_conversions.h"
#include "base/threading/scoped_blocking_call.h"
@@ -56,6 +57,7 @@
#include "gpu/vulkan/init/vulkan_factory.h"
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_instance.h"
+#include "gpu/vulkan/vulkan_util.h"
#endif
namespace gpu {
@@ -67,7 +69,7 @@ bool CollectGraphicsInfo(GPUInfo* gpu_info) {
base::TimeTicks before_collect_context_graphics_info = base::TimeTicks::Now();
bool success = CollectContextGraphicsInfo(gpu_info);
if (!success)
- LOG(ERROR) << "gpu::CollectGraphicsInfo failed.";
+ LOG(ERROR) << "CollectGraphicsInfo failed.";
if (success) {
base::TimeDelta collect_context_time =
@@ -113,17 +115,33 @@ class GpuWatchdogInit {
watchdog_ptr_->OnInitComplete();
}
- void SetGpuWatchdogPtr(gpu::GpuWatchdogThread* ptr) { watchdog_ptr_ = ptr; }
+ void SetGpuWatchdogPtr(GpuWatchdogThread* ptr) { watchdog_ptr_ = ptr; }
private:
- gpu::GpuWatchdogThread* watchdog_ptr_ = nullptr;
+ GpuWatchdogThread* watchdog_ptr_ = nullptr;
};
+
+// TODO(https://crbug.com/1095744): We currently do not handle
+// VK_ERROR_DEVICE_LOST in in-process-gpu.
+void DisableInProcessGpuVulkan(GpuFeatureInfo* gpu_feature_info,
+ GpuPreferences* gpu_preferences) {
+ if (gpu_feature_info->status_values[GPU_FEATURE_TYPE_VULKAN] ==
+ kGpuFeatureStatusEnabled) {
+ LOG(ERROR) << "Vulkan not supported with in process gpu";
+ gpu_preferences->use_vulkan = VulkanImplementationName::kNone;
+ gpu_feature_info->status_values[GPU_FEATURE_TYPE_VULKAN] =
+ kGpuFeatureStatusDisabled;
+ if (gpu_preferences->gr_context_type == GrContextType::kVulkan)
+ gpu_preferences->gr_context_type = GrContextType::kGL;
+ }
+}
+
} // namespace
GpuInit::GpuInit() = default;
GpuInit::~GpuInit() {
- gpu::StopForceDiscreteGPU();
+ StopForceDiscreteGPU();
}
bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
@@ -146,7 +164,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// Set keys for crash logging based on preliminary gpu info, in case we
// crash during feature collection.
- gpu::SetKeysForCrashLogging(gpu_info_);
+ SetKeysForCrashLogging(gpu_info_);
#if defined(SUBPIXEL_FONT_RENDERING_DISABLED)
gpu_info_.subpixel_font_rendering = false;
#else
@@ -168,31 +186,31 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
if (!PopGpuFeatureInfoCache(&gpu_feature_info_)) {
// Compute blacklist and driver bug workaround decisions based on basic GPU
// info.
- gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(
- gpu_info_, gpu_preferences_, command_line, &needs_more_info);
+ gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
+ command_line, &needs_more_info);
}
#endif // !OS_ANDROID && !BUILDFLAG(IS_CHROMECAST)
gpu_info_.in_process_gpu = false;
- bool use_swiftshader = false;
+ gl_use_swiftshader_ = false;
// GL bindings may have already been initialized, specifically on MacOSX.
bool gl_initialized = gl::GetGLImplementation() != gl::kGLImplementationNone;
if (!gl_initialized) {
// If GL has already been initialized, then it's too late to select GPU.
- if (gpu::SwitchableGPUsSupported(gpu_info_, *command_line)) {
- gpu::InitializeSwitchableGPUs(
+ if (SwitchableGPUsSupported(gpu_info_, *command_line)) {
+ InitializeSwitchableGPUs(
gpu_feature_info_.enabled_gpu_driver_bug_workarounds);
}
} else if (gl::GetGLImplementation() == gl::kGLImplementationSwiftShaderGL &&
command_line->GetSwitchValueASCII(switches::kUseGL) !=
gl::kGLImplementationSwiftShaderName) {
- use_swiftshader = true;
+ gl_use_swiftshader_ = true;
}
bool enable_watchdog = !gpu_preferences_.disable_gpu_watchdog &&
!command_line->HasSwitch(switches::kHeadless) &&
- !use_swiftshader;
+ !gl_use_swiftshader_;
// Disable the watchdog in debug builds because they tend to only be run by
// developers who will not appreciate the watchdog killing the GPU process.
@@ -233,11 +251,11 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// consuming has completed, otherwise the process is liable to be aborted.
if (enable_watchdog && !delayed_watchdog_enable) {
if (base::FeatureList::IsEnabled(features::kGpuWatchdogV2)) {
- watchdog_thread_ = gpu::GpuWatchdogThreadImplV2::Create(
+ watchdog_thread_ = GpuWatchdogThreadImplV2::Create(
gpu_preferences_.watchdog_starts_backgrounded);
watchdog_init.SetGpuWatchdogPtr(watchdog_thread_.get());
} else {
- watchdog_thread_ = gpu::GpuWatchdogThreadImplV1::Create(
+ watchdog_thread_ = GpuWatchdogThreadImplV1::Create(
gpu_preferences_.watchdog_starts_backgrounded);
}
@@ -282,12 +300,12 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
->GetSupportedFormatsForTexturing();
#endif
- if (!use_swiftshader) {
- use_swiftshader = EnableSwiftShaderIfNeeded(
+ if (!gl_use_swiftshader_) {
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, needs_more_info);
}
- if (gl_initialized && use_swiftshader &&
+ if (gl_initialized && gl_use_swiftshader_ &&
gl::GetGLImplementation() != gl::kGLImplementationSwiftShaderGL) {
#if defined(OS_LINUX)
VLOG(1) << "Quit GPU process launch to fallback to SwiftShader cleanly "
@@ -324,7 +342,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
#if defined(OS_LINUX)
// The ContentSandboxHelper is currently the only one implementation of
- // gpu::GpuSandboxHelper and it has no dependency. Except on Linux where
+ // GpuSandboxHelper and it has no dependency. Except on Linux where
// VaapiWrapper checks the GL implementation to determine which display
// to use. So call PreSandboxStartup after GL initialization. But make
// sure the watchdog is paused as loadLibrary may take a long time and
@@ -350,7 +368,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
#if defined(OS_MACOSX)
if (gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE &&
gl::GetANGLEImplementation() == gl::ANGLEImplementation::kSwiftShader) {
- gpu::SetMacOSSpecificTextureTarget(GL_TEXTURE_2D);
+ SetMacOSSpecificTextureTarget(GL_TEXTURE_2D);
}
#endif // defined(OS_MACOSX)
@@ -363,16 +381,17 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// We need to collect GL strings (VENDOR, RENDERER) for blacklisting purposes.
if (!gl_disabled) {
- if (!use_swiftshader) {
+ if (!gl_use_swiftshader_) {
if (!CollectGraphicsInfo(&gpu_info_))
return false;
- gpu::SetKeysForCrashLogging(gpu_info_);
- gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(
- gpu_info_, gpu_preferences_, command_line, nullptr);
- use_swiftshader = EnableSwiftShaderIfNeeded(
+
+ SetKeysForCrashLogging(gpu_info_);
+ gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
+ command_line, nullptr);
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, false);
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
#if defined(OS_LINUX)
VLOG(1) << "Quit GPU process launch to fallback to SwiftShader cleanly "
<< "on Linux";
@@ -389,34 +408,54 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
}
#endif // OS_LINUX
}
- } else { // use_swiftshader == true
+ } else { // gl_use_swiftshader_ == true
switch (gpu_preferences_.use_vulkan) {
- case gpu::VulkanImplementationName::kNative: {
+ case VulkanImplementationName::kNative: {
// Collect GPU info, so we can use blacklist to disable vulkan if it
// is needed.
- gpu::GPUInfo gpu_info;
+ GPUInfo gpu_info;
if (!CollectGraphicsInfo(&gpu_info))
return false;
- auto gpu_feature_info = gpu::ComputeGpuFeatureInfo(
+ auto gpu_feature_info = ComputeGpuFeatureInfo(
gpu_info, gpu_preferences_, command_line, nullptr);
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu_feature_info.status_values[gpu::GPU_FEATURE_TYPE_VULKAN];
+ gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN] =
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_VULKAN];
break;
}
- case gpu::VulkanImplementationName::kForcedNative:
- case gpu::VulkanImplementationName::kSwiftshader:
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu::kGpuFeatureStatusEnabled;
+ case VulkanImplementationName::kForcedNative:
+ case VulkanImplementationName::kSwiftshader:
+ gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN] =
+ kGpuFeatureStatusEnabled;
break;
- case gpu::VulkanImplementationName::kNone:
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu::kGpuFeatureStatusDisabled;
+ case VulkanImplementationName::kNone:
+ gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN] =
+ kGpuFeatureStatusDisabled;
break;
}
}
}
- InitializeVulkan();
+ if (gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN] !=
+ kGpuFeatureStatusEnabled ||
+ !InitializeVulkan()) {
+ gpu_preferences_.use_vulkan = VulkanImplementationName::kNone;
+ gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN] =
+ kGpuFeatureStatusDisabled;
+ if (gpu_preferences_.gr_context_type == GrContextType::kVulkan) {
+#if defined(OS_FUCHSIA)
+ // Fuchsia uses ANGLE for GL which requires Vulkan, so don't fall
+ // back to GL if Vulkan init fails.
+ LOG(FATAL) << "Vulkan initialization failed";
+#endif
+ gpu_preferences_.gr_context_type = GrContextType::kGL;
+ }
+ } else {
+ // TODO(https://crbug.com/1095744): It would be better to cleanly tear
+ // down and recreate the VkDevice on VK_ERROR_DEVICE_LOST. Until that
+ // happens, we will exit_on_context_lost to ensure there are no leaks.
+ gpu_feature_info_.enabled_gpu_driver_bug_workarounds.push_back(
+ EXIT_ON_CONTEXT_LOST);
+ }
// Collect GPU process info
if (!gl_disabled) {
@@ -447,16 +486,16 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// Driver may create a compatibility profile context when collect graphics
// information on Linux platform. Try to collect graphics information
// based on core profile context after disabling platform extensions.
- if (!gl_disabled && !use_swiftshader) {
+ if (!gl_disabled && !gl_use_swiftshader_) {
if (!CollectGraphicsInfo(&gpu_info_))
return false;
- gpu::SetKeysForCrashLogging(gpu_info_);
- gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
- command_line, nullptr);
- use_swiftshader = EnableSwiftShaderIfNeeded(
+ SetKeysForCrashLogging(gpu_info_);
+ gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
+ command_line, nullptr);
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, false);
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
VLOG(1) << "Quit GPU process launch to fallback to SwiftShader cleanly "
<< "on Linux";
return false;
@@ -464,7 +503,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
}
#endif // defined(OS_LINUX)
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
AdjustInfoToSwiftShader();
}
@@ -482,14 +521,14 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// Software GL is expected to run slowly, so disable the watchdog
// in that case.
// In SwiftShader case, the implementation is actually EGLGLES2.
- if (!use_swiftshader && command_line->HasSwitch(switches::kUseGL)) {
+ if (!gl_use_swiftshader_ && command_line->HasSwitch(switches::kUseGL)) {
std::string use_gl = command_line->GetSwitchValueASCII(switches::kUseGL);
if (use_gl == gl::kGLImplementationSwiftShaderName ||
use_gl == gl::kGLImplementationSwiftShaderForWebGLName) {
- use_swiftshader = true;
+ gl_use_swiftshader_ = true;
}
}
- if (use_swiftshader ||
+ if (gl_use_swiftshader_ ||
gl::GetGLImplementation() == gl::GetSoftwareGLImplementation()) {
gpu_info_.software_rendering = true;
watchdog_thread_ = nullptr;
@@ -499,11 +538,11 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
watchdog_init.SetGpuWatchdogPtr(nullptr);
} else if (enable_watchdog && delayed_watchdog_enable) {
if (base::FeatureList::IsEnabled(features::kGpuWatchdogV2)) {
- watchdog_thread_ = gpu::GpuWatchdogThreadImplV2::Create(
+ watchdog_thread_ = GpuWatchdogThreadImplV2::Create(
gpu_preferences_.watchdog_starts_backgrounded);
watchdog_init.SetGpuWatchdogPtr(watchdog_thread_.get());
} else {
- watchdog_thread_ = gpu::GpuWatchdogThreadImplV1::Create(
+ watchdog_thread_ = GpuWatchdogThreadImplV1::Create(
gpu_preferences_.watchdog_starts_backgrounded);
}
}
@@ -544,8 +583,8 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
InitializeGLThreadSafe(command_line, gpu_preferences_, &gpu_info_,
&gpu_feature_info_);
- InitializeVulkan();
+ DisableInProcessGpuVulkan(&gpu_feature_info_, &gpu_preferences_);
default_offscreen_surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
UMA_HISTOGRAM_ENUMERATION("GPU.GLImplementation", gl::GetGLImplementation());
@@ -585,7 +624,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
}
#endif // !BUILDFLAG(IS_CHROMECAST)
- bool use_swiftshader = EnableSwiftShaderIfNeeded(
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, needs_more_info);
if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) {
@@ -594,14 +633,14 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
}
bool gl_disabled = gl::GetGLImplementation() == gl::kGLImplementationDisabled;
- if (!gl_disabled && !use_swiftshader) {
+ if (!gl_disabled && !gl_use_swiftshader_) {
CollectContextGraphicsInfo(&gpu_info_);
gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
command_line, nullptr);
- use_swiftshader = EnableSwiftShaderIfNeeded(
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, false);
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
gl::init::ShutdownGL(true);
if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) {
VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed "
@@ -632,14 +671,14 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
// Driver may create a compatibility profile context when collect graphics
// information on Linux platform. Try to collect graphics information
// based on core profile context after disabling platform extensions.
- if (!gl_disabled && !use_swiftshader) {
+ if (!gl_disabled && !gl_use_swiftshader_) {
CollectContextGraphicsInfo(&gpu_info_);
gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
command_line, nullptr);
- use_swiftshader = EnableSwiftShaderIfNeeded(
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, false);
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
gl::init::ShutdownGL(true);
if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) {
VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed "
@@ -650,7 +689,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
}
#endif // defined(OS_LINUX)
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
AdjustInfoToSwiftShader();
}
@@ -659,6 +698,8 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
std::move(supported_buffer_formats_for_texturing);
#endif
+ DisableInProcessGpuVulkan(&gpu_feature_info_, &gpu_preferences_);
+
UMA_HISTOGRAM_ENUMERATION("GPU.GLImplementation", gl::GetGLImplementation());
}
#endif // OS_ANDROID
@@ -675,63 +716,64 @@ scoped_refptr<gl::GLSurface> GpuInit::TakeDefaultOffscreenSurface() {
return std::move(default_offscreen_surface_);
}
-void GpuInit::InitializeVulkan() {
#if BUILDFLAG(ENABLE_VULKAN)
- if (gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] ==
- gpu::kGpuFeatureStatusEnabled) {
- DCHECK_NE(gpu_preferences_.use_vulkan,
- gpu::VulkanImplementationName::kNone);
- bool vulkan_use_swiftshader = gpu_preferences_.use_vulkan ==
- gpu::VulkanImplementationName::kSwiftshader;
- const bool enforce_protected_memory =
- gpu_preferences_.enforce_vulkan_protected_memory;
- vulkan_implementation_ = gpu::CreateVulkanImplementation(
- vulkan_use_swiftshader,
- enforce_protected_memory ? true : false /* allow_protected_memory */,
- enforce_protected_memory);
- if (!vulkan_implementation_ ||
- !vulkan_implementation_->InitializeVulkanInstance(
- !gpu_preferences_.disable_vulkan_surface)) {
- DLOG(ERROR) << "Failed to create and initialize Vulkan implementation.";
- vulkan_implementation_ = nullptr;
- CHECK(!gpu_preferences_.disable_vulkan_fallback_to_gl_for_testing);
- }
- // TODO(penghuang): Remove GPU.SupportsVulkan and GPU.VulkanVersion from
- // //gpu/config/gpu_info_collector_win.cc when we are finch vulkan on
- // Windows.
- if (!vulkan_use_swiftshader) {
- const bool supports_vulkan = !!vulkan_implementation_;
- UMA_HISTOGRAM_BOOLEAN("GPU.SupportsVulkan", supports_vulkan);
- uint32_t vulkan_version = 0;
- if (supports_vulkan) {
- const auto& vulkan_info =
- vulkan_implementation_->GetVulkanInstance()->vulkan_info();
- vulkan_version = vulkan_info.used_api_version;
- }
- UMA_HISTOGRAM_ENUMERATION(
- "GPU.VulkanVersion", ConvertToHistogramVulkanVersion(vulkan_version));
+bool GpuInit::InitializeVulkan() {
+ DCHECK_EQ(gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN],
+ kGpuFeatureStatusEnabled);
+ DCHECK_NE(gpu_preferences_.use_vulkan, VulkanImplementationName::kNone);
+ bool vulkan_use_swiftshader =
+ gpu_preferences_.use_vulkan == VulkanImplementationName::kSwiftshader;
+ bool forced_native =
+ gpu_preferences_.use_vulkan == VulkanImplementationName::kForcedNative;
+ bool use_swiftshader = gl_use_swiftshader_ || vulkan_use_swiftshader;
+
+ const bool enforce_protected_memory =
+ gpu_preferences_.enforce_vulkan_protected_memory;
+ vulkan_implementation_ = CreateVulkanImplementation(
+ vulkan_use_swiftshader,
+ enforce_protected_memory ? true : false /* allow_protected_memory */,
+ enforce_protected_memory);
+ if (!vulkan_implementation_ ||
+ !vulkan_implementation_->InitializeVulkanInstance(
+ !gpu_preferences_.disable_vulkan_surface)) {
+ DLOG(ERROR) << "Failed to create and initialize Vulkan implementation.";
+ vulkan_implementation_ = nullptr;
+ CHECK(!gpu_preferences_.disable_vulkan_fallback_to_gl_for_testing);
+ }
+
+ // Vulkan info is no longer collected in gpu/config/gpu_info_collector_win.cc
+ // Histogram GPU.SupportsVulkan and GPU.VulkanVersion were marked as expired.
+ // TODO(magchen): Add back these two histograms here and re-enable them in
+ // histograms.xml when we start Vulkan finch on Windows.
+ if (!vulkan_use_swiftshader) {
+ const bool supports_vulkan = !!vulkan_implementation_;
+ uint32_t vulkan_version = 0;
+ if (supports_vulkan) {
+ const auto& vulkan_info =
+ vulkan_implementation_->GetVulkanInstance()->vulkan_info();
+ vulkan_version = vulkan_info.used_api_version;
}
}
- if (!vulkan_implementation_) {
- if (gpu_preferences_.gr_context_type == gpu::GrContextType::kVulkan) {
-#if defined(OS_FUCHSIA)
- // Fuchsia uses ANGLE for GL which requires Vulkan, so don't fall
- // back to GL if Vulkan init fails.
- LOG(FATAL) << "Vulkan initialization failed";
-#endif
- gpu_preferences_.gr_context_type = gpu::GrContextType::kGL;
- }
- gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone;
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu::kGpuFeatureStatusDisabled;
- } else {
- gpu_info_.vulkan_info =
- vulkan_implementation_->GetVulkanInstance()->vulkan_info();
+
+ if (!vulkan_implementation_)
+ return false;
+
+ if (!use_swiftshader && !forced_native &&
+ !CheckVulkanCompabilities(
+ vulkan_implementation_->GetVulkanInstance()->vulkan_info(),
+ gpu_info_)) {
+ vulkan_implementation_.reset();
+ return false;
}
-#else
- gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone;
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu::kGpuFeatureStatusDisabled;
-#endif // BUILDFLAG(ENABLE_VULKAN)
+
+ gpu_info_.vulkan_info =
+ vulkan_implementation_->GetVulkanInstance()->vulkan_info();
+ return true;
}
+#else // BUILDFLAG(ENABLE_VULKAN)
+bool GpuInit::InitializeVulkan() {
+ return false;
+}
+#endif  // BUILDFLAG(ENABLE_VULKAN)
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_init.h b/chromium/gpu/ipc/service/gpu_init.h
index 5f1d6fcdf02..3c236dbc502 100644
--- a/chromium/gpu/ipc/service/gpu_init.h
+++ b/chromium/gpu/ipc/service/gpu_init.h
@@ -83,9 +83,10 @@ class GPU_IPC_SERVICE_EXPORT GpuInit {
#endif
private:
- void InitializeVulkan();
+ bool InitializeVulkan();
GpuSandboxHelper* sandbox_helper_ = nullptr;
+ bool gl_use_swiftshader_ = false;
std::unique_ptr<GpuWatchdogThread> watchdog_thread_;
GPUInfo gpu_info_;
GpuFeatureInfo gpu_feature_info_;
diff --git a/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.cc b/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.cc
new file mode 100644
index 00000000000..4da13f07c52
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.cc
@@ -0,0 +1,227 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/gpu_memory_ablation_experiment.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/time/time.h"
+#include "base/trace_event/common/trace_event_common.h"
+#include "components/viz/common/features.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/mailbox_manager_impl.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/ipc/common/surface_handle.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
+#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkColor.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+
+// Main feature flag to control the entire experiment, encompassing both CPU
+// GPU ablations.
+const base::Feature kGPUMemoryAblationFeature{
+ "GPUMemoryAblation", base::FEATURE_DISABLED_BY_DEFAULT};
+
+// TODO(jonross): Replace these feature flags with Field Trial Param lookup.
+const base::Feature kGPUMemoryAblationGPUSmall{
+ "GPUMemoryAblationGPUSmall", base::FEATURE_DISABLED_BY_DEFAULT};
+
+const base::Feature kGPUMemoryAblationGPUMedium{
+ "GPUMemoryAblationGPUMedium", base::FEATURE_DISABLED_BY_DEFAULT};
+
+const base::Feature kGPUMemoryAblationGPULarge{
+ "GPUMemoryAblationGPULarge", base::FEATURE_DISABLED_BY_DEFAULT};
+
+// The size to use when allocating images. The sizes vary based on the chosen
+// experiment.
+constexpr gfx::Size kSmallSize(256, 256);
+constexpr gfx::Size kMediumSize(256 * 4, 256 * 4);
+constexpr gfx::Size kLargeSize(256 * 8, 256 * 8);
+
+// Image allocation parameters.
+constexpr viz::ResourceFormat kFormat = viz::ResourceFormat::RGBA_8888;
+constexpr uint32_t kUsage = SHARED_IMAGE_USAGE_DISPLAY;
+
+GpuMemoryAblationExperiment::GpuMemoryAblationExperiment(
+ GpuChannelManager* channel_manager)
+ : enabled_(base::FeatureList::IsEnabled(kGPUMemoryAblationFeature)),
+ channel_manager_(channel_manager) {}
+
+GpuMemoryAblationExperiment::~GpuMemoryAblationExperiment() = default;
+
+void GpuMemoryAblationExperiment::OnMemoryAllocated(uint64_t old_size,
+ uint64_t new_size) {
+ if (!enabled_)
+ return;
+ if (!init_) {
+ InitGpu(channel_manager_);
+ }
+ // TODO(jonross): Investigate why there are 0 size allocations.
+ if (new_size > old_size) {
+ // TODO(jonross): Impl CPU ablation
+ if (gpu_enabled_)
+ AllocateGpuMemory();
+ } else if (old_size > new_size) {
+ // TODO(jonross): Impl CPU ablation
+ if (gpu_enabled_ && !mailboxes_.empty()) {
+ DeleteGpuMemory();
+ }
+ }
+}
+
+uint64_t GpuMemoryAblationExperiment::GetPeakMemory(
+ uint32_t sequence_num) const {
+ auto it = sequences_.find(sequence_num);
+ if (it == sequences_.end())
+ return 0u;
+
+ return it->second.peak_memory_;
+}
+
+void GpuMemoryAblationExperiment::StartSequence(uint32_t sequence_num) {
+ sequences_.emplace(sequence_num, SequenceTracker());
+}
+
+void GpuMemoryAblationExperiment::StopSequence(uint32_t sequence_num) {
+ auto it = sequences_.find(sequence_num);
+ if (it == sequences_.end())
+ return;
+
+ TRACE_EVENT_INSTANT2("gpu.memory", "Memory.GPU.PeakMemoryUsage.AblationTimes",
+ TRACE_EVENT_SCOPE_THREAD, "alloc",
+ it->second.allocs_.InMilliseconds(), "dealloc",
+ it->second.deallocs_.InMilliseconds());
+
+ sequences_.erase(it);
+}
+
+void GpuMemoryAblationExperiment::AllocateGpuMemory() {
+ // We can't successfully create an image without a context, so do not even
+ // perform the initial allocations.
+ if (!MakeContextCurrent())
+ return;
+ base::Time start = base::Time::Now();
+
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+
+ if (!factory_->CreateSharedImage(mailbox, kFormat, size_, color_space,
+ gpu::kNullSurfaceHandle, kUsage)) {
+ return;
+ }
+
+ auto skia_rep = rep_factory_->ProduceSkia(mailbox, context_state_);
+ if (!skia_rep)
+ return;
+
+ auto write_access = skia_rep->BeginScopedWriteAccess(
+ /*begin_semaphores=*/{}, /*end_semaphores=*/{},
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ if (!write_access)
+ return;
+
+ auto* canvas = write_access->surface()->getCanvas();
+ canvas->clear(SK_ColorWHITE);
+
+ mailboxes_.push_back(mailbox);
+
+ base::TimeDelta delta = base::Time::Now() - start;
+ for (auto& it : sequences_)
+ it.second.allocs_ += delta;
+}
+
+void GpuMemoryAblationExperiment::DeleteGpuMemory() {
+ if (mailboxes_.empty())
+ return;
+ base::Time start = base::Time::Now();
+
+ auto mailbox = mailboxes_.front();
+ // We can't successfully destroy the image if we cannot get the context,
+ // however we still need to cleanup our internal state.
+ if (MakeContextCurrent())
+ factory_->DestroySharedImage(mailbox);
+
+ mailboxes_.erase(mailboxes_.begin());
+
+ base::TimeDelta delta = base::Time::Now() - start;
+ for (auto& it : sequences_)
+ it.second.deallocs_ += delta;
+}
+
+void GpuMemoryAblationExperiment::InitGpu(GpuChannelManager* channel_manager) {
+ // GPU Info Collection Process can be created, with no graphical output
+ // possible. Don't init there, as all future image operations will fail.
+ if (gl::GetGLImplementation() == gl::kGLImplementationDisabled)
+ return;
+
+ if (base::FeatureList::IsEnabled(kGPUMemoryAblationGPUSmall)) {
+ size_ = kSmallSize;
+ } else if (base::FeatureList::IsEnabled(kGPUMemoryAblationGPUMedium)) {
+ size_ = kMediumSize;
+ } else if (base::FeatureList::IsEnabled(kGPUMemoryAblationGPULarge)) {
+ size_ = kLargeSize;
+ }
+
+ ContextResult result;
+ context_state_ = channel_manager->GetSharedContextState(&result);
+ if (result != ContextResult::kSuccess || !MakeContextCurrent()) {
+ context_state_ = nullptr;
+ return;
+ }
+
+ gpu::GpuMemoryBufferFactory* gmb_factory =
+ channel_manager->gpu_memory_buffer_factory();
+ factory_ = std::make_unique<SharedImageFactory>(
+ channel_manager->gpu_preferences(),
+ channel_manager->gpu_driver_bug_workarounds(),
+ channel_manager->gpu_feature_info(), context_state_.get(),
+ channel_manager->mailbox_manager(),
+ channel_manager->shared_image_manager(),
+ gmb_factory ? gmb_factory->AsImageFactory() : nullptr, this,
+ features::IsUsingSkiaRenderer());
+
+ rep_factory_ = std::make_unique<SharedImageRepresentationFactory>(
+ channel_manager->shared_image_manager(), this);
+ gpu_enabled_ = true;
+ init_ = true;
+}
+
+bool GpuMemoryAblationExperiment::MakeContextCurrent() {
+ return context_state_->MakeCurrent(nullptr);
+}
+
+// MemoryTracker:
+void GpuMemoryAblationExperiment::TrackMemoryAllocatedChange(int64_t delta) {
+ DCHECK(delta >= 0 || gpu_allocation_size_ >= static_cast<uint64_t>(-delta));
+ gpu_allocation_size_ += delta;
+ for (auto& it : sequences_) {
+ if (gpu_allocation_size_ > it.second.peak_memory_)
+ it.second.peak_memory_ = gpu_allocation_size_;
+ }
+}
+
+// Unused methods that form the basis of memory dumps
+uint64_t GpuMemoryAblationExperiment::GetSize() const {
+ return 0u;
+}
+
+uint64_t GpuMemoryAblationExperiment::ClientTracingId() const {
+ return 0u;
+}
+
+int GpuMemoryAblationExperiment::ClientId() const {
+ return 0;
+}
+
+uint64_t GpuMemoryAblationExperiment::ContextGroupTracingId() const {
+ return 0u;
+}
+
+} // namespace gpu \ No newline at end of file
diff --git a/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.h b/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.h
new file mode 100644
index 00000000000..e5f2c60c25e
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.h
@@ -0,0 +1,133 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_SERVICE_GPU_MEMORY_ABLATION_EXPERIMENT_H_
+#define GPU_IPC_SERVICE_GPU_MEMORY_ABLATION_EXPERIMENT_H_
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/containers/flat_map.h"
+#include "base/feature_list.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/ipc/service/gpu_ipc_service_export.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace gpu {
+class GpuChannelManager;
+class SharedContextState;
+class SharedImageFactory;
+class SharedImageRepresentationFactory;
+
+extern const base::Feature kGPUMemoryAblationFeature;
+
+// When enabled, this experiment allocates additional memory alongside each
+// normal allocation. This will allow a study of the correlation between
+// memory usage and performance metrics.
+//
+// Each increase reported to OnMemoryAllocated will allocate a chunk of memory.
+// Each decrease reported will release a previously allocated chunk.
+//
+// GpuMemoryAblationExperiment acts as the MemoryTracker for all of its own
+// allocations. This prevents a cycle of memory allocations:
+// - GpuChannelManager::GpuPeakMemoryMonitor::OnMemoryAllocatedChange
+// - GpuMemoryAblationExperiment::OnMemoryAllocated
+// - MemoryTracker::TrackMemoryAllocatedChange
+// - GpuChannelManager::GpuPeakMemoryMonitor::OnMemoryAllocatedChange
+// - etc.
+//
+// Instead this will track the memory it allocated, which can be retrieved via
+// GetPeakMemory.
+class GPU_IPC_SERVICE_EXPORT GpuMemoryAblationExperiment
+ : public MemoryTracker {
+ public:
+ explicit GpuMemoryAblationExperiment(GpuChannelManager* channel_manager);
+ ~GpuMemoryAblationExperiment() override;
+
+ // Allocates a chunk of memory in response to increases. Reported decreases
+ // will release previously allocated chunks. The amount of memory allocated
+ // is returned in bytes.
+ void OnMemoryAllocated(uint64_t old_size, uint64_t new_size);
+
+ uint64_t GetPeakMemory(uint32_t sequence_num) const;
+ void StartSequence(uint32_t sequence_num);
+ void StopSequence(uint32_t sequence_num);
+
+ private:
+ // Tracks the time spent doing the allocations/deallocations in order to
+ // determine if the change in metrics was solely due to the ablation.
+ //
+ // The memory allocated for ablation is not reported directly to
+ // GpuChannelManager::GpuPeakMemoryMonitor, as GpuMemoryAblationExperiment
+ // acts as the MemoryTracker for its own allocations. This tracks the peak
+ // allocation so that it can be reported.
+ struct SequenceTracker {
+ public:
+ SequenceTracker() = default;
+ ~SequenceTracker() = default;
+
+ base::TimeDelta allocs_;
+ base::TimeDelta deallocs_;
+ uint64_t peak_memory_ = 0u;
+ };
+
+ void AllocateGpuMemory();
+ void DeleteGpuMemory();
+
+ // Sets up the Gpu resources needed to allocate Gpu RAM. These are influenced
+ // by SharedImageStub, which is not used directly as there is no external
+ // host to pair a GpuChannel with.
+ void InitGpu(GpuChannelManager* channel_manager);
+
+ // This must be called before any actions on |factory_|. If this method fails
+ // then subsequent work on the |factory_| will fail. Also influenced by
+ // SharedImageStub.
+ bool MakeContextCurrent();
+
+ // MemoryTracker:
+ void TrackMemoryAllocatedChange(int64_t delta) override;
+ uint64_t GetSize() const override;
+ uint64_t ClientTracingId() const override;
+ int ClientId() const override;
+ uint64_t ContextGroupTracingId() const override;
+
+ // Whether or not the entire experiment is enabled.
+ bool enabled_;
+ bool init_ = false;
+ // If |true| then a Gpu ablation was requested, and initialization succeeded.
+ bool gpu_enabled_ = false;
+
+ // Size of image to allocate, determined by experiment parameters.
+ gfx::Size size_;
+
+ // The Mailboxes allocated for each image.
+ std::vector<Mailbox> mailboxes_;
+
+ // Tracks the time spent doing the allocations/deallocations, along with the
+ // peak memory allocated. This allows determining whether a change in
+ // metrics was solely due to the ablation.
+ base::flat_map<uint32_t, SequenceTracker> sequences_;
+
+ // The memory allocated for ablation is not reported directly to
+ // GpuChannelManager::GpuPeakMemoryMonitor, as this class acts as the
+ // MemoryTracker for its own allocations. Tracks the current amount of
+ // memory allocated as a part of the ablation.
+ uint64_t gpu_allocation_size_ = 0;
+
+ scoped_refptr<SharedContextState> context_state_;
+ std::unique_ptr<SharedImageFactory> factory_;
+ std::unique_ptr<SharedImageRepresentationFactory> rep_factory_;
+ GpuChannelManager* channel_manager_;
+ base::WeakPtrFactory<GpuMemoryAblationExperiment> weak_factory_{this};
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_SERVICE_GPU_MEMORY_ABLATION_EXPERIMENT_H_
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
index cc5d8a9f1c9..1e4bae0022a 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
@@ -9,6 +9,7 @@
#include "base/debug/alias.h"
#include "base/files/file_util.h"
#include "base/format_macros.h"
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop_current.h"
#include "base/metrics/histogram_functions.h"
@@ -53,17 +54,6 @@ const int kNewGpuTimeout = 17000;
const int kNewGpuTimeout = 15000;
#endif
-// Histogram parameters for GPU.WatchdogThread.V1.ExtraThreadTime and
-// GPU.WatchdogThread.V1.WaitTime
-constexpr base::TimeDelta kMin = base::TimeDelta::FromSeconds(1);
-constexpr base::TimeDelta kMax = base::TimeDelta::FromSeconds(150);
-constexpr int kBuckets = 50;
-
-// Histogram recorded in OnWatchdogTimeout()
-void GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent timeout_event) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.V1.Timeout", timeout_event);
-}
-
#if defined(USE_X11)
const base::FilePath::CharType kTtyFilePath[] =
FILE_PATH_LITERAL("/sys/class/tty/tty0/active");
@@ -150,11 +140,6 @@ void GpuWatchdogThreadImplV1::OnForegrounded() {
base::Unretained(this)));
}
-void GpuWatchdogThreadImplV1::GpuWatchdogHistogram(
- GpuWatchdogThreadEvent thread_event) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.Event", thread_event);
-}
-
bool GpuWatchdogThreadImplV1::IsGpuHangDetectedForTesting() {
return false;
}
@@ -166,7 +151,6 @@ void GpuWatchdogThreadImplV1::Init() {
void GpuWatchdogThreadImplV1::CleanUp() {
weak_factory_.InvalidateWeakPtrs();
- more_gpu_thread_time_allowed_ = false;
armed_ = false;
}
@@ -258,24 +242,11 @@ GpuWatchdogThreadImplV1::~GpuWatchdogThreadImplV1() {
#endif
base::MessageLoopCurrent::Get()->RemoveTaskObserver(&task_observer_);
- GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogEnd);
}
void GpuWatchdogThreadImplV1::OnAcknowledge() {
CHECK(base::PlatformThread::CurrentId() == GetThreadId());
- // For metrics only
- if (more_gpu_thread_time_allowed_) {
- base::TimeDelta wait_time =
- base::TimeTicks::Now() - last_timeout_timeticks_;
- base::UmaHistogramCustomTimes("GPU.WatchdogThread.V1.ExtraThreadTime",
- wait_time, kMin, kMax, kBuckets);
- GpuWatchdogTimeoutHistogram(
- GpuWatchdogTimeoutEvent::kProgressAfterMoreThreadTime);
-
- more_gpu_thread_time_allowed_ = false;
- }
-
// The check has already been acknowledged and another has already been
// scheduled by a previous call to OnAcknowledge. It is normal for a
// watched thread to see armed_ being true multiple times before
@@ -372,25 +343,11 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
// Should not get here while the system is suspended.
DCHECK(!suspension_counter_.HasRefs());
- base::TimeTicks function_start = base::TimeTicks::Now();
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeout);
-
- // If this metric is added too early (eg. watchdog creation time), it cannot
- // be persistent. The histogram data will be lost after crash or browser exit.
- // Delay the recording of kGpuWatchdogStart until the first OnCheckTimeout().
- if (!is_watchdog_start_histogram_recorded) {
- is_watchdog_start_histogram_recorded = true;
- GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart);
- }
-
// If the watchdog woke up significantly behind schedule, disarm and reset
// the watchdog check. This is to prevent the watchdog thread from terminating
// when a machine wakes up from sleep or hibernation, which would otherwise
// appear to be a hang.
if (base::Time::Now() > suspension_timeout_) {
- // Reset the timeticks after resume for metrics.
- last_timeout_timeticks_ = function_start;
-
OnAcknowledge();
return;
}
@@ -406,12 +363,6 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
base::ThreadTicks current_cpu_time = GetWatchedThreadTime();
base::TimeDelta time_since_arm = current_cpu_time - arm_cpu_time_;
if (use_thread_cpu_time_ && (time_since_arm < timeout_)) {
- // For metrics
- if (!more_gpu_thread_time_allowed_) {
- more_gpu_thread_time_allowed_ = true;
- last_timeout_timeticks_ = function_start;
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kMoreThreadTime);
- }
task_runner()->PostDelayedTask(
FROM_HERE,
@@ -421,7 +372,6 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
return;
}
#endif
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeoutWait);
// For minimal developer annoyance, don't keep terminating. You need to skip
// the call to base::Process::Terminate below in a debugger for this to be
@@ -439,11 +389,6 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
// Don't crash if we're not on the TTY of our host X11 server.
UpdateActiveTTY();
if (host_tty_ != -1 && active_tty_ != -1 && host_tty_ != active_tty_) {
- // Only record for the time there is a change on TTY
- if (last_active_tty_ != active_tty_) {
- GpuWatchdogTimeoutHistogram(
- GpuWatchdogTimeoutEvent::kContinueOnNonHostServerTty);
- }
OnAcknowledge();
return;
}
@@ -504,23 +449,9 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
// Check it one last time before crashing.
if (!base::subtle::NoBarrier_Load(&awaiting_acknowledge_)) {
- { // For metrics only
- base::TimeDelta wait_time;
- if (more_gpu_thread_time_allowed_) {
- more_gpu_thread_time_allowed_ = false;
- wait_time = base::TimeTicks::Now() - last_timeout_timeticks_;
- } else {
- wait_time = base::TimeTicks::Now() - function_start;
- }
- base::UmaHistogramCustomTimes("GPU.WatchdogThread.V1.WaitTime", wait_time,
- kMin, kMax, kBuckets);
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kProgressAfterWait);
- }
OnAcknowledge();
return;
}
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kKill);
- GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogKill);
// Deliberately crash the process to create a crash dump.
*((volatile int*)0) = 0x1337;
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.h b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
index b39ae227318..ad26565910a 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.h
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
@@ -100,8 +100,6 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThread : public base::Thread,
// Continue the watchdog after a pause.
virtual void ResumeWatchdog() = 0;
- virtual void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) = 0;
-
// For gpu testing only. Return status for the watchdog tests
virtual bool IsGpuHangDetectedForTesting() = 0;
@@ -130,7 +128,6 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1
void OnGpuProcessTearDown() override {}
void ResumeWatchdog() override {}
void PauseWatchdog() override {}
- void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) override;
bool IsGpuHangDetectedForTesting() override;
// gl::ProgressReporter implementation:
@@ -264,16 +261,6 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1
base::Time check_time_;
base::TimeTicks check_timeticks_;
- // The time in the last OnCheckTimeout()
- base::TimeTicks last_timeout_timeticks_;
-
- // After GPU hang detected, whether the GPU thread is allowed to continue due
- // to not spending enough thread time.
- bool more_gpu_thread_time_allowed_ = false;
-
- // whether GpuWatchdogThreadEvent::kGpuWatchdogStart has been recorded.
- bool is_watchdog_start_histogram_recorded = false;
-
#if defined(USE_X11)
FILE* tty_file_;
int host_tty_;
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
index 5dc976af739..6df85496cfd 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
@@ -284,7 +284,8 @@ void GpuWatchdogThreadImplV2::OnAddPowerObserver() {
DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
DCHECK(base::PowerMonitor::IsInitialized());
- is_power_observer_added_ = base::PowerMonitor::AddObserver(this);
+ base::PowerMonitor::AddObserver(this);
+ is_power_observer_added_ = true;
}
// Running on the watchdog thread.
@@ -660,7 +661,6 @@ void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() {
void GpuWatchdogThreadImplV2::GpuWatchdogHistogram(
GpuWatchdogThreadEvent thread_event) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.Event.V2", thread_event);
base::UmaHistogramEnumeration("GPU.WatchdogThread.Event", thread_event);
}
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
index 2e0fd292ebe..4c79e535b5e 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
@@ -13,7 +13,7 @@ namespace gpu {
// If the actual time the watched GPU thread spent doing actual work is less
// than the wathdog timeout, the GPU thread can continue running through
// OnGPUWatchdogTimeout for at most 4 times before the gpu thread is killed.
-constexpr int kMaxCountOfMoreGpuThreadTimeAllowed = 4;
+constexpr int kMaxCountOfMoreGpuThreadTimeAllowed = 3;
#endif
constexpr int kMaxExtraCyclesBeforeKill = 0;
@@ -40,8 +40,6 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
void OnGpuProcessTearDown() override;
void ResumeWatchdog() override;
void PauseWatchdog() override;
- // Records "GPU.WatchdogThread.Event.V2" and "GPU.WatchdogThread.Event".
- void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) override;
bool IsGpuHangDetectedForTesting() override;
void WaitForPowerObserverAddedForTesting() override;
@@ -91,6 +89,9 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
// Do not change the function name. It is used for [GPU HANG] carsh reports.
void DeliberatelyTerminateToRecoverFromHang();
+ // Records "GPU.WatchdogThread.Event".
+ void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event);
+
// Histogram recorded in OnWatchdogTimeout()
// Records "GPU.WatchdogThread.Timeout"
void GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent timeout_event);
diff --git a/chromium/gpu/ipc/service/image_transport_surface_linux.cc b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
index c5c4d6ce7ed..05ce217a16d 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_linux.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
@@ -5,7 +5,6 @@
#include "gpu/ipc/service/image_transport_surface.h"
#include "gpu/ipc/service/pass_through_image_transport_surface.h"
-#include "ui/gl/gl_surface_glx.h"
#include "ui/gl/init/gl_factory.h"
namespace gpu {
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
index a298747e3b7..d565d2ab23d 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
@@ -58,13 +58,31 @@ class ImageTransportSurfaceOverlayMacBase : public BaseClass,
bool IsOffscreen() override;
gfx::SwapResult SwapBuffers(
gl::GLSurface::PresentationCallback callback) override;
+ void SwapBuffersAsync(
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) override;
gfx::SwapResult PostSubBuffer(
int x,
int y,
int width,
int height,
gl::GLSurface::PresentationCallback callback) override;
+ void PostSubBufferAsync(
+ int x,
+ int y,
+ int width,
+ int height,
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) override;
+ gfx::SwapResult CommitOverlayPlanes(
+ gl::GLSurface::PresentationCallback callback) override;
+ void CommitOverlayPlanesAsync(
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) override;
+
bool SupportsPostSubBuffer() override;
+ bool SupportsCommitOverlayPlanes() override;
+ bool SupportsAsyncSwap() override;
gfx::Size GetSize() override;
void* GetHandle() override;
gl::GLSurfaceFormat GetFormat() override;
@@ -80,6 +98,7 @@ class ImageTransportSurfaceOverlayMacBase : public BaseClass,
void ScheduleCALayerInUseQuery(
std::vector<gl::GLSurface::CALayerInUseQuery> queries) override;
bool IsSurfaceless() const override;
+ gfx::SurfaceOrigin GetOrigin() const override;
// ui::GpuSwitchingObserver implementation.
void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) override;
@@ -88,8 +107,8 @@ class ImageTransportSurfaceOverlayMacBase : public BaseClass,
~ImageTransportSurfaceOverlayMacBase() override;
gfx::SwapResult SwapBuffersInternal(
- const gfx::Rect& pixel_damage_rect,
- gl::GLSurface::PresentationCallback callback);
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback);
void ApplyBackpressure();
void BufferPresented(gl::GLSurface::PresentationCallback callback,
const gfx::PresentationFeedback& feedback);
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index eb46993b8f4..f0dd2928aef 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -51,6 +51,15 @@ ImageTransportSurfaceOverlayMacBase<BaseClass>::
ca_layer_tree_coordinator_.reset(new ui::CALayerTreeCoordinator(
use_remote_layer_api_, allow_av_sample_buffer_display_layer));
+
+ // Create the CAContext to send this to the GPU process, and the layer for
+ // the context.
+ if (use_remote_layer_api_) {
+ CGSConnectionID connection_id = CGSMainConnectionID();
+ ca_context_.reset([[CAContext contextWithCGSConnection:connection_id
+ options:@{}] retain]);
+ [ca_context_ setLayer:ca_layer_tree_coordinator_->GetCALayerForDisplay()];
+ }
}
template <typename BaseClass>
@@ -63,14 +72,6 @@ ImageTransportSurfaceOverlayMacBase<
template <typename BaseClass>
bool ImageTransportSurfaceOverlayMacBase<BaseClass>::Initialize(
gl::GLSurfaceFormat format) {
- // Create the CAContext to send this to the GPU process, and the layer for
- // the context.
- if (use_remote_layer_api_) {
- CGSConnectionID connection_id = CGSMainConnectionID();
- ca_context_.reset([
- [CAContext contextWithCGSConnection:connection_id options:@{}] retain]);
- [ca_context_ setLayer:ca_layer_tree_coordinator_->GetCALayerForDisplay()];
- }
return true;
}
@@ -112,8 +113,8 @@ void ImageTransportSurfaceOverlayMacBase<BaseClass>::BufferPresented(
template <typename BaseClass>
gfx::SwapResult
ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffersInternal(
- const gfx::Rect& pixel_damage_rect,
- gl::GLSurface::PresentationCallback callback) {
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) {
TRACE_EVENT0("gpu", "ImageTransportSurfaceOverlayMac::SwapBuffersInternal");
// Do a GL fence for flush to apply back-pressure before drawing.
@@ -126,7 +127,7 @@ ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffersInternal(
base::TimeTicks before_transaction_time = base::TimeTicks::Now();
{
TRACE_EVENT0("gpu", "CommitPendingTreesToCA");
- ca_layer_tree_coordinator_->CommitPendingTreesToCA(pixel_damage_rect);
+ ca_layer_tree_coordinator_->CommitPendingTreesToCA();
base::TimeTicks after_transaction_time = base::TimeTicks::Now();
UMA_HISTOGRAM_TIMES("GPU.IOSurface.CATransactionTime",
after_transaction_time - before_transaction_time);
@@ -173,6 +174,15 @@ ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffersInternal(
}
// Send the swap parameters to the browser.
+ if (completion_callback) {
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::BindOnce(
+ std::move(completion_callback),
+ gfx::SwapCompletionResult(
+ gfx::SwapResult::SWAP_ACK,
+ std::make_unique<gfx::CALayerParams>(params.ca_layer_params))));
+ }
delegate_->DidSwapBuffersComplete(std::move(params));
constexpr int64_t kRefreshIntervalInMicroseconds =
base::Time::kMicrosecondsPerSecond / 60;
@@ -184,7 +194,8 @@ ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffersInternal(
FROM_HERE,
base::BindOnce(
&ImageTransportSurfaceOverlayMacBase<BaseClass>::BufferPresented,
- weak_ptr_factory_.GetWeakPtr(), std::move(callback), feedback));
+ weak_ptr_factory_.GetWeakPtr(), std::move(presentation_callback),
+ feedback));
return gfx::SwapResult::SWAP_ACK;
}
@@ -192,8 +203,15 @@ template <typename BaseClass>
gfx::SwapResult ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffers(
gl::GLSurface::PresentationCallback callback) {
return SwapBuffersInternal(
- gfx::Rect(0, 0, pixel_size_.width(), pixel_size_.height()),
- std::move(callback));
+ base::DoNothing(), std::move(callback));
+}
+
+template <typename BaseClass>
+void ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffersAsync(
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) {
+ SwapBuffersInternal(
+ std::move(completion_callback), std::move(presentation_callback));
}
template <typename BaseClass>
@@ -203,8 +221,34 @@ gfx::SwapResult ImageTransportSurfaceOverlayMacBase<BaseClass>::PostSubBuffer(
int width,
int height,
gl::GLSurface::PresentationCallback callback) {
- return SwapBuffersInternal(gfx::Rect(x, y, width, height),
- std::move(callback));
+ return SwapBuffersInternal(base::DoNothing(), std::move(callback));
+}
+
+template <typename BaseClass>
+void ImageTransportSurfaceOverlayMacBase<BaseClass>::PostSubBufferAsync(
+ int x,
+ int y,
+ int width,
+ int height,
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) {
+ SwapBuffersInternal(std::move(completion_callback),
+ std::move(presentation_callback));
+}
+
+template <typename BaseClass>
+gfx::SwapResult
+ImageTransportSurfaceOverlayMacBase<BaseClass>::CommitOverlayPlanes(
+ gl::GLSurface::PresentationCallback callback) {
+ return SwapBuffersInternal(base::DoNothing(), std::move(callback));
+}
+
+template <typename BaseClass>
+void ImageTransportSurfaceOverlayMacBase<BaseClass>::CommitOverlayPlanesAsync(
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) {
+ SwapBuffersInternal(std::move(completion_callback),
+ std::move(presentation_callback));
}
template <typename BaseClass>
@@ -213,6 +257,17 @@ bool ImageTransportSurfaceOverlayMacBase<BaseClass>::SupportsPostSubBuffer() {
}
template <typename BaseClass>
+bool ImageTransportSurfaceOverlayMacBase<
+ BaseClass>::SupportsCommitOverlayPlanes() {
+ return true;
+}
+
+template <typename BaseClass>
+bool ImageTransportSurfaceOverlayMacBase<BaseClass>::SupportsAsyncSwap() {
+ return true;
+}
+
+template <typename BaseClass>
gfx::Size ImageTransportSurfaceOverlayMacBase<BaseClass>::GetSize() {
return gfx::Size();
}
@@ -304,6 +359,12 @@ bool ImageTransportSurfaceOverlayMacBase<BaseClass>::IsSurfaceless() const {
}
template <typename BaseClass>
+gfx::SurfaceOrigin ImageTransportSurfaceOverlayMacBase<BaseClass>::GetOrigin()
+ const {
+ return gfx::SurfaceOrigin::kTopLeft;
+}
+
+template <typename BaseClass>
bool ImageTransportSurfaceOverlayMacBase<BaseClass>::Resize(
const gfx::Size& pixel_size,
float scale_factor,
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
index fa58d426738..4d0703ea055 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
@@ -272,17 +272,18 @@ void PassThroughImageTransportSurface::FinishSwapBuffersAsync(
SwapCompletionCallback callback,
gfx::SwapResponse response,
uint64_t local_swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence) {
+ gfx::SwapCompletionResult result) {
// TODO(afrantzis): It's probably not ideal to introduce a wait here.
// However, since this is a temporary step to maintain existing behavior
// until we are ready to expose the gpu_fence further, and fences are only
// enabled with a flag, this should be fine for now.
- if (gpu_fence)
- gpu_fence->Wait();
- response.result = result;
+ if (result.gpu_fence) {
+ result.gpu_fence->Wait();
+ result.gpu_fence.reset();
+ }
+ response.result = result.swap_result;
FinishSwapBuffers(std::move(response), local_swap_id);
- std::move(callback).Run(result, nullptr);
+ std::move(callback).Run(std::move(result));
}
void PassThroughImageTransportSurface::BufferPresented(
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
index e463dc1e95b..373221bc923 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
@@ -63,8 +63,7 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
void FinishSwapBuffersAsync(SwapCompletionCallback callback,
gfx::SwapResponse response,
uint64_t local_swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence);
+ gfx::SwapCompletionResult result);
void BufferPresented(PresentationCallback callback,
uint64_t local_swap_id,
diff --git a/chromium/gpu/ipc/service/shared_image_stub.cc b/chromium/gpu/ipc/service/shared_image_stub.cc
index 30789237258..4dac0b496fb 100644
--- a/chromium/gpu/ipc/service/shared_image_stub.cc
+++ b/chromium/gpu/ipc/service/shared_image_stub.cc
@@ -354,13 +354,16 @@ void SharedImageStub::OnPresentSwapChain(const Mailbox& mailbox,
#if defined(OS_FUCHSIA)
void SharedImageStub::OnRegisterSysmemBufferCollection(
gfx::SysmemBufferCollectionId id,
- zx::channel token) {
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
if (!id || !token) {
OnError();
return;
}
- if (!factory_->RegisterSysmemBufferCollection(id, std::move(token))) {
+ if (!factory_->RegisterSysmemBufferCollection(id, std::move(token), format,
+ usage)) {
OnError();
}
}
@@ -389,7 +392,7 @@ void SharedImageStub::OnRegisterSharedImageUploadBuffer(
}
}
-bool SharedImageStub::MakeContextCurrent() {
+bool SharedImageStub::MakeContextCurrent(bool needs_gl) {
DCHECK(context_state_);
if (context_state_->context_lost()) {
@@ -400,13 +403,9 @@ bool SharedImageStub::MakeContextCurrent() {
// |factory_| never writes to the surface, so pass nullptr to
// improve performance. https://crbug.com/457431
auto* context = context_state_->real_context();
- if (context->IsCurrent(nullptr) ||
- context->MakeCurrent(context_state_->surface())) {
- return true;
- }
- context_state_->MarkContextLost();
- LOG(ERROR) << "SharedImageStub: MakeCurrent failed";
- return false;
+ if (context->IsCurrent(nullptr))
+ return !context_state_->CheckResetStatus(needs_gl);
+ return context_state_->MakeCurrent(/*surface=*/nullptr, needs_gl);
}
ContextResult SharedImageStub::MakeContextCurrentAndCreateFactory() {
@@ -421,7 +420,9 @@ ContextResult SharedImageStub::MakeContextCurrentAndCreateFactory() {
}
DCHECK(context_state_);
DCHECK(!context_state_->context_lost());
- if (!MakeContextCurrent()) {
+ // Some shared image backing factories will use GL in ctor, so we need GL even
+ // if chrome is using non-GL backing.
+ if (!MakeContextCurrent(/*needs_gl=*/true)) {
context_state_ = nullptr;
return ContextResult::kTransientFailure;
}
diff --git a/chromium/gpu/ipc/service/shared_image_stub.h b/chromium/gpu/ipc/service/shared_image_stub.h
index 1bc71f842cd..781c1dc55b7 100644
--- a/chromium/gpu/ipc/service/shared_image_stub.h
+++ b/chromium/gpu/ipc/service/shared_image_stub.h
@@ -87,11 +87,13 @@ class GPU_IPC_SERVICE_EXPORT SharedImageStub
#endif // OS_WIN
#if defined(OS_FUCHSIA)
void OnRegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token);
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage);
void OnReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id);
#endif // OS_FUCHSIA
- bool MakeContextCurrent();
+ bool MakeContextCurrent(bool needs_gl = false);
ContextResult MakeContextCurrentAndCreateFactory();
void OnError();
diff --git a/chromium/gpu/ipc/service/stream_texture_android.cc b/chromium/gpu/ipc/service/stream_texture_android.cc
index 74faadd37b9..bae8cca172f 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.cc
+++ b/chromium/gpu/ipc/service/stream_texture_android.cc
@@ -128,17 +128,6 @@ void StreamTexture::ReleaseChannel() {
channel_ = nullptr;
}
-// gpu::gles2::GLStreamTextureMatrix implementation
-void StreamTexture::GetTextureMatrix(float xform[16]) {
- static constexpr float kIdentity[16]{
- 1, 0, 0, 0, //
- 0, 1, 0, 0, //
- 0, 0, 1, 0, //
- 0, 0, 0, 1 //
- };
- memcpy(xform, kIdentity, sizeof(kIdentity));
-}
-
bool StreamTexture::IsUsingGpuMemory() const {
// Once the image is bound during the first update, we just replace/update the
// same image every time in future and hence the image is always bound to a
@@ -236,8 +225,12 @@ void StreamTexture::OnFrameAvailable() {
gfx::Rect visible_rect;
gfx::Size coded_size;
- texture_owner_->GetCodedSizeAndVisibleRect(rotated_visible_size_, &coded_size,
- &visible_rect);
+ if (!texture_owner_->GetCodedSizeAndVisibleRect(rotated_visible_size_,
+ &coded_size, &visible_rect)) {
+ // if we failed to get right size fallback to visible size.
+ coded_size = rotated_visible_size_;
+ visible_rect = gfx::Rect(coded_size);
+ }
if (coded_size != coded_size_ || visible_rect != visible_rect_) {
coded_size_ = coded_size;
diff --git a/chromium/gpu/ipc/service/stream_texture_android.h b/chromium/gpu/ipc/service/stream_texture_android.h
index d99197dac47..6fedde43727 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.h
+++ b/chromium/gpu/ipc/service/stream_texture_android.h
@@ -81,7 +81,6 @@ class StreamTexture : public StreamTextureSharedImageInterface,
GetAHardwareBuffer() override;
// gpu::gles2::GLStreamTextureMatrix implementation
- void GetTextureMatrix(float xform[16]) override;
void NotifyPromotionHint(bool promotion_hint,
int display_x,
int display_y,
diff --git a/chromium/gpu/ipc/shared_image_interface_in_process.cc b/chromium/gpu/ipc/shared_image_interface_in_process.cc
index 715ea24292b..744162df245 100644
--- a/chromium/gpu/ipc/shared_image_interface_in_process.cc
+++ b/chromium/gpu/ipc/shared_image_interface_in_process.cc
@@ -94,7 +94,7 @@ void SharedImageInterfaceInProcess::DestroyOnGpu(
completion->Signal();
}
-bool SharedImageInterfaceInProcess::MakeContextCurrent() {
+bool SharedImageInterfaceInProcess::MakeContextCurrent(bool needs_gl) {
if (!context_state_)
return false;
@@ -104,12 +104,9 @@ bool SharedImageInterfaceInProcess::MakeContextCurrent() {
// |shared_image_factory_| never writes to the surface, so skip unnecessary
// MakeCurrent to improve performance. https://crbug.com/457431
auto* context = context_state_->real_context();
- if (context->IsCurrent(nullptr) ||
- context->MakeCurrent(context_state_->surface()))
- return true;
-
- context_state_->MarkContextLost();
- return false;
+ if (context->IsCurrent(nullptr))
+ return !context_state_->CheckResetStatus(needs_gl);
+ return context_state_->MakeCurrent(/*surface=*/nullptr, needs_gl);
}
void SharedImageInterfaceInProcess::LazyCreateSharedImageFactory() {
@@ -117,6 +114,11 @@ void SharedImageInterfaceInProcess::LazyCreateSharedImageFactory() {
if (shared_image_factory_)
return;
+ // Some shared image backing factories will use GL in ctor, so we need GL even
+  // if Chrome is using a non-GL backing.
+ if (!MakeContextCurrent(/*needs_gl=*/true))
+ return;
+
// We need WrappedSkImage to support creating a SharedImage with pixel data
// when GL is unavailable. This is used in various unit tests.
const bool enable_wrapped_sk_image =
@@ -308,7 +310,9 @@ void SharedImageInterfaceInProcess::PresentSwapChain(
#if defined(OS_FUCHSIA)
void SharedImageInterfaceInProcess::RegisterSysmemBufferCollection(
gfx::SysmemBufferCollectionId id,
- zx::channel token) {
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
NOTREACHED();
}
void SharedImageInterfaceInProcess::ReleaseSysmemBufferCollection(
@@ -383,6 +387,16 @@ void SharedImageInterfaceInProcess::DestroySharedImageOnGpuThread(
}
}
+void SharedImageInterfaceInProcess::WaitSyncTokenOnGpuThread(
+ const SyncToken& sync_token) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ if (!MakeContextCurrent())
+ return;
+
+ mailbox_manager_->PushTextureUpdates(sync_token);
+ sync_point_client_state_->ReleaseFenceSync(sync_token.release_count());
+}
+
SyncToken SharedImageInterfaceInProcess::GenUnverifiedSyncToken() {
base::AutoLock lock(lock_);
return MakeSyncToken(next_fence_sync_release_ - 1);
@@ -395,6 +409,16 @@ SyncToken SharedImageInterfaceInProcess::GenVerifiedSyncToken() {
return sync_token;
}
+void SharedImageInterfaceInProcess::WaitSyncToken(const SyncToken& sync_token) {
+ base::AutoLock lock(lock_);
+
+ ScheduleGpuTask(
+ base::BindOnce(&SharedImageInterfaceInProcess::WaitSyncTokenOnGpuThread,
+ base::Unretained(this),
+ MakeSyncToken(next_fence_sync_release_++)),
+ {sync_token});
+}
+
void SharedImageInterfaceInProcess::Flush() {
// No need to flush in this implementation.
}
diff --git a/chromium/gpu/ipc/shared_image_interface_in_process.h b/chromium/gpu/ipc/shared_image_interface_in_process.h
index 60b1a3dc318..714911006e9 100644
--- a/chromium/gpu/ipc/shared_image_interface_in_process.h
+++ b/chromium/gpu/ipc/shared_image_interface_in_process.h
@@ -113,7 +113,9 @@ class GL_IN_PROCESS_CONTEXT_EXPORT SharedImageInterfaceInProcess
#if defined(OS_FUCHSIA)
// Registers a sysmem buffer collection. Not reached in this implementation.
void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token) override;
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) override;
// Not reached in this implementation.
void ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id) override;
@@ -127,6 +129,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT SharedImageInterfaceInProcess
// commands on this interface have executed on the service side.
SyncToken GenVerifiedSyncToken() override;
+ void WaitSyncToken(const SyncToken& sync_token) override;
+
// Flush the SharedImageInterface, issuing any deferred IPCs.
void Flush() override;
@@ -150,7 +154,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT SharedImageInterfaceInProcess
std::vector<SyncToken> sync_token_fences);
// Only called on the gpu thread.
- bool MakeContextCurrent();
+ bool MakeContextCurrent(bool needs_gl = false);
void LazyCreateSharedImageFactory();
void CreateSharedImageOnGpuThread(const Mailbox& mailbox,
viz::ResourceFormat format,
@@ -177,6 +181,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT SharedImageInterfaceInProcess
void UpdateSharedImageOnGpuThread(const Mailbox& mailbox,
const SyncToken& sync_token);
void DestroySharedImageOnGpuThread(const Mailbox& mailbox);
+ void WaitSyncTokenOnGpuThread(const SyncToken& sync_token);
void WrapTaskWithGpuUrl(base::OnceClosure task);
// Used to schedule work on the gpu thread. This is a raw pointer for now