author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-04-05 14:08:31 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-04-11 07:46:53 +0000
commit    6a4cabb866f66d4128a97cdc6d9d08ce074f1247 (patch)
tree      ab00f70a5e89278d6a0d16ff0c42578dc4d84a2d /chromium/gpu/ipc
parent    e733310db58160074f574c429d48f8308c0afe17 (diff)
download  qtwebengine-chromium-6a4cabb866f66d4128a97cdc6d9d08ce074f1247.tar.gz
BASELINE: Update Chromium to 57.0.2987.144
Change-Id: I29db402ff696c71a04c4dbaec822c2e53efe0267
Reviewed-by: Peter Varga <pvarga@inf.u-szeged.hu>
Diffstat (limited to 'chromium/gpu/ipc')
-rw-r--r-- chromium/gpu/ipc/BUILD.gn | 11
-rw-r--r-- chromium/gpu/ipc/client/BUILD.gn | 7
-rw-r--r-- chromium/gpu/ipc/client/android/in_process_surface_texture_manager.cc | 60
-rw-r--r-- chromium/gpu/ipc/client/android/in_process_surface_texture_manager.h | 48
-rw-r--r-- chromium/gpu/ipc/client/command_buffer_proxy_impl.cc | 164
-rw-r--r-- chromium/gpu/ipc/client/command_buffer_proxy_impl.h | 36
-rw-r--r-- chromium/gpu/ipc/client/gpu_channel_host.cc | 48
-rw-r--r-- chromium/gpu/ipc/client/gpu_channel_host.h | 10
-rw-r--r-- chromium/gpu/ipc/client/gpu_in_process_context_tests.cc | 21
-rw-r--r-- chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc | 3
-rw-r--r-- chromium/gpu/ipc/client/gpu_memory_buffer_impl.h | 6
-rw-r--r-- chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc | 26
-rw-r--r-- chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc | 19
-rw-r--r-- chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h | 2
-rw-r--r-- chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h | 10
-rw-r--r-- chromium/gpu/ipc/common/BUILD.gn | 3
-rw-r--r-- chromium/gpu/ipc/common/android/surface_texture_manager.cc | 28
-rw-r--r-- chromium/gpu/ipc/common/android/surface_texture_manager.h | 42
-rw-r--r-- chromium/gpu/ipc/common/gpu_info.mojom | 5
-rw-r--r-- chromium/gpu/ipc/common/gpu_info_struct_traits.cc | 3
-rw-r--r-- chromium/gpu/ipc/common/gpu_info_struct_traits.h | 9
-rw-r--r-- chromium/gpu/ipc/common/gpu_param_traits_macros.h | 1
-rw-r--r-- chromium/gpu/ipc/common/gpu_preferences.mojom | 53
-rw-r--r-- chromium/gpu/ipc/common/gpu_preferences.typemap | 16
-rw-r--r-- chromium/gpu/ipc/common/gpu_preferences_struct_traits.h | 238
-rw-r--r-- chromium/gpu/ipc/common/gpu_surface_tracker.cc | 4
-rw-r--r-- chromium/gpu/ipc/common/gpu_surface_tracker.h | 7
-rw-r--r-- chromium/gpu/ipc/common/struct_traits_unittest.cc | 37
-rw-r--r-- chromium/gpu/ipc/common/traits_test_service.mojom | 4
-rw-r--r-- chromium/gpu/ipc/common/typemaps.gni | 1
-rw-r--r-- chromium/gpu/ipc/gl_in_process_context.cc | 31
-rw-r--r-- chromium/gpu/ipc/gl_in_process_context.h | 23
-rw-r--r-- chromium/gpu/ipc/gpu_in_process_thread_service.cc | 69
-rw-r--r-- chromium/gpu/ipc/gpu_in_process_thread_service.h | 58
-rw-r--r-- chromium/gpu/ipc/host/BUILD.gn | 24
-rw-r--r-- chromium/gpu/ipc/host/DEPS | 3
-rw-r--r-- chromium/gpu/ipc/host/gpu_memory_buffer_support.cc | 113
-rw-r--r-- chromium/gpu/ipc/host/gpu_memory_buffer_support.h | 46
-rw-r--r-- chromium/gpu/ipc/host/gpu_switches.cc | 16
-rw-r--r-- chromium/gpu/ipc/host/gpu_switches.h | 17
-rw-r--r-- chromium/gpu/ipc/host/shader_disk_cache.cc | 629
-rw-r--r-- chromium/gpu/ipc/host/shader_disk_cache.h | 157
-rw-r--r-- chromium/gpu/ipc/host/shader_disk_cache_unittest.cc | 109
-rw-r--r-- chromium/gpu/ipc/in_process_command_buffer.cc | 502
-rw-r--r-- chromium/gpu/ipc/in_process_command_buffer.h | 121
-rw-r--r-- chromium/gpu/ipc/service/BUILD.gn | 9
-rw-r--r-- chromium/gpu/ipc/service/child_window_surface_win.cc | 188
-rw-r--r-- chromium/gpu/ipc/service/child_window_surface_win.h | 16
-rw-r--r-- chromium/gpu/ipc/service/child_window_win.cc | 210
-rw-r--r-- chromium/gpu/ipc/service/child_window_win.h | 47
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel.cc | 21
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel.h | 4
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_manager.cc | 14
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_manager.h | 20
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_manager_delegate.h | 6
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_unittest.cc | 2
-rw-r--r-- chromium/gpu/ipc/service/gpu_command_buffer_stub.cc | 78
-rw-r--r-- chromium/gpu/ipc/service/gpu_command_buffer_stub.h | 6
-rw-r--r-- chromium/gpu/ipc/service/gpu_init.cc | 5
-rw-r--r-- chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc | 38
-rw-r--r-- chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h | 5
-rw-r--r-- chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h | 13
-rw-r--r-- chromium/gpu/ipc/service/gpu_vsync_provider.h | 48
-rw-r--r-- chromium/gpu/ipc/service/gpu_vsync_provider_posix.cc | 22
-rw-r--r-- chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc | 84
-rw-r--r-- chromium/gpu/ipc/service/gpu_vsync_provider_win.cc | 264
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface.h | 3
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_android.cc | 2
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_linux.cc | 2
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_mac.mm | 6
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h | 2
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm | 2
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_win.cc | 2
-rw-r--r-- chromium/gpu/ipc/service/pass_through_image_transport_surface.cc | 2
-rw-r--r-- chromium/gpu/ipc/service/pass_through_image_transport_surface.h | 3
75 files changed, 3072 insertions, 892 deletions
diff --git a/chromium/gpu/ipc/BUILD.gn b/chromium/gpu/ipc/BUILD.gn
index a30ffdcfa98..61afbc6f518 100644
--- a/chromium/gpu/ipc/BUILD.gn
+++ b/chromium/gpu/ipc/BUILD.gn
@@ -16,9 +16,16 @@ group("command_buffer") {
}
}
-source_set("command_buffer_sources") {
+if (is_component_build) {
+ link_target_type = "source_set"
+} else {
+ link_target_type = "static_library"
+}
+target(link_target_type, "command_buffer_sources") {
visibility = [ "//gpu/*" ]
sources = [
+ "gpu_in_process_thread_service.cc",
+ "gpu_in_process_thread_service.h",
"in_process_command_buffer.cc",
"in_process_command_buffer.h",
]
@@ -31,6 +38,8 @@ source_set("command_buffer_sources") {
"//gpu/command_buffer/common:common_sources",
"//gpu/command_buffer/service:service_sources",
"//gpu/config:config_sources",
+ "//gpu/ipc/client:ipc_client_sources",
+ "//gpu/ipc/service:ipc_service_sources",
"//ui/gfx",
"//ui/gl",
"//ui/gl/init",
diff --git a/chromium/gpu/ipc/client/BUILD.gn b/chromium/gpu/ipc/client/BUILD.gn
index aa4f317e4af..53775227407 100644
--- a/chromium/gpu/ipc/client/BUILD.gn
+++ b/chromium/gpu/ipc/client/BUILD.gn
@@ -35,13 +35,6 @@ source_set("ipc_client_sources") {
"gpu_process_hosted_ca_layer_tree_params.h",
]
}
- if (is_android) {
- sources += [
- "android/in_process_surface_texture_manager.cc",
- "android/in_process_surface_texture_manager.h",
- ]
- libs = [ "android" ]
- }
if (use_ozone) {
sources += [
"gpu_memory_buffer_impl_ozone_native_pixmap.cc",
diff --git a/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.cc b/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.cc
deleted file mode 100644
index 84fc5eee88d..00000000000
--- a/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/client/android/in_process_surface_texture_manager.h"
-
-#include <android/native_window.h>
-#include <android/native_window_jni.h>
-
-#include "base/android/jni_android.h"
-#include "base/containers/scoped_ptr_hash_map.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-
-namespace gpu {
-
-// static
-InProcessSurfaceTextureManager* InProcessSurfaceTextureManager::GetInstance() {
- return base::Singleton<
- InProcessSurfaceTextureManager,
- base::LeakySingletonTraits<InProcessSurfaceTextureManager>>::get();
-}
-
-void InProcessSurfaceTextureManager::RegisterSurfaceTexture(
- int surface_texture_id,
- int client_id,
- gl::SurfaceTexture* surface_texture) {
- base::AutoLock lock(lock_);
-
- DCHECK(surface_textures_.find(surface_texture_id) == surface_textures_.end());
- surface_textures_.add(
- surface_texture_id,
- base::MakeUnique<gl::ScopedJavaSurface>(surface_texture));
-}
-
-void InProcessSurfaceTextureManager::UnregisterSurfaceTexture(
- int surface_texture_id,
- int client_id) {
- base::AutoLock lock(lock_);
-
- DCHECK(surface_textures_.find(surface_texture_id) != surface_textures_.end());
- surface_textures_.erase(surface_texture_id);
-}
-
-gfx::AcceleratedWidget
-InProcessSurfaceTextureManager::AcquireNativeWidgetForSurfaceTexture(
- int surface_texture_id) {
- base::AutoLock lock(lock_);
-
- DCHECK(surface_textures_.find(surface_texture_id) != surface_textures_.end());
- JNIEnv* env = base::android::AttachCurrentThread();
- return ANativeWindow_fromSurface(
- env, surface_textures_.get(surface_texture_id)->j_surface().obj());
-}
-
-InProcessSurfaceTextureManager::InProcessSurfaceTextureManager() {}
-
-InProcessSurfaceTextureManager::~InProcessSurfaceTextureManager() {}
-
-} // namespace gpu
diff --git a/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.h b/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.h
deleted file mode 100644
index 106fc7aa54a..00000000000
--- a/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_IPC_CLIENT_ANDROID_IN_PROCESS_SURFACE_TEXTURE_MANAGER_H_
-#define GPU_IPC_CLIENT_ANDROID_IN_PROCESS_SURFACE_TEXTURE_MANAGER_H_
-
-#include <memory>
-
-#include "base/containers/scoped_ptr_hash_map.h"
-#include "base/macros.h"
-#include "base/memory/singleton.h"
-#include "base/synchronization/lock.h"
-#include "gpu/gpu_export.h"
-#include "gpu/ipc/common/android/surface_texture_manager.h"
-#include "ui/gl/android/scoped_java_surface.h"
-
-namespace gpu {
-
-class GPU_EXPORT InProcessSurfaceTextureManager : public SurfaceTextureManager {
- public:
- static GPU_EXPORT InProcessSurfaceTextureManager* GetInstance();
-
- // Overridden from SurfaceTextureManager:
- void RegisterSurfaceTexture(int surface_texture_id,
- int client_id,
- gl::SurfaceTexture* surface_texture) override;
- void UnregisterSurfaceTexture(int surface_texture_id, int client_id) override;
- gfx::AcceleratedWidget AcquireNativeWidgetForSurfaceTexture(
- int surface_texture_id) override;
-
- private:
- friend struct base::DefaultSingletonTraits<InProcessSurfaceTextureManager>;
-
- InProcessSurfaceTextureManager();
- ~InProcessSurfaceTextureManager() override;
-
- using SurfaceTextureMap =
- base::ScopedPtrHashMap<int, std::unique_ptr<gl::ScopedJavaSurface>>;
- SurfaceTextureMap surface_textures_;
- base::Lock lock_;
-
- DISALLOW_COPY_AND_ASSIGN(InProcessSurfaceTextureManager);
-};
-
-} // namespace gpu
-
-#endif // GPU_IPC_CLIENT_ANDROID_IN_PROCESS_SURFACE_TEXTURE_MANAGER_H_
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
index 07c31ff9a95..552f23b92d4 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
@@ -106,9 +106,9 @@ CommandBufferProxyImpl::~CommandBufferProxyImpl() {
}
bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
- std::unique_ptr<base::AutoLock> lock;
+ base::Optional<base::AutoLock> lock;
if (lock_)
- lock.reset(new base::AutoLock(*lock_));
+ lock.emplace(*lock_);
bool handled = true;
IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
@@ -123,6 +123,7 @@ bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
if (!handled) {
LOG(ERROR) << "Gpu process sent invalid message.";
+ base::AutoLock last_state_lock(last_state_lock_);
OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage,
gpu::error::kLostContext);
}
@@ -130,9 +131,10 @@ bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
}
void CommandBufferProxyImpl::OnChannelError() {
- std::unique_ptr<base::AutoLock> lock;
+ base::Optional<base::AutoLock> lock;
if (lock_)
- lock.reset(new base::AutoLock(*lock_));
+ lock.emplace(*lock_);
+ base::AutoLock last_state_lock(last_state_lock_);
gpu::error::ContextLostReason context_lost_reason =
gpu::error::kGpuChannelLost;
@@ -148,6 +150,7 @@ void CommandBufferProxyImpl::OnChannelError() {
void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason,
gpu::error::Error error) {
+ base::AutoLock lock(last_state_lock_);
OnGpuAsyncMessageError(reason, error);
}
@@ -177,6 +180,7 @@ void CommandBufferProxyImpl::OnSignalAck(uint32_t id) {
SignalTaskMap::iterator it = signal_tasks_.find(id);
if (it == signal_tasks_.end()) {
LOG(ERROR) << "Gpu process sent invalid SignalAck.";
+ base::AutoLock lock(last_state_lock_);
OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage,
gpu::error::kLostContext);
return;
@@ -210,7 +214,6 @@ bool CommandBufferProxyImpl::Initialize(
if (!base::SharedMemory::IsHandleValid(handle))
return false;
-
// TODO(vadimt): Remove ScopedTracker below once crbug.com/125248 is fixed.
tracked_objects::ScopedTracker tracking_profile(
FROM_HERE_WITH_EXPLICIT_FUNCTION(
@@ -239,17 +242,15 @@ bool CommandBufferProxyImpl::Initialize(
return true;
}
-gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
- return last_state_;
-}
-
-int32_t CommandBufferProxyImpl::GetLastToken() {
+CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
+ base::AutoLock lock(last_state_lock_);
TryUpdateState();
- return last_state_.token;
+ return last_state_;
}
void CommandBufferProxyImpl::Flush(int32_t put_offset) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -283,6 +284,7 @@ void CommandBufferProxyImpl::Flush(int32_t put_offset) {
void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -333,30 +335,53 @@ void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback(
update_vsync_parameters_completion_callback_ = callback;
}
-void CommandBufferProxyImpl::WaitForTokenInRange(int32_t start, int32_t end) {
+gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForTokenInRange(
+ int32_t start,
+ int32_t end) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForToken", "start", start,
"end", end);
+ // Error needs to be checked in case the state was updated on another thread.
+ // We need to make sure that the reentrant context loss callback is called so
+ // that the share group is also lost before we return any error up the stack.
+ if (last_state_.error != gpu::error::kNoError) {
+ if (gpu_control_client_)
+ gpu_control_client_->OnGpuControlLostContextMaybeReentrant();
+ return last_state_;
+ }
TryUpdateState();
if (!InRange(start, end, last_state_.token) &&
last_state_.error == gpu::error::kNoError) {
gpu::CommandBuffer::State state;
if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(route_id_, start, end,
- &state)))
+ &state))) {
SetStateFromSyncReply(state);
+ }
}
if (!InRange(start, end, last_state_.token) &&
last_state_.error == gpu::error::kNoError) {
LOG(ERROR) << "GPU state invalid after WaitForTokenInRange.";
OnGpuSyncReplyError();
}
+ return last_state_;
}
-void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start,
- int32_t end) {
+gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForGetOffsetInRange(
+ int32_t start,
+ int32_t end) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForGetOffset", "start",
start, "end", end);
+ // Error needs to be checked in case the state was updated on another thread.
+ // We need to make sure that the reentrant context loss callback is called so
+ // that the share group is also lost before we return any error up the stack.
+ if (last_state_.error != gpu::error::kNoError) {
+ if (gpu_control_client_)
+ gpu_control_client_->OnGpuControlLostContextMaybeReentrant();
+ return last_state_;
+ }
TryUpdateState();
if (!InRange(start, end, last_state_.get_offset) &&
last_state_.error == gpu::error::kNoError) {
@@ -370,10 +395,12 @@ void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start,
LOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange.";
OnGpuSyncReplyError();
}
+ return last_state_;
}
void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -385,6 +412,7 @@ scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
size_t size,
int32_t* id) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
*id = -1;
if (last_state_.error != gpu::error::kNoError)
@@ -428,6 +456,7 @@ scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -448,6 +477,7 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
size_t height,
unsigned internal_format) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return -1;
@@ -462,10 +492,9 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
// This handle is owned by the GPU process and must be passed to it or it
// will leak. In other words, do not early out on error between here and the
// sending of the CreateImage IPC below.
- bool requires_sync_token = false;
gfx::GpuMemoryBufferHandle handle =
- channel_->ShareGpuMemoryBufferToGpuProcess(gpu_memory_buffer->GetHandle(),
- &requires_sync_token);
+ gfx::CloneHandleForIPC(gpu_memory_buffer->GetHandle());
+ bool requires_sync_token = handle.type == gfx::IO_SURFACE_BUFFER;
uint64_t image_fence_sync = 0;
if (requires_sync_token) {
@@ -509,6 +538,7 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
void CommandBufferProxyImpl::DestroyImage(int32_t id) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -522,7 +552,7 @@ int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage(
unsigned usage) {
CheckLock();
std::unique_ptr<gfx::GpuMemoryBuffer> buffer(
- channel_->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer(
+ channel_->gpu_memory_buffer_manager()->CreateGpuMemoryBuffer(
gfx::Size(width, height),
gpu::DefaultBufferFormatForImageFormat(internal_format),
gfx::BufferUsage::SCANOUT, gpu::kNullSurfaceHandle));
@@ -536,6 +566,7 @@ int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage(
uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return 0;
@@ -588,6 +619,7 @@ bool CommandBufferProxyImpl::IsFenceSyncFlushed(uint64_t release) {
bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return false;
@@ -611,9 +643,18 @@ bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) {
return false;
}
+// This can be called from any thread without holding |lock_|. Use a thread-safe
+// non-error throwing variant of TryUpdateState for this.
+bool CommandBufferProxyImpl::IsFenceSyncReleased(uint64_t release) {
+ base::AutoLock lock(last_state_lock_);
+ TryUpdateStateThreadSafe();
+ return release <= last_state_.release_count;
+}
+
void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -648,6 +689,7 @@ bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken(
void CommandBufferProxyImpl::SignalQuery(uint32_t query,
const base::Closure& callback) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -666,6 +708,7 @@ void CommandBufferProxyImpl::SignalQuery(uint32_t query,
void CommandBufferProxyImpl::TakeFrontBuffer(const gpu::Mailbox& mailbox) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -676,6 +719,7 @@ void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox,
const gpu::SyncToken& sync_token,
bool is_lost) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -683,36 +727,47 @@ void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox,
Send(new GpuCommandBufferMsg_ReturnFrontBuffer(route_id_, mailbox, is_lost));
}
-gpu::error::Error CommandBufferProxyImpl::GetLastError() {
- return last_state_.error;
-}
-
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
- // Caller should not intentionally send a message if the context is lost.
- DCHECK(last_state_.error == gpu::error::kNoError);
DCHECK(channel_);
-
- if (!msg->is_sync()) {
- bool result = channel_->Send(msg);
- // Send() should always return true for async messages.
- DCHECK(result);
- return true;
+ last_state_lock_.AssertAcquired();
+ DCHECK_EQ(gpu::error::kNoError, last_state_.error);
+
+ last_state_lock_.Release();
+
+ // Call is_sync() before sending message.
+ bool is_sync = msg->is_sync();
+ bool result = channel_->Send(msg);
+ // Send() should always return true for async messages.
+ DCHECK(is_sync || result);
+
+ last_state_lock_.Acquire();
+
+ if (last_state_.error != gpu::error::kNoError) {
+ // Error needs to be checked in case the state was updated on another thread
+ // while we were waiting on Send. We need to make sure that the reentrant
+ // context loss callback is called so that the share group is also lost
+ // before we return any error up the stack.
+ if (gpu_control_client_)
+ gpu_control_client_->OnGpuControlLostContextMaybeReentrant();
+ return false;
}
- if (channel_->Send(msg))
- return true;
+ if (!result) {
+ // Flag the command buffer as lost. Defer deleting the channel until
+ // OnChannelError is called after returning to the message loop in case it
+ // is referenced elsewhere.
+ DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
+ OnClientError(gpu::error::kLostContext);
+ return false;
+ }
- // Flag the command buffer as lost. Defer deleting the channel until
- // OnChannelError is called after returning to the message loop in case
- // it is referenced elsewhere.
- DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
- OnClientError(gpu::error::kLostContext);
- return false;
+ return true;
}
void CommandBufferProxyImpl::SetStateFromSyncReply(
const gpu::CommandBuffer::State& state) {
- DCHECK(last_state_.error == gpu::error::kNoError);
+ CheckLock();
+ last_state_lock_.AssertAcquired();
// Handle wraparound. It works as long as we don't have more than 2B state
// updates in flight across which reordering occurs.
if (state.generation - last_state_.generation < 0x80000000U)
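The wraparound test above deserves a note: generations are unsigned 32-bit, so (a - b) < 0x80000000U holds exactly when a is at most 2^31 - 1 steps ahead of b modulo 2^32, which is what the "more than 2B state updates" comment refers to. A standalone sketch (not Chromium code) of the idiom:

    // Unsigned-wraparound ordering: "a is newer than or equal to b" is
    // tested as (a - b) < 0x80000000u. Unsigned subtraction wraps modulo
    // 2^32, so the test stays correct across counter wraparound as long
    // as the two values are less than 2^31 apart.
    #include <cassert>
    #include <cstdint>

    bool IsNewerOrEqual(uint32_t a, uint32_t b) {
      return a - b < 0x80000000u;
    }

    int main() {
      assert(IsNewerOrEqual(5u, 3u));            // normal ordering
      assert(!IsNewerOrEqual(3u, 5u));           // stale update rejected
      assert(IsNewerOrEqual(2u, 0xFFFFFFFEu));   // 2 is newer across the wrap
      assert(!IsNewerOrEqual(0xFFFFFFFEu, 2u));  // stale across the wrap
    }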
@@ -722,6 +777,8 @@ void CommandBufferProxyImpl::SetStateFromSyncReply(
}
void CommandBufferProxyImpl::TryUpdateState() {
+ CheckLock();
+ last_state_lock_.AssertAcquired();
if (last_state_.error == gpu::error::kNoError) {
shared_state()->Read(&last_state_);
if (last_state_.error != gpu::error::kNoError)
@@ -729,7 +786,21 @@ void CommandBufferProxyImpl::TryUpdateState() {
}
}
+void CommandBufferProxyImpl::TryUpdateStateThreadSafe() {
+ last_state_lock_.AssertAcquired();
+ if (last_state_.error == gpu::error::kNoError) {
+ shared_state()->Read(&last_state_);
+ if (last_state_.error != gpu::error::kNoError) {
+ callback_thread_->PostTask(
+ FROM_HERE,
+ base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel,
+ weak_this_));
+ }
+ }
+}
+
void CommandBufferProxyImpl::TryUpdateStateDontReportError() {
+ last_state_lock_.AssertAcquired();
if (last_state_.error == gpu::error::kNoError)
shared_state()->Read(&last_state_);
}
@@ -800,6 +871,8 @@ void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase,
}
void CommandBufferProxyImpl::OnGpuSyncReplyError() {
+ CheckLock();
+ last_state_lock_.AssertAcquired();
last_state_.error = gpu::error::kLostContext;
last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage;
// This method may be inside a callstack from the GpuControlClient (we got a
@@ -812,15 +885,20 @@ void CommandBufferProxyImpl::OnGpuAsyncMessageError(
gpu::error::ContextLostReason reason,
gpu::error::Error error) {
CheckLock();
+ last_state_lock_.AssertAcquired();
last_state_.error = error;
last_state_.context_lost_reason = reason;
// This method only occurs when receiving IPC messages, so we know it's not in
- // a callstack from the GpuControlClient.
+ // a callstack from the GpuControlClient. Unlock the state lock to prevent
+ // a deadlock when calling the context loss callback.
+ base::AutoUnlock unlock(last_state_lock_);
DisconnectChannel();
}
void CommandBufferProxyImpl::OnGpuStateError() {
- DCHECK(last_state_.error != gpu::error::kNoError);
+ CheckLock();
+ last_state_lock_.AssertAcquired();
+ DCHECK_NE(gpu::error::kNoError, last_state_.error);
// This method may be inside a callstack from the GpuControlClient (we
// encountered an error while trying to perform some action). So avoid
// re-entering the GpuControlClient here.
@@ -829,6 +907,7 @@ void CommandBufferProxyImpl::OnGpuStateError() {
void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) {
CheckLock();
+ last_state_lock_.AssertAcquired();
last_state_.error = error;
last_state_.context_lost_reason = gpu::error::kUnknown;
// This method may be inside a callstack from the GpuControlClient (we
@@ -839,6 +918,7 @@ void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) {
void CommandBufferProxyImpl::DisconnectChannelInFreshCallStack() {
CheckLock();
+ last_state_lock_.AssertAcquired();
// Inform the GpuControlClient of the lost state immediately, though this may
// be a re-entrant call to the client so we use the MaybeReentrant variant.
if (gpu_control_client_)
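A pattern repeated through this file is the switch from std::unique_ptr<base::AutoLock> to base::Optional<base::AutoLock>: the guard is constructed in place only when |lock_| is set, with no heap allocation. A minimal sketch of the idiom, using std::optional and std::mutex as portable stand-ins for base::Optional and base::Lock:

    #include <mutex>
    #include <optional>

    // Conditionally scoped locking: engage the guard only when a mutex was
    // supplied; the optional's destructor releases it at end of scope.
    void OnMessage(std::mutex* maybe_lock) {
      std::optional<std::lock_guard<std::mutex>> lock;
      if (maybe_lock)
        lock.emplace(*maybe_lock);  // acquires here, releases at scope exit
      // ... handle the message, locked only if the caller provided a mutex ...
    }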
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
index 5c8b269982b..ea8f8bfcaeb 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
@@ -16,7 +16,6 @@
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/containers/hash_tables.h"
-#include "base/containers/scoped_ptr_hash_map.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -45,10 +44,6 @@ namespace base {
class SharedMemory;
}
-namespace gfx {
-class Size;
-}
-
namespace gpu {
struct GpuProcessHostedCALayerTreeParamsMac;
struct Mailbox;
@@ -100,11 +95,10 @@ class GPU_EXPORT CommandBufferProxyImpl
// CommandBuffer implementation:
State GetLastState() override;
- int32_t GetLastToken() override;
void Flush(int32_t put_offset) override;
void OrderingBarrier(int32_t put_offset) override;
- void WaitForTokenInRange(int32_t start, int32_t end) override;
- void WaitForGetOffsetInRange(int32_t start, int32_t end) override;
+ State WaitForTokenInRange(int32_t start, int32_t end) override;
+ State WaitForGetOffsetInRange(int32_t start, int32_t end) override;
void SetGetBuffer(int32_t shm_id) override;
scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
int32_t* id) override;
@@ -132,6 +126,7 @@ class GPU_EXPORT CommandBufferProxyImpl
bool IsFenceSyncRelease(uint64_t release) override;
bool IsFenceSyncFlushed(uint64_t release) override;
bool IsFenceSyncFlushReceived(uint64_t release) override;
+ bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) override;
bool CanWaitUnverifiedSyncToken(const gpu::SyncToken* sync_token) override;
@@ -161,12 +156,6 @@ class GPU_EXPORT CommandBufferProxyImpl
void SetUpdateVSyncParametersCallback(
const UpdateVSyncParametersCallback& callback);
- // TODO(apatrick): this is a temporary optimization while skia is calling
- // ContentGLContext::MakeCurrent prior to every GL call. It saves returning 6
- // ints redundantly when only the error is needed for the
- // CommandBufferProxyImpl implementation.
- gpu::error::Error GetLastError() override;
-
int32_t route_id() const { return route_id_; }
const scoped_refptr<GpuChannelHost>& channel() const { return channel_; }
@@ -215,6 +204,9 @@ class GPU_EXPORT CommandBufferProxyImpl
// Try to read an updated copy of the state from shared memory, and calls
// OnGpuStateError() if the new state has an error.
void TryUpdateState();
+ // Like above but calls the error handler and disconnects channel by posting
+ // a task.
+ void TryUpdateStateThreadSafe();
// Like the above but does not call the error event handler if the new state
// has an error.
void TryUpdateStateDontReportError();
@@ -244,6 +236,16 @@ class GPU_EXPORT CommandBufferProxyImpl
// The shared memory area used to update state.
gpu::CommandBufferSharedState* shared_state() const;
+ // The shared memory area used to update state.
+ std::unique_ptr<base::SharedMemory> shared_state_shm_;
+
+ // The last cached state received from the service.
+ State last_state_;
+
+ // Lock to access shared state e.g. sync token release count across multiple
+ // threads. This allows tracking command buffer progress from another thread.
+ base::Lock last_state_lock_;
+
// There should be a lock_ if this is going to be used across multiple
// threads, or we guarantee it is used by a single thread by using a thread
// checker if no lock_ is set.
@@ -256,12 +258,6 @@ class GPU_EXPORT CommandBufferProxyImpl
// Unowned list of DeletionObservers.
base::ObserverList<DeletionObserver> deletion_observers_;
- // The last cached state received from the service.
- State last_state_;
-
- // The shared memory area used to update state.
- std::unique_ptr<base::SharedMemory> shared_state_shm_;
-
scoped_refptr<GpuChannelHost> channel_;
const gpu::CommandBufferId command_buffer_id_;
const int32_t route_id_;
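With GetLastToken() and GetLastError() removed, the wait calls themselves return the post-wait state snapshot taken under last_state_lock_, so callers no longer wait and then race a second read of the state. A hypothetical caller sketch (names assumed, not from this patch) of the new contract:

    #include "gpu/command_buffer/common/command_buffer.h"

    // The wait returns a consistent State; errors are inspected on the
    // returned snapshot rather than via a separate GetLastError() call.
    gpu::CommandBuffer::State WaitAndCheck(gpu::CommandBuffer* command_buffer,
                                           int32_t start, int32_t end) {
      gpu::CommandBuffer::State state =
          command_buffer->WaitForTokenInRange(start, end);
      if (state.error != gpu::error::kNoError) {
        // Context was lost before or during the wait; the share group has
        // already been notified through the reentrant context-loss callback.
      }
      return state;
    }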
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.cc b/chromium/gpu/ipc/client/gpu_channel_host.cc
index b639d8ffa54..5e41fd9e274 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.cc
+++ b/chromium/gpu/ipc/client/gpu_channel_host.cc
@@ -220,7 +220,7 @@ void GpuChannelHost::RemoveRoute(int route_id) {
}
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
- base::SharedMemoryHandle source_handle) {
+ const base::SharedMemoryHandle& source_handle) {
if (IsLost())
return base::SharedMemory::NULLHandle();
@@ -232,52 +232,6 @@ int32_t GpuChannelHost::ReserveTransferBufferId() {
return g_next_transfer_buffer_id.GetNext() + 1;
}
-gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
- const gfx::GpuMemoryBufferHandle& source_handle,
- bool* requires_sync_point) {
- switch (source_handle.type) {
- case gfx::SHARED_MEMORY_BUFFER: {
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::SHARED_MEMORY_BUFFER;
- handle.handle = ShareToGpuProcess(source_handle.handle);
- handle.offset = source_handle.offset;
- handle.stride = source_handle.stride;
- *requires_sync_point = false;
- return handle;
- }
-#if defined(USE_OZONE)
- case gfx::OZONE_NATIVE_PIXMAP: {
- std::vector<base::ScopedFD> scoped_fds;
- for (auto& fd : source_handle.native_pixmap_handle.fds) {
- base::ScopedFD scoped_fd(HANDLE_EINTR(dup(fd.fd)));
- if (!scoped_fd.is_valid()) {
- PLOG(ERROR) << "dup";
- return gfx::GpuMemoryBufferHandle();
- }
- scoped_fds.emplace_back(std::move(scoped_fd));
- }
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::OZONE_NATIVE_PIXMAP;
- handle.id = source_handle.id;
- for (auto& scoped_fd : scoped_fds) {
- handle.native_pixmap_handle.fds.emplace_back(scoped_fd.release(),
- true /* auto_close */);
- }
- handle.native_pixmap_handle.planes =
- source_handle.native_pixmap_handle.planes;
- *requires_sync_point = false;
- return handle;
- }
-#endif
- case gfx::IO_SURFACE_BUFFER:
- *requires_sync_point = true;
- return source_handle;
- default:
- NOTREACHED();
- return gfx::GpuMemoryBufferHandle();
- }
-}
-
int32_t GpuChannelHost::ReserveImageId() {
return next_image_id_.GetNext();
}
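The deleted OZONE_NATIVE_PIXMAP branch dup()ed each descriptor into a base::ScopedFD before re-emitting auto-close FDs, so a failure partway through could not leak the descriptors already duplicated; per the command_buffer_proxy_impl.cc hunk, that cloning is now centralized in gfx::CloneHandleForIPC. A generic POSIX sketch (not Chromium code) of the duplicate-then-own pattern:

    #include <unistd.h>
    #include <cerrno>

    // RAII owner for a file descriptor, closed on destruction.
    class ScopedFd {
     public:
      explicit ScopedFd(int fd = -1) : fd_(fd) {}
      ScopedFd(ScopedFd&& other) noexcept : fd_(other.fd_) { other.fd_ = -1; }
      ~ScopedFd() { if (fd_ >= 0) close(fd_); }
      ScopedFd(const ScopedFd&) = delete;
      ScopedFd& operator=(const ScopedFd&) = delete;
      bool valid() const { return fd_ >= 0; }
      int get() const { return fd_; }
     private:
      int fd_;
    };

    // Duplicate a descriptor for sending over IPC, retrying on EINTR
    // (the role HANDLE_EINTR plays in the removed code). The caller keeps
    // the original; the duplicate is owned so error paths cannot leak it.
    ScopedFd DupForIpc(int source_fd) {
      int fd;
      do {
        fd = dup(source_fd);
      } while (fd < 0 && errno == EINTR);
      return ScopedFd(fd);  // invalid ScopedFd on failure
    }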
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.h b/chromium/gpu/ipc/client/gpu_channel_host.h
index 5acf6c79a5f..989f1df6a8d 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.h
+++ b/chromium/gpu/ipc/client/gpu_channel_host.h
@@ -13,7 +13,6 @@
#include <vector>
#include "base/atomic_sequence_num.h"
-#include "base/containers/scoped_ptr_hash_map.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -139,18 +138,11 @@ class GPU_EXPORT GpuChannelHost
// GPU process. The caller is responsible for ensuring it is closed. Returns
// an invalid handle on failure.
base::SharedMemoryHandle ShareToGpuProcess(
- base::SharedMemoryHandle source_handle);
+ const base::SharedMemoryHandle& source_handle);
// Reserve one unused transfer buffer ID.
int32_t ReserveTransferBufferId();
- // Returns a GPU memory buffer handle to the buffer that can be sent via
- // IPC to the GPU process. The caller is responsible for ensuring it is
- // closed. Returns an invalid handle on failure.
- gfx::GpuMemoryBufferHandle ShareGpuMemoryBufferToGpuProcess(
- const gfx::GpuMemoryBufferHandle& source_handle,
- bool* requires_sync_point);
-
// Reserve one unused image ID.
int32_t ReserveImageId();
diff --git a/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc b/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
index fdce1c024d4..7562908be44 100644
--- a/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
+++ b/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
@@ -12,6 +12,7 @@
#include "base/threading/thread_task_runner_handle.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "gpu/command_buffer/client/shared_memory_limits.h"
+#include "gpu/ipc/common/surface_handle.h"
#include "gpu/ipc/gl_in_process_context.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_surface.h"
@@ -32,16 +33,16 @@ class ContextTestBase : public testing::Test {
attributes.sample_buffers = 1;
attributes.bind_generates_resource = false;
- context_.reset(gpu::GLInProcessContext::Create(
- nullptr, /* service */
- nullptr, /* surface */
- true, /* offscreen */
- gfx::kNullAcceleratedWidget, /* window */
- nullptr, /* share_context */
- attributes, gpu::SharedMemoryLimits(),
- nullptr, /* gpu_memory_buffer_manager */
- nullptr, /* image_factory */
- base::ThreadTaskRunnerHandle::Get()));
+ context_.reset(
+ gpu::GLInProcessContext::Create(nullptr, /* service */
+ nullptr, /* surface */
+ true, /* offscreen */
+ gpu::kNullSurfaceHandle, /* window */
+ nullptr, /* share_context */
+ attributes, gpu::SharedMemoryLimits(),
+ nullptr, /* gpu_memory_buffer_manager */
+ nullptr, /* image_factory */
+ base::ThreadTaskRunnerHandle::Get()));
gl_ = context_->GetImplementation();
context_support_ = context_->GetImplementation();
}
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc
index 185e921f78a..a673cbf2555 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc
@@ -30,7 +30,8 @@ GpuMemoryBufferImpl::GpuMemoryBufferImpl(gfx::GpuMemoryBufferId id,
GpuMemoryBufferImpl::~GpuMemoryBufferImpl() {
DCHECK(!mapped_);
- callback_.Run(destruction_sync_token_);
+ if (!callback_.is_null())
+ callback_.Run(destruction_sync_token_);
}
// static
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl.h b/chromium/gpu/ipc/client/gpu_memory_buffer_impl.h
index a8dc56b7e3a..43e23e34bc5 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl.h
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl.h
@@ -24,9 +24,9 @@ class GPU_EXPORT GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
~GpuMemoryBufferImpl() override;
// Creates an instance from the given |handle|. |size| and |internalformat|
- // should match what was used to allocate the |handle|. |callback| is
- // called when instance is deleted, which is not necessarily on the same
- // thread as this function was called on and instance was created on.
+ // should match what was used to allocate the |handle|. |callback|, if
+ // non-null, is called when instance is deleted, which is not necessarily on
+ // the same thread as this function was called on and instance was created on.
static std::unique_ptr<GpuMemoryBufferImpl> CreateFromHandle(
const gfx::GpuMemoryBufferHandle& handle,
const gfx::Size& size,
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc
index 8e84b7dd73f..e85f23be46d 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc
@@ -47,22 +47,26 @@ GpuMemoryBufferImplOzoneNativePixmap::CreateFromHandle(
gfx::BufferFormat format,
gfx::BufferUsage usage,
const DestructionCallback& callback) {
- DCHECK_EQ(handle.native_pixmap_handle.fds.size(), 1u);
+ DCHECK_LE(handle.native_pixmap_handle.fds.size(), 1u);
// GpuMemoryBufferImpl needs the FD to implement GetHandle() but
// ui::ClientNativePixmapFactory::ImportFromHandle is expected to take
// ownership of the FD passed in the handle so we have to dup it here in
// order to pass a valid FD to the GpuMemoryBufferImpl ctor.
- base::ScopedFD scoped_fd(
- HANDLE_EINTR(dup(handle.native_pixmap_handle.fds[0].fd)));
- if (!scoped_fd.is_valid()) {
- PLOG(ERROR) << "dup";
- return nullptr;
+ base::ScopedFD scoped_fd;
+ if (!handle.native_pixmap_handle.fds.empty()) {
+ scoped_fd.reset(HANDLE_EINTR(dup(handle.native_pixmap_handle.fds[0].fd)));
+ if (!scoped_fd.is_valid()) {
+ PLOG(ERROR) << "dup";
+ return nullptr;
+ }
}
gfx::NativePixmapHandle native_pixmap_handle;
- native_pixmap_handle.fds.emplace_back(handle.native_pixmap_handle.fds[0].fd,
- true /* auto_close */);
+ if (scoped_fd.is_valid()) {
+ native_pixmap_handle.fds.emplace_back(handle.native_pixmap_handle.fds[0].fd,
+ true /* auto_close */);
+ }
native_pixmap_handle.planes = handle.native_pixmap_handle.planes;
std::unique_ptr<ui::ClientNativePixmap> native_pixmap =
ui::ClientNativePixmapFactory::GetInstance()->ImportFromHandle(
@@ -125,8 +129,10 @@ gfx::GpuMemoryBufferHandle GpuMemoryBufferImplOzoneNativePixmap::GetHandle()
gfx::GpuMemoryBufferHandle handle;
handle.type = gfx::OZONE_NATIVE_PIXMAP;
handle.id = id_;
- handle.native_pixmap_handle.fds.emplace_back(fd_.get(),
- false /* auto_close */);
+ if (fd_.is_valid()) {
+ handle.native_pixmap_handle.fds.emplace_back(fd_.get(),
+ false /* auto_close */);
+ }
handle.native_pixmap_handle.planes = planes_;
return handle;
}
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
index 910cbe9b4a2..9c2e1bc60c6 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
@@ -15,11 +15,6 @@
#include "ui/gl/gl_bindings.h"
namespace gpu {
-namespace {
-
-void Noop() {}
-
-} // namespace
GpuMemoryBufferImplSharedMemory::GpuMemoryBufferImplSharedMemory(
gfx::GpuMemoryBufferId id,
@@ -59,7 +54,7 @@ GpuMemoryBufferImplSharedMemory::Create(gfx::GpuMemoryBufferId id,
// static
gfx::GpuMemoryBufferHandle
-GpuMemoryBufferImplSharedMemory::AllocateForChildProcess(
+GpuMemoryBufferImplSharedMemory::CreateGpuMemoryBuffer(
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format) {
@@ -164,16 +159,8 @@ base::Closure GpuMemoryBufferImplSharedMemory::AllocateForTesting(
gfx::BufferFormat format,
gfx::BufferUsage usage,
gfx::GpuMemoryBufferHandle* handle) {
- base::SharedMemory shared_memory;
- bool rv = shared_memory.CreateAnonymous(
- gfx::BufferSizeForBufferFormat(size, format));
- DCHECK(rv);
- handle->type = gfx::SHARED_MEMORY_BUFFER;
- handle->offset = 0;
- handle->stride = static_cast<int32_t>(
- gfx::RowSizeForBufferFormat(size.width(), format, 0));
- handle->handle = base::SharedMemory::DuplicateHandle(shared_memory.handle());
- return base::Bind(&Noop);
+ *handle = CreateGpuMemoryBuffer(handle->id, size, format);
+ return base::Bind(&base::DoNothing);
}
bool GpuMemoryBufferImplSharedMemory::Map() {
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h
index b991f9572fa..243d3d5beaf 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h
@@ -26,7 +26,7 @@ class GPU_EXPORT GpuMemoryBufferImplSharedMemory : public GpuMemoryBufferImpl {
gfx::BufferFormat format,
const DestructionCallback& callback);
- static gfx::GpuMemoryBufferHandle AllocateForChildProcess(
+ static gfx::GpuMemoryBufferHandle CreateGpuMemoryBuffer(
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format);
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h
index 1cab70de963..fb27bb8653e 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h
@@ -22,7 +22,7 @@ namespace gpu {
template <typename GpuMemoryBufferImplType>
class GpuMemoryBufferImplTest : public testing::Test {
public:
- GpuMemoryBufferImpl::DestructionCallback AllocateGpuMemoryBuffer(
+ GpuMemoryBufferImpl::DestructionCallback CreateGpuMemoryBuffer(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
@@ -62,8 +62,8 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, CreateFromHandle) {
bool destroyed = false;
gfx::GpuMemoryBufferHandle handle;
GpuMemoryBufferImpl::DestructionCallback destroy_callback =
- TestFixture::AllocateGpuMemoryBuffer(kBufferSize, format, usage,
- &handle, &destroyed);
+ TestFixture::CreateGpuMemoryBuffer(kBufferSize, format, usage,
+ &handle, &destroyed);
std::unique_ptr<TypeParam> buffer(TypeParam::CreateFromHandle(
handle, kBufferSize, format, usage, destroy_callback));
ASSERT_TRUE(buffer);
@@ -88,7 +88,7 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, Map) {
gfx::GpuMemoryBufferHandle handle;
GpuMemoryBufferImpl::DestructionCallback destroy_callback =
- TestFixture::AllocateGpuMemoryBuffer(
+ TestFixture::CreateGpuMemoryBuffer(
kBufferSize, format, gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
&handle, nullptr);
std::unique_ptr<TypeParam> buffer(TypeParam::CreateFromHandle(
@@ -138,7 +138,7 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, PersistentMap) {
gfx::GpuMemoryBufferHandle handle;
GpuMemoryBufferImpl::DestructionCallback destroy_callback =
- TestFixture::AllocateGpuMemoryBuffer(
+ TestFixture::CreateGpuMemoryBuffer(
kBufferSize, format,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT, &handle,
nullptr);
diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn
index 73136d7258c..396041edf39 100644
--- a/chromium/gpu/ipc/common/BUILD.gn
+++ b/chromium/gpu/ipc/common/BUILD.gn
@@ -106,8 +106,6 @@ source_set("ipc_common_sources") {
sources += [
"android/scoped_surface_request_conduit.cc",
"android/scoped_surface_request_conduit.h",
- "android/surface_texture_manager.cc",
- "android/surface_texture_manager.h",
"android/surface_texture_peer.cc",
"android/surface_texture_peer.h",
]
@@ -143,6 +141,7 @@ mojom("interfaces") {
"capabilities.mojom",
"dx_diag_node.mojom",
"gpu_info.mojom",
+ "gpu_preferences.mojom",
"mailbox.mojom",
"mailbox_holder.mojom",
"surface_handle.mojom",
diff --git a/chromium/gpu/ipc/common/android/surface_texture_manager.cc b/chromium/gpu/ipc/common/android/surface_texture_manager.cc
deleted file mode 100644
index 22b27d03fb1..00000000000
--- a/chromium/gpu/ipc/common/android/surface_texture_manager.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/common/android/surface_texture_manager.h"
-
-#include "base/logging.h"
-
-namespace gpu {
-namespace {
-
-SurfaceTextureManager* g_instance = nullptr;
-
-} // namespace
-
-// static
-SurfaceTextureManager* SurfaceTextureManager::GetInstance() {
- DCHECK(g_instance);
- return g_instance;
-}
-
-// static
-void SurfaceTextureManager::SetInstance(SurfaceTextureManager* instance) {
- DCHECK(!g_instance || !instance);
- g_instance = instance;
-}
-
-} // namespace gpu
diff --git a/chromium/gpu/ipc/common/android/surface_texture_manager.h b/chromium/gpu/ipc/common/android/surface_texture_manager.h
deleted file mode 100644
index 8a0134b6804..00000000000
--- a/chromium/gpu/ipc/common/android/surface_texture_manager.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_IPC_COMMON_ANDROID_SURFACE_TEXTURE_MANAGER_H_
-#define GPU_IPC_COMMON_ANDROID_SURFACE_TEXTURE_MANAGER_H_
-
-#include "gpu/gpu_export.h"
-#include "ui/gfx/native_widget_types.h"
-
-namespace gl {
-class SurfaceTexture;
-}
-
-namespace gpu {
-
-class GPU_EXPORT SurfaceTextureManager {
- public:
- static SurfaceTextureManager* GetInstance();
- static void SetInstance(SurfaceTextureManager* instance);
-
- // Register a surface texture for use in another process.
- virtual void RegisterSurfaceTexture(int surface_texture_id,
- int client_id,
- gl::SurfaceTexture* surface_texture) = 0;
-
- // Unregister a surface texture previously registered for use in another
- // process.
- virtual void UnregisterSurfaceTexture(int surface_texture_id,
- int client_id) = 0;
-
- // Acquire native widget for a registered surface texture.
- virtual gfx::AcceleratedWidget AcquireNativeWidgetForSurfaceTexture(
- int surface_texture_id) = 0;
-
- protected:
- virtual ~SurfaceTextureManager() {}
-};
-
-} // namespace gpu
-
-#endif // GPU_IPC_COMMON_ANDROID_SURFACE_TEXTURE_MANAGER_H_
diff --git a/chromium/gpu/ipc/common/gpu_info.mojom b/chromium/gpu/ipc/common/gpu_info.mojom
index 2dc02806687..ef65b579e28 100644
--- a/chromium/gpu/ipc/common/gpu_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_info.mojom
@@ -6,7 +6,8 @@
module gpu.mojom;
import "gpu/ipc/common/dx_diag_node.mojom";
-import "mojo/common/common_custom_types.mojom";
+import "mojo/common/time.mojom";
+import "mojo/common/version.mojom";
import "ui/gfx/geometry/mojo/geometry.mojom";
// gpu::GPUInfo::GPUDevice
@@ -60,6 +61,7 @@ struct VideoDecodeAcceleratorSupportedProfile {
// gpu::VideoDecodeAcceleratorCapabilities
struct VideoDecodeAcceleratorCapabilities {
+ array<VideoDecodeAcceleratorSupportedProfile> supported_profiles;
uint32 flags;
};
@@ -102,6 +104,7 @@ struct GpuInfo {
bool sandboxed;
int32 process_crash_count;
bool in_process_gpu;
+ bool passthrough_cmd_decoder;
CollectInfoResult basic_info_state;
CollectInfoResult context_info_state;
CollectInfoResult dx_diagnostics_info_state;
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
index 6c2bd82051f..b132575c9d1 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
@@ -197,6 +197,8 @@ bool StructTraits<gpu::mojom::VideoDecodeAcceleratorCapabilitiesDataView,
gpu::VideoDecodeAcceleratorCapabilities>::
Read(gpu::mojom::VideoDecodeAcceleratorCapabilitiesDataView data,
gpu::VideoDecodeAcceleratorCapabilities* out) {
+ if (!data.ReadSupportedProfiles(&out->supported_profiles))
+ return false;
out->flags = data.flags();
return true;
}
@@ -224,6 +226,7 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
out->direct_rendering = data.direct_rendering();
out->sandboxed = data.sandboxed();
out->in_process_gpu = data.in_process_gpu();
+ out->passthrough_cmd_decoder = data.passthrough_cmd_decoder();
out->process_crash_count = data.process_crash_count();
out->jpeg_decode_accelerator_supported =
data.jpeg_decode_accelerator_supported();
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.h b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
index ea76e0e279e..a6b9527a080 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
@@ -96,6 +96,11 @@ struct StructTraits<gpu::mojom::VideoDecodeAcceleratorCapabilitiesDataView,
static uint32_t flags(const gpu::VideoDecodeAcceleratorCapabilities& input) {
return input.flags;
}
+
+ static std::vector<gpu::VideoDecodeAcceleratorSupportedProfile>
+ supported_profiles(const gpu::VideoDecodeAcceleratorCapabilities& input) {
+ return input.supported_profiles;
+ }
};
template <>
@@ -243,6 +248,10 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
return input.in_process_gpu;
}
+ static bool passthrough_cmd_decoder(const gpu::GPUInfo& input) {
+ return input.passthrough_cmd_decoder;
+ }
+
static gpu::CollectInfoResult basic_info_state(const gpu::GPUInfo& input) {
return input.basic_info_state;
}
diff --git a/chromium/gpu/ipc/common/gpu_param_traits_macros.h b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
index e6409d2b5a8..64248121f4d 100644
--- a/chromium/gpu/ipc/common/gpu_param_traits_macros.h
+++ b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
@@ -91,6 +91,7 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::GPUInfo)
IPC_STRUCT_TRAITS_MEMBER(sandboxed)
IPC_STRUCT_TRAITS_MEMBER(process_crash_count)
IPC_STRUCT_TRAITS_MEMBER(in_process_gpu)
+ IPC_STRUCT_TRAITS_MEMBER(passthrough_cmd_decoder)
IPC_STRUCT_TRAITS_MEMBER(basic_info_state)
IPC_STRUCT_TRAITS_MEMBER(context_info_state)
#if defined(OS_WIN)
diff --git a/chromium/gpu/ipc/common/gpu_preferences.mojom b/chromium/gpu/ipc/common/gpu_preferences.mojom
new file mode 100644
index 00000000000..e1565ec7d49
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_preferences.mojom
@@ -0,0 +1,53 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// gpu/command_buffer/service/gpu_preferences.h
+module gpu.mojom;
+
+// gpu::GpuPreferences::VpxDecodeVendors
+enum VpxDecodeVendors {
+ VPX_VENDOR_NONE = 0,
+ VPX_VENDOR_MICROSOFT = 1,
+ VPX_VENDOR_AMD = 2,
+ VPX_VENDOR_ALL = 3,
+};
+
+// gpu::GpuPreferences
+struct GpuPreferences {
+ bool single_process;
+ bool in_process_gpu;
+ bool ui_prioritize_in_gpu_process;
+ bool disable_accelerated_video_decode;
+
+ bool disable_vaapi_accelerated_video_encode;
+
+ bool disable_web_rtc_hw_encoding;
+
+ VpxDecodeVendors enable_accelerated_vpx_decode;
+ bool enable_low_latency_dxva;
+ bool enable_zero_copy_dxgi_video;
+ bool enable_nv12_dxgi_video;
+
+ bool compile_shader_always_succeeds;
+ bool disable_gl_error_limit;
+ bool disable_glsl_translator;
+ bool disable_gpu_driver_bug_workarounds;
+ bool disable_shader_name_hashing;
+ bool enable_gpu_command_logging;
+ bool enable_gpu_debugging;
+ bool enable_gpu_service_logging_gpu;
+ bool enable_gpu_driver_debug_logging;
+ bool disable_gpu_program_cache;
+ bool enforce_gl_minimums;
+ uint32 force_gpu_mem_available;
+ uint32 gpu_program_cache_size;
+ bool disable_gpu_shader_disk_cache;
+ bool enable_threaded_texture_mailboxes;
+ bool gl_shader_interm_output;
+ bool emulate_shader_precision;
+ bool enable_gpu_service_logging;
+ bool enable_gpu_service_tracing;
+ bool enable_es3_apis;
+ bool use_passthrough_cmd_decoder;
+};
diff --git a/chromium/gpu/ipc/common/gpu_preferences.typemap b/chromium/gpu/ipc/common/gpu_preferences.typemap
new file mode 100644
index 00000000000..0dfa9025eac
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_preferences.typemap
@@ -0,0 +1,16 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+mojom = "//gpu/ipc/common/gpu_preferences.mojom"
+public_headers = [ "//gpu/command_buffer/service/gpu_preferences.h" ]
+traits_headers = [ "//gpu/ipc/common/gpu_preferences_struct_traits.h" ]
+public_deps = [
+ "//gpu/command_buffer/service",
+ "//media:media_features",
+ "//mojo/common:struct_traits",
+]
+type_mappings = [
+ "gpu.mojom.GpuPreferences=gpu::GpuPreferences",
+ "gpu.mojom.VpxDecodeVendors=gpu::GpuPreferences::VpxDecodeVendors",
+]
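The typemap above is what lets gpu.mojom.GpuPreferences travel over Mojo as gpu::GpuPreferences, via the traits header that follows. A hedged sketch of the usual round-trip test for such a mapping (compare struct_traits_unittest.cc in the diffstat; assumes mojo's mojo::test::SerializeAndDeserialize test helper):

    #include "gpu/command_buffer/service/gpu_preferences.h"
    #include "gpu/ipc/common/gpu_preferences.mojom.h"
    #include "mojo/public/cpp/test_support/test_utils.h"
    #include "testing/gtest/include/gtest/gtest.h"

    // Serialize a gpu::GpuPreferences through the mojom wire format and
    // back, then check that fields survive the round trip.
    TEST(GpuPreferencesStructTraits, RoundTrip) {
      gpu::GpuPreferences input;
      input.enable_es3_apis = true;
      input.gpu_program_cache_size = 16 * 1024 * 1024;

      gpu::GpuPreferences output;
      ASSERT_TRUE(
          mojo::test::SerializeAndDeserialize<gpu::mojom::GpuPreferences>(
              &input, &output));
      EXPECT_TRUE(output.enable_es3_apis);
      EXPECT_EQ(16u * 1024 * 1024, output.gpu_program_cache_size);
    }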
diff --git a/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
new file mode 100644
index 00000000000..0f2a121e08e
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
@@ -0,0 +1,238 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_GPU_PREFERENCES_STRUCT_TRAITS_H_
+#define GPU_IPC_COMMON_GPU_PREFERENCES_STRUCT_TRAITS_H_
+
+#include "gpu/ipc/common/gpu_preferences.mojom.h"
+
+namespace mojo {
+
+template <>
+struct EnumTraits<gpu::mojom::VpxDecodeVendors,
+ gpu::GpuPreferences::VpxDecodeVendors> {
+ static gpu::mojom::VpxDecodeVendors ToMojom(
+ gpu::GpuPreferences::VpxDecodeVendors vpx) {
+ switch (vpx) {
+ case gpu::GpuPreferences::VPX_VENDOR_NONE:
+ return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_NONE;
+ case gpu::GpuPreferences::VPX_VENDOR_MICROSOFT:
+ return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_MICROSOFT;
+ case gpu::GpuPreferences::VPX_VENDOR_AMD:
+ return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_AMD;
+ case gpu::GpuPreferences::VPX_VENDOR_ALL:
+ return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_ALL;
+ }
+ NOTREACHED();
+ return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_NONE;
+ }
+
+ static bool FromMojom(gpu::mojom::VpxDecodeVendors input,
+ gpu::GpuPreferences::VpxDecodeVendors* out) {
+ switch (input) {
+ case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_NONE:
+ *out = gpu::GpuPreferences::VPX_VENDOR_NONE;
+ return true;
+ case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_MICROSOFT:
+ *out = gpu::GpuPreferences::VPX_VENDOR_MICROSOFT;
+ return true;
+ case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_AMD:
+ *out = gpu::GpuPreferences::VPX_VENDOR_AMD;
+ return true;
+ case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_ALL:
+ *out = gpu::GpuPreferences::VPX_VENDOR_ALL;
+ return true;
+ }
+ return false;
+ }
+};
+
+template <>
+struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
+ static bool Read(gpu::mojom::GpuPreferencesDataView prefs,
+ gpu::GpuPreferences* out) {
+ out->single_process = prefs.single_process();
+ out->in_process_gpu = prefs.in_process_gpu();
+ out->ui_prioritize_in_gpu_process = prefs.ui_prioritize_in_gpu_process();
+ out->disable_accelerated_video_decode =
+ prefs.disable_accelerated_video_decode();
+
+#if defined(OS_CHROMEOS)
+ out->disable_vaapi_accelerated_video_encode =
+ prefs.disable_vaapi_accelerated_video_encode();
+#endif
+
+#if BUILDFLAG(ENABLE_WEBRTC)
+ out->disable_web_rtc_hw_encoding = prefs.disable_web_rtc_hw_encoding();
+#endif
+
+#if defined(OS_WIN)
+ if (!prefs.ReadEnableAcceleratedVpxDecode(
+ &out->enable_accelerated_vpx_decode))
+ return false;
+ out->enable_low_latency_dxva = prefs.enable_low_latency_dxva();
+ out->enable_zero_copy_dxgi_video = prefs.enable_zero_copy_dxgi_video();
+ out->enable_nv12_dxgi_video = prefs.enable_nv12_dxgi_video();
+#endif
+
+ out->compile_shader_always_succeeds =
+ prefs.compile_shader_always_succeeds();
+ out->disable_gl_error_limit = prefs.disable_gl_error_limit();
+ out->disable_glsl_translator = prefs.disable_glsl_translator();
+ out->disable_gpu_driver_bug_workarounds =
+ prefs.disable_gpu_driver_bug_workarounds();
+ out->disable_shader_name_hashing = prefs.disable_shader_name_hashing();
+ out->enable_gpu_command_logging = prefs.enable_gpu_command_logging();
+ out->enable_gpu_debugging = prefs.enable_gpu_debugging();
+ out->enable_gpu_service_logging_gpu =
+ prefs.enable_gpu_service_logging_gpu();
+ out->enable_gpu_driver_debug_logging =
+ prefs.enable_gpu_driver_debug_logging();
+ out->disable_gpu_program_cache = prefs.disable_gpu_program_cache();
+ out->enforce_gl_minimums = prefs.enforce_gl_minimums();
+ out->force_gpu_mem_available = prefs.force_gpu_mem_available();
+ out->gpu_program_cache_size = prefs.gpu_program_cache_size();
+ out->disable_gpu_shader_disk_cache = prefs.disable_gpu_shader_disk_cache();
+ out->enable_threaded_texture_mailboxes =
+ prefs.enable_threaded_texture_mailboxes();
+ out->gl_shader_interm_output = prefs.gl_shader_interm_output();
+ out->emulate_shader_precision = prefs.emulate_shader_precision();
+ out->enable_gpu_service_logging = prefs.enable_gpu_service_logging();
+ out->enable_gpu_service_tracing = prefs.enable_gpu_service_tracing();
+ out->enable_es3_apis = prefs.enable_es3_apis();
+ out->use_passthrough_cmd_decoder = prefs.use_passthrough_cmd_decoder();
+ return true;
+ }
+
+ static bool single_process(const gpu::GpuPreferences& prefs) {
+ return prefs.single_process;
+ }
+ static bool in_process_gpu(const gpu::GpuPreferences& prefs) {
+ return prefs.in_process_gpu;
+ }
+ static bool ui_prioritize_in_gpu_process(const gpu::GpuPreferences& prefs) {
+ return prefs.ui_prioritize_in_gpu_process;
+ }
+ static bool disable_accelerated_video_decode(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.disable_accelerated_video_decode;
+ }
+
+ static bool disable_vaapi_accelerated_video_encode(
+ const gpu::GpuPreferences& prefs) {
+#if defined(OS_CHROMEOS)
+ return prefs.disable_vaapi_accelerated_video_encode;
+#else
+ return false;
+#endif
+ }
+
+ static bool disable_web_rtc_hw_encoding(const gpu::GpuPreferences& prefs) {
+#if BUILDFLAG(ENABLE_WEBRTC)
+ return prefs.disable_web_rtc_hw_encoding;
+#else
+ return false;
+#endif
+ }
+
+ static gpu::GpuPreferences::VpxDecodeVendors enable_accelerated_vpx_decode(
+ const gpu::GpuPreferences& prefs) {
+#if defined(OS_WIN)
+ return prefs.enable_accelerated_vpx_decode;
+#else
+ return gpu::GpuPreferences::VPX_VENDOR_MICROSOFT;
+#endif
+ }
+ static bool enable_low_latency_dxva(const gpu::GpuPreferences& prefs) {
+#if defined(OS_WIN)
+ return prefs.enable_low_latency_dxva;
+#else
+ return false;
+#endif
+ }
+ static bool enable_zero_copy_dxgi_video(const gpu::GpuPreferences& prefs) {
+#if defined(OS_WIN)
+ return prefs.enable_zero_copy_dxgi_video;
+#else
+ return false;
+#endif
+ }
+ static bool enable_nv12_dxgi_video(const gpu::GpuPreferences& prefs) {
+#if defined(OS_WIN)
+ return prefs.enable_nv12_dxgi_video;
+#else
+ return false;
+#endif
+ }
+ static bool compile_shader_always_succeeds(const gpu::GpuPreferences& prefs) {
+ return prefs.compile_shader_always_succeeds;
+ }
+ static bool disable_gl_error_limit(const gpu::GpuPreferences& prefs) {
+ return prefs.disable_gl_error_limit;
+ }
+ static bool disable_glsl_translator(const gpu::GpuPreferences& prefs) {
+ return prefs.disable_glsl_translator;
+ }
+ static bool disable_gpu_driver_bug_workarounds(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.disable_gpu_driver_bug_workarounds;
+ }
+ static bool disable_shader_name_hashing(const gpu::GpuPreferences& prefs) {
+ return prefs.disable_shader_name_hashing;
+ }
+ static bool enable_gpu_command_logging(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_command_logging;
+ }
+ static bool enable_gpu_debugging(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_debugging;
+ }
+ static bool enable_gpu_service_logging_gpu(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_service_logging_gpu;
+ }
+ static bool enable_gpu_driver_debug_logging(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_driver_debug_logging;
+ }
+ static bool disable_gpu_program_cache(const gpu::GpuPreferences& prefs) {
+ return prefs.disable_gpu_program_cache;
+ }
+ static bool enforce_gl_minimums(const gpu::GpuPreferences& prefs) {
+ return prefs.enforce_gl_minimums;
+ }
+ static uint32_t force_gpu_mem_available(const gpu::GpuPreferences& prefs) {
+ return prefs.force_gpu_mem_available;
+ }
+ static uint32_t gpu_program_cache_size(const gpu::GpuPreferences& prefs) {
+ return prefs.gpu_program_cache_size;
+ }
+ static bool disable_gpu_shader_disk_cache(const gpu::GpuPreferences& prefs) {
+ return prefs.disable_gpu_shader_disk_cache;
+ }
+ static bool enable_threaded_texture_mailboxes(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.enable_threaded_texture_mailboxes;
+ }
+ static bool gl_shader_interm_output(const gpu::GpuPreferences& prefs) {
+ return prefs.gl_shader_interm_output;
+ }
+ static bool emulate_shader_precision(const gpu::GpuPreferences& prefs) {
+ return prefs.emulate_shader_precision;
+ }
+ static bool enable_gpu_service_logging(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_service_logging;
+ }
+ static bool enable_gpu_service_tracing(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_service_tracing;
+ }
+ static bool enable_es3_apis(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_es3_apis;
+ }
+ static bool use_passthrough_cmd_decoder(const gpu::GpuPreferences& prefs) {
+ return prefs.use_passthrough_cmd_decoder;
+ }
+};
+
+} // namespace mojo
+
+#endif // GPU_IPC_COMMON_GPU_PREFERENCES_STRUCT_TRAITS_H_
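A minimal round-trip sketch for the traits above, assuming the generated
gpu::mojom::GpuPreferences bindings and mojo's test_utils helper are linked in;
everything else mirrors the getters and Read() defined in this header.

    #include "gpu/ipc/common/gpu_preferences_struct_traits.h"
    #include "mojo/public/cpp/test_support/test_utils.h"

    bool RoundTripSingleProcess() {
      gpu::GpuPreferences input;
      input.single_process = true;
      gpu::GpuPreferences output;
      // Serializes |input| through the StructTraits getters, then rebuilds
      // |output| via StructTraits::Read().
      if (!mojo::test::SerializeAndDeserialize<gpu::mojom::GpuPreferences>(
              &input, &output))
        return false;
      return output.single_process;
    }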
diff --git a/chromium/gpu/ipc/common/gpu_surface_tracker.cc b/chromium/gpu/ipc/common/gpu_surface_tracker.cc
index fa02242491b..b2b8dacdaf7 100644
--- a/chromium/gpu/ipc/common/gpu_surface_tracker.cc
+++ b/chromium/gpu/ipc/common/gpu_surface_tracker.cc
@@ -64,12 +64,12 @@ gfx::AcceleratedWidget GpuSurfaceTracker::AcquireNativeWidget(
#if defined(OS_ANDROID)
void GpuSurfaceTracker::RegisterViewSurface(
- int surface_id, const base::android::JavaRef<jobject>& j_surface) {
+ int surface_id, jobject j_surface) {
base::AutoLock lock(surface_view_map_lock_);
DCHECK(surface_view_map_.find(surface_id) == surface_view_map_.end());
surface_view_map_[surface_id] =
- gl::ScopedJavaSurface::AcquireExternalSurface(j_surface.obj());
+ gl::ScopedJavaSurface::AcquireExternalSurface(j_surface);
CHECK(surface_view_map_[surface_id].IsValid());
}
diff --git a/chromium/gpu/ipc/common/gpu_surface_tracker.h b/chromium/gpu/ipc/common/gpu_surface_tracker.h
index b781ae27d76..6d372712521 100644
--- a/chromium/gpu/ipc/common/gpu_surface_tracker.h
+++ b/chromium/gpu/ipc/common/gpu_surface_tracker.h
@@ -23,10 +23,10 @@ namespace gpu {
// window surfaces exposed to the GPU process. Every surface gets registered to
// this class, and gets a handle. The handle can be passed to
// CommandBufferProxyImpl::Create or to
-// GpuMemoryBufferManager::AllocateGpuMemoryBuffer.
+// GpuMemoryBufferManager::CreateGpuMemoryBuffer.
// On Android, the handle is used in the GPU process to get a reference to the
// ANativeWindow, using GpuSurfaceLookup (implemented by
-// SurfaceTextureManagerImpl).
+// ChildProcessSurfaceManager).
// On Mac, the handle just passes through the GPU process, and is sent back via
// GpuCommandBufferMsg_SwapBuffersCompleted to reference the surface.
// This class is thread safe.
@@ -39,8 +39,7 @@ class GPU_EXPORT GpuSurfaceTracker : public gpu::GpuSurfaceLookup {
gpu::SurfaceHandle surface_handle) override;
#if defined(OS_ANDROID)
- void RegisterViewSurface(int surface_id,
- const base::android::JavaRef<jobject>& j_surface);
+ void RegisterViewSurface(int surface_id, jobject j_surface);
void UnregisterViewSurface(int surface_id);
gl::ScopedJavaSurface AcquireJavaSurface(int surface_id) override;
#endif
diff --git a/chromium/gpu/ipc/common/struct_traits_unittest.cc b/chromium/gpu/ipc/common/struct_traits_unittest.cc
index 70ddbc0ad15..2223921dfbf 100644
--- a/chromium/gpu/ipc/common/struct_traits_unittest.cc
+++ b/chromium/gpu/ipc/common/struct_traits_unittest.cc
@@ -74,6 +74,11 @@ class StructTraitsTest : public testing::Test, public mojom::TraitsTestService {
callback.Run(v);
}
+ void EchoGpuPreferences(const GpuPreferences& prefs,
+ const EchoGpuPreferencesCallback& callback) override {
+ callback.Run(prefs);
+ }
+
base::MessageLoop loop_;
mojo::BindingSet<TraitsTestService> traits_test_bindings_;
@@ -148,6 +153,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
const bool sandboxed = true;
const int process_crash_count = 0xdead;
const bool in_process_gpu = true;
+ const bool passthrough_cmd_decoder = true;
const gpu::CollectInfoResult basic_info_state =
gpu::CollectInfoResult::kCollectInfoSuccess;
const gpu::CollectInfoResult context_info_state =
@@ -197,6 +203,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
input.sandboxed = sandboxed;
input.process_crash_count = process_crash_count;
input.in_process_gpu = in_process_gpu;
+ input.passthrough_cmd_decoder = passthrough_cmd_decoder;
input.basic_info_state = basic_info_state;
input.context_info_state = context_info_state;
#if defined(OS_WIN)
@@ -259,6 +266,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
EXPECT_EQ(sandboxed, output.sandboxed);
EXPECT_EQ(process_crash_count, output.process_crash_count);
EXPECT_EQ(in_process_gpu, output.in_process_gpu);
+ EXPECT_EQ(passthrough_cmd_decoder, output.passthrough_cmd_decoder);
EXPECT_EQ(basic_info_state, output.basic_info_state);
EXPECT_EQ(context_info_state, output.context_info_state);
#if defined(OS_WIN)
@@ -386,11 +394,16 @@ TEST_F(StructTraitsTest, VideoDecodeAcceleratorCapabilities) {
gpu::VideoDecodeAcceleratorCapabilities input;
input.flags = flags;
+ input.supported_profiles.push_back(
+ gpu::VideoDecodeAcceleratorSupportedProfile());
+ input.supported_profiles.push_back(
+ gpu::VideoDecodeAcceleratorSupportedProfile());
mojom::TraitsTestServicePtr proxy = GetTraitsTestProxy();
gpu::VideoDecodeAcceleratorCapabilities output;
proxy->EchoVideoDecodeAcceleratorCapabilities(input, &output);
EXPECT_EQ(flags, output.flags);
+ EXPECT_EQ(input.supported_profiles.size(), output.supported_profiles.size());
}
TEST_F(StructTraitsTest, VideoEncodeAcceleratorSupportedProfile) {
@@ -415,4 +428,28 @@ TEST_F(StructTraitsTest, VideoEncodeAcceleratorSupportedProfile) {
EXPECT_EQ(max_framerate_denominator, output.max_framerate_denominator);
}
+TEST_F(StructTraitsTest, GpuPreferences) {
+ GpuPreferences prefs;
+ prefs.single_process = true;
+ prefs.in_process_gpu = true;
+ prefs.ui_prioritize_in_gpu_process = true;
+#if defined(OS_WIN)
+ const GpuPreferences::VpxDecodeVendors vendor =
+ GpuPreferences::VPX_VENDOR_AMD;
+ prefs.enable_accelerated_vpx_decode = vendor;
+#endif
+ prefs.enable_gpu_driver_debug_logging = true;
+
+ mojom::TraitsTestServicePtr proxy = GetTraitsTestProxy();
+ GpuPreferences echo;
+ proxy->EchoGpuPreferences(prefs, &echo);
+ EXPECT_TRUE(echo.single_process);
+ EXPECT_TRUE(echo.in_process_gpu);
+ EXPECT_TRUE(echo.ui_prioritize_in_gpu_process);
+ EXPECT_TRUE(echo.enable_gpu_driver_debug_logging);
+#if defined(OS_WIN)
+ EXPECT_EQ(vendor, echo.enable_accelerated_vpx_decode);
+#endif
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/common/traits_test_service.mojom b/chromium/gpu/ipc/common/traits_test_service.mojom
index bd7a961a8f9..f2d911d39d4 100644
--- a/chromium/gpu/ipc/common/traits_test_service.mojom
+++ b/chromium/gpu/ipc/common/traits_test_service.mojom
@@ -6,6 +6,7 @@ module gpu.mojom;
import "gpu/ipc/common/dx_diag_node.mojom";
import "gpu/ipc/common/gpu_info.mojom";
+import "gpu/ipc/common/gpu_preferences.mojom";
import "gpu/ipc/common/mailbox.mojom";
import "gpu/ipc/common/mailbox_holder.mojom";
import "gpu/ipc/common/sync_token.mojom";
@@ -46,4 +47,7 @@ interface TraitsTestService {
EchoVideoEncodeAcceleratorSupportedProfile(
VideoEncodeAcceleratorSupportedProfile v) =>
(VideoEncodeAcceleratorSupportedProfile pass);
+
+ [Sync]
+ EchoGpuPreferences(GpuPreferences prefs) => (GpuPreferences pass);
};
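The [Sync] attribute is what lets the unit test above treat the echo as a
blocking call. A hedged sketch of the two generated call shapes (the exact
signatures come from the mojom compiler):

    // Client side: the proxy gains a blocking overload that returns the
    // reply through an out-parameter.
    gpu::GpuPreferences echo;
    proxy->EchoGpuPreferences(prefs, &echo);

    // Service side: the implementation still receives a reply callback,
    // exactly as in StructTraitsTest::EchoGpuPreferences above.
    void EchoGpuPreferences(const GpuPreferences& prefs,
                            const EchoGpuPreferencesCallback& callback) {
      callback.Run(prefs);
    }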
diff --git a/chromium/gpu/ipc/common/typemaps.gni b/chromium/gpu/ipc/common/typemaps.gni
index 5f353e0a294..23acc1fd9f8 100644
--- a/chromium/gpu/ipc/common/typemaps.gni
+++ b/chromium/gpu/ipc/common/typemaps.gni
@@ -5,6 +5,7 @@
typemaps = [
"//gpu/ipc/common/capabilities.typemap",
"//gpu/ipc/common/gpu_info.typemap",
+ "//gpu/ipc/common/gpu_preferences.typemap",
"//gpu/ipc/common/dx_diag_node.typemap",
"//gpu/ipc/common/mailbox.typemap",
"//gpu/ipc/common/mailbox_holder.typemap",
diff --git a/chromium/gpu/ipc/gl_in_process_context.cc b/chromium/gpu/ipc/gl_in_process_context.cc
index 16a1cb16500..c854913758b 100644
--- a/chromium/gpu/ipc/gl_in_process_context.cc
+++ b/chromium/gpu/ipc/gl_in_process_context.cc
@@ -54,7 +54,7 @@ class GLInProcessContextImpl
bool Initialize(scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
GLInProcessContext* share_context,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
const gpu::gles2::ContextCreationAttribHelper& attribs,
const scoped_refptr<InProcessCommandBuffer::Service>& service,
const SharedMemoryLimits& mem_limits,
@@ -63,7 +63,14 @@ class GLInProcessContextImpl
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
// GLInProcessContext implementation:
+ gpu::Capabilities GetCapabilities() override;
gles2::GLES2Implementation* GetImplementation() override;
+ void SetSwapBuffersCompletionCallback(
+ const gpu::InProcessCommandBuffer::SwapBuffersCompletionCallback&
+ callback) override;
+ void SetUpdateVSyncParametersCallback(
+ const gpu::InProcessCommandBuffer::UpdateVSyncParametersCallback&
+ callback) override;
void SetLock(base::Lock* lock) override;
private:
@@ -84,10 +91,26 @@ GLInProcessContextImpl::~GLInProcessContextImpl() {
Destroy();
}
+Capabilities GLInProcessContextImpl::GetCapabilities() {
+ return command_buffer_->GetCapabilities();
+}
+
gles2::GLES2Implementation* GLInProcessContextImpl::GetImplementation() {
return gles2_implementation_.get();
}
+void GLInProcessContextImpl::SetSwapBuffersCompletionCallback(
+ const gpu::InProcessCommandBuffer::SwapBuffersCompletionCallback&
+ callback) {
+ command_buffer_->SetSwapBuffersCompletionCallback(callback);
+}
+
+void GLInProcessContextImpl::SetUpdateVSyncParametersCallback(
+ const gpu::InProcessCommandBuffer::UpdateVSyncParametersCallback&
+ callback) {
+ command_buffer_->SetUpdateVSyncParametersCallback(callback);
+}
+
void GLInProcessContextImpl::SetLock(base::Lock* lock) {
NOTREACHED();
}
@@ -96,7 +119,7 @@ bool GLInProcessContextImpl::Initialize(
scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
GLInProcessContext* share_context,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
const gles2::ContextCreationAttribHelper& attribs,
const scoped_refptr<InProcessCommandBuffer::Service>& service,
const SharedMemoryLimits& mem_limits,
@@ -183,7 +206,7 @@ GLInProcessContext* GLInProcessContext::Create(
scoped_refptr<gpu::InProcessCommandBuffer::Service> service,
scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
GLInProcessContext* share_context,
const ::gpu::gles2::ContextCreationAttribHelper& attribs,
const SharedMemoryLimits& memory_limits,
@@ -196,7 +219,7 @@ GLInProcessContext* GLInProcessContext::Create(
if (surface) {
DCHECK_EQ(surface->IsOffscreen(), is_offscreen);
- DCHECK_EQ(gfx::kNullAcceleratedWidget, window);
+ DCHECK_EQ(kNullSurfaceHandle, window);
}
std::unique_ptr<GLInProcessContextImpl> context(new GLInProcessContextImpl);
diff --git a/chromium/gpu/ipc/gl_in_process_context.h b/chromium/gpu/ipc/gl_in_process_context.h
index e03363f3aca..eef93398c24 100644
--- a/chromium/gpu/ipc/gl_in_process_context.h
+++ b/chromium/gpu/ipc/gl_in_process_context.h
@@ -17,17 +17,8 @@
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_preference.h"
-namespace gfx {
-class Size;
-}
-
-#if defined(OS_ANDROID)
-namespace gl {
-class SurfaceTexture;
-}
-#endif
-
namespace gpu {
+class InProcessCommandBuffer;
struct SharedMemoryLimits;
namespace gles2 {
@@ -50,7 +41,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GLInProcessContext {
scoped_refptr<gpu::InProcessCommandBuffer::Service> service,
scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
GLInProcessContext* share_context,
const gpu::gles2::ContextCreationAttribHelper& attribs,
const SharedMemoryLimits& memory_limits,
@@ -58,11 +49,21 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GLInProcessContext {
ImageFactory* image_factory,
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
+ virtual gpu::Capabilities GetCapabilities() = 0;
+
// Allows direct access to the GLES2 implementation so a GLInProcessContext
// can be used without making it current.
virtual gles2::GLES2Implementation* GetImplementation() = 0;
virtual void SetLock(base::Lock* lock) = 0;
+
+ virtual void SetSwapBuffersCompletionCallback(
+ const gpu::InProcessCommandBuffer::SwapBuffersCompletionCallback&
+ callback) = 0;
+
+ virtual void SetUpdateVSyncParametersCallback(
+ const gpu::InProcessCommandBuffer::UpdateVSyncParametersCallback&
+ callback) = 0;
};
} // namespace gpu
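A usage sketch for the new virtuals; |context| is assumed to be a live
GLInProcessContext, and the callbacks arrive as parameters so no signature
beyond the InProcessCommandBuffer typedefs is assumed.

    void AttachDisplayCallbacks(
        gpu::GLInProcessContext* context,
        const gpu::InProcessCommandBuffer::SwapBuffersCompletionCallback&
            on_swap,
        const gpu::InProcessCommandBuffer::UpdateVSyncParametersCallback&
            on_vsync) {
      // Capabilities are forwarded from the underlying command buffer.
      gpu::Capabilities caps = context->GetCapabilities();
      (void)caps;
      // Both setters are plumbed straight through to the in-process command
      // buffer, as the .cc changes above show.
      context->SetSwapBuffersCompletionCallback(on_swap);
      context->SetUpdateVSyncParametersCallback(on_vsync);
    }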
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.cc b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
new file mode 100644
index 00000000000..b21b864e3b1
--- /dev/null
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
@@ -0,0 +1,69 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/gpu_in_process_thread_service.h"
+
+#include "base/lazy_instance.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace gpu {
+
+GpuInProcessThreadService::GpuInProcessThreadService(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ gpu::SyncPointManager* sync_point_manager,
+ gpu::gles2::MailboxManager* mailbox_manager,
+ scoped_refptr<gl::GLShareGroup> share_group)
+ : gpu::InProcessCommandBuffer::Service(mailbox_manager, share_group),
+ task_runner_(task_runner),
+ sync_point_manager_(sync_point_manager) {}
+
+void GpuInProcessThreadService::ScheduleTask(const base::Closure& task) {
+ task_runner_->PostTask(FROM_HERE, task);
+}
+
+void GpuInProcessThreadService::ScheduleDelayedWork(const base::Closure& task) {
+ task_runner_->PostDelayedTask(FROM_HERE, task,
+ base::TimeDelta::FromMilliseconds(2));
+}
+
+bool GpuInProcessThreadService::UseVirtualizedGLContexts() {
+ return true;
+}
+
+scoped_refptr<gpu::gles2::ShaderTranslatorCache>
+GpuInProcessThreadService::shader_translator_cache() {
+ if (!shader_translator_cache_) {
+ shader_translator_cache_ = make_scoped_refptr(
+ new gpu::gles2::ShaderTranslatorCache(gpu_preferences()));
+ }
+ return shader_translator_cache_;
+}
+
+scoped_refptr<gpu::gles2::FramebufferCompletenessCache>
+GpuInProcessThreadService::framebuffer_completeness_cache() {
+ if (!framebuffer_completeness_cache_.get()) {
+ framebuffer_completeness_cache_ =
+ make_scoped_refptr(new gpu::gles2::FramebufferCompletenessCache);
+ }
+ return framebuffer_completeness_cache_;
+}
+
+gpu::SyncPointManager* GpuInProcessThreadService::sync_point_manager() {
+ return sync_point_manager_;
+}
+
+void GpuInProcessThreadService::AddRef() const {
+ base::RefCountedThreadSafe<GpuInProcessThreadService>::AddRef();
+}
+
+void GpuInProcessThreadService::Release() const {
+ base::RefCountedThreadSafe<GpuInProcessThreadService>::Release();
+}
+
+bool GpuInProcessThreadService::BlockThreadOnWaitSyncToken() const {
+ return false;
+}
+
+GpuInProcessThreadService::~GpuInProcessThreadService() {}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.h b/chromium/gpu/ipc/gpu_in_process_thread_service.h
new file mode 100644
index 00000000000..413592f8117
--- /dev/null
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.h
@@ -0,0 +1,58 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_GPU_IN_PROCESS_THREAD_SERVICE_H_
+#define GPU_IPC_GPU_IN_PROCESS_THREAD_SERVICE_H_
+
+#include "base/compiler_specific.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/gpu_export.h"
+#include "gpu/ipc/in_process_command_buffer.h"
+#include "ui/gl/gl_share_group.h"
+
+namespace gpu {
+
+// Default Service class when no service is specified. GpuInProcessThreadService
+// is used by Mus and unit tests.
+class GPU_EXPORT GpuInProcessThreadService
+ : public NON_EXPORTED_BASE(gpu::InProcessCommandBuffer::Service),
+ public base::RefCountedThreadSafe<GpuInProcessThreadService> {
+ public:
+ GpuInProcessThreadService(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ gpu::SyncPointManager* sync_point_manager,
+ gpu::gles2::MailboxManager* mailbox_manager,
+ scoped_refptr<gl::GLShareGroup> share_group);
+
+ // gpu::InProcessCommandBuffer::Service implementation.
+ void ScheduleTask(const base::Closure& task) override;
+ void ScheduleDelayedWork(const base::Closure& task) override;
+ bool UseVirtualizedGLContexts() override;
+ scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache()
+ override;
+ scoped_refptr<gpu::gles2::FramebufferCompletenessCache>
+ framebuffer_completeness_cache() override;
+ gpu::SyncPointManager* sync_point_manager() override;
+ void AddRef() const override;
+ void Release() const override;
+ bool BlockThreadOnWaitSyncToken() const override;
+
+ private:
+ friend class base::RefCountedThreadSafe<GpuInProcessThreadService>;
+
+ ~GpuInProcessThreadService() override;
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ gpu::SyncPointManager* sync_point_manager_; // Non-owning.
+ scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
+ scoped_refptr<gpu::gles2::FramebufferCompletenessCache>
+ framebuffer_completeness_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuInProcessThreadService);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_GPU_IN_PROCESS_THREAD_SERVICE_H_
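A construction sketch, mirroring the GpuInProcessThreadHolder added to
in_process_command_buffer.cc later in this patch; thread and SyncPointManager
lifetimes are the caller's responsibility.

    base::Thread gpu_thread("GpuThread");
    gpu_thread.Start();
    std::unique_ptr<gpu::SyncPointManager> sync_point_manager(
        new gpu::SyncPointManager(false));
    scoped_refptr<gpu::InProcessCommandBuffer::Service> service =
        new gpu::GpuInProcessThreadService(
            gpu_thread.task_runner(), sync_point_manager.get(),
            nullptr /* mailbox_manager */, nullptr /* share_group */);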
diff --git a/chromium/gpu/ipc/host/BUILD.gn b/chromium/gpu/ipc/host/BUILD.gn
new file mode 100644
index 00000000000..daf7057b924
--- /dev/null
+++ b/chromium/gpu/ipc/host/BUILD.gn
@@ -0,0 +1,24 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/ui.gni")
+
+source_set("host") {
+ sources = [
+ "gpu_memory_buffer_support.cc",
+ "gpu_memory_buffer_support.h",
+ "gpu_switches.cc",
+ "gpu_switches.h",
+ "shader_disk_cache.cc",
+ "shader_disk_cache.h",
+ ]
+
+ deps = [
+ "//base",
+ "//gpu/ipc/common",
+ "//net",
+ "//ui/gfx",
+ "//ui/gl",
+ ]
+}
diff --git a/chromium/gpu/ipc/host/DEPS b/chromium/gpu/ipc/host/DEPS
new file mode 100644
index 00000000000..8fa9d48d882
--- /dev/null
+++ b/chromium/gpu/ipc/host/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+net",
+]
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
new file mode 100644
index 00000000000..cbd36559365
--- /dev/null
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
@@ -0,0 +1,113 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/host/gpu_memory_buffer_support.h"
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "gpu/ipc/common/gpu_memory_buffer_support.h"
+#include "gpu/ipc/host/gpu_switches.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_switches.h"
+
+namespace gpu {
+
+bool AreNativeGpuMemoryBuffersEnabled() {
+ // Disable native buffers when using Mesa.
+ if (base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kUseGL) == gl::kGLImplementationOSMesaName) {
+ return false;
+ }
+
+#if defined(OS_MACOSX)
+ return !base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableNativeGpuMemoryBuffers);
+#else
+ return base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableNativeGpuMemoryBuffers);
+#endif
+}
+
+GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations() {
+ GpuMemoryBufferConfigurationSet configurations;
+
+#if defined(USE_OZONE) || defined(OS_MACOSX)
+ if (AreNativeGpuMemoryBuffersEnabled()) {
+ const gfx::BufferFormat kNativeFormats[] = {
+ gfx::BufferFormat::R_8,
+ gfx::BufferFormat::RG_88,
+ gfx::BufferFormat::BGR_565,
+ gfx::BufferFormat::RGBA_4444,
+ gfx::BufferFormat::RGBA_8888,
+ gfx::BufferFormat::BGRA_8888,
+ gfx::BufferFormat::UYVY_422,
+ gfx::BufferFormat::YVU_420,
+ gfx::BufferFormat::YUV_420_BIPLANAR};
+ const gfx::BufferUsage kNativeUsages[] = {
+ gfx::BufferUsage::GPU_READ, gfx::BufferUsage::SCANOUT,
+ gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
+ gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT};
+ for (auto format : kNativeFormats) {
+ for (auto usage : kNativeUsages) {
+ if (IsNativeGpuMemoryBufferConfigurationSupported(format, usage))
+ configurations.insert(std::make_pair(format, usage));
+ }
+ }
+ }
+
+ // As in AreNativeGpuMemoryBuffersEnabled() above, avoid forcing native
+ // buffer formats when using Mesa.
+ bool force_native_gpu_read_write_formats =
+ base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kUseGL) != gl::kGLImplementationOSMesaName;
+ if (force_native_gpu_read_write_formats) {
+ const gfx::BufferFormat kGPUReadWriteFormats[] = {
+ gfx::BufferFormat::BGR_565, gfx::BufferFormat::RGBA_8888,
+ gfx::BufferFormat::RGBX_8888, gfx::BufferFormat::BGRA_8888,
+ gfx::BufferFormat::BGRX_8888, gfx::BufferFormat::UYVY_422,
+ gfx::BufferFormat::YVU_420, gfx::BufferFormat::YUV_420_BIPLANAR};
+ const gfx::BufferUsage kGPUReadWriteUsages[] = {gfx::BufferUsage::GPU_READ,
+ gfx::BufferUsage::SCANOUT};
+ for (auto format : kGPUReadWriteFormats) {
+ for (auto usage : kGPUReadWriteUsages) {
+ if (IsNativeGpuMemoryBufferConfigurationSupported(format, usage))
+ configurations.insert(std::make_pair(format, usage));
+ }
+ }
+ }
+#endif // defined(USE_OZONE) || defined(OS_MACOSX)
+
+ return configurations;
+}
+
+uint32_t GetImageTextureTarget(gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
+#if defined(USE_OZONE) || defined(OS_MACOSX)
+ GpuMemoryBufferConfigurationSet native_configurations =
+ GetNativeGpuMemoryBufferConfigurations();
+ if (native_configurations.find(std::make_pair(format, usage)) ==
+ native_configurations.end()) {
+ return GL_TEXTURE_2D;
+ }
+
+ switch (GetNativeGpuMemoryBufferType()) {
+ case gfx::OZONE_NATIVE_PIXMAP:
+ // GPU memory buffers that are shared with GL using EGLImages
+ // require GL_TEXTURE_EXTERNAL_OES.
+ return GL_TEXTURE_EXTERNAL_OES;
+ case gfx::IO_SURFACE_BUFFER:
+ // IOSurface backed images require GL_TEXTURE_RECTANGLE_ARB.
+ return GL_TEXTURE_RECTANGLE_ARB;
+ case gfx::SHARED_MEMORY_BUFFER:
+ case gfx::EMPTY_BUFFER:
+ break;
+ }
+ NOTREACHED();
+ return GL_TEXTURE_2D;
+#else // defined(USE_OZONE) || defined(OS_MACOSX)
+ return GL_TEXTURE_2D;
+#endif
+}
+
+} // namespace gpu
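A small host-side usage sketch: callers pick the GL texture target for a
(format, usage) pair before binding the buffer as an image, and fall back to
GL_TEXTURE_2D whenever no native configuration is supported, per the logic
above.

    uint32_t target = gpu::GetImageTextureTarget(
        gfx::BufferFormat::BGRA_8888, gfx::BufferUsage::SCANOUT);
    // Ozone native pixmaps map to GL_TEXTURE_EXTERNAL_OES, IOSurfaces to
    // GL_TEXTURE_RECTANGLE_ARB, and everything else to GL_TEXTURE_2D.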
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.h b/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
new file mode 100644
index 00000000000..14d396f527d
--- /dev/null
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_HOST_GPU_MEMORY_BUFFER_SUPPORT_H_
+#define GPU_IPC_HOST_GPU_MEMORY_BUFFER_SUPPORT_H_
+
+#include "base/containers/hash_tables.h"
+#include "base/hash.h"
+#include "ui/gfx/buffer_types.h"
+
+namespace gpu {
+
+using GpuMemoryBufferConfigurationKey =
+ std::pair<gfx::BufferFormat, gfx::BufferUsage>;
+using GpuMemoryBufferConfigurationSet =
+ base::hash_set<GpuMemoryBufferConfigurationKey>;
+
+} // namespace gpu
+
+namespace BASE_HASH_NAMESPACE {
+
+template <>
+struct hash<gpu::GpuMemoryBufferConfigurationKey> {
+ size_t operator()(const gpu::GpuMemoryBufferConfigurationKey& key) const {
+ return base::HashInts(static_cast<int>(key.first),
+ static_cast<int>(key.second));
+ }
+};
+
+} // namespace BASE_HASH_NAMESPACE
+
+namespace gpu {
+
+bool AreNativeGpuMemoryBuffersEnabled();
+
+// Returns the set of supported configurations.
+GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations();
+
+// Returns the OpenGL target to use for image textures.
+uint32_t GetImageTextureTarget(gfx::BufferFormat format,
+ gfx::BufferUsage usage);
+
+} // namespace gpu
+
+#endif // GPU_IPC_HOST_GPU_MEMORY_BUFFER_SUPPORT_H_
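The hash<> specialization above is what allows base::hash_set to key on the
(BufferFormat, BufferUsage) pair. A lookup sketch:

    gpu::GpuMemoryBufferConfigurationSet configs =
        gpu::GetNativeGpuMemoryBufferConfigurations();
    bool is_native = configs.count(std::make_pair(
        gfx::BufferFormat::RGBA_8888, gfx::BufferUsage::GPU_READ)) != 0;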
diff --git a/chromium/gpu/ipc/host/gpu_switches.cc b/chromium/gpu/ipc/host/gpu_switches.cc
new file mode 100644
index 00000000000..1834e3c64b8
--- /dev/null
+++ b/chromium/gpu/ipc/host/gpu_switches.cc
@@ -0,0 +1,16 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/host/gpu_switches.h"
+
+namespace switches {
+
+// Enable native GPU memory buffer support when available.
+const char kEnableNativeGpuMemoryBuffers[] = "enable-native-gpu-memory-buffers";
+
+// Disables native GPU memory buffer support.
+const char kDisableNativeGpuMemoryBuffers[] =
+ "disable-native-gpu-memory-buffers";
+
+} // namespace switches
diff --git a/chromium/gpu/ipc/host/gpu_switches.h b/chromium/gpu/ipc/host/gpu_switches.h
new file mode 100644
index 00000000000..7f205af4f4a
--- /dev/null
+++ b/chromium/gpu/ipc/host/gpu_switches.h
@@ -0,0 +1,17 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the command-line switches used by gpu/ipc/host
+
+#ifndef GPU_IPC_HOST_GPU_SWITCHES_H_
+#define GPU_IPC_HOST_GPU_SWITCHES_H_
+
+namespace switches {
+
+extern const char kEnableNativeGpuMemoryBuffers[];
+extern const char kDisableNativeGpuMemoryBuffers[];
+
+} // namespace switches
+
+#endif // GPU_IPC_HOST_GPU_SWITCHES_H_
diff --git a/chromium/gpu/ipc/host/shader_disk_cache.cc b/chromium/gpu/ipc/host/shader_disk_cache.cc
new file mode 100644
index 00000000000..7d1ad889c70
--- /dev/null
+++ b/chromium/gpu/ipc/host/shader_disk_cache.cc
@@ -0,0 +1,629 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/host/shader_disk_cache.h"
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_checker.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "net/base/cache_type.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+
+namespace gpu {
+
+namespace {
+
+static const base::FilePath::CharType kGpuCachePath[] =
+ FILE_PATH_LITERAL("GPUCache");
+
+} // namespace
+
+// ShaderDiskCacheEntry handles the work of caching/updating the cached
+// shaders.
+class ShaderDiskCacheEntry : public base::ThreadChecker {
+ public:
+ ShaderDiskCacheEntry(ShaderDiskCache* cache,
+ const std::string& key,
+ const std::string& shader);
+ ~ShaderDiskCacheEntry();
+
+ void Cache();
+ void OnOpComplete(int rv);
+ void set_entry(disk_cache::Entry* entry) { entry_ = entry; }
+
+ private:
+ enum OpType {
+ OPEN_ENTRY,
+ WRITE_DATA,
+ CREATE_ENTRY,
+ };
+
+ int OpenCallback(int rv);
+ int WriteCallback(int rv);
+ int IOComplete(int rv);
+
+ ShaderDiskCache* cache_;
+ OpType op_type_;
+ std::string key_;
+ std::string shader_;
+ disk_cache::Entry* entry_;
+ base::WeakPtr<ShaderDiskCacheEntry> weak_ptr_;
+ base::WeakPtrFactory<ShaderDiskCacheEntry> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderDiskCacheEntry);
+};
+
+// ShaderDiskReadHelper is used to load all of the cached shaders from the
+// disk cache and send them to the memory cache.
+class ShaderDiskReadHelper : public base::ThreadChecker {
+ public:
+ using ShaderLoadedCallback = ShaderDiskCache::ShaderLoadedCallback;
+ ShaderDiskReadHelper(ShaderDiskCache* cache,
+ const ShaderLoadedCallback& callback);
+ ~ShaderDiskReadHelper();
+
+ void LoadCache();
+ void OnOpComplete(int rv);
+ void set_entry(disk_cache::Entry* entry) { entry_ = entry; }
+
+ private:
+ enum OpType {
+ TERMINATE,
+ OPEN_NEXT,
+ OPEN_NEXT_COMPLETE,
+ READ_COMPLETE,
+ ITERATION_FINISHED
+ };
+
+ int OpenNextEntry();
+ int OpenNextEntryComplete(int rv);
+ int ReadComplete(int rv);
+ int IterationComplete(int rv);
+
+ ShaderDiskCache* cache_;
+ ShaderLoadedCallback shader_loaded_callback_;
+ OpType op_type_;
+ std::unique_ptr<disk_cache::Backend::Iterator> iter_;
+ scoped_refptr<net::IOBufferWithSize> buf_;
+ disk_cache::Entry* entry_;
+ base::WeakPtrFactory<ShaderDiskReadHelper> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderDiskReadHelper);
+};
+
+class ShaderClearHelper : public base::ThreadChecker {
+ public:
+ ShaderClearHelper(ShaderCacheFactory* factory,
+ scoped_refptr<ShaderDiskCache> cache,
+ const base::FilePath& path,
+ const base::Time& delete_begin,
+ const base::Time& delete_end,
+ const base::Closure& callback);
+ ~ShaderClearHelper();
+
+ void Clear();
+
+ private:
+ enum OpType { TERMINATE, VERIFY_CACHE_SETUP, DELETE_CACHE };
+
+ void DoClearShaderCache(int rv);
+
+ ShaderCacheFactory* factory_;
+ scoped_refptr<ShaderDiskCache> cache_;
+ OpType op_type_;
+ base::FilePath path_;
+ base::Time delete_begin_;
+ base::Time delete_end_;
+ base::Closure callback_;
+ base::WeakPtrFactory<ShaderClearHelper> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderClearHelper);
+};
+
+// When the cache is asked to open an entry, an Entry** is passed to it. The
+// underlying Entry* must stay alive for the duration of the call, so it is
+// owned by the callback. If the requesting state machine is deleted before
+// the callback runs, the callback closes the entry.
+template <typename T>
+void OnEntryOpenComplete(base::WeakPtr<T> state_machine,
+ std::unique_ptr<disk_cache::Entry*> entry,
+ int rv) {
+ if (!state_machine) {
+ if (rv == net::OK)
+ (*entry)->Close();
+ return;
+ }
+ state_machine->set_entry(*entry);
+ state_machine->OnOpComplete(rv);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ShaderDiskCacheEntry
+
+ShaderDiskCacheEntry::ShaderDiskCacheEntry(ShaderDiskCache* cache,
+ const std::string& key,
+ const std::string& shader)
+ : cache_(cache),
+ op_type_(OPEN_ENTRY),
+ key_(key),
+ shader_(shader),
+ entry_(nullptr),
+ weak_ptr_factory_(this) {
+ weak_ptr_ = weak_ptr_factory_.GetWeakPtr();
+}
+
+ShaderDiskCacheEntry::~ShaderDiskCacheEntry() {
+ DCHECK(CalledOnValidThread());
+ if (entry_)
+ entry_->Close();
+}
+
+void ShaderDiskCacheEntry::Cache() {
+ DCHECK(CalledOnValidThread());
+
+ // The Entry* passed to the cache must stay alive even if this class is
+ // deleted, so store it in the callback.
+ auto entry = base::MakeUnique<disk_cache::Entry*>(nullptr);
+ disk_cache::Entry** closure_owned_entry_ptr = entry.get();
+ auto callback = base::Bind(&OnEntryOpenComplete<ShaderDiskCacheEntry>,
+ weak_ptr_factory_.GetWeakPtr(),
+ base::Passed(std::move(entry)));
+
+ int rv =
+ cache_->backend()->OpenEntry(key_, closure_owned_entry_ptr, callback);
+
+ if (rv != net::ERR_IO_PENDING) {
+ entry_ = *closure_owned_entry_ptr;
+ OnOpComplete(rv);
+ }
+}
+
+void ShaderDiskCacheEntry::OnOpComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ // The function calls inside the switch block below can end up destroying
+ // |this|, so hold on to a WeakPtr<> and terminate the loop if |this|
+ // has been destroyed.
+ auto weak_ptr = std::move(weak_ptr_);
+ do {
+ switch (op_type_) {
+ case OPEN_ENTRY:
+ rv = OpenCallback(rv);
+ break;
+ case CREATE_ENTRY:
+ rv = WriteCallback(rv);
+ break;
+ case WRITE_DATA:
+ rv = IOComplete(rv);
+ break;
+ }
+ } while (rv != net::ERR_IO_PENDING && weak_ptr);
+ if (weak_ptr)
+ weak_ptr_ = std::move(weak_ptr);
+}
+
+int ShaderDiskCacheEntry::OpenCallback(int rv) {
+ DCHECK(CalledOnValidThread());
+ if (rv == net::OK) {
+ cache_->backend()->OnExternalCacheHit(key_);
+ cache_->EntryComplete(this);
+ return rv;
+ }
+
+ op_type_ = CREATE_ENTRY;
+
+ // The Entry* passed to the cache must stay alive even if this class is
+ // deleted, so store it in the callback.
+ auto entry = base::MakeUnique<disk_cache::Entry*>(nullptr);
+ disk_cache::Entry** closure_owned_entry_ptr = entry.get();
+ auto callback = base::Bind(&OnEntryOpenComplete<ShaderDiskCacheEntry>,
+ weak_ptr_factory_.GetWeakPtr(),
+ base::Passed(std::move(entry)));
+
+ int create_rv =
+ cache_->backend()->CreateEntry(key_, closure_owned_entry_ptr, callback);
+
+ if (create_rv != net::ERR_IO_PENDING)
+ entry_ = *closure_owned_entry_ptr;
+ return create_rv;
+}
+
+int ShaderDiskCacheEntry::WriteCallback(int rv) {
+ DCHECK(CalledOnValidThread());
+ if (rv != net::OK) {
+ LOG(ERROR) << "Failed to create shader cache entry: " << rv;
+ cache_->EntryComplete(this);
+ return rv;
+ }
+
+ op_type_ = WRITE_DATA;
+ scoped_refptr<net::StringIOBuffer> io_buf = new net::StringIOBuffer(shader_);
+ return entry_->WriteData(1, 0, io_buf.get(), shader_.length(),
+ base::Bind(&ShaderDiskCacheEntry::OnOpComplete,
+ weak_ptr_factory_.GetWeakPtr()),
+ false);
+}
+
+int ShaderDiskCacheEntry::IOComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ cache_->EntryComplete(this);
+ return rv;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ShaderDiskReadHelper
+
+ShaderDiskReadHelper::ShaderDiskReadHelper(ShaderDiskCache* cache,
+ const ShaderLoadedCallback& callback)
+ : cache_(cache),
+ shader_loaded_callback_(callback),
+ op_type_(OPEN_NEXT),
+ buf_(NULL),
+ entry_(NULL),
+ weak_ptr_factory_(this) {}
+
+ShaderDiskReadHelper::~ShaderDiskReadHelper() {
+ DCHECK(CalledOnValidThread());
+ if (entry_)
+ entry_->Close();
+ iter_ = nullptr;
+}
+
+void ShaderDiskReadHelper::LoadCache() {
+ DCHECK(CalledOnValidThread());
+ OnOpComplete(net::OK);
+}
+
+void ShaderDiskReadHelper::OnOpComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ do {
+ switch (op_type_) {
+ case OPEN_NEXT:
+ rv = OpenNextEntry();
+ break;
+ case OPEN_NEXT_COMPLETE:
+ rv = OpenNextEntryComplete(rv);
+ break;
+ case READ_COMPLETE:
+ rv = ReadComplete(rv);
+ break;
+ case ITERATION_FINISHED:
+ rv = IterationComplete(rv);
+ break;
+ case TERMINATE:
+ cache_->ReadComplete();
+ rv = net::ERR_IO_PENDING; // break the loop
+ break;
+ }
+ } while (rv != net::ERR_IO_PENDING);
+}
+
+int ShaderDiskReadHelper::OpenNextEntry() {
+ DCHECK(CalledOnValidThread());
+ op_type_ = OPEN_NEXT_COMPLETE;
+ if (!iter_)
+ iter_ = cache_->backend()->CreateIterator();
+
+ // The Entry* passed to the cache must stay alive even if this class is
+ // deleted, so store it in the callback.
+ auto entry = base::MakeUnique<disk_cache::Entry*>(nullptr);
+ disk_cache::Entry** closure_owned_entry_ptr = entry.get();
+ auto callback = base::Bind(&OnEntryOpenComplete<ShaderDiskReadHelper>,
+ weak_ptr_factory_.GetWeakPtr(),
+ base::Passed(std::move(entry)));
+
+ int rv = iter_->OpenNextEntry(closure_owned_entry_ptr, callback);
+
+ if (rv != net::ERR_IO_PENDING)
+ entry_ = *closure_owned_entry_ptr;
+ return rv;
+}
+
+int ShaderDiskReadHelper::OpenNextEntryComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ if (rv == net::ERR_FAILED) {
+ iter_.reset();
+ op_type_ = ITERATION_FINISHED;
+ return net::OK;
+ }
+
+ if (rv < 0)
+ return rv;
+
+ op_type_ = READ_COMPLETE;
+ buf_ = new net::IOBufferWithSize(entry_->GetDataSize(1));
+ return entry_->ReadData(1, 0, buf_.get(), buf_->size(),
+ base::Bind(&ShaderDiskReadHelper::OnOpComplete,
+ weak_ptr_factory_.GetWeakPtr()));
+}
+
+int ShaderDiskReadHelper::ReadComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ if (rv && rv == buf_->size() && !shader_loaded_callback_.is_null()) {
+ shader_loaded_callback_.Run(entry_->GetKey(),
+ std::string(buf_->data(), buf_->size()));
+ }
+
+ buf_ = NULL;
+ entry_->Close();
+ entry_ = NULL;
+
+ op_type_ = OPEN_NEXT;
+ return net::OK;
+}
+
+int ShaderDiskReadHelper::IterationComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ iter_.reset();
+ op_type_ = TERMINATE;
+ return net::OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ShaderClearHelper
+
+ShaderClearHelper::ShaderClearHelper(ShaderCacheFactory* factory,
+ scoped_refptr<ShaderDiskCache> cache,
+ const base::FilePath& path,
+ const base::Time& delete_begin,
+ const base::Time& delete_end,
+ const base::Closure& callback)
+ : factory_(factory),
+ cache_(std::move(cache)),
+ op_type_(VERIFY_CACHE_SETUP),
+ path_(path),
+ delete_begin_(delete_begin),
+ delete_end_(delete_end),
+ callback_(callback),
+ weak_ptr_factory_(this) {}
+
+ShaderClearHelper::~ShaderClearHelper() {
+ DCHECK(CalledOnValidThread());
+}
+
+void ShaderClearHelper::Clear() {
+ DCHECK(CalledOnValidThread());
+ DoClearShaderCache(net::OK);
+}
+
+void ShaderClearHelper::DoClearShaderCache(int rv) {
+ DCHECK(CalledOnValidThread());
+ while (rv != net::ERR_IO_PENDING) {
+ switch (op_type_) {
+ case VERIFY_CACHE_SETUP:
+ rv = cache_->SetAvailableCallback(
+ base::Bind(&ShaderClearHelper::DoClearShaderCache,
+ weak_ptr_factory_.GetWeakPtr()));
+ op_type_ = DELETE_CACHE;
+ break;
+ case DELETE_CACHE:
+ rv = cache_->Clear(delete_begin_, delete_end_,
+ base::Bind(&ShaderClearHelper::DoClearShaderCache,
+ weak_ptr_factory_.GetWeakPtr()));
+ op_type_ = TERMINATE;
+ break;
+ case TERMINATE:
+ callback_.Run();
+ // Calling CacheCleared() destroys |this|.
+ factory_->CacheCleared(path_);
+ rv = net::ERR_IO_PENDING; // Break the loop.
+ break;
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ShaderCacheFactory
+
+ShaderCacheFactory::ShaderCacheFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner)
+ : cache_task_runner_(std::move(cache_task_runner)) {}
+
+ShaderCacheFactory::~ShaderCacheFactory() {}
+
+void ShaderCacheFactory::SetCacheInfo(int32_t client_id,
+ const base::FilePath& path) {
+ DCHECK(CalledOnValidThread());
+ client_id_to_path_map_[client_id] = path;
+}
+
+void ShaderCacheFactory::RemoveCacheInfo(int32_t client_id) {
+ DCHECK(CalledOnValidThread());
+ client_id_to_path_map_.erase(client_id);
+}
+
+scoped_refptr<ShaderDiskCache> ShaderCacheFactory::Get(int32_t client_id) {
+ DCHECK(CalledOnValidThread());
+ ClientIdToPathMap::iterator iter = client_id_to_path_map_.find(client_id);
+ if (iter == client_id_to_path_map_.end())
+ return NULL;
+ return ShaderCacheFactory::GetByPath(iter->second);
+}
+
+scoped_refptr<ShaderDiskCache> ShaderCacheFactory::GetByPath(
+ const base::FilePath& path) {
+ DCHECK(CalledOnValidThread());
+ ShaderCacheMap::iterator iter = shader_cache_map_.find(path);
+ if (iter != shader_cache_map_.end())
+ return iter->second;
+
+ ShaderDiskCache* cache = new ShaderDiskCache(this, path);
+ cache->Init(cache_task_runner_);
+ return cache;
+}
+
+void ShaderCacheFactory::AddToCache(const base::FilePath& key,
+ ShaderDiskCache* cache) {
+ DCHECK(CalledOnValidThread());
+ shader_cache_map_[key] = cache;
+}
+
+void ShaderCacheFactory::RemoveFromCache(const base::FilePath& key) {
+ DCHECK(CalledOnValidThread());
+ shader_cache_map_.erase(key);
+}
+
+void ShaderCacheFactory::ClearByPath(const base::FilePath& path,
+ const base::Time& delete_begin,
+ const base::Time& delete_end,
+ const base::Closure& callback) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(!callback.is_null());
+
+ auto helper = base::MakeUnique<ShaderClearHelper>(
+ this, GetByPath(path), path, delete_begin, delete_end, callback);
+
+ // We could receive requests to clear the same path with different
+ // begin/end times, so we keep a list of requests. If we haven't seen this
+ // path before, we kick off the clear and add it to the list. If we have
+ // seen it already, a clear is already running; we add this request to the
+ // list and wait for any previous clears to finish.
+ ShaderClearMap::iterator iter = shader_clear_map_.find(path);
+ if (iter != shader_clear_map_.end()) {
+ iter->second.push(std::move(helper));
+ return;
+ }
+
+ // Insert the helper into the map before calling Clear(), since Clear() can
+ // re-enter CacheCleared().
+ ShaderClearHelper* helper_ptr = helper.get();
+ shader_clear_map_.insert(
+ std::pair<base::FilePath, ShaderClearQueue>(path, ShaderClearQueue()));
+ shader_clear_map_[path].push(std::move(helper));
+ helper_ptr->Clear();
+}
+
+void ShaderCacheFactory::CacheCleared(const base::FilePath& path) {
+ DCHECK(CalledOnValidThread());
+
+ ShaderClearMap::iterator iter = shader_clear_map_.find(path);
+ if (iter == shader_clear_map_.end()) {
+ LOG(ERROR) << "Completed clear but missing clear helper.";
+ return;
+ }
+
+ iter->second.pop();
+
+ // If there are remaining items in the list we trigger the Clear on the
+ // next one.
+ if (!iter->second.empty()) {
+ iter->second.front()->Clear();
+ return;
+ }
+
+ shader_clear_map_.erase(iter);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ShaderDiskCache
+
+ShaderDiskCache::ShaderDiskCache(ShaderCacheFactory* factory,
+ const base::FilePath& cache_path)
+ : factory_(factory),
+ cache_available_(false),
+ cache_path_(cache_path),
+ is_initialized_(false) {
+ factory_->AddToCache(cache_path_, this);
+}
+
+ShaderDiskCache::~ShaderDiskCache() {
+ factory_->RemoveFromCache(cache_path_);
+}
+
+void ShaderDiskCache::Init(
+ scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner) {
+ if (is_initialized_) {
+ NOTREACHED(); // The disk cache can't be initialized twice.
+ return;
+ }
+ is_initialized_ = true;
+
+ int rv = disk_cache::CreateCacheBackend(
+ net::SHADER_CACHE, net::CACHE_BACKEND_DEFAULT,
+ cache_path_.Append(kGpuCachePath),
+ gpu::kDefaultMaxProgramCacheMemoryBytes, true, cache_task_runner, NULL,
+ &backend_, base::Bind(&ShaderDiskCache::CacheCreatedCallback, this));
+
+ if (rv == net::OK)
+ cache_available_ = true;
+}
+
+void ShaderDiskCache::Cache(const std::string& key, const std::string& shader) {
+ if (!cache_available_)
+ return;
+
+ auto shim = base::MakeUnique<ShaderDiskCacheEntry>(this, key, shader);
+ shim->Cache();
+ auto* raw_ptr = shim.get();
+ entries_.insert(std::make_pair(raw_ptr, std::move(shim)));
+}
+
+int ShaderDiskCache::Clear(const base::Time begin_time,
+ const base::Time end_time,
+ const net::CompletionCallback& completion_callback) {
+ int rv;
+ if (begin_time.is_null()) {
+ rv = backend_->DoomAllEntries(completion_callback);
+ } else {
+ rv =
+ backend_->DoomEntriesBetween(begin_time, end_time, completion_callback);
+ }
+ return rv;
+}
+
+int32_t ShaderDiskCache::Size() {
+ if (!cache_available_)
+ return -1;
+ return backend_->GetEntryCount();
+}
+
+int ShaderDiskCache::SetAvailableCallback(
+ const net::CompletionCallback& callback) {
+ if (cache_available_)
+ return net::OK;
+ available_callback_ = callback;
+ return net::ERR_IO_PENDING;
+}
+
+void ShaderDiskCache::CacheCreatedCallback(int rv) {
+ if (rv != net::OK) {
+ LOG(ERROR) << "Shader Cache Creation failed: " << rv;
+ return;
+ }
+ helper_ =
+ base::MakeUnique<ShaderDiskReadHelper>(this, shader_loaded_callback_);
+ helper_->LoadCache();
+}
+
+void ShaderDiskCache::EntryComplete(ShaderDiskCacheEntry* entry) {
+ entries_.erase(entry);
+ if (entries_.empty() && !cache_complete_callback_.is_null())
+ cache_complete_callback_.Run(net::OK);
+}
+
+void ShaderDiskCache::ReadComplete() {
+ helper_ = nullptr;
+
+ // The cache is considered available after we have finished reading any
+ // of the old cache values off disk. This prevents a potential race where we
+ // are reading from disk and execute a cache clear at the same time.
+ cache_available_ = true;
+ if (!available_callback_.is_null()) {
+ available_callback_.Run(net::OK);
+ available_callback_.Reset();
+ }
+}
+
+int ShaderDiskCache::SetCacheCompleteCallback(
+ const net::CompletionCallback& callback) {
+ if (entries_.empty()) {
+ return net::OK;
+ }
+ cache_complete_callback_ = callback;
+ return net::ERR_IO_PENDING;
+}
+
+} // namespace gpu
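A hedged sketch of the factory-level clear path, which the unit tests below do
not exercise directly; |profile_path| and OnShaderCacheCleared are placeholders
for the embedder's path and completion handler. Per ClearByPath() above,
overlapping requests for the same path are queued and run one at a time.

    factory->ClearByPath(profile_path,
                         base::Time(), base::Time(),  // Null times: clear all.
                         base::Bind(&OnShaderCacheCleared));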
diff --git a/chromium/gpu/ipc/host/shader_disk_cache.h b/chromium/gpu/ipc/host/shader_disk_cache.h
new file mode 100644
index 00000000000..4080737d2fe
--- /dev/null
+++ b/chromium/gpu/ipc/host/shader_disk_cache.h
@@ -0,0 +1,157 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_HOST_SHADER_DISK_CACHE_H_
+#define GPU_IPC_HOST_SHADER_DISK_CACHE_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <queue>
+#include <string>
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/threading/thread_checker.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace gpu {
+
+class ShaderCacheFactory;
+class ShaderDiskCacheEntry;
+class ShaderDiskReadHelper;
+class ShaderClearHelper;
+
+// ShaderDiskCache is the interface to the on-disk cache for GL shaders.
+class ShaderDiskCache : public base::RefCounted<ShaderDiskCache> {
+ public:
+ using ShaderLoadedCallback =
+ base::Callback<void(const std::string&, const std::string&)>;
+
+ void set_shader_loaded_callback(const ShaderLoadedCallback& callback) {
+ shader_loaded_callback_ = callback;
+ }
+
+ // Store the |shader| into the cache under |key|.
+ void Cache(const std::string& key, const std::string& shader);
+
+ // Clear a range of entries. This supports unbounded deletes in either
+ // direction by using null Time values for either |begin_time| or |end_time|.
+ // The return value is a net error code. If this method returns
+ // ERR_IO_PENDING, the |completion_callback| will be invoked when the
+ // operation completes.
+ int Clear(const base::Time begin_time,
+ const base::Time end_time,
+ const net::CompletionCallback& completion_callback);
+
+ // Sets a callback for when the cache is available. If the cache is
+ // already available the callback will not be called and net::OK is returned.
+ // If the callback is set net::ERR_IO_PENDING is returned and the callback
+ // will be executed when the cache is available.
+ int SetAvailableCallback(const net::CompletionCallback& callback);
+
+ // Returns the number of elements currently in the cache.
+ int32_t Size();
+
+ // Set a callback notification for when all current entries have been
+ // written to the cache.
+ // The return value is a net error code. If this method returns
+ // ERR_IO_PENDING, the |callback| will be invoked when all entries have
+ // been written to the cache.
+ int SetCacheCompleteCallback(const net::CompletionCallback& callback);
+
+ private:
+ friend class base::RefCounted<ShaderDiskCache>;
+ friend class ShaderDiskCacheEntry;
+ friend class ShaderDiskReadHelper;
+ friend class ShaderCacheFactory;
+
+ ShaderDiskCache(ShaderCacheFactory* factory,
+ const base::FilePath& cache_path);
+ ~ShaderDiskCache();
+
+ void Init(scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner);
+ void CacheCreatedCallback(int rv);
+
+ disk_cache::Backend* backend() { return backend_.get(); }
+
+ void EntryComplete(ShaderDiskCacheEntry* entry);
+ void ReadComplete();
+
+ ShaderCacheFactory* factory_;
+ bool cache_available_;
+ base::FilePath cache_path_;
+ bool is_initialized_;
+ net::CompletionCallback available_callback_;
+ net::CompletionCallback cache_complete_callback_;
+ ShaderLoadedCallback shader_loaded_callback_;
+
+ std::unique_ptr<disk_cache::Backend> backend_;
+
+ std::unique_ptr<ShaderDiskReadHelper> helper_;
+ std::unordered_map<ShaderDiskCacheEntry*,
+ std::unique_ptr<ShaderDiskCacheEntry>>
+ entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderDiskCache);
+};
+
+// ShaderCacheFactory maintains a cache of ShaderDiskCache objects
+// so we only create one per profile directory.
+class ShaderCacheFactory : NON_EXPORTED_BASE(public base::ThreadChecker) {
+ public:
+ explicit ShaderCacheFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner);
+ ~ShaderCacheFactory();
+
+ // Clear the shader disk cache for the given |path|. This supports unbounded
+ // deletes in either direction by using null Time values for either
+ // |begin_time| or |end_time|. The |callback| will be executed when the
+ // clear is complete.
+ void ClearByPath(const base::FilePath& path,
+ const base::Time& begin_time,
+ const base::Time& end_time,
+ const base::Closure& callback);
+
+ // Retrieve the shader disk cache for the provided |client_id|.
+ scoped_refptr<ShaderDiskCache> Get(int32_t client_id);
+
+ // Set the |path| to be used for the disk cache for |client_id|.
+ void SetCacheInfo(int32_t client_id, const base::FilePath& path);
+
+ // Remove the path mapping for |client_id|.
+ void RemoveCacheInfo(int32_t client_id);
+
+ // Set the provided |cache| into the cache map for the given |path|.
+ void AddToCache(const base::FilePath& path, ShaderDiskCache* cache);
+
+ // Remove the provided |path| from our cache map.
+ void RemoveFromCache(const base::FilePath& path);
+
+ private:
+ friend class ShaderClearHelper;
+
+ scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner_;
+
+ scoped_refptr<ShaderDiskCache> GetByPath(const base::FilePath& path);
+ void CacheCleared(const base::FilePath& path);
+
+ using ShaderCacheMap = std::map<base::FilePath, ShaderDiskCache*>;
+ ShaderCacheMap shader_cache_map_;
+
+ using ClientIdToPathMap = std::map<int32_t, base::FilePath>;
+ ClientIdToPathMap client_id_to_path_map_;
+
+ using ShaderClearQueue = std::queue<std::unique_ptr<ShaderClearHelper>>;
+ using ShaderClearMap = std::map<base::FilePath, ShaderClearQueue>;
+ ShaderClearMap shader_clear_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderCacheFactory);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_HOST_SHADER_DISK_CACHE_H_
diff --git a/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc b/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
new file mode 100644
index 00000000000..209b6304c67
--- /dev/null
+++ b/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
@@ -0,0 +1,109 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "gpu/ipc/host/shader_disk_cache.h"
+#include "net/base/test_completion_callback.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace {
+
+const int kDefaultClientId = 42;
+const char kCacheKey[] = "key";
+const char kCacheValue[] = "cached value";
+
+} // namespace
+
+class ShaderDiskCacheTest : public testing::Test {
+ public:
+ ShaderDiskCacheTest()
+ : cache_thread_("CacheThread") {
+ base::Thread::Options options;
+ options.message_loop_type = base::MessageLoop::TYPE_IO;
+ CHECK(cache_thread_.StartWithOptions(options));
+ factory_ =
+ base::MakeUnique<ShaderCacheFactory>(cache_thread_.task_runner());
+ }
+
+ ~ShaderDiskCacheTest() override {}
+
+ const base::FilePath& cache_path() { return temp_dir_.GetPath(); }
+
+ void InitCache() {
+ ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+ factory_->SetCacheInfo(kDefaultClientId, cache_path());
+ }
+
+ ShaderCacheFactory* factory() { return factory_.get(); }
+
+ private:
+ void TearDown() override { factory_->RemoveCacheInfo(kDefaultClientId); }
+
+ std::unique_ptr<ShaderCacheFactory> factory_;
+ base::ScopedTempDir temp_dir_;
+ base::Thread cache_thread_;
+ base::MessageLoopForIO message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderDiskCacheTest);
+};
+
+TEST_F(ShaderDiskCacheTest, ClearsCache) {
+ InitCache();
+
+ scoped_refptr<ShaderDiskCache> cache = factory()->Get(kDefaultClientId);
+ ASSERT_TRUE(cache.get() != NULL);
+
+ net::TestCompletionCallback available_cb;
+ int rv = cache->SetAvailableCallback(available_cb.callback());
+ ASSERT_EQ(net::OK, available_cb.GetResult(rv));
+ EXPECT_EQ(0, cache->Size());
+
+ cache->Cache(kCacheKey, kCacheValue);
+
+ net::TestCompletionCallback complete_cb;
+ rv = cache->SetCacheCompleteCallback(complete_cb.callback());
+ ASSERT_EQ(net::OK, complete_cb.GetResult(rv));
+ EXPECT_EQ(1, cache->Size());
+
+ base::Time time;
+ net::TestCompletionCallback clear_cb;
+ rv = cache->Clear(time, time, clear_cb.callback());
+ ASSERT_EQ(net::OK, clear_cb.GetResult(rv));
+ EXPECT_EQ(0, cache->Size());
+}
+
+// For https://crbug.com/663589.
+TEST_F(ShaderDiskCacheTest, SafeToDeleteCacheMidEntryOpen) {
+ InitCache();
+
+ // Create a cache and wait for it to open.
+ scoped_refptr<ShaderDiskCache> cache = factory()->Get(kDefaultClientId);
+ ASSERT_TRUE(cache.get() != NULL);
+ net::TestCompletionCallback available_cb;
+ int rv = cache->SetAvailableCallback(available_cb.callback());
+ ASSERT_EQ(net::OK, available_cb.GetResult(rv));
+ EXPECT_EQ(0, cache->Size());
+
+ // Start writing an entry to the cache but delete it before the backend has
+ // finished opening the entry. There is a race here, so this usually (but
+ // not always) crashes if there is a problem.
+ cache->Cache(kCacheKey, kCacheValue);
+ cache = nullptr;
+
+ // Open a new cache (to pass time on the cache thread) and verify all is
+ // well.
+ cache = factory()->Get(kDefaultClientId);
+ ASSERT_TRUE(cache.get() != NULL);
+ net::TestCompletionCallback available_cb2;
+ int rv2 = cache->SetAvailableCallback(available_cb2.callback());
+ ASSERT_EQ(net::OK, available_cb2.GetResult(rv2));
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc
index 54c0f0d3ebd..e400376138f 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.cc
+++ b/chromium/gpu/ipc/in_process_command_buffer.cc
@@ -17,6 +17,7 @@
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/weak_ptr.h"
#include "base/numerics/safe_conversions.h"
#include "base/sequence_checker.h"
@@ -40,6 +41,8 @@
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "gpu/ipc/gpu_in_process_thread_service.h"
+#include "gpu/ipc/service/image_transport_surface.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
@@ -52,6 +55,10 @@
#include "base/process/process_handle.h"
#endif
+#if defined(OS_MACOSX)
+#include "gpu/ipc/client/gpu_process_hosted_ca_layer_tree_params.h"
+#endif
+
namespace gpu {
namespace {
@@ -66,27 +73,27 @@ static void RunTaskWithResult(base::Callback<T(void)> task,
completion->Signal();
}
-struct ScopedOrderNumberProcessor {
- ScopedOrderNumberProcessor(SyncPointOrderData* order_data, uint32_t order_num)
- : order_data_(order_data), order_num_(order_num) {
- order_data_->BeginProcessingOrderNumber(order_num_);
+class GpuInProcessThreadHolder : public base::Thread {
+ public:
+ GpuInProcessThreadHolder()
+ : base::Thread("GpuThread"),
+ sync_point_manager_(new SyncPointManager(false)) {
+ Start();
}
- ~ScopedOrderNumberProcessor() {
- order_data_->FinishProcessingOrderNumber(order_num_);
+ ~GpuInProcessThreadHolder() override { Stop(); }
+
+ const scoped_refptr<InProcessCommandBuffer::Service>& GetGpuThreadService() {
+ if (!gpu_thread_service_) {
+ gpu_thread_service_ = new GpuInProcessThreadService(
+ task_runner(), sync_point_manager_.get(), nullptr, nullptr);
+ }
+ return gpu_thread_service_;
}
private:
- SyncPointOrderData* order_data_;
- uint32_t order_num_;
-};
-
-struct GpuInProcessThreadHolder {
- GpuInProcessThreadHolder()
- : sync_point_manager(new SyncPointManager(false)),
- gpu_thread(new GpuInProcessThread(sync_point_manager.get())) {}
- std::unique_ptr<SyncPointManager> sync_point_manager;
- scoped_refptr<InProcessCommandBuffer::Service> gpu_thread;
+ std::unique_ptr<SyncPointManager> sync_point_manager_;
+ scoped_refptr<InProcessCommandBuffer::Service> gpu_thread_service_;
};
base::LazyInstance<GpuInProcessThreadHolder> g_default_service =
@@ -101,34 +108,6 @@ class ScopedEvent {
base::WaitableEvent* event_;
};
-base::SharedMemoryHandle ShareToGpuThread(
- base::SharedMemoryHandle source_handle) {
- return base::SharedMemory::DuplicateHandle(source_handle);
-}
-
-gfx::GpuMemoryBufferHandle ShareGpuMemoryBufferToGpuThread(
- const gfx::GpuMemoryBufferHandle& source_handle,
- bool* requires_sync_point) {
- switch (source_handle.type) {
- case gfx::SHARED_MEMORY_BUFFER: {
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::SHARED_MEMORY_BUFFER;
- handle.handle = ShareToGpuThread(source_handle.handle);
- handle.offset = source_handle.offset;
- handle.stride = source_handle.stride;
- *requires_sync_point = false;
- return handle;
- }
- case gfx::IO_SURFACE_BUFFER:
- case gfx::OZONE_NATIVE_PIXMAP:
- *requires_sync_point = true;
- return source_handle;
- default:
- NOTREACHED();
- return gfx::GpuMemoryBufferHandle();
- }
-}
-
scoped_refptr<InProcessCommandBuffer::Service> GetInitialService(
const scoped_refptr<InProcessCommandBuffer::Service>& service) {
if (service)
@@ -141,7 +120,7 @@ scoped_refptr<InProcessCommandBuffer::Service> GetInitialService(
// ThreadTaskRunnerHandle, which will re-add a new task to the AtExitManager,
// which causes a deadlock because it's already locked.
base::ThreadTaskRunnerHandle::IsSet();
- return g_default_service.Get().gpu_thread;
+ return g_default_service.Get().GetGpuThreadService();
}
} // anonymous namespace
@@ -153,6 +132,13 @@ InProcessCommandBuffer::Service::Service(const GpuPreferences& gpu_preferences)
: gpu_preferences_(gpu_preferences),
gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {}
+InProcessCommandBuffer::Service::Service(
+ gpu::gles2::MailboxManager* mailbox_manager,
+ scoped_refptr<gl::GLShareGroup> share_group)
+ : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()),
+ mailbox_manager_(mailbox_manager),
+ share_group_(share_group) {}
+
InProcessCommandBuffer::Service::~Service() {}
const gpu::GpuPreferences& InProcessCommandBuffer::Service::gpu_preferences() {
@@ -183,11 +169,14 @@ gpu::gles2::ProgramCache* InProcessCommandBuffer::Service::program_cache() {
(gl::g_driver_gl.ext.b_GL_ARB_get_program_binary ||
gl::g_driver_gl.ext.b_GL_OES_get_program_binary) &&
!gpu_preferences().disable_gpu_program_cache) {
+ const GpuDriverBugWorkarounds& workarounds = gpu_driver_bug_workarounds_;
bool disable_disk_cache =
gpu_preferences_.disable_gpu_shader_disk_cache ||
- gpu_driver_bug_workarounds_.disable_program_disk_cache;
+ workarounds.disable_program_disk_cache;
program_cache_.reset(new gles2::MemoryProgramCache(
- gpu_preferences_.gpu_program_cache_size, disable_disk_cache));
+ gpu_preferences_.gpu_program_cache_size,
+ disable_disk_cache,
+ workarounds.disable_program_caching_for_transform_feedback));
}
return program_cache_.get();
}
@@ -251,7 +240,7 @@ void InProcessCommandBuffer::PumpCommandsOnGpuThread() {
bool InProcessCommandBuffer::Initialize(
scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
const gles2::ContextCreationAttribHelper& attribs,
InProcessCommandBuffer* share_group,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
@@ -286,8 +275,8 @@ bool InProcessCommandBuffer::Initialize(
base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED);
bool result = false;
- QueueTask(
- base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
+ QueueTask(true, base::Bind(&RunTaskWithResult<bool>, init_task, &result,
+ &completion));
completion.Wait();
gpu_memory_buffer_manager_ = gpu_memory_buffer_manager;
@@ -320,14 +309,17 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
bool bind_generates_resource = false;
scoped_refptr<gles2::FeatureInfo> feature_info =
new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds());
- decoder_.reset(gles2::GLES2Decoder::Create(
+
+ context_group_ =
params.context_group
? params.context_group->decoder_->GetContextGroup()
: new gles2::ContextGroup(
service_->gpu_preferences(), service_->mailbox_manager(), NULL,
service_->shader_translator_cache(),
service_->framebuffer_completeness_cache(), feature_info,
- bind_generates_resource, nullptr, nullptr)));
+ bind_generates_resource, nullptr, nullptr);
+
+ decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get()));
executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(),
decoder_.get()));
@@ -338,10 +330,18 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
decoder_->set_engine(executor_.get());
if (!surface_.get()) {
- if (params.is_offscreen)
+ if (params.is_offscreen) {
surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
- else
- surface_ = gl::init::CreateViewGLSurface(params.window);
+ } else {
+ surface_ = ImageTransportSurface::CreateNativeSurface(
+ gpu_thread_weak_ptr_factory_.GetWeakPtr(), params.window,
+ gl::GLSurfaceFormat());
+ if (!surface_ || !surface_->Initialize(gl::GLSurfaceFormat())) {
+ surface_ = nullptr;
+ DLOG(ERROR) << "Failed to create surface.";
+ return false;
+ }
+ }
}
if (!surface_.get()) {
@@ -444,8 +444,8 @@ void InProcessCommandBuffer::Destroy() {
bool result = false;
base::Callback<bool(void)> destroy_task = base::Bind(
&InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
- QueueTask(
- base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
+ QueueTask(true, base::Bind(&RunTaskWithResult<bool>, destroy_task, &result,
+ &completion));
completion.Wait();
}
@@ -468,6 +468,10 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
}
gl_share_group_ = nullptr;
+ base::AutoLock lock(task_queue_lock_);
+ std::queue<std::unique_ptr<GpuTask>> empty;
+ task_queue_.swap(empty);
+
return true;
}
@@ -496,52 +500,74 @@ void InProcessCommandBuffer::OnContextLost() {
gpu_control_client_->OnGpuControlLostContext();
}
-CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
- CheckSequencedThread();
- base::AutoLock lock(state_after_last_flush_lock_);
- if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
- last_state_ = state_after_last_flush_;
- return last_state_;
+void InProcessCommandBuffer::QueueTask(bool out_of_order,
+ const base::Closure& task) {
+ if (out_of_order) {
+ service_->ScheduleTask(task);
+ return;
+ }
+  SyncPointManager* sync_manager = service_->sync_point_manager();
+  uint32_t order_num =
+      sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
+  // Release the |task_queue_lock_| before calling ScheduleTask because
+  // the callback may get called immediately and attempt to acquire the lock.
+  {
+    base::AutoLock lock(task_queue_lock_);
+    task_queue_.push(base::MakeUnique<GpuTask>(task, order_num));
+  }
+ service_->ScheduleTask(base::Bind(
+ &InProcessCommandBuffer::ProcessTasksOnGpuThread, gpu_thread_weak_ptr_));
+}
+
+void InProcessCommandBuffer::ProcessTasksOnGpuThread() {
+ while (executor_->scheduled()) {
+ base::AutoLock lock(task_queue_lock_);
+ if (task_queue_.empty())
+ break;
+ GpuTask* task = task_queue_.front().get();
+ sync_point_order_data_->BeginProcessingOrderNumber(task->order_number);
+ task->callback.Run();
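+    // If the task descheduled the executor (e.g. an unsatisfied fence wait),
+    // leave it at the head of the queue and pause its order number; it is
+    // rerun once the executor is rescheduled.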
+ if (!executor_->scheduled() && !service_->BlockThreadOnWaitSyncToken()) {
+ sync_point_order_data_->PauseProcessingOrderNumber(task->order_number);
+ return;
+ }
+ sync_point_order_data_->FinishProcessingOrderNumber(task->order_number);
+ task_queue_.pop();
+ }
}
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
CheckSequencedThread();
+ base::AutoLock lock(last_state_lock_);
return last_state_;
}
-int32_t InProcessCommandBuffer::GetLastToken() {
+void InProcessCommandBuffer::UpdateLastStateOnGpuThread() {
CheckSequencedThread();
- GetStateFast();
- return last_state_.token;
+ command_buffer_lock_.AssertAcquired();
+ base::AutoLock lock(last_state_lock_);
+ State state = command_buffer_->GetLastState();
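+  // Handle generation wraparound: accept |state| only if it is not older
+  // than |last_state_| (their generations differ by less than 2^31 mod 2^32).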
+ if (state.generation - last_state_.generation < 0x80000000U)
+ last_state_ = state;
}
-void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset,
- uint32_t order_num) {
+void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset) {
CheckSequencedThread();
ScopedEvent handle_flush(&flush_event_);
base::AutoLock lock(command_buffer_lock_);
- {
- ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(),
- order_num);
- command_buffer_->Flush(put_offset);
- {
- // Update state before signaling the flush event.
- base::AutoLock lock(state_after_last_flush_lock_);
- state_after_last_flush_ = command_buffer_->GetLastState();
- }
-
- // Currently the in process command buffer does not support being
- // descheduled, if it does we would need to back off on calling the finish
- // processing number function until the message is rescheduled and finished
- // processing. This DCHECK is to enforce this.
- DCHECK(error::IsError(state_after_last_flush_.error) ||
- put_offset == state_after_last_flush_.get_offset);
- }
+ command_buffer_->Flush(put_offset);
+ // Update state before signaling the flush event.
+ UpdateLastStateOnGpuThread();
// If we've processed all pending commands but still have pending queries,
// pump idle work until the query is passed.
- if (put_offset == state_after_last_flush_.get_offset &&
+ if (put_offset == command_buffer_->GetLastState().get_offset &&
(executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) {
ScheduleDelayedWorkOnGpuThread();
}
@@ -572,19 +593,16 @@ void InProcessCommandBuffer::ScheduleDelayedWorkOnGpuThread() {
void InProcessCommandBuffer::Flush(int32_t put_offset) {
CheckSequencedThread();
- if (last_state_.error != gpu::error::kNoError)
+ if (GetLastState().error != gpu::error::kNoError)
return;
if (last_put_offset_ == put_offset)
return;
- SyncPointManager* sync_manager = service_->sync_point_manager();
- const uint32_t order_num =
- sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
last_put_offset_ = put_offset;
base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
- gpu_thread_weak_ptr_, put_offset, order_num);
- QueueTask(task);
+ gpu_thread_weak_ptr_, put_offset);
+ QueueTask(false, task);
flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
}
@@ -593,28 +611,34 @@ void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) {
Flush(put_offset);
}
-void InProcessCommandBuffer::WaitForTokenInRange(int32_t start, int32_t end) {
+CommandBuffer::State InProcessCommandBuffer::WaitForTokenInRange(int32_t start,
+ int32_t end) {
CheckSequencedThread();
- while (!InRange(start, end, GetLastToken()) &&
- last_state_.error == gpu::error::kNoError)
+ State last_state = GetLastState();
+ while (!InRange(start, end, last_state.token) &&
+ last_state.error == gpu::error::kNoError) {
flush_event_.Wait();
+ last_state = GetLastState();
+ }
+ return last_state;
}
-void InProcessCommandBuffer::WaitForGetOffsetInRange(int32_t start,
- int32_t end) {
+CommandBuffer::State InProcessCommandBuffer::WaitForGetOffsetInRange(
+ int32_t start,
+ int32_t end) {
CheckSequencedThread();
-
- GetStateFast();
- while (!InRange(start, end, last_state_.get_offset) &&
- last_state_.error == gpu::error::kNoError) {
+ State last_state = GetLastState();
+ while (!InRange(start, end, last_state.get_offset) &&
+ last_state.error == gpu::error::kNoError) {
flush_event_.Wait();
- GetStateFast();
+ last_state = GetLastState();
}
+ return last_state;
}
void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) {
CheckSequencedThread();
- if (last_state_.error != gpu::error::kNoError)
+ if (GetLastState().error != gpu::error::kNoError)
return;
base::WaitableEvent completion(
@@ -623,13 +647,10 @@ void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) {
base::Closure task =
base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread,
base::Unretained(this), shm_id, &completion);
- QueueTask(task);
+ QueueTask(false, task);
completion.Wait();
- {
- base::AutoLock lock(state_after_last_flush_lock_);
- state_after_last_flush_ = command_buffer_->GetLastState();
- }
+ last_put_offset_ = 0;
}
void InProcessCommandBuffer::SetGetBufferOnGpuThread(
@@ -637,7 +658,7 @@ void InProcessCommandBuffer::SetGetBufferOnGpuThread(
base::WaitableEvent* completion) {
base::AutoLock lock(command_buffer_lock_);
command_buffer_->SetGetBuffer(shm_id);
- last_put_offset_ = 0;
+ UpdateLastStateOnGpuThread();
completion->Signal();
}
@@ -655,7 +676,7 @@ void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) {
base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
base::Unretained(this), id);
- QueueTask(task);
+ QueueTask(false, task);
}
void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) {
@@ -692,13 +713,9 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
// This handle is owned by the GPU thread and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
// queuing of the CreateImage task below.
- bool requires_sync_point = false;
- gfx::GpuMemoryBufferHandle handle = ShareGpuMemoryBufferToGpuThread(
- gpu_memory_buffer->GetHandle(), &requires_sync_point);
-
- SyncPointManager* sync_manager = service_->sync_point_manager();
- const uint32_t order_num =
- sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
+ gfx::GpuMemoryBufferHandle handle =
+ gfx::CloneHandleForIPC(gpu_memory_buffer->GetHandle());
+ bool requires_sync_point = handle.type == gfx::IO_SURFACE_BUFFER;
uint64_t fence_sync = 0;
if (requires_sync_point) {
@@ -708,12 +725,13 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_);
}
- QueueTask(base::Bind(
- &InProcessCommandBuffer::CreateImageOnGpuThread, base::Unretained(this),
- new_id, handle, gfx::Size(base::checked_cast<int>(width),
- base::checked_cast<int>(height)),
- gpu_memory_buffer->GetFormat(),
- base::checked_cast<uint32_t>(internalformat), order_num, fence_sync));
+ QueueTask(false, base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
+ base::Unretained(this), new_id, handle,
+ gfx::Size(base::checked_cast<int>(width),
+ base::checked_cast<int>(height)),
+ gpu_memory_buffer->GetFormat(),
+ base::checked_cast<uint32_t>(internalformat),
+ fence_sync));
if (fence_sync) {
flushed_fence_sync_release_ = fence_sync;
@@ -733,10 +751,7 @@ void InProcessCommandBuffer::CreateImageOnGpuThread(
const gfx::Size& size,
gfx::BufferFormat format,
uint32_t internalformat,
- uint32_t order_num,
uint64_t fence_sync) {
- ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(),
- order_num);
if (!decoder_)
return;
@@ -795,8 +810,8 @@ void InProcessCommandBuffer::CreateImageOnGpuThread(
void InProcessCommandBuffer::DestroyImage(int32_t id) {
CheckSequencedThread();
- QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
- base::Unretained(this), id));
+ QueueTask(false, base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
+ base::Unretained(this), id));
}
void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) {
@@ -822,7 +837,7 @@ int32_t InProcessCommandBuffer::CreateGpuMemoryBufferImage(
DCHECK(gpu_memory_buffer_manager_);
std::unique_ptr<gfx::GpuMemoryBuffer> buffer(
- gpu_memory_buffer_manager_->AllocateGpuMemoryBuffer(
+ gpu_memory_buffer_manager_->CreateGpuMemoryBuffer(
gfx::Size(base::checked_cast<int>(width),
base::checked_cast<int>(height)),
gpu::DefaultBufferFormatForImageFormat(internalformat),
@@ -850,6 +865,7 @@ bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread(
gpu::CommandBufferNamespace namespace_id,
gpu::CommandBufferId command_buffer_id,
uint64_t release) {
+ DCHECK(!waiting_for_sync_point_);
gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager();
DCHECK(sync_point_manager);
@@ -860,28 +876,80 @@ bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread(
if (!release_state)
return true;
- if (!release_state->IsFenceSyncReleased(release)) {
- // Use waitable event which is signalled when the release fence is released.
- sync_point_client_->Wait(
- release_state.get(), release,
- base::Bind(&base::WaitableEvent::Signal,
- base::Unretained(&fence_sync_wait_event_)));
- fence_sync_wait_event_.Wait();
+ if (service_->BlockThreadOnWaitSyncToken()) {
+ if (!release_state->IsFenceSyncReleased(release)) {
+ // Use waitable event which is signalled when the release fence is
+ // released.
+ sync_point_client_->Wait(
+ release_state.get(), release,
+ base::Bind(&base::WaitableEvent::Signal,
+ base::Unretained(&fence_sync_wait_event_)));
+ fence_sync_wait_event_.Wait();
+ }
+
+ gles2::MailboxManager* mailbox_manager =
+ decoder_->GetContextGroup()->mailbox_manager();
+ SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
+ mailbox_manager->PullTextureUpdates(sync_token);
+ return true;
}
+ if (release_state->IsFenceSyncReleased(release)) {
+ gles2::MailboxManager* mailbox_manager =
+ decoder_->GetContextGroup()->mailbox_manager();
+ SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
+ mailbox_manager->PullTextureUpdates(sync_token);
+ return true;
+ }
+
+ waiting_for_sync_point_ = true;
+ sync_point_client_->Wait(
+ release_state.get(), release,
+ base::Bind(&InProcessCommandBuffer::OnWaitFenceSyncCompleted,
+ gpu_thread_weak_ptr_factory_.GetWeakPtr(), namespace_id,
+ command_buffer_id, release));
+
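+  // The wait callback may have run synchronously above if the fence was
+  // released in the meantime; in that case there is nothing to wait for.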
+ if (!waiting_for_sync_point_)
+ return true;
+
+ executor_->SetScheduled(false);
+ return false;
+}
+
+void InProcessCommandBuffer::OnWaitFenceSyncCompleted(
+ CommandBufferNamespace namespace_id,
+ CommandBufferId command_buffer_id,
+ uint64_t release) {
+ DCHECK(waiting_for_sync_point_);
gles2::MailboxManager* mailbox_manager =
decoder_->GetContextGroup()->mailbox_manager();
SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
mailbox_manager->PullTextureUpdates(sync_token);
- return true;
+ waiting_for_sync_point_ = false;
+ executor_->SetScheduled(true);
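+  // Resume the interrupted flush from the last submitted put offset.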
+ QueueTask(false, base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
+ gpu_thread_weak_ptr_, last_put_offset_));
}
void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() {
- NOTIMPLEMENTED();
+ if (!service_->BlockThreadOnWaitSyncToken()) {
+ DCHECK(executor_->scheduled());
+ DCHECK(executor_->HasPollingWork());
+
+ executor_->SetScheduled(false);
+ }
}
void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() {
- NOTIMPLEMENTED();
+ if (!service_->BlockThreadOnWaitSyncToken()) {
+ DCHECK(!executor_->scheduled());
+
+ executor_->SetScheduled(true);
+ ProcessTasksOnGpuThread();
+ }
}
void InProcessCommandBuffer::SignalSyncTokenOnGpuThread(
@@ -906,9 +971,9 @@ void InProcessCommandBuffer::SignalSyncTokenOnGpuThread(
void InProcessCommandBuffer::SignalQuery(unsigned query_id,
const base::Closure& callback) {
CheckSequencedThread();
- QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
- base::Unretained(this), query_id,
- WrapCallback(callback)));
+ QueueTask(false, base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
+ base::Unretained(this), query_id,
+ WrapCallback(callback)));
}
void InProcessCommandBuffer::SignalQueryOnGpuThread(
@@ -961,12 +1026,17 @@ bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) {
return IsFenceSyncFlushed(release);
}
+bool InProcessCommandBuffer::IsFenceSyncReleased(uint64_t release) {
+ return release <= GetLastState().release_count;
+}
+
void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token,
const base::Closure& callback) {
CheckSequencedThread();
- QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread,
- base::Unretained(this), sync_token,
- WrapCallback(callback)));
+ QueueTask(
+ true,
+ base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread,
+ base::Unretained(this), sync_token, WrapCallback(callback)));
}
bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken(
@@ -974,9 +1044,92 @@ bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken(
return sync_token->namespace_id() == GetNamespaceID();
}
-gpu::error::Error InProcessCommandBuffer::GetLastError() {
- CheckSequencedThread();
- return last_state_.error;
+#if defined(OS_WIN)
+void InProcessCommandBuffer::DidCreateAcceleratedSurfaceChildWindow(
+ SurfaceHandle parent_window,
+ SurfaceHandle child_window) {
+ // TODO(fsamuel): Implement this.
+}
+#endif
+
+void InProcessCommandBuffer::DidSwapBuffersComplete(
+ SwapBuffersCompleteParams params) {
+ if (!origin_task_runner_) {
+ DidSwapBuffersCompleteOnOriginThread(std::move(params));
+ return;
+ }
+ origin_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&InProcessCommandBuffer::DidSwapBuffersCompleteOnOriginThread,
+ client_thread_weak_ptr_, base::Passed(&params)));
+}
+
+const gles2::FeatureInfo* InProcessCommandBuffer::GetFeatureInfo() const {
+ return context_group_->feature_info();
+}
+
+void InProcessCommandBuffer::SetLatencyInfoCallback(
+ const LatencyInfoCallback& callback) {
+ // TODO(fsamuel): Implement this.
+}
+
+void InProcessCommandBuffer::UpdateVSyncParameters(base::TimeTicks timebase,
+ base::TimeDelta interval) {
+ if (!origin_task_runner_) {
+ UpdateVSyncParametersOnOriginThread(timebase, interval);
+ return;
+ }
+ origin_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&InProcessCommandBuffer::UpdateVSyncParametersOnOriginThread,
+ client_thread_weak_ptr_, timebase, interval));
+}
+
+void InProcessCommandBuffer::DidSwapBuffersCompleteOnOriginThread(
+ SwapBuffersCompleteParams params) {
+#if defined(OS_MACOSX)
+ gpu::GpuProcessHostedCALayerTreeParamsMac params_mac;
+ params_mac.ca_context_id = params.ca_context_id;
+ params_mac.fullscreen_low_power_ca_context_valid =
+ params.fullscreen_low_power_ca_context_valid;
+ params_mac.fullscreen_low_power_ca_context_id =
+ params.fullscreen_low_power_ca_context_id;
+ params_mac.io_surface.reset(IOSurfaceLookupFromMachPort(params.io_surface));
+ params_mac.pixel_size = params.pixel_size;
+ params_mac.scale_factor = params.scale_factor;
+ params_mac.responses = std::move(params.in_use_responses);
+ gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = &params_mac;
+#else
+ gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = nullptr;
+#endif
+ if (!swap_buffers_completion_callback_.is_null()) {
+ if (!ui::LatencyInfo::Verify(
+ params.latency_info,
+ "InProcessCommandBuffer::DidSwapBuffersComplete")) {
+ swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(),
+ params.result, mac_frame_ptr);
+ } else {
+ swap_buffers_completion_callback_.Run(params.latency_info, params.result,
+ mac_frame_ptr);
+ }
+ }
+}
+
+void InProcessCommandBuffer::UpdateVSyncParametersOnOriginThread(
+ base::TimeTicks timebase,
+ base::TimeDelta interval) {
+ if (!update_vsync_parameters_completion_callback_.is_null())
+ update_vsync_parameters_completion_callback_.Run(timebase, interval);
+}
+
+void InProcessCommandBuffer::SetSwapBuffersCompletionCallback(
+ const SwapBuffersCompletionCallback& callback) {
+ swap_buffers_completion_callback_ = callback;
+}
+
+void InProcessCommandBuffer::SetUpdateVSyncParametersCallback(
+ const UpdateVSyncParametersCallback& callback) {
+ update_vsync_parameters_completion_callback_ = callback;
}
namespace {
@@ -1015,55 +1168,10 @@ base::Closure InProcessCommandBuffer::WrapCallback(
return wrapped_callback;
}
-GpuInProcessThread::GpuInProcessThread(SyncPointManager* sync_point_manager)
- : base::Thread("GpuThread"), sync_point_manager_(sync_point_manager) {
- Start();
-}
-
-GpuInProcessThread::~GpuInProcessThread() {
- Stop();
-}
-
-void GpuInProcessThread::AddRef() const {
- base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
-}
-void GpuInProcessThread::Release() const {
- base::RefCountedThreadSafe<GpuInProcessThread>::Release();
-}
-
-void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
- task_runner()->PostTask(FROM_HERE, task);
-}
+InProcessCommandBuffer::GpuTask::GpuTask(const base::Closure& callback,
+ uint32_t order_number)
+ : callback(callback), order_number(order_number) {}
-void GpuInProcessThread::ScheduleDelayedWork(const base::Closure& callback) {
- // Match delay with GpuCommandBufferStub.
- task_runner()->PostDelayedTask(FROM_HERE, callback,
- base::TimeDelta::FromMilliseconds(2));
-}
-
-bool GpuInProcessThread::UseVirtualizedGLContexts() {
- return false;
-}
-
-scoped_refptr<gles2::ShaderTranslatorCache>
-GpuInProcessThread::shader_translator_cache() {
- if (!shader_translator_cache_.get()) {
- shader_translator_cache_ =
- new gpu::gles2::ShaderTranslatorCache(gpu_preferences());
- }
- return shader_translator_cache_;
-}
-
-scoped_refptr<gles2::FramebufferCompletenessCache>
-GpuInProcessThread::framebuffer_completeness_cache() {
- if (!framebuffer_completeness_cache_.get())
- framebuffer_completeness_cache_ =
- new gpu::gles2::FramebufferCompletenessCache;
- return framebuffer_completeness_cache_;
-}
-
-SyncPointManager* GpuInProcessThread::sync_point_manager() {
- return sync_point_manager_;
-}
+InProcessCommandBuffer::GpuTask::~GpuTask() {}
} // namespace gpu
diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h
index ac4ef023878..6a55ec011b7 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.h
+++ b/chromium/gpu/ipc/in_process_command_buffer.h
@@ -15,7 +15,6 @@
#include "base/atomic_sequence_num.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
-#include "base/containers/scoped_ptr_hash_map.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -25,9 +24,12 @@
#include "base/threading/thread.h"
#include "gpu/command_buffer/client/gpu_control.h"
#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/service/command_executor.h"
+#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gpu_preferences.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/gpu_export.h"
+#include "gpu/ipc/service/image_transport_surface_delegate.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gl/gl_surface.h"
@@ -48,9 +50,11 @@ class Size;
}
namespace gpu {
+
class SyncPointClient;
class SyncPointOrderData;
class SyncPointManager;
+struct GpuProcessHostedCALayerTreeParamsMac;
namespace gles2 {
struct ContextCreationAttribHelper;
@@ -72,7 +76,8 @@ class TransferBufferManagerInterface;
// However, the behavior for accessing one context (i.e. one instance of this
// class) from different client threads is undefined.
class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
- public GpuControl {
+ public GpuControl,
+ public ImageTransportSurfaceDelegate {
public:
class Service;
explicit InProcessCommandBuffer(const scoped_refptr<Service>& service);
@@ -83,7 +88,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
// a new GLSurface.
bool Initialize(scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
const gles2::ContextCreationAttribHelper& attribs,
InProcessCommandBuffer* share_group,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
@@ -92,16 +97,14 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
// CommandBuffer implementation:
State GetLastState() override;
- int32_t GetLastToken() override;
void Flush(int32_t put_offset) override;
void OrderingBarrier(int32_t put_offset) override;
- void WaitForTokenInRange(int32_t start, int32_t end) override;
- void WaitForGetOffsetInRange(int32_t start, int32_t end) override;
+ State WaitForTokenInRange(int32_t start, int32_t end) override;
+ State WaitForGetOffsetInRange(int32_t start, int32_t end) override;
void SetGetBuffer(int32_t shm_id) override;
scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
int32_t* id) override;
void DestroyTransferBuffer(int32_t id) override;
- gpu::error::Error GetLastError() override;
// GpuControl implementation:
// NOTE: The GpuControlClient will be called on the client thread.
@@ -126,15 +129,47 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
bool IsFenceSyncRelease(uint64_t release) override;
bool IsFenceSyncFlushed(uint64_t release) override;
bool IsFenceSyncFlushReceived(uint64_t release) override;
+ bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const SyncToken& sync_token,
const base::Closure& callback) override;
bool CanWaitUnverifiedSyncToken(const SyncToken* sync_token) override;
+// ImageTransportSurfaceDelegate implementation:
+#if defined(OS_WIN)
+ void DidCreateAcceleratedSurfaceChildWindow(
+ SurfaceHandle parent_window,
+ SurfaceHandle child_window) override;
+#endif
+ void DidSwapBuffersComplete(SwapBuffersCompleteParams params) override;
+ const gles2::FeatureInfo* GetFeatureInfo() const override;
+ void SetLatencyInfoCallback(const LatencyInfoCallback& callback) override;
+ void UpdateVSyncParameters(base::TimeTicks timebase,
+ base::TimeDelta interval) override;
+
+ using SwapBuffersCompletionCallback = base::Callback<void(
+ const std::vector<ui::LatencyInfo>& latency_info,
+ gfx::SwapResult result,
+ const gpu::GpuProcessHostedCALayerTreeParamsMac* params_mac)>;
+ void SetSwapBuffersCompletionCallback(
+ const SwapBuffersCompletionCallback& callback);
+
+ using UpdateVSyncParametersCallback =
+ base::Callback<void(base::TimeTicks timebase, base::TimeDelta interval)>;
+ void SetUpdateVSyncParametersCallback(
+ const UpdateVSyncParametersCallback& callback);
+
+ void DidSwapBuffersCompleteOnOriginThread(SwapBuffersCompleteParams params);
+ void UpdateVSyncParametersOnOriginThread(base::TimeTicks timebase,
+ base::TimeDelta interval);
+
// The serializer interface to the GPU service (i.e. thread).
class Service {
public:
Service();
Service(const gpu::GpuPreferences& gpu_preferences);
+ Service(gpu::gles2::MailboxManager* mailbox_manager,
+ scoped_refptr<gl::GLShareGroup> share_group);
+
virtual ~Service();
virtual void AddRef() const = 0;
@@ -158,19 +193,20 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
scoped_refptr<gl::GLShareGroup> share_group();
scoped_refptr<gles2::MailboxManager> mailbox_manager();
gpu::gles2::ProgramCache* program_cache();
+ virtual bool BlockThreadOnWaitSyncToken() const = 0;
- private:
+ protected:
const GpuPreferences gpu_preferences_;
const GpuDriverBugWorkarounds gpu_driver_bug_workarounds_;
- scoped_refptr<gl::GLShareGroup> share_group_;
scoped_refptr<gles2::MailboxManager> mailbox_manager_;
+ scoped_refptr<gl::GLShareGroup> share_group_;
std::unique_ptr<gpu::gles2::ProgramCache> program_cache_;
};
private:
struct InitializeOnGpuThreadParams {
bool is_offscreen;
- gfx::AcceleratedWidget window;
+ SurfaceHandle window;
const gles2::ContextCreationAttribHelper& attribs;
    gpu::Capabilities* capabilities;  // Output.
InProcessCommandBuffer* context_group;
@@ -178,7 +214,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
InitializeOnGpuThreadParams(
bool is_offscreen,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
const gles2::ContextCreationAttribHelper& attribs,
gpu::Capabilities* capabilities,
InProcessCommandBuffer* share_group,
@@ -194,17 +230,21 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
bool InitializeOnGpuThread(const InitializeOnGpuThreadParams& params);
void Destroy();
bool DestroyOnGpuThread();
- void FlushOnGpuThread(int32_t put_offset, uint32_t order_num);
+ void FlushOnGpuThread(int32_t put_offset);
+ void UpdateLastStateOnGpuThread();
void ScheduleDelayedWorkOnGpuThread();
bool MakeCurrent();
base::Closure WrapCallback(const base::Closure& callback);
- State GetStateFast();
- void QueueTask(const base::Closure& task) { service_->ScheduleTask(task); }
+ void QueueTask(bool out_of_order, const base::Closure& task);
+ void ProcessTasksOnGpuThread();
void CheckSequencedThread();
void FenceSyncReleaseOnGpuThread(uint64_t release);
bool WaitFenceSyncOnGpuThread(gpu::CommandBufferNamespace namespace_id,
gpu::CommandBufferId command_buffer_id,
uint64_t release);
+ void OnWaitFenceSyncCompleted(CommandBufferNamespace namespace_id,
+ CommandBufferId command_buffer_id,
+ uint64_t release);
void DescheduleUntilFinishedOnGpuThread();
void RescheduleAfterFinishedOnGpuThread();
void SignalSyncTokenOnGpuThread(const SyncToken& sync_token,
@@ -216,7 +256,6 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
const gfx::Size& size,
gfx::BufferFormat format,
uint32_t internalformat,
-      uint32_t order_num,
uint64_t fence_sync);
void DestroyImageOnGpuThread(int32_t id);
void SetGetBufferOnGpuThread(int32_t shm_id, base::WaitableEvent* completion);
@@ -232,6 +272,8 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
// Members accessed on the gpu thread (possibly with the exception of
// creation):
+ bool waiting_for_sync_point_ = false;
+
scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_;
scoped_refptr<TransferBufferManagerInterface> transfer_buffer_manager_;
std::unique_ptr<CommandExecutor> executor_;
@@ -251,6 +293,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
bool context_lost_;
#endif
State last_state_;
+ base::Lock last_state_lock_;
int32_t last_put_offset_;
gpu::Capabilities capabilities_;
GpuMemoryBufferManager* gpu_memory_buffer_manager_;
@@ -263,8 +306,10 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
base::Lock command_buffer_lock_;
base::WaitableEvent flush_event_;
scoped_refptr<Service> service_;
- State state_after_last_flush_;
- base::Lock state_after_last_flush_lock_;
+
+ // The group of contexts that share namespaces with this context.
+ scoped_refptr<gles2::ContextGroup> context_group_;
+
scoped_refptr<gl::GLShareGroup> gl_share_group_;
base::WaitableEvent fence_sync_wait_event_;
@@ -272,6 +317,18 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
// the client thread.
std::unique_ptr<base::SequenceChecker> sequence_checker_;
+ base::Lock task_queue_lock_;
+ struct GpuTask {
+ GpuTask(const base::Closure& callback, uint32_t order_number);
+ ~GpuTask();
+ base::Closure callback;
+ uint32_t order_number;
+ };
+ std::queue<std::unique_ptr<GpuTask>> task_queue_;
+
+ SwapBuffersCompletionCallback swap_buffers_completion_callback_;
+ UpdateVSyncParametersCallback update_vsync_parameters_completion_callback_;
+
base::WeakPtr<InProcessCommandBuffer> client_thread_weak_ptr_;
base::WeakPtr<InProcessCommandBuffer> gpu_thread_weak_ptr_;
base::WeakPtrFactory<InProcessCommandBuffer> client_thread_weak_ptr_factory_;
@@ -280,36 +337,6 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
DISALLOW_COPY_AND_ASSIGN(InProcessCommandBuffer);
};
-// Default Service class when a null service is used.
-class GPU_EXPORT GpuInProcessThread
- : public base::Thread,
- public NON_EXPORTED_BASE(InProcessCommandBuffer::Service),
- public base::RefCountedThreadSafe<GpuInProcessThread> {
- public:
- explicit GpuInProcessThread(SyncPointManager* sync_point_manager);
-
- void AddRef() const override;
- void Release() const override;
- void ScheduleTask(const base::Closure& task) override;
- void ScheduleDelayedWork(const base::Closure& callback) override;
- bool UseVirtualizedGLContexts() override;
- scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
- override;
- scoped_refptr<gles2::FramebufferCompletenessCache>
- framebuffer_completeness_cache() override;
- SyncPointManager* sync_point_manager() override;
-
- private:
- ~GpuInProcessThread() override;
- friend class base::RefCountedThreadSafe<GpuInProcessThread>;
-
- SyncPointManager* sync_point_manager_; // Non-owning.
- scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
- scoped_refptr<gpu::gles2::FramebufferCompletenessCache>
- framebuffer_completeness_cache_;
- DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
-};
-
} // namespace gpu
#endif // GPU_IPC_IN_PROCESS_COMMAND_BUFFER_H_
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index 59ca51e0496..e8c5edcc427 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -44,6 +44,9 @@ target(link_target_type, "ipc_service_sources") {
"gpu_memory_manager.h",
"gpu_memory_tracking.cc",
"gpu_memory_tracking.h",
+ "gpu_vsync_provider.h",
+ "gpu_vsync_provider_posix.cc",
+ "gpu_vsync_provider_win.cc",
"gpu_watchdog_thread.cc",
"gpu_watchdog_thread.h",
"image_transport_surface.h",
@@ -78,6 +81,8 @@ target(link_target_type, "ipc_service_sources") {
sources += [
"child_window_surface_win.cc",
"child_window_surface_win.h",
+ "child_window_win.cc",
+ "child_window_win.h",
"image_transport_surface_win.cc",
]
}
@@ -129,9 +134,6 @@ source_set("test_support") {
"//testing/gtest:gtest",
]
deps = [
- # TODO(markdittmer): Shouldn't depend on client code for server tests.
- # See crbug.com/608800.
- "//gpu/ipc/client",
"//gpu/ipc/common",
]
}
@@ -143,6 +145,7 @@ test("gpu_ipc_service_unittests") {
"gpu_channel_test_common.cc",
"gpu_channel_test_common.h",
"gpu_channel_unittest.cc",
+ "gpu_vsync_provider_unittest_win.cc",
]
deps = [
":service",
diff --git a/chromium/gpu/ipc/service/child_window_surface_win.cc b/chromium/gpu/ipc/service/child_window_surface_win.cc
index e7f482064f4..92a9392b1d5 100644
--- a/chromium/gpu/ipc/service/child_window_surface_win.cc
+++ b/chromium/gpu/ipc/service/child_window_surface_win.cc
@@ -8,15 +8,10 @@
#include "base/compiler_specific.h"
#include "base/memory/ptr_util.h"
-#include "base/threading/thread.h"
-#include "base/win/scoped_hdc.h"
-#include "base/win/wrapped_window_proc.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
#include "ui/gfx/native_widget_types.h"
-#include "ui/gfx/win/hwnd_util.h"
-#include "ui/gfx/win/window_impl.h"
#include "ui/gl/egl_util.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface_egl.h"
@@ -24,144 +19,11 @@
namespace gpu {
-// This owns the thread and contains data that's shared between the threads.
-struct SharedData {
- SharedData() : thread("Window owner thread") {}
-
- base::Lock rect_lock;
- gfx::Rect rect_to_clear;
-
- base::Thread thread;
-};
-
-namespace {
-
-ATOM g_window_class;
-
-// This runs on the window owner thread.
-LRESULT CALLBACK IntermediateWindowProc(HWND window,
- UINT message,
- WPARAM w_param,
- LPARAM l_param) {
- switch (message) {
- case WM_ERASEBKGND:
- // Prevent windows from erasing the background.
- return 1;
- case WM_PAINT:
- PAINTSTRUCT paint;
- if (BeginPaint(window, &paint)) {
- SharedData* shared_data =
- reinterpret_cast<SharedData*>(gfx::GetWindowUserData(window));
- DCHECK(shared_data);
- {
- base::AutoLock lock(shared_data->rect_lock);
- shared_data->rect_to_clear.Union(gfx::Rect(paint.rcPaint));
- }
-
- EndPaint(window, &paint);
- }
- return 0;
- default:
- return DefWindowProc(window, message, w_param, l_param);
- }
-}
-
-// This runs on the window owner thread.
-void InitializeWindowClass() {
- if (g_window_class)
- return;
-
- WNDCLASSEX intermediate_class;
- base::win::InitializeWindowClass(
- L"Intermediate D3D Window",
- &base::win::WrappedWindowProc<IntermediateWindowProc>, CS_OWNDC, 0, 0,
- nullptr, reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)), nullptr,
- nullptr, nullptr, &intermediate_class);
- g_window_class = RegisterClassEx(&intermediate_class);
- if (!g_window_class) {
- LOG(ERROR) << "RegisterClass failed.";
- return;
- }
-}
-
-// Hidden popup window used as a parent for the child surface window.
-// Must be created and destroyed on the thread.
-class HiddenPopupWindow : public gfx::WindowImpl {
- public:
- static HWND Create() {
- gfx::WindowImpl* window = new HiddenPopupWindow;
-
- window->set_window_style(WS_POPUP);
- window->set_window_ex_style(WS_EX_TOOLWINDOW);
- window->Init(GetDesktopWindow(), gfx::Rect());
- EnableWindow(window->hwnd(), FALSE);
- // The |window| instance is now owned by the window user data.
- DCHECK_EQ(window, gfx::GetWindowUserData(window->hwnd()));
- return window->hwnd();
- }
-
- static void Destroy(HWND window) {
- // This uses the fact that the window user data contains a pointer
- // to gfx::WindowImpl instance.
- gfx::WindowImpl* window_data =
- reinterpret_cast<gfx::WindowImpl*>(gfx::GetWindowUserData(window));
- DCHECK_EQ(window, window_data->hwnd());
- DestroyWindow(window);
- delete window_data;
- }
-
- private:
- // Explicitly do nothing in Close. We do this as some external apps may get a
- // handle to this window and attempt to close it.
- void OnClose() {}
-
- CR_BEGIN_MSG_MAP_EX(HiddenPopupWindow)
- CR_MSG_WM_CLOSE(OnClose)
- CR_END_MSG_MAP()
-};
-
-// This runs on the window owner thread.
-void CreateWindowsOnThread(const gfx::Size& size,
- base::WaitableEvent* event,
- SharedData* shared_data,
- HWND* child_window,
- HWND* parent_window) {
- InitializeWindowClass();
- DCHECK(g_window_class);
-
- // Create hidden parent window on the current thread.
- *parent_window = HiddenPopupWindow::Create();
- // Create child window.
- HWND window = CreateWindowEx(
- WS_EX_NOPARENTNOTIFY, reinterpret_cast<wchar_t*>(g_window_class), L"",
- WS_CHILDWINDOW | WS_DISABLED | WS_VISIBLE, 0, 0, size.width(),
- size.height(), *parent_window, NULL, NULL, NULL);
- CHECK(window);
- *child_window = window;
- gfx::SetWindowUserData(window, shared_data);
- event->Signal();
-}
-
-// This runs on the main thread after the window was destroyed on window owner
-// thread.
-void DestroySharedData(std::unique_ptr<SharedData> shared_data) {
- shared_data->thread.Stop();
-}
-
-// This runs on the window owner thread.
-void DestroyWindowsOnThread(HWND child_window, HWND hidden_popup_window) {
- DestroyWindow(child_window);
- HiddenPopupWindow::Destroy(hidden_popup_window);
-}
-
-} // namespace
-
ChildWindowSurfaceWin::ChildWindowSurfaceWin(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
HWND parent_window)
: gl::NativeViewGLSurfaceEGL(0),
- parent_window_(parent_window),
- delegate_(delegate),
+ child_window_(delegate, parent_window),
alpha_(true),
first_swap_(true) {
// Don't use EGL_ANGLE_window_fixed_size so that we can avoid recreating the
@@ -203,25 +65,9 @@ bool ChildWindowSurfaceWin::InitializeNativeWindow() {
if (window_)
return true;
- shared_data_ = base::MakeUnique<SharedData>();
-
- base::Thread::Options options(base::MessageLoop::TYPE_UI, 0);
- shared_data_->thread.StartWithOptions(options);
-
- base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
- base::WaitableEvent::InitialState::NOT_SIGNALED);
-
- RECT window_rect;
- GetClientRect(parent_window_, &window_rect);
-
- shared_data_->thread.task_runner()->PostTask(
- FROM_HERE,
- base::Bind(&CreateWindowsOnThread, gfx::Rect(window_rect).size(), &event,
- shared_data_.get(), &window_, &initial_parent_window_));
- event.Wait();
-
- delegate_->DidCreateAcceleratedSurfaceChildWindow(parent_window_, window_);
- return true;
+ bool result = child_window_.Initialize();
+ window_ = child_window_.window();
+ return result;
}
bool ChildWindowSurfaceWin::Resize(const gfx::Size& size,
@@ -281,7 +127,7 @@ gfx::SwapResult ChildWindowSurfaceWin::SwapBuffers() {
glFinish();
first_swap_ = false;
}
- ClearInvalidContents();
+ child_window_.ClearInvalidContents();
return result;
}
@@ -291,33 +137,11 @@ gfx::SwapResult ChildWindowSurfaceWin::PostSubBuffer(int x,
int height) {
gfx::SwapResult result =
NativeViewGLSurfaceEGL::PostSubBuffer(x, y, width, height);
- ClearInvalidContents();
+ child_window_.ClearInvalidContents();
return result;
}
-void ChildWindowSurfaceWin::ClearInvalidContents() {
- base::AutoLock lock(shared_data_->rect_lock);
- if (!shared_data_->rect_to_clear.IsEmpty()) {
- base::win::ScopedGetDC dc(window_);
-
- RECT rect = shared_data_->rect_to_clear.ToRECT();
-
- // DirectComposition composites with the contents under the SwapChain,
- // so ensure that's cleared. GDI treats black as transparent.
- FillRect(dc, &rect, reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)));
- shared_data_->rect_to_clear = gfx::Rect();
- }
-}
-
ChildWindowSurfaceWin::~ChildWindowSurfaceWin() {
- if (shared_data_) {
- scoped_refptr<base::TaskRunner> task_runner =
- shared_data_->thread.task_runner();
- task_runner->PostTaskAndReply(
- FROM_HERE,
- base::Bind(&DestroyWindowsOnThread, window_, initial_parent_window_),
- base::Bind(&DestroySharedData, base::Passed(std::move(shared_data_))));
- }
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/child_window_surface_win.h b/chromium/gpu/ipc/service/child_window_surface_win.h
index add4d490e8f..9f5017b35e5 100644
--- a/chromium/gpu/ipc/service/child_window_surface_win.h
+++ b/chromium/gpu/ipc/service/child_window_surface_win.h
@@ -6,6 +6,7 @@
#define GPU_IPC_SERVICE_CHILD_WINDOW_SURFACE_WIN_H_
#include "base/memory/weak_ptr.h"
+#include "gpu/ipc/service/child_window_win.h"
#include "gpu/ipc/service/image_transport_surface_delegate.h"
#include "ui/gl/gl_surface_egl.h"
@@ -13,9 +14,6 @@
namespace gpu {
-class GpuChannelManager;
-struct SharedData;
-
class ChildWindowSurfaceWin : public gl::NativeViewGLSurfaceEGL {
public:
ChildWindowSurfaceWin(base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
@@ -34,17 +32,7 @@ class ChildWindowSurfaceWin : public gl::NativeViewGLSurfaceEGL {
~ChildWindowSurfaceWin() override;
private:
- void ClearInvalidContents();
-
- // This member contains all the data that can be accessed from the main or
- // window owner threads.
- std::unique_ptr<SharedData> shared_data_;
- // The eventual parent of the window living in the browser process.
- HWND parent_window_;
- // The window is initially created with this parent window. We need to keep it
- // around so that we can destroy it at the end.
- HWND initial_parent_window_;
- base::WeakPtr<ImageTransportSurfaceDelegate> delegate_;
+ ChildWindowWin child_window_;
bool alpha_;
bool first_swap_;
diff --git a/chromium/gpu/ipc/service/child_window_win.cc b/chromium/gpu/ipc/service/child_window_win.cc
new file mode 100644
index 00000000000..1246c577318
--- /dev/null
+++ b/chromium/gpu/ipc/service/child_window_win.cc
@@ -0,0 +1,212 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/child_window_win.h"
+
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/thread.h"
+#include "base/win/scoped_hdc.h"
+#include "base/win/wrapped_window_proc.h"
+#include "gpu/ipc/common/gpu_messages.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
+#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
+#include "ui/gfx/native_widget_types.h"
+#include "ui/gfx/win/hwnd_util.h"
+#include "ui/gfx/win/window_impl.h"
+
+namespace gpu {
+
+// This owns the thread and contains data that's shared between the threads.
+struct SharedData {
+ SharedData() : thread("Window owner thread") {}
+
+ base::Lock rect_lock;
+ gfx::Rect rect_to_clear;
+
+ base::Thread thread;
+};
+
+namespace {
+
+ATOM g_window_class;
+
+// This runs on the window owner thread.
+LRESULT CALLBACK IntermediateWindowProc(HWND window,
+ UINT message,
+ WPARAM w_param,
+ LPARAM l_param) {
+ switch (message) {
+ case WM_ERASEBKGND:
+ // Prevent windows from erasing the background.
+ return 1;
+ case WM_PAINT:
+ PAINTSTRUCT paint;
+ if (BeginPaint(window, &paint)) {
+ SharedData* shared_data =
+ reinterpret_cast<SharedData*>(gfx::GetWindowUserData(window));
+ DCHECK(shared_data);
+ {
+ base::AutoLock lock(shared_data->rect_lock);
+ shared_data->rect_to_clear.Union(gfx::Rect(paint.rcPaint));
+ }
+
+ EndPaint(window, &paint);
+ }
+ return 0;
+ default:
+ return DefWindowProc(window, message, w_param, l_param);
+ }
+}
+
+// This runs on the window owner thread.
+void InitializeWindowClass() {
+ if (g_window_class)
+ return;
+
+ WNDCLASSEX intermediate_class;
+ base::win::InitializeWindowClass(
+ L"Intermediate D3D Window",
+ &base::win::WrappedWindowProc<IntermediateWindowProc>, CS_OWNDC, 0, 0,
+ nullptr, reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)), nullptr,
+ nullptr, nullptr, &intermediate_class);
+ g_window_class = RegisterClassEx(&intermediate_class);
+ if (!g_window_class) {
+ LOG(ERROR) << "RegisterClass failed.";
+ return;
+ }
+}
+
+// Hidden popup window used as a parent for the child surface window.
+// Must be created and destroyed on the thread.
+class HiddenPopupWindow : public gfx::WindowImpl {
+ public:
+ static HWND Create() {
+ gfx::WindowImpl* window = new HiddenPopupWindow;
+
+ window->set_window_style(WS_POPUP);
+ window->set_window_ex_style(WS_EX_TOOLWINDOW);
+ window->Init(GetDesktopWindow(), gfx::Rect());
+ EnableWindow(window->hwnd(), FALSE);
+ // The |window| instance is now owned by the window user data.
+ DCHECK_EQ(window, gfx::GetWindowUserData(window->hwnd()));
+ return window->hwnd();
+ }
+
+ static void Destroy(HWND window) {
+ // This uses the fact that the window user data contains a pointer
+ // to gfx::WindowImpl instance.
+ gfx::WindowImpl* window_data =
+ reinterpret_cast<gfx::WindowImpl*>(gfx::GetWindowUserData(window));
+ DCHECK_EQ(window, window_data->hwnd());
+ DestroyWindow(window);
+ delete window_data;
+ }
+
+ private:
+ // Explicitly do nothing in Close. We do this as some external apps may get a
+ // handle to this window and attempt to close it.
+ void OnClose() {}
+
+ CR_BEGIN_MSG_MAP_EX(HiddenPopupWindow)
+ CR_MSG_WM_CLOSE(OnClose)
+ CR_END_MSG_MAP()
+};
+
+// This runs on the window owner thread.
+void CreateWindowsOnThread(const gfx::Size& size,
+ base::WaitableEvent* event,
+ SharedData* shared_data,
+ HWND* child_window,
+ HWND* parent_window) {
+ InitializeWindowClass();
+ DCHECK(g_window_class);
+
+ // Create hidden parent window on the current thread.
+ *parent_window = HiddenPopupWindow::Create();
+ // Create child window.
+ HWND window = CreateWindowEx(
+ WS_EX_NOPARENTNOTIFY, reinterpret_cast<wchar_t*>(g_window_class), L"",
+ WS_CHILDWINDOW | WS_DISABLED | WS_VISIBLE, 0, 0, size.width(),
+ size.height(), *parent_window, NULL, NULL, NULL);
+ CHECK(window);
+ *child_window = window;
+ gfx::SetWindowUserData(window, shared_data);
+ event->Signal();
+}
+
+// This runs on the main thread after the window was destroyed on window owner
+// thread.
+void DestroySharedData(std::unique_ptr<SharedData> shared_data) {
+ shared_data->thread.Stop();
+}
+
+// This runs on the window owner thread.
+void DestroyWindowsOnThread(HWND child_window, HWND hidden_popup_window) {
+ DestroyWindow(child_window);
+ HiddenPopupWindow::Destroy(hidden_popup_window);
+}
+
+} // namespace
+
+ChildWindowWin::ChildWindowWin(
+ base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
+ HWND parent_window)
+ : parent_window_(parent_window), window_(nullptr), delegate_(delegate) {}
+
+bool ChildWindowWin::Initialize() {
+ if (window_)
+ return true;
+
+ shared_data_ = base::MakeUnique<SharedData>();
+
+ base::Thread::Options options(base::MessageLoop::TYPE_UI, 0);
+ shared_data_->thread.StartWithOptions(options);
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ RECT window_rect;
+ GetClientRect(parent_window_, &window_rect);
+
+ shared_data_->thread.task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&CreateWindowsOnThread, gfx::Rect(window_rect).size(), &event,
+ shared_data_.get(), &window_, &initial_parent_window_));
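+  // Block until the window owner thread has created both windows so that
+  // |window_| is valid when this returns.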
+ event.Wait();
+
+ delegate_->DidCreateAcceleratedSurfaceChildWindow(parent_window_, window_);
+ return true;
+}
+
+void ChildWindowWin::ClearInvalidContents() {
+ base::AutoLock lock(shared_data_->rect_lock);
+ if (!shared_data_->rect_to_clear.IsEmpty()) {
+ base::win::ScopedGetDC dc(window_);
+
+ RECT rect = shared_data_->rect_to_clear.ToRECT();
+
+ // DirectComposition composites with the contents under the SwapChain,
+ // so ensure that's cleared. GDI treats black as transparent.
+ FillRect(dc, &rect, reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)));
+ shared_data_->rect_to_clear = gfx::Rect();
+ }
+}
+
+ChildWindowWin::~ChildWindowWin() {
+ if (shared_data_) {
+ scoped_refptr<base::TaskRunner> task_runner =
+ shared_data_->thread.task_runner();
+ task_runner->PostTaskAndReply(
+ FROM_HERE,
+ base::Bind(&DestroyWindowsOnThread, window_, initial_parent_window_),
+ base::Bind(&DestroySharedData, base::Passed(std::move(shared_data_))));
+ }
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/service/child_window_win.h b/chromium/gpu/ipc/service/child_window_win.h
new file mode 100644
index 00000000000..2bccf9ff926
--- /dev/null
+++ b/chromium/gpu/ipc/service/child_window_win.h
@@ -0,0 +1,50 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_SERVICE_CHILD_WINDOW_WIN_H_
+#define GPU_IPC_SERVICE_CHILD_WINDOW_WIN_H_
+
+#include "base/memory/weak_ptr.h"
+#include "gpu/ipc/service/image_transport_surface_delegate.h"
+
+#include <windows.h>
+
+namespace gpu {
+
+struct SharedData;
+
+// The window DirectComposition renders into needs to be owned by the process
+// that's currently doing the rendering. The class creates and owns a window
+// which is reparented by the browser to be a child of its window.
+class ChildWindowWin {
+ public:
+ ChildWindowWin(base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
+ HWND parent_window);
+ ~ChildWindowWin();
+
+ bool Initialize();
+ void ClearInvalidContents();
+ HWND window() const { return window_; }
+
+ private:
+ // This member contains all the data that can be accessed from the main or
+ // window owner threads.
+ std::unique_ptr<SharedData> shared_data_;
+ // The eventual parent of the window living in the browser process.
+ HWND parent_window_;
+ HWND window_;
+ // The window is initially created with this parent window. We need to keep it
+ // around so that we can destroy it at the end.
+ HWND initial_parent_window_;
+ base::WeakPtr<ImageTransportSurfaceDelegate> delegate_;
+
+ DISALLOW_COPY_AND_ASSIGN(ChildWindowWin);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_SERVICE_CHILD_WINDOW_WIN_H_
diff --git a/chromium/gpu/ipc/service/gpu_channel.cc b/chromium/gpu/ipc/service/gpu_channel.cc
index 8ed788a3817..4a99a6db4b6 100644
--- a/chromium/gpu/ipc/service/gpu_channel.cc
+++ b/chromium/gpu/ipc/service/gpu_channel.cc
@@ -701,7 +701,11 @@ void GpuChannel::OnStreamRescheduled(int32_t stream_id, bool scheduled) {
}
GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
- return stubs_.get(route_id);
+ auto it = stubs_.find(route_id);
+ if (it == stubs_.end())
+ return nullptr;
+
+ return it->second.get();
}
void GpuChannel::LoseAllContexts() {
@@ -772,7 +776,7 @@ void GpuChannel::HandleMessage(
const IPC::Message& msg = channel_msg->message;
int32_t routing_id = msg.routing_id();
- GpuCommandBufferStub* stub = stubs_.get(routing_id);
+ GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id);
DCHECK(!stub || stub->IsScheduled());
@@ -873,7 +877,7 @@ void GpuChannel::RemoveRouteFromStream(int32_t route_id) {
#if defined(OS_ANDROID)
const GpuCommandBufferStub* GpuChannel::GetOneStub() const {
for (const auto& kv : stubs_) {
- const GpuCommandBufferStub* stub = kv.second;
+ const GpuCommandBufferStub* stub = kv.second.get();
if (stub->decoder() && !stub->decoder()->WasContextLost())
return stub;
}
@@ -896,7 +900,7 @@ void GpuChannel::OnCreateCommandBuffer(
if (stub) {
*result = true;
*capabilities = stub->decoder()->GetCapabilities();
- stubs_.set(route_id, std::move(stub));
+ stubs_[route_id] = std::move(stub);
} else {
*result = false;
*capabilities = gpu::Capabilities();
@@ -915,7 +919,7 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
}
int32_t share_group_id = init_params.share_group_id;
- GpuCommandBufferStub* share_group = stubs_.get(share_group_id);
+ GpuCommandBufferStub* share_group = LookupCommandBuffer(share_group_id);
if (!share_group && share_group_id != MSG_ROUTING_NONE) {
DLOG(ERROR)
@@ -977,7 +981,12 @@ void GpuChannel::OnDestroyCommandBuffer(int32_t route_id) {
TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
"route_id", route_id);
- std::unique_ptr<GpuCommandBufferStub> stub = stubs_.take_and_erase(route_id);
+ std::unique_ptr<GpuCommandBufferStub> stub;
+ auto it = stubs_.find(route_id);
+ if (it != stubs_.end()) {
+ stub = std::move(it->second);
+ stubs_.erase(it);
+ }
// In case the renderer is currently blocked waiting for a sync reply from the
// stub, we need to make sure to reschedule the correct stream here.
if (stub && !stub->IsScheduled()) {
diff --git a/chromium/gpu/ipc/service/gpu_channel.h b/chromium/gpu/ipc/service/gpu_channel.h
index 47c1ba63604..cbe08696305 100644
--- a/chromium/gpu/ipc/service/gpu_channel.h
+++ b/chromium/gpu/ipc/service/gpu_channel.h
@@ -10,9 +10,9 @@
#include <memory>
#include <string>
+#include <unordered_map>
#include "base/containers/hash_tables.h"
-#include "base/containers/scoped_ptr_hash_map.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -177,7 +177,7 @@ class GPU_EXPORT GpuChannel
scoped_refptr<GpuChannelMessageFilter> filter_;
// Map of routing id to command buffer stub.
- base::ScopedPtrHashMap<int32_t, std::unique_ptr<GpuCommandBufferStub>> stubs_;
+ std::unordered_map<int32_t, std::unique_ptr<GpuCommandBufferStub>> stubs_;
private:
friend class TestGpuChannel;
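The gpu_channel.cc changes above are the general recipe for migrating off base::ScopedPtrHashMap. A standalone sketch of the two idioms, get() and take_and_erase(), expressed on a plain std::unordered_map (example code, not part of the patch):

#include <cstdint>
#include <memory>
#include <unordered_map>

struct Stub { int route_id = 0; };

using StubMap = std::unordered_map<int32_t, std::unique_ptr<Stub>>;

// Equivalent of ScopedPtrHashMap::get(): borrow without taking ownership.
Stub* Lookup(StubMap& stubs, int32_t route_id) {
  auto it = stubs.find(route_id);
  return it != stubs.end() ? it->second.get() : nullptr;
}

// Equivalent of ScopedPtrHashMap::take_and_erase(): move ownership out of
// the map, then drop the now-empty entry.
std::unique_ptr<Stub> TakeAndErase(StubMap& stubs, int32_t route_id) {
  std::unique_ptr<Stub> stub;
  auto it = stubs.find(route_id);
  if (it != stubs.end()) {
    stub = std::move(it->second);
    stubs.erase(it);
  }
  return stub;
}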
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc
index 34f2be4a1cc..d043e1304aa 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc
@@ -11,6 +11,7 @@
#include "base/command_line.h"
#include "base/location.h"
#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
@@ -87,11 +88,14 @@ gles2::ProgramCache* GpuChannelManager::program_cache() {
(gl::g_driver_gl.ext.b_GL_ARB_get_program_binary ||
gl::g_driver_gl.ext.b_GL_OES_get_program_binary) &&
!gpu_preferences_.disable_gpu_program_cache) {
+ const GpuDriverBugWorkarounds& workarounds = gpu_driver_bug_workarounds_;
bool disable_disk_cache =
gpu_preferences_.disable_gpu_shader_disk_cache ||
- gpu_driver_bug_workarounds_.disable_program_disk_cache;
+ workarounds.disable_program_disk_cache;
program_cache_.reset(new gles2::MemoryProgramCache(
- gpu_preferences_.gpu_program_cache_size, disable_disk_cache));
+ gpu_preferences_.gpu_program_cache_size,
+ disable_disk_cache,
+ workarounds.disable_program_caching_for_transform_feedback));
}
return program_cache_.get();
}
@@ -120,7 +124,7 @@ void GpuChannelManager::RemoveChannel(int client_id) {
GpuChannel* GpuChannelManager::LookupChannel(int32_t client_id) const {
const auto& it = gpu_channels_.find(client_id);
- return it != gpu_channels_.end() ? it->second : nullptr;
+ return it != gpu_channels_.end() ? it->second.get() : nullptr;
}
std::unique_ptr<GpuChannel> GpuChannelManager::CreateGpuChannel(
@@ -147,7 +151,7 @@ IPC::ChannelHandle GpuChannelManager::EstablishChannel(
CreateGpuChannel(client_id, client_tracing_id, preempts,
allow_view_command_buffers, allow_real_time_streams));
IPC::ChannelHandle channel_handle = channel->Init(shutdown_event_);
- gpu_channels_.set(client_id, std::move(channel));
+ gpu_channels_[client_id] = std::move(channel);
return channel_handle;
}
@@ -275,7 +279,7 @@ void GpuChannelManager::ScheduleWakeUpGpu() {
void GpuChannelManager::DoWakeUpGpu() {
const GpuCommandBufferStub* stub = nullptr;
for (const auto& kv : gpu_channels_) {
- const GpuChannel* channel = kv.second;
+ const GpuChannel* channel = kv.second.get();
stub = channel->GetOneStub();
if (stub) {
DCHECK(stub->decoder());
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h
index a71ca9b41a5..70a10e4f70b 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.h
@@ -10,9 +10,9 @@
#include <deque>
#include <memory>
#include <string>
+#include <unordered_map>
#include <vector>
-#include "base/containers/scoped_ptr_hash_map.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -133,6 +133,12 @@ class GPU_EXPORT GpuChannelManager {
return exiting_for_lost_context_;
}
+ gles2::MailboxManager* mailbox_manager() const {
+ return mailbox_manager_.get();
+ }
+
+ gl::GLShareGroup* share_group() const { return share_group_.get(); }
+
protected:
virtual std::unique_ptr<GpuChannel> CreateGpuChannel(
int client_id,
@@ -145,21 +151,15 @@ class GPU_EXPORT GpuChannelManager {
return sync_point_manager_;
}
- gl::GLShareGroup* share_group() const { return share_group_.get(); }
- gles2::MailboxManager* mailbox_manager() const {
- return mailbox_manager_.get();
- }
- PreemptionFlag* preemption_flag() const {
- return preemption_flag_.get();
- }
+ PreemptionFlag* preemption_flag() const { return preemption_flag_.get(); }
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
- // These objects manage channels to individual renderer processes there is
+ // These objects manage channels to individual renderer processes. There is
// one channel for each renderer process that has connected to this GPU
// process.
- base::ScopedPtrHashMap<int32_t, std::unique_ptr<GpuChannel>> gpu_channels_;
+ std::unordered_map<int32_t, std::unique_ptr<GpuChannel>> gpu_channels_;
private:
void InternalDestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id, int client_id);
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
index 419cc8c1ac1..9201a5391a1 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
@@ -10,14 +10,8 @@
class GURL;
-namespace IPC {
-struct ChannelHandle;
-}
-
namespace gpu {
-struct GPUMemoryUmaStats;
-
class GpuChannelManagerDelegate {
public:
// Tells the delegate that an offscreen context was created for the provided
diff --git a/chromium/gpu/ipc/service/gpu_channel_unittest.cc b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
index b48b7ae14e3..8387448b8e1 100644
--- a/chromium/gpu/ipc/service/gpu_channel_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
@@ -35,7 +35,7 @@ class GpuChannelTest : public GpuChannelTestCommon {
void TearDown() override {
GpuChannelTestCommon::TearDown();
- gl::init::ClearGLBindings();
+ gl::init::ShutdownGL();
}
GpuChannel* CreateChannel(int32_t client_id,
diff --git a/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc b/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
index b6396195afa..3f41a305550 100644
--- a/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
@@ -11,8 +11,10 @@
#include "base/hash.h"
#include "base/json/json_writer.h"
#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram_macros.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
@@ -54,6 +56,27 @@
#include "gpu/ipc/service/stream_texture_android.h"
#endif
+// Macro to reduce code duplication when logging memory in
+// GpuCommandBufferMemoryTracker. This is needed as the UMA_HISTOGRAM_* macros
+// require a unique call-site per histogram (you can't funnel multiple strings
+// into the same call-site).
+#define GPU_COMMAND_BUFFER_MEMORY_BLOCK(category) \
+ do { \
+ uint64_t mb_used = tracking_group_->GetSize() / (1024 * 1024); \
+ switch (context_type_) { \
+ case gles2::CONTEXT_TYPE_WEBGL1: \
+ case gles2::CONTEXT_TYPE_WEBGL2: \
+ UMA_HISTOGRAM_MEMORY_LARGE_MB("GPU.ContextMemory.WebGL." category, \
+ mb_used); \
+ break; \
+ case gles2::CONTEXT_TYPE_OPENGLES2: \
+ case gles2::CONTEXT_TYPE_OPENGLES3: \
+ UMA_HISTOGRAM_MEMORY_LARGE_MB("GPU.ContextMemory.GLES." category, \
+ mb_used); \
+ break; \
+ } \
+ } while (false)
+
namespace gpu {
struct WaitForCommandState {
WaitForCommandState(int32_t start, int32_t end, IPC::Message* reply)
@@ -70,15 +93,29 @@ namespace {
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gles2::MemoryTracker {
public:
- explicit GpuCommandBufferMemoryTracker(GpuChannel* channel,
- uint64_t share_group_tracing_guid)
+ explicit GpuCommandBufferMemoryTracker(
+ GpuChannel* channel,
+ uint64_t share_group_tracing_guid,
+ gles2::ContextType context_type,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner)
: tracking_group_(
channel->gpu_channel_manager()
->gpu_memory_manager()
->CreateTrackingGroup(channel->GetClientPID(), this)),
client_tracing_id_(channel->client_tracing_id()),
client_id_(channel->client_id()),
- share_group_tracing_guid_(share_group_tracing_guid) {}
+ share_group_tracing_guid_(share_group_tracing_guid),
+ context_type_(context_type),
+ memory_pressure_listener_(new base::MemoryPressureListener(
+ base::Bind(&GpuCommandBufferMemoryTracker::LogMemoryStatsPressure,
+ base::Unretained(this)))) {
+ // Set up |memory_stats_timer_| to call LogMemoryStatsPeriodic periodically
+ // via the provided |task_runner|.
+ memory_stats_timer_.SetTaskRunner(std::move(task_runner));
+ memory_stats_timer_.Start(
+ FROM_HERE, base::TimeDelta::FromSeconds(30), this,
+ &GpuCommandBufferMemoryTracker::LogMemoryStatsPeriodic);
+ }
void TrackMemoryAllocatedChange(
size_t old_size, size_t new_size) override {
@@ -88,7 +125,7 @@ class GpuCommandBufferMemoryTracker : public gles2::MemoryTracker {
bool EnsureGPUMemoryAvailable(size_t size_needed) override {
return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
- };
+ }
uint64_t ClientTracingId() const override { return client_tracing_id_; }
int ClientId() const override { return client_id_; }
@@ -97,12 +134,29 @@ class GpuCommandBufferMemoryTracker : public gles2::MemoryTracker {
}
private:
- ~GpuCommandBufferMemoryTracker() override {}
+ ~GpuCommandBufferMemoryTracker() override { LogMemoryStatsShutdown(); }
+
+ void LogMemoryStatsPeriodic() { GPU_COMMAND_BUFFER_MEMORY_BLOCK("Periodic"); }
+ void LogMemoryStatsShutdown() { GPU_COMMAND_BUFFER_MEMORY_BLOCK("Shutdown"); }
+ void LogMemoryStatsPressure(
+ base::MemoryPressureListener::MemoryPressureLevel pressure_level) {
+ // Only log on CRITICAL memory pressure.
+ if (pressure_level ==
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
+ GPU_COMMAND_BUFFER_MEMORY_BLOCK("Pressure");
+ }
+ }
+
std::unique_ptr<GpuMemoryTrackingGroup> tracking_group_;
const uint64_t client_tracing_id_;
const int client_id_;
const uint64_t share_group_tracing_guid_;
+ // Variables used in memory stat histogram logging.
+ const gles2::ContextType context_type_;
+ base::RepeatingTimer memory_stats_timer_;
+ std::unique_ptr<base::MemoryPressureListener> memory_pressure_listener_;
+
DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};
@@ -508,8 +562,9 @@ bool GpuCommandBufferStub::Initialize(
channel_->gpu_channel_manager()->gpu_memory_buffer_factory();
context_group_ = new gles2::ContextGroup(
manager->gpu_preferences(), channel_->mailbox_manager(),
- new GpuCommandBufferMemoryTracker(channel_,
- command_buffer_id_.GetUnsafeValue()),
+ new GpuCommandBufferMemoryTracker(
+ channel_, command_buffer_id_.GetUnsafeValue(),
+ init_params.attribs.context_type, channel_->task_runner()),
manager->shader_translator_cache(),
manager->framebuffer_completeness_cache(), feature_info,
init_params.attribs.bind_generates_resource,
@@ -532,7 +587,7 @@ bool GpuCommandBufferStub::Initialize(
// only a single context. See crbug.com/510243 for details.
use_virtualized_gl_context_ |= channel_->mailbox_manager()->UsesSync();
- gl::GLSurface::Format surface_format = gl::GLSurface::SURFACE_DEFAULT;
+ gl::GLSurfaceFormat surface_format = gl::GLSurfaceFormat();
bool offscreen = (surface_handle_ == kNullSurfaceHandle);
gl::GLSurface* default_surface = manager->GetDefaultOffscreenSurface();
if (!default_surface) {
@@ -544,10 +599,12 @@ bool GpuCommandBufferStub::Initialize(
init_params.attribs.green_size <= 6 &&
init_params.attribs.blue_size <= 5 &&
init_params.attribs.alpha_size == 0)
- surface_format = gl::GLSurface::SURFACE_RGB565;
+ surface_format.SetRGB565();
+ // TODO(klausw): explicitly copy rgba sizes?
+
// We can only use virtualized contexts for onscreen command buffers if their
// config is compatible with the offscreen ones - otherwise MakeCurrent fails.
- if (surface_format != default_surface->GetFormat() && !offscreen)
+ if (!surface_format.IsCompatible(default_surface->GetFormat()) && !offscreen)
use_virtualized_gl_context_ = false;
#endif
@@ -947,6 +1004,7 @@ void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) {
mailbox_manager->PushTextureUpdates(sync_token);
}
+ command_buffer_->SetReleaseCount(release);
sync_point_client_->ReleaseFenceSync(release);
}
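For context on the GPU_COMMAND_BUFFER_MEMORY_BLOCK macro above: the UMA_HISTOGRAM_* macros cache the histogram object in a call-site-local static, so a single textual call site can only ever serve a single histogram name. A simplified sketch of that expansion (illustrative; the real macros go through STATIC_HISTOGRAM_POINTER_BLOCK and an atomic pointer):

#include "base/metrics/histogram.h"

// Roughly what a UMA_HISTOGRAM_* macro expands to, simplified. |name| is
// captured once in a function-local static, which is why one call site
// cannot be funneled several different histogram names.
#define SKETCH_HISTOGRAM_MEMORY_MB(name, sample)                          \
  do {                                                                    \
    static base::HistogramBase* histogram = base::Histogram::FactoryGet(  \
        name, 1, 64000, 100,                                              \
        base::HistogramBase::kUmaTargetedHistogramFlag);                  \
    histogram->Add(sample);                                               \
  } while (0)

Hence the switch on |context_type_| in the patch: each literal category string gets its own macro expansion, and therefore its own static.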
diff --git a/chromium/gpu/ipc/service/gpu_command_buffer_stub.h b/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
index a0999f8c059..91b8dfe1c36 100644
--- a/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
@@ -41,20 +41,14 @@ namespace gpu {
struct Mailbox;
struct SyncToken;
class SyncPointClient;
-class SyncPointManager;
-namespace gles2 {
-class MailboxManager;
-}
}
struct GPUCreateCommandBufferConfig;
struct GpuCommandBufferMsg_CreateImage_Params;
-struct GpuCommandBufferMsg_SwapBuffersCompleted_Params;
namespace gpu {
class GpuChannel;
-class GpuWatchdogThread;
struct WaitForCommandState;
class GPU_EXPORT GpuCommandBufferStub
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index 1e912037747..add75a14728 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -17,6 +17,7 @@
#include "gpu/config/gpu_util.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "gpu/ipc/service/switches.h"
+#include "ui/gfx/switches.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_switches.h"
#include "ui/gl/init/gl_factory.h"
@@ -127,6 +128,7 @@ bool GpuInit::InitializeAndStartSandbox(const base::CommandLine& command_line) {
// to run slowly in that case.
bool enable_watchdog =
!command_line.HasSwitch(switches::kDisableGpuWatchdog) &&
+ !command_line.HasSwitch(switches::kHeadless) &&
!RunningOnValgrind();
// Disable the watchdog in debug builds because they tend to only be run by
@@ -158,6 +160,9 @@ bool GpuInit::InitializeAndStartSandbox(const base::CommandLine& command_line) {
#endif
gpu_info_.in_process_gpu = false;
+ gpu_info_.passthrough_cmd_decoder =
+ command_line.HasSwitch(switches::kUsePassthroughCmdDecoder);
+
sandbox_helper_->PreSandboxStartup();
#if defined(OS_LINUX)
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc
index e59f155c6ee..8b266b8933b 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc
@@ -32,9 +32,9 @@ GpuMemoryBufferFactoryOzoneNativePixmap::CreateGpuMemoryBuffer(
->GetSurfaceFactoryOzone()
->CreateNativePixmap(surface_handle, size, format, usage);
if (!pixmap.get()) {
- DLOG(ERROR) << "Failed to create pixmap " << size.width() << "x"
- << size.height() << " format " << static_cast<int>(format)
- << ", usage " << static_cast<int>(usage);
+ DLOG(ERROR) << "Failed to create pixmap " << size.ToString() << " format "
+ << static_cast<int>(format) << ", usage "
+ << static_cast<int>(usage);
return gfx::GpuMemoryBufferHandle();
}
@@ -109,10 +109,40 @@ GpuMemoryBufferFactoryOzoneNativePixmap::CreateImageForGpuMemoryBuffer(
scoped_refptr<ui::GLImageOzoneNativePixmap> image(
new ui::GLImageOzoneNativePixmap(size, internalformat));
if (!image->Initialize(pixmap.get(), format)) {
- LOG(ERROR) << "Failed to create GLImage";
+ LOG(ERROR) << "Failed to create GLImage " << size.ToString() << " format "
+ << static_cast<int>(format);
return nullptr;
}
return image;
}
+scoped_refptr<gl::GLImage>
+GpuMemoryBufferFactoryOzoneNativePixmap::CreateAnonymousImage(
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ unsigned internalformat) {
+ scoped_refptr<ui::NativePixmap> pixmap =
+ ui::OzonePlatform::GetInstance()
+ ->GetSurfaceFactoryOzone()
+ ->CreateNativePixmap(gpu::kNullSurfaceHandle, size, format,
+ gfx::BufferUsage::SCANOUT);
+ if (!pixmap.get()) {
+ LOG(ERROR) << "Failed to create pixmap " << size.ToString() << " format "
+ << static_cast<int>(format);
+ return nullptr;
+ }
+ scoped_refptr<ui::GLImageOzoneNativePixmap> image(
+ new ui::GLImageOzoneNativePixmap(size, internalformat));
+ if (!image->Initialize(pixmap.get(), format)) {
+ LOG(ERROR) << "Failed to create GLImage " << size.ToString() << " format "
+ << static_cast<int>(format);
+ return nullptr;
+ }
+ return image;
+}
+
+unsigned GpuMemoryBufferFactoryOzoneNativePixmap::RequiredTextureType() {
+ return GL_TEXTURE_EXTERNAL_OES;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h
index 5a132bbdaeb..45be7524496 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h
@@ -49,6 +49,11 @@ class GPU_EXPORT GpuMemoryBufferFactoryOzoneNativePixmap
unsigned internalformat,
int client_id,
SurfaceHandle surface_handle) override;
+ scoped_refptr<gl::GLImage> CreateAnonymousImage(
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ unsigned internalformat) override;
+ unsigned RequiredTextureType() override;
private:
using NativePixmapMapKey = std::pair<int, int>;
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
index 4c958f2c440..0af2d6e80bd 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
@@ -8,14 +8,6 @@
#ifndef GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_TEST_TEMPLATE_H_
#define GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_TEST_TEMPLATE_H_
-#if defined(OS_ANDROID)
-// TODO(markdittmer): Service code shouldn't depend on client code.
-// See crbug.com/608800.
-#include "gpu/ipc/client/android/in_process_surface_texture_manager.h"
-
-#include "gpu/ipc/common/android/surface_texture_manager.h"
-#endif
-
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -32,11 +24,6 @@ class GpuMemoryBufferFactoryTest : public testing::Test {
TYPED_TEST_CASE_P(GpuMemoryBufferFactoryTest);
TYPED_TEST_P(GpuMemoryBufferFactoryTest, CreateGpuMemoryBuffer) {
-#if defined(OS_ANDROID)
- SurfaceTextureManager::SetInstance(
- InProcessSurfaceTextureManager::GetInstance());
-#endif
-
const gfx::GpuMemoryBufferId kBufferId(1);
const int kClientId = 1;
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider.h b/chromium/gpu/ipc/service/gpu_vsync_provider.h
new file mode 100644
index 00000000000..13d0ac210a1
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_SERVICE_GPU_VSYNC_PROVIDER_H_
+#define GPU_IPC_SERVICE_GPU_VSYNC_PROVIDER_H_
+
+#include <memory>
+
+#include "base/callback.h"
+#include "base/time/time.h"
+#include "gpu/gpu_export.h"
+#include "gpu/ipc/common/surface_handle.h"
+
+namespace gpu {
+
+class GpuVSyncWorker;
+
+// Implements waiting for the VSync signal on a background thread.
+class GPU_EXPORT GpuVSyncProvider {
+ public:
+ // Once VSync is enabled, this callback is invoked on every VSync.
+ // The call is made on the background thread to avoid the extra latency of
+ // serializing callback invocation with other GPU tasks; the code that
+ // implements the callback is expected to handle that.
+ using VSyncCallback = base::Callback<void(base::TimeTicks timestamp)>;
+
+ ~GpuVSyncProvider();
+
+ static std::unique_ptr<GpuVSyncProvider> Create(const VSyncCallback& callback,
+ SurfaceHandle surface_handle);
+
+ // Enable or disable VSync production.
+ void EnableVSync(bool enabled);
+
+ private:
+#if defined(OS_WIN)
+ GpuVSyncProvider(const VSyncCallback& callback, SurfaceHandle surface_handle);
+
+ std::unique_ptr<GpuVSyncWorker> vsync_worker_;
+#endif // defined(OS_WIN)
+
+ DISALLOW_COPY_AND_ASSIGN(GpuVSyncProvider);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_SERVICE_GPU_VSYNC_PROVIDER_H_
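Since the callback arrives on the VSync worker thread, a consumer that is not thread-safe would typically repost onto its own task runner. A minimal sketch of such a consumer (hypothetical class; only the VSyncCallback signature comes from this header):

#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"

// Hypothetical consumer that bounces each VSync tick to its own thread.
class VSyncObserver {
 public:
  explicit VSyncObserver(
      scoped_refptr<base::SingleThreadTaskRunner> task_runner)
      : task_runner_(std::move(task_runner)) {}

  // Matches GpuVSyncProvider::VSyncCallback; runs on the worker thread, so
  // it must stay cheap and thread-safe. Lifetime handling (Unretained) is
  // elided for brevity.
  void OnVSync(base::TimeTicks timestamp) {
    task_runner_->PostTask(
        FROM_HERE, base::Bind(&VSyncObserver::OnVSyncOnOwnThread,
                              base::Unretained(this), timestamp));
  }

 private:
  void OnVSyncOnOwnThread(base::TimeTicks timestamp) {
    // Single-threaded state can safely be touched here.
  }

  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
};

The provider would then be created with base::Bind(&VSyncObserver::OnVSync, base::Unretained(&observer)) as the callback.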
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider_posix.cc b/chromium/gpu/ipc/service/gpu_vsync_provider_posix.cc
new file mode 100644
index 00000000000..00039f65023
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider_posix.cc
@@ -0,0 +1,22 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/gpu_vsync_provider.h"
+
+namespace gpu {
+
+/* static */
+std::unique_ptr<GpuVSyncProvider> GpuVSyncProvider::Create(
+ const VSyncCallback& callback,
+ SurfaceHandle surface_handle) {
+ return std::unique_ptr<GpuVSyncProvider>();
+}
+
+GpuVSyncProvider::~GpuVSyncProvider() = default;
+
+void GpuVSyncProvider::EnableVSync(bool enabled) {
+ NOTREACHED();
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc b/chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc
new file mode 100644
index 00000000000..2b96b4a493c
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc
@@ -0,0 +1,84 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/gpu_vsync_provider.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/base/win/hidden_window.h"
+
+namespace gpu {
+
+class GpuVSyncProviderTest : public testing::Test {
+ public:
+ GpuVSyncProviderTest()
+ : vsync_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED) {}
+ ~GpuVSyncProviderTest() override {}
+
+ void SetUp() override {}
+
+ void TearDown() override {}
+
+ void OnVSync(base::TimeTicks timestamp) {
+ // This is called on VSync worker thread.
+ base::AutoLock lock(lock_);
+ if (++vsync_count_ == 3)
+ vsync_event_.Signal();
+ }
+
+ int vsync_count() {
+ base::AutoLock lock(lock_);
+ return vsync_count_;
+ }
+
+ void reset_vsync_count() {
+ base::AutoLock lock(lock_);
+ vsync_count_ = 0;
+ }
+
+ protected:
+ base::WaitableEvent vsync_event_;
+
+ private:
+ base::Lock lock_;
+ int vsync_count_ = 0;
+};
+
+TEST_F(GpuVSyncProviderTest, VSyncSignalTest) {
+ SurfaceHandle window = ui::GetHiddenWindow();
+
+ std::unique_ptr<GpuVSyncProvider> provider = GpuVSyncProvider::Create(
+ base::Bind(&GpuVSyncProviderTest::OnVSync, base::Unretained(this)),
+ window);
+
+ constexpr base::TimeDelta wait_timeout =
+ base::TimeDelta::FromMilliseconds(300);
+
+ // Verify that there are no VSync signals before the provider is enabled.
+ bool wait_result = vsync_event_.TimedWait(wait_timeout);
+ EXPECT_FALSE(wait_result);
+ EXPECT_EQ(0, vsync_count());
+
+ provider->EnableVSync(true);
+
+ vsync_event_.Wait();
+
+ provider->EnableVSync(false);
+
+ // Verify that VSync callbacks stop coming after disabling.
+ // Note that one callback might still be in flight on the VSync worker
+ // thread, so |vsync_count_| could still be incremented once, but not
+ // enough times to trigger |vsync_event_|.
+ reset_vsync_count();
+ wait_result = vsync_event_.TimedWait(wait_timeout);
+ EXPECT_FALSE(wait_result);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc b/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc
new file mode 100644
index 00000000000..a996e6b6b02
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc
@@ -0,0 +1,264 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/gpu_vsync_provider.h"
+
+#include <string>
+
+#include "base/atomicops.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread.h"
+#include "base/trace_event/trace_event.h"
+
+#include <windows.h>
+
+namespace gpu {
+
+namespace {
+// from <D3dkmthk.h>
+typedef LONG NTSTATUS;
+typedef UINT D3DKMT_HANDLE;
+typedef UINT D3DDDI_VIDEO_PRESENT_SOURCE_ID;
+
+#define STATUS_SUCCESS ((NTSTATUS)0x00000000L)
+
+typedef struct _D3DKMT_OPENADAPTERFROMHDC {
+ HDC hDc;
+ D3DKMT_HANDLE hAdapter;
+ LUID AdapterLuid;
+ D3DDDI_VIDEO_PRESENT_SOURCE_ID VidPnSourceId;
+} D3DKMT_OPENADAPTERFROMHDC;
+
+typedef struct _D3DKMT_CLOSEADAPTER {
+ D3DKMT_HANDLE hAdapter;
+} D3DKMT_CLOSEADAPTER;
+
+typedef struct _D3DKMT_WAITFORVERTICALBLANKEVENT {
+ D3DKMT_HANDLE hAdapter;
+ D3DKMT_HANDLE hDevice;
+ D3DDDI_VIDEO_PRESENT_SOURCE_ID VidPnSourceId;
+} D3DKMT_WAITFORVERTICALBLANKEVENT;
+
+typedef NTSTATUS(APIENTRY* PFND3DKMTOPENADAPTERFROMHDC)(
+ D3DKMT_OPENADAPTERFROMHDC*);
+typedef NTSTATUS(APIENTRY* PFND3DKMTCLOSEADAPTER)(D3DKMT_CLOSEADAPTER*);
+typedef NTSTATUS(APIENTRY* PFND3DKMTWAITFORVERTICALBLANKEVENT)(
+ D3DKMT_WAITFORVERTICALBLANKEVENT*);
+} // namespace
+
+// The actual implementation of background tasks plus any state that might be
+// needed on the worker thread.
+class GpuVSyncWorker : public base::Thread {
+ public:
+ GpuVSyncWorker(const GpuVSyncProvider::VSyncCallback& callback,
+ SurfaceHandle surface_handle);
+ ~GpuVSyncWorker() override;
+
+ void Enable(bool enabled);
+ void StartRunningVSyncOnThread();
+ void WaitForVSyncOnThread();
+ void SendVSyncUpdate(base::TimeTicks timestamp);
+
+ private:
+ void Reschedule();
+ void OpenAdapter(const wchar_t* device_name);
+ void CloseAdapter();
+ bool WaitForVBlankEvent();
+
+ // Specifies whether background tasks are running.
+ // This can be set on the background thread only.
+ bool running_ = false;
+
+ // Specifies whether the worker is enabled. This is accessed from both
+ // threads but can be changed on the main thread only.
+ base::subtle::AtomicWord enabled_ = false;
+
+ const GpuVSyncProvider::VSyncCallback callback_;
+ const SurfaceHandle surface_handle_;
+
+ PFND3DKMTOPENADAPTERFROMHDC open_adapter_from_hdc_ptr_;
+ PFND3DKMTCLOSEADAPTER close_adapter_ptr_;
+ PFND3DKMTWAITFORVERTICALBLANKEVENT wait_for_vertical_blank_event_ptr_;
+
+ std::wstring current_device_name_;
+ D3DKMT_HANDLE current_adapter_handle_ = 0;
+ D3DDDI_VIDEO_PRESENT_SOURCE_ID current_source_id_ = 0;
+};
+
+GpuVSyncWorker::GpuVSyncWorker(const GpuVSyncProvider::VSyncCallback& callback,
+ SurfaceHandle surface_handle)
+ : base::Thread(base::StringPrintf("VSync-%d", surface_handle)),
+ callback_(callback),
+ surface_handle_(surface_handle) {
+ HMODULE gdi32 = GetModuleHandle(L"gdi32");
+ if (!gdi32) {
+ NOTREACHED() << "Can't open gdi32.dll";
+ return;
+ }
+
+ open_adapter_from_hdc_ptr_ = reinterpret_cast<PFND3DKMTOPENADAPTERFROMHDC>(
+ ::GetProcAddress(gdi32, "D3DKMTOpenAdapterFromHdc"));
+ if (!open_adapter_from_hdc_ptr_) {
+ NOTREACHED() << "Can't find D3DKMTOpenAdapterFromHdc in gdi32.dll";
+ return;
+ }
+
+ close_adapter_ptr_ = reinterpret_cast<PFND3DKMTCLOSEADAPTER>(
+ ::GetProcAddress(gdi32, "D3DKMTCloseAdapter"));
+ if (!close_adapter_ptr_) {
+ NOTREACHED() << "Can't find D3DKMTCloseAdapter in gdi32.dll";
+ return;
+ }
+
+ wait_for_vertical_blank_event_ptr_ =
+ reinterpret_cast<PFND3DKMTWAITFORVERTICALBLANKEVENT>(
+ ::GetProcAddress(gdi32, "D3DKMTWaitForVerticalBlankEvent"));
+ if (!wait_for_vertical_blank_event_ptr_) {
+ NOTREACHED() << "Can't find D3DKMTWaitForVerticalBlankEvent in gdi32.dll";
+ return;
+ }
+}
+
+GpuVSyncWorker::~GpuVSyncWorker() {
+ // The Thread::Stop() call below will block until this task has finished
+ // running, so it is safe to post it here and pass an unretained pointer.
+ task_runner()->PostTask(FROM_HERE, base::Bind(&GpuVSyncWorker::CloseAdapter,
+ base::Unretained(this)));
+ Stop();
+
+ DCHECK_EQ(0u, current_adapter_handle_);
+ DCHECK(current_device_name_.empty());
+}
+
+void GpuVSyncWorker::Enable(bool enabled) {
+ auto was_enabled = base::subtle::NoBarrier_AtomicExchange(&enabled_, enabled);
+
+ if (enabled && !was_enabled)
+ task_runner()->PostTask(
+ FROM_HERE, base::Bind(&GpuVSyncWorker::StartRunningVSyncOnThread,
+ base::Unretained(this)));
+}
+
+void GpuVSyncWorker::StartRunningVSyncOnThread() {
+ DCHECK(base::PlatformThread::CurrentId() == GetThreadId());
+
+ if (!running_) {
+ running_ = true;
+ WaitForVSyncOnThread();
+ }
+}
+
+void GpuVSyncWorker::WaitForVSyncOnThread() {
+ DCHECK(base::PlatformThread::CurrentId() == GetThreadId());
+
+ TRACE_EVENT0("gpu", "GpuVSyncWorker::WaitForVSyncOnThread");
+
+ HMONITOR monitor =
+ MonitorFromWindow(surface_handle_, MONITOR_DEFAULTTONEAREST);
+ MONITORINFOEX monitor_info;
+ monitor_info.cbSize = sizeof(MONITORINFOEX);
+ BOOL success = GetMonitorInfo(monitor, &monitor_info);
+ CHECK(success);
+
+ if (current_device_name_.compare(monitor_info.szDevice) != 0) {
+ // Monitor changed. Close the current adapter handle and open a new one.
+ CloseAdapter();
+ OpenAdapter(monitor_info.szDevice);
+ }
+
+ if (WaitForVBlankEvent()) {
+ // Note: this sends update on background thread which the callback is
+ // expected to handle.
+ SendVSyncUpdate(base::TimeTicks::Now());
+ }
+
+ Reschedule();
+}
+
+void GpuVSyncWorker::SendVSyncUpdate(base::TimeTicks timestamp) {
+ if (base::subtle::NoBarrier_Load(&enabled_)) {
+ TRACE_EVENT0("gpu", "GpuVSyncWorker::SendVSyncUpdate");
+ callback_.Run(timestamp);
+ }
+}
+
+void GpuVSyncWorker::Reschedule() {
+ // Restart the task if still enabled.
+ if (base::subtle::NoBarrier_Load(&enabled_)) {
+ task_runner()->PostTask(FROM_HERE,
+ base::Bind(&GpuVSyncWorker::WaitForVSyncOnThread,
+ base::Unretained(this)));
+ } else {
+ running_ = false;
+ }
+}
+
+void GpuVSyncWorker::OpenAdapter(const wchar_t* device_name) {
+ DCHECK_EQ(0u, current_adapter_handle_);
+
+ HDC hdc = CreateDC(NULL, device_name, NULL, NULL);
+
+ D3DKMT_OPENADAPTERFROMHDC open_adapter_data;
+ open_adapter_data.hDc = hdc;
+
+ NTSTATUS result = open_adapter_from_hdc_ptr_(&open_adapter_data);
+ DeleteDC(hdc);
+
+ CHECK(result == STATUS_SUCCESS);
+
+ current_device_name_ = device_name;
+ current_adapter_handle_ = open_adapter_data.hAdapter;
+ current_source_id_ = open_adapter_data.VidPnSourceId;
+}
+
+void GpuVSyncWorker::CloseAdapter() {
+ if (current_adapter_handle_ != 0) {
+ D3DKMT_CLOSEADAPTER close_adapter_data;
+ close_adapter_data.hAdapter = current_adapter_handle_;
+
+ NTSTATUS result = close_adapter_ptr_(&close_adapter_data);
+ CHECK(result == STATUS_SUCCESS);
+
+ current_adapter_handle_ = 0;
+ current_device_name_.clear();
+ }
+}
+
+bool GpuVSyncWorker::WaitForVBlankEvent() {
+ D3DKMT_WAITFORVERTICALBLANKEVENT wait_for_vertical_blank_event_data;
+ wait_for_vertical_blank_event_data.hAdapter = current_adapter_handle_;
+ wait_for_vertical_blank_event_data.hDevice = 0;
+ wait_for_vertical_blank_event_data.VidPnSourceId = current_source_id_;
+
+ NTSTATUS result =
+ wait_for_vertical_blank_event_ptr_(&wait_for_vertical_blank_event_data);
+
+ return result == STATUS_SUCCESS;
+}
+
+/* static */
+std::unique_ptr<GpuVSyncProvider> GpuVSyncProvider::Create(
+ const VSyncCallback& callback,
+ SurfaceHandle surface_handle) {
+ return std::unique_ptr<GpuVSyncProvider>(
+ new GpuVSyncProvider(callback, surface_handle));
+}
+
+GpuVSyncProvider::GpuVSyncProvider(const VSyncCallback& callback,
+ SurfaceHandle surface_handle)
+ : vsync_worker_(new GpuVSyncWorker(callback, surface_handle)) {
+ // Start the thread.
+ base::Thread::Options options;
+ // TODO(stanisc): might consider even higher priority - REALTIME_AUDIO.
+ options.priority = base::ThreadPriority::DISPLAY;
+ vsync_worker_->StartWithOptions(options);
+}
+
+GpuVSyncProvider::~GpuVSyncProvider() = default;
+
+void GpuVSyncProvider::EnableVSync(bool enabled) {
+ vsync_worker_->Enable(enabled);
+}
+
+} // namespace gpu
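Stripped of the D3DKMT details, the scheduling in GpuVSyncWorker boils down to a self-posting loop gated by an atomic flag. A distilled sketch of just that pattern (generic example, not the production class; the caller must Start() the thread before calling Enable()):

#include "base/atomicops.h"
#include "base/bind.h"
#include "base/threading/thread.h"

// Enable() may be called from the main thread; the loop body runs on the
// worker thread and re-posts itself only while the flag stays set.
class WaitLoop : public base::Thread {
 public:
  WaitLoop() : base::Thread("WaitLoop") {}
  ~WaitLoop() override { Stop(); }

  void Enable(bool enabled) {
    auto was_enabled =
        base::subtle::NoBarrier_AtomicExchange(&enabled_, enabled);
    // Only kick the loop on a false -> true transition; disabling simply
    // lets the current iteration run out.
    if (enabled && !was_enabled) {
      task_runner()->PostTask(
          FROM_HERE, base::Bind(&WaitLoop::StartLoop, base::Unretained(this)));
    }
  }

 private:
  void StartLoop() {
    // |running_| prevents a quick disable/enable cycle from starting a
    // second loop while the first iteration is still queued.
    if (running_)
      return;
    running_ = true;
    Iterate();
  }

  void Iterate() {
    // A blocking wait (e.g. for vblank) and the callback would go here.
    if (base::subtle::NoBarrier_Load(&enabled_)) {
      task_runner()->PostTask(
          FROM_HERE, base::Bind(&WaitLoop::Iterate, base::Unretained(this)));
    } else {
      running_ = false;
    }
  }

  bool running_ = false;                      // Worker thread only.
  base::subtle::AtomicWord enabled_ = false;  // Written on the main thread.
};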
diff --git a/chromium/gpu/ipc/service/image_transport_surface.h b/chromium/gpu/ipc/service/image_transport_surface.h
index 1b6b78f5196..41f3c974a31 100644
--- a/chromium/gpu/ipc/service/image_transport_surface.h
+++ b/chromium/gpu/ipc/service/image_transport_surface.h
@@ -16,7 +16,6 @@
#include "ui/gl/gl_surface.h"
namespace gpu {
-class GpuChannelManager;
class ImageTransportSurfaceDelegate;
// The GPU process is agnostic as to how it displays results. On some platforms
@@ -36,7 +35,7 @@ class ImageTransportSurface {
static scoped_refptr<gl::GLSurface> CreateNativeSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> stub,
SurfaceHandle surface_handle,
- gl::GLSurface::Format format);
+ gl::GLSurfaceFormat format);
private:
DISALLOW_COPY_AND_ASSIGN(ImageTransportSurface);
diff --git a/chromium/gpu/ipc/service/image_transport_surface_android.cc b/chromium/gpu/ipc/service/image_transport_surface_android.cc
index 214265901bf..f0eee668e7d 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_android.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_android.cc
@@ -16,7 +16,7 @@ namespace gpu {
scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
SurfaceHandle surface_handle,
- gl::GLSurface::Format format) {
+ gl::GLSurfaceFormat format) {
if (gl::GetGLImplementation() == gl::kGLImplementationMockGL)
return new gl::GLSurfaceStub;
DCHECK(GpuSurfaceLookup::GetInstance());
diff --git a/chromium/gpu/ipc/service/image_transport_surface_linux.cc b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
index 2cbce86d811..54cf4b2571a 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_linux.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
@@ -13,7 +13,7 @@ namespace gpu {
scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
SurfaceHandle surface_handle,
- gl::GLSurface::Format format) {
+ gl::GLSurfaceFormat format) {
DCHECK_NE(surface_handle, kNullSurfaceHandle);
scoped_refptr<gl::GLSurface> surface;
#if defined(USE_OZONE)
diff --git a/chromium/gpu/ipc/service/image_transport_surface_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_mac.mm
index 42591a7bcc9..2d306748ad6 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_mac.mm
@@ -21,7 +21,9 @@ class DRTSurfaceOSMesa : public gl::GLSurfaceOSMesa {
public:
// Size doesn't matter, the surface is resized to the right size later.
DRTSurfaceOSMesa()
- : GLSurfaceOSMesa(gl::GLSurface::SURFACE_OSMESA_RGBA, gfx::Size(1, 1)) {}
+ : GLSurfaceOSMesa(
+ gl::GLSurfaceFormat(gl::GLSurfaceFormat::PIXEL_LAYOUT_RGBA),
+ gfx::Size(1, 1)) {}
// Implement a subset of GLSurface.
gfx::SwapResult SwapBuffers() override;
@@ -43,7 +45,7 @@ bool g_allow_os_mesa = false;
scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
SurfaceHandle surface_handle,
- gl::GLSurface::Format format) {
+ gl::GLSurfaceFormat format) {
DCHECK_NE(surface_handle, kNullSurfaceHandle);
switch (gl::GetGLImplementation()) {
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
index d2261e151a8..cdd267df5cf 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
@@ -39,7 +39,7 @@ class ImageTransportSurfaceOverlayMac : public gl::GLSurface,
base::WeakPtr<ImageTransportSurfaceDelegate> delegate);
// GLSurface implementation
- bool Initialize(gl::GLSurface::Format format) override;
+ bool Initialize(gl::GLSurfaceFormat format) override;
void Destroy() override;
bool Resize(const gfx::Size& size,
float scale_factor,
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index 84a8fc82d00..5c570f7d346 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -91,7 +91,7 @@ ImageTransportSurfaceOverlayMac::~ImageTransportSurfaceOverlayMac() {
Destroy();
}
-bool ImageTransportSurfaceOverlayMac::Initialize(gl::GLSurface::Format format) {
+bool ImageTransportSurfaceOverlayMac::Initialize(gl::GLSurfaceFormat format) {
delegate_->SetLatencyInfoCallback(
base::Bind(&ImageTransportSurfaceOverlayMac::SetLatencyInfo,
base::Unretained(this)));
diff --git a/chromium/gpu/ipc/service/image_transport_surface_win.cc b/chromium/gpu/ipc/service/image_transport_surface_win.cc
index 8ba97f66a3b..a4b0a5d8b58 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_win.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_win.cc
@@ -21,7 +21,7 @@ namespace gpu {
scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
SurfaceHandle surface_handle,
- gl::GLSurface::Format format) {
+ gl::GLSurfaceFormat format) {
DCHECK_NE(surface_handle, kNullSurfaceHandle);
scoped_refptr<gl::GLSurface> surface;
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
index e269cd2c935..e3587409bbf 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
@@ -23,7 +23,7 @@ PassThroughImageTransportSurface::PassThroughImageTransportSurface(
weak_ptr_factory_(this) {}
bool PassThroughImageTransportSurface::Initialize(
- gl::GLSurface::Format format) {
+ gl::GLSurfaceFormat format) {
// The surface is assumed to have already been initialized.
delegate_->SetLatencyInfoCallback(
base::Bind(&PassThroughImageTransportSurface::SetLatencyInfo,
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
index 941120da81d..eb1f9f0b373 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
@@ -18,7 +18,6 @@
#include "ui/gl/gl_surface.h"
namespace gpu {
-class GpuChannelManager;
// An implementation of ImageTransportSurface that implements GLSurface through
// GLSurfaceAdapter, thereby forwarding GLSurface methods through to it.
@@ -29,7 +28,7 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
gl::GLSurface* surface);
// GLSurface implementation.
- bool Initialize(gl::GLSurface::Format format) override;
+ bool Initialize(gl::GLSurfaceFormat format) override;
void Destroy() override;
gfx::SwapResult SwapBuffers() override;
void SwapBuffersAsync(const SwapCompletionCallback& callback) override;