Diffstat (limited to 'chromium/content/common/gpu')
-rw-r--r--  chromium/content/common/gpu/DEPS | 3
-rw-r--r--  chromium/content/common/gpu/ca_layer_partial_damage_tree_mac.h | 56
-rw-r--r--  chromium/content/common/gpu/ca_layer_partial_damage_tree_mac.mm | 287
-rw-r--r--  chromium/content/common/gpu/ca_layer_tree_mac.h | 190
-rw-r--r--  chromium/content/common/gpu/ca_layer_tree_mac.mm | 455
-rw-r--r--  chromium/content/common/gpu/child_window_surface_win.cc | 214
-rw-r--r--  chromium/content/common/gpu/child_window_surface_win.h | 45
-rw-r--r--  chromium/content/common/gpu/client/DEPS | 7
-rw-r--r--  chromium/content/common/gpu/client/command_buffer_metrics.cc | 6
-rw-r--r--  chromium/content/common/gpu/client/command_buffer_metrics.h | 2
-rw-r--r--  chromium/content/common/gpu/client/command_buffer_proxy_impl.cc | 820
-rw-r--r--  chromium/content/common/gpu/client/command_buffer_proxy_impl.h | 279
-rw-r--r--  chromium/content/common/gpu/client/context_provider_command_buffer.cc | 63
-rw-r--r--  chromium/content/common/gpu/client/context_provider_command_buffer.h | 12
-rw-r--r--  chromium/content/common/gpu/client/gl_helper.cc | 1391
-rw-r--r--  chromium/content/common/gpu/client/gl_helper.h | 382
-rw-r--r--  chromium/content/common/gpu/client/gl_helper_benchmark.cc | 310
-rw-r--r--  chromium/content/common/gpu/client/gl_helper_readback_support.cc | 183
-rw-r--r--  chromium/content/common/gpu/client/gl_helper_readback_support.h | 72
-rw-r--r--  chromium/content/common/gpu/client/gl_helper_scaling.cc | 934
-rw-r--r--  chromium/content/common/gpu/client/gl_helper_scaling.h | 213
-rw-r--r--  chromium/content/common/gpu/client/gl_helper_unittest.cc | 2016
-rw-r--r--  chromium/content/common/gpu/client/gpu_channel_host.cc | 552
-rw-r--r--  chromium/content/common/gpu/client/gpu_channel_host.h | 318
-rw-r--r--  chromium/content/common/gpu/client/gpu_context_tests.h | 72
-rw-r--r--  chromium/content/common/gpu/client/gpu_in_process_context_tests.cc | 42
-rw-r--r--  chromium/content/common/gpu/client/gpu_jpeg_decode_accelerator_host.cc | 204
-rw-r--r--  chromium/content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h | 73
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl.cc | 94
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl.h | 68
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface.cc | 125
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface.h | 63
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface_unittest.cc | 16
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc | 112
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap.h | 63
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap_unittest.cc | 16
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.cc | 224
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.h | 77
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory_unittest.cc | 40
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture.cc | 151
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture.h | 60
-rw-r--r--  chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture_unittest.cc | 8
-rw-r--r--  chromium/content/common/gpu/client/gpu_video_decode_accelerator_host.cc | 292
-rw-r--r--  chromium/content/common/gpu/client/gpu_video_decode_accelerator_host.h | 106
-rw-r--r--  chromium/content/common/gpu/client/gpu_video_encode_accelerator_host.cc | 323
-rw-r--r--  chromium/content/common/gpu/client/gpu_video_encode_accelerator_host.h | 131
-rw-r--r--  chromium/content/common/gpu/client/grcontext_for_gles2_interface.cc | 62
-rw-r--r--  chromium/content/common/gpu/client/grcontext_for_gles2_interface.h | 42
-rw-r--r--  chromium/content/common/gpu/client/grcontext_for_webgraphicscontext3d.cc | 108
-rw-r--r--  chromium/content/common/gpu/client/grcontext_for_webgraphicscontext3d.h | 66
-rw-r--r--  chromium/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.cc | 126
-rw-r--r--  chromium/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h | 53
-rw-r--r--  chromium/content/common/gpu/gpu_channel.cc | 1086
-rw-r--r--  chromium/content/common/gpu/gpu_channel.h | 485
-rw-r--r--  chromium/content/common/gpu/gpu_channel_manager.cc | 380
-rw-r--r--  chromium/content/common/gpu/gpu_channel_manager.h | 227
-rw-r--r--  chromium/content/common/gpu/gpu_channel_manager_unittest.cc | 120
-rw-r--r--  chromium/content/common/gpu/gpu_channel_test_common.cc | 112
-rw-r--r--  chromium/content/common/gpu/gpu_channel_test_common.h | 93
-rw-r--r--  chromium/content/common/gpu/gpu_channel_unittest.cc | 329
-rw-r--r--  chromium/content/common/gpu/gpu_command_buffer_stub.cc | 1269
-rw-r--r--  chromium/content/common/gpu/gpu_command_buffer_stub.h | 307
-rw-r--r--  chromium/content/common/gpu/gpu_config.h | 12
-rw-r--r--  chromium/content/common/gpu/gpu_memory_buffer_factory.cc | 53
-rw-r--r--  chromium/content/common/gpu/gpu_memory_buffer_factory.h | 71
-rw-r--r--  chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface.cc | 121
-rw-r--r--  chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface.h | 80
-rw-r--r--  chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface_unittest.cc | 16
-rw-r--r--  chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.cc | 139
-rw-r--r--  chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.h | 70
-rw-r--r--  chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap_unittest.cc | 20
-rw-r--r--  chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture.cc | 124
-rw-r--r--  chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture.h | 75
-rw-r--r--  chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture_unittest.cc | 16
-rw-r--r--  chromium/content/common/gpu/gpu_memory_manager.cc | 124
-rw-r--r--  chromium/content/common/gpu/gpu_memory_manager.h | 78
-rw-r--r--  chromium/content/common/gpu/gpu_memory_tracking.cc | 37
-rw-r--r--  chromium/content/common/gpu/gpu_memory_tracking.h | 53
-rw-r--r--  chromium/content/common/gpu/gpu_memory_uma_stats.h | 33
-rw-r--r--  chromium/content/common/gpu/gpu_messages.h | 878
-rw-r--r--  chromium/content/common/gpu/gpu_process_launch_causes.h | 35
-rw-r--r--  chromium/content/common/gpu/gpu_result_codes.h | 20
-rw-r--r--  chromium/content/common/gpu/gpu_stream_priority.h | 20
-rw-r--r--  chromium/content/common/gpu/gpu_surface_lookup.cc | 33
-rw-r--r--  chromium/content/common/gpu/gpu_surface_lookup.h | 40
-rw-r--r--  chromium/content/common/gpu/gpu_watchdog.h | 28
-rw-r--r--  chromium/content/common/gpu/image_transport_surface.cc | 302
-rw-r--r--  chromium/content/common/gpu/image_transport_surface.h | 224
-rw-r--r--  chromium/content/common/gpu/image_transport_surface_android.cc | 42
-rw-r--r--  chromium/content/common/gpu/image_transport_surface_linux.cc | 28
-rw-r--r--  chromium/content/common/gpu/image_transport_surface_mac.mm | 82
-rw-r--r--  chromium/content/common/gpu/image_transport_surface_overlay_mac.h | 148
-rw-r--r--  chromium/content/common/gpu/image_transport_surface_overlay_mac.mm | 522
-rw-r--r--  chromium/content/common/gpu/image_transport_surface_win.cc | 53
-rw-r--r--  chromium/content/common/gpu/media/OWNERS | 13
-rw-r--r--  chromium/content/common/gpu/media/android_copying_backing_strategy.cc | 113
-rw-r--r--  chromium/content/common/gpu/media/android_copying_backing_strategy.h | 10
-rw-r--r--  chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc | 329
-rw-r--r--  chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.h | 40
-rw-r--r--  chromium/content/common/gpu/media/android_video_decode_accelerator.cc | 897
-rw-r--r--  chromium/content/common/gpu/media/android_video_decode_accelerator.h | 206
-rw-r--r--  chromium/content/common/gpu/media/android_video_decode_accelerator_unittest.cc | 23
-rw-r--r--  chromium/content/common/gpu/media/android_video_encode_accelerator.cc | 86
-rw-r--r--  chromium/content/common/gpu/media/android_video_encode_accelerator.h | 9
-rw-r--r--  chromium/content/common/gpu/media/avda_codec_image.cc | 154
-rw-r--r--  chromium/content/common/gpu/media/avda_codec_image.h | 60
-rw-r--r--  chromium/content/common/gpu/media/avda_shared_state.cc | 14
-rw-r--r--  chromium/content/common/gpu/media/avda_shared_state.h | 11
-rw-r--r--  chromium/content/common/gpu/media/avda_state_provider.h | 3
-rw-r--r--  chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.cc | 1017
-rw-r--r--  chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.h | 118
-rw-r--r--  chromium/content/common/gpu/media/fake_video_decode_accelerator.cc | 35
-rw-r--r--  chromium/content/common/gpu/media/fake_video_decode_accelerator.h | 14
-rw-r--r--  chromium/content/common/gpu/media/gpu_arc_video_service.cc | 92
-rw-r--r--  chromium/content/common/gpu/media/gpu_arc_video_service.h | 68
-rw-r--r--  chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc | 56
-rw-r--r--  chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.h | 12
-rw-r--r--  chromium/content/common/gpu/media/gpu_video_accelerator_util.cc | 155
-rw-r--r--  chromium/content/common/gpu/media/gpu_video_accelerator_util.h | 63
-rw-r--r--  chromium/content/common/gpu/media/gpu_video_decode_accelerator.cc | 446
-rw-r--r--  chromium/content/common/gpu/media/gpu_video_decode_accelerator.h | 71
-rw-r--r--  chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc | 242
-rw-r--r--  chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h | 123
-rw-r--r--  chromium/content/common/gpu/media/gpu_video_decode_accelerator_helpers.h | 59
-rw-r--r--  chromium/content/common/gpu/media/gpu_video_encode_accelerator.cc | 207
-rw-r--r--  chromium/content/common/gpu/media/gpu_video_encode_accelerator.h | 41
-rw-r--r--  chromium/content/common/gpu/media/media_channel.cc | 145
-rw-r--r--  chromium/content/common/gpu/media/media_channel.h | 57
-rw-r--r--  chromium/content/common/gpu/media/media_service.cc | 40
-rw-r--r--  chromium/content/common/gpu/media/media_service.h | 42
-rw-r--r--  chromium/content/common/gpu/media/rendering_helper.cc | 14
-rw-r--r--  chromium/content/common/gpu/media/rendering_helper.h | 7
-rw-r--r--  chromium/content/common/gpu/media/shared_memory_region.cc | 42
-rw-r--r--  chromium/content/common/gpu/media/shared_memory_region.h | 57
-rw-r--r--  chromium/content/common/gpu/media/v4l2_image_processor.cc | 30
-rw-r--r--  chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc | 45
-rw-r--r--  chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h | 11
-rw-r--r--  chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc | 98
-rw-r--r--  chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h | 29
-rw-r--r--  chromium/content/common/gpu/media/v4l2_video_decode_accelerator.cc | 202
-rw-r--r--  chromium/content/common/gpu/media/v4l2_video_decode_accelerator.h | 31
-rw-r--r--  chromium/content/common/gpu/media/v4l2_video_encode_accelerator.cc | 103
-rw-r--r--  chromium/content/common/gpu/media/vaapi_drm_picture.cc | 8
-rw-r--r--  chromium/content/common/gpu/media/vaapi_drm_picture.h | 5
-rw-r--r--  chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc | 39
-rw-r--r--  chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h | 9
-rw-r--r--  chromium/content/common/gpu/media/vaapi_picture.cc | 6
-rw-r--r--  chromium/content/common/gpu/media/vaapi_picture.h | 6
-rw-r--r--  chromium/content/common/gpu/media/vaapi_tfp_picture.cc | 8
-rw-r--r--  chromium/content/common/gpu/media/vaapi_tfp_picture.h | 5
-rw-r--r--  chromium/content/common/gpu/media/vaapi_video_decode_accelerator.cc | 63
-rw-r--r--  chromium/content/common/gpu/media/vaapi_video_decode_accelerator.h | 30
-rw-r--r--  chromium/content/common/gpu/media/vaapi_video_encode_accelerator.cc | 40
-rw-r--r--  chromium/content/common/gpu/media/vaapi_wrapper.cc | 24
-rw-r--r--  chromium/content/common/gpu/media/vaapi_wrapper.h | 2
-rw-r--r--  chromium/content/common/gpu/media/video_decode_accelerator_unittest.cc | 187
-rw-r--r--  chromium/content/common/gpu/media/video_encode_accelerator_unittest.cc | 58
-rw-r--r--  chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.cc | 135
-rw-r--r--  chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.h | 19
-rw-r--r--  chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.cc | 552
-rw-r--r--  chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.h | 142
-rw-r--r--  chromium/content/common/gpu/stream_texture_android.cc | 386
-rw-r--r--  chromium/content/common/gpu/stream_texture_android.h | 110
-rw-r--r--  chromium/content/common/gpu/x_util.h | 40
164 files changed, 5143 insertions, 22083 deletions
diff --git a/chromium/content/common/gpu/DEPS b/chromium/content/common/gpu/DEPS
index 2c01d5f2d7b..56ee2c3e9b8 100644
--- a/chromium/content/common/gpu/DEPS
+++ b/chromium/content/common/gpu/DEPS
@@ -2,9 +2,6 @@ include_rules = [
"+gpu/command_buffer",
"+libEGL",
"+libGLESv2",
- "+media/video/jpeg_decode_accelerator.h",
- "+media/video/video_decode_accelerator.h",
- "+media/video/video_encode_accelerator.h",
"+skia",
"+third_party/mesa",
]
diff --git a/chromium/content/common/gpu/ca_layer_partial_damage_tree_mac.h b/chromium/content/common/gpu/ca_layer_partial_damage_tree_mac.h
deleted file mode 100644
index f52000d6ddf..00000000000
--- a/chromium/content/common/gpu/ca_layer_partial_damage_tree_mac.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CA_LAYER_PARTIAL_DAMAGE_TREE_MAC_H_
-#define CONTENT_COMMON_GPU_CA_LAYER_PARTIAL_DAMAGE_TREE_MAC_H_
-
-#include <IOSurface/IOSurface.h>
-#include <QuartzCore/QuartzCore.h>
-#include <deque>
-
-#include "base/mac/scoped_cftyperef.h"
-#include "base/memory/scoped_ptr.h"
-#include "ui/gfx/geometry/rect.h"
-#include "ui/gfx/geometry/rect_f.h"
-
-namespace content {
-
-class CALayerPartialDamageTree {
- public:
- CALayerPartialDamageTree(bool allow_partial_swap,
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::Rect& pixel_frame_rect);
- ~CALayerPartialDamageTree();
-
- base::ScopedCFTypeRef<IOSurfaceRef> RootLayerIOSurface();
- void CommitCALayers(CALayer* superlayer,
- scoped_ptr<CALayerPartialDamageTree> old_tree,
- float scale_factor,
- const gfx::Rect& pixel_damage_rect);
-
- private:
- class OverlayPlane;
-
- // This will populate |partial_damage_planes_|, potentially re-using the
- // CALayers and |partial_damage_planes_| from |old_tree|. After this function
- // completes, the back() of |partial_damage_planes_| is the plane that will
- // be updated this frame (and if it is empty, then the root plane will be
- // updated).
- void UpdatePartialDamagePlanes(CALayerPartialDamageTree* old_tree,
- const gfx::Rect& pixel_damage_rect);
-
- void UpdateRootAndPartialDamagePlanes(
- scoped_ptr<CALayerPartialDamageTree> old_tree,
- const gfx::Rect& pixel_damage_rect);
-
- void UpdateCALayers(CALayer* superlayer, float scale_factor);
-
- const bool allow_partial_swap_;
- scoped_ptr<OverlayPlane> root_plane_;
- std::deque<scoped_ptr<OverlayPlane>> partial_damage_planes_;
-};
-
-} // content
-
-#endif // CONTENT_COMMON_GPU_CA_LAYER_PARTIAL_DAMAGE_TREE_MAC_H_
diff --git a/chromium/content/common/gpu/ca_layer_partial_damage_tree_mac.mm b/chromium/content/common/gpu/ca_layer_partial_damage_tree_mac.mm
deleted file mode 100644
index f42738947a0..00000000000
--- a/chromium/content/common/gpu/ca_layer_partial_damage_tree_mac.mm
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/ca_layer_partial_damage_tree_mac.h"
-
-#include "base/command_line.h"
-#include "base/mac/scoped_nsobject.h"
-#include "base/mac/sdk_forward_declarations.h"
-#include "base/trace_event/trace_event.h"
-#include "ui/base/ui_base_switches.h"
-#include "ui/gfx/transform.h"
-
-@interface CALayer(Private)
--(void)setContentsChanged;
-@end
-
-namespace content {
-namespace {
-
-// When selecting a CALayer to re-use for partial damage, this is the maximum
-// fraction of the merged layer's pixels that may be not-updated by the swap
-// before we consider the CALayer to not be a good enough match, and create a
-// new one.
-const float kMaximumPartialDamageWasteFraction = 1.2f;
-
-// The maximum number of partial damage layers that may be created before we
-// give up and remove them all (doing full damage in the process).
-const size_t kMaximumPartialDamageLayers = 8;
-
-} // namespace
-
-class CALayerPartialDamageTree::OverlayPlane {
- public:
- OverlayPlane(base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::Rect& pixel_frame_rect,
- const gfx::RectF& contents_rect)
- : io_surface(io_surface),
- contents_rect(contents_rect),
- pixel_frame_rect(pixel_frame_rect),
- layer_needs_update(true) {}
-
- ~OverlayPlane() {
- [ca_layer setContents:nil];
- [ca_layer removeFromSuperlayer];
- ca_layer.reset();
- }
-
- const base::ScopedCFTypeRef<IOSurfaceRef> io_surface;
- const gfx::RectF contents_rect;
- const gfx::Rect pixel_frame_rect;
- bool layer_needs_update;
- base::scoped_nsobject<CALayer> ca_layer;
-
- void TakeCALayerFrom(OverlayPlane* other_plane) {
- ca_layer.swap(other_plane->ca_layer);
- }
-
- void UpdateProperties(float scale_factor) {
- if (layer_needs_update) {
- [ca_layer setOpaque:YES];
-
- id new_contents = static_cast<id>(io_surface.get());
- if ([ca_layer contents] == new_contents)
- [ca_layer setContentsChanged];
- else
- [ca_layer setContents:new_contents];
- [ca_layer setContentsRect:contents_rect.ToCGRect()];
-
- [ca_layer setAnchorPoint:CGPointZero];
-
- if ([ca_layer respondsToSelector:(@selector(setContentsScale:))])
- [ca_layer setContentsScale:scale_factor];
- gfx::RectF dip_frame_rect = gfx::RectF(pixel_frame_rect);
- dip_frame_rect.Scale(1 / scale_factor);
- [ca_layer setBounds:CGRectMake(0, 0, dip_frame_rect.width(),
- dip_frame_rect.height())];
- [ca_layer
- setPosition:CGPointMake(dip_frame_rect.x(), dip_frame_rect.y())];
- }
- static bool show_borders =
- base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kShowMacOverlayBorders);
- if (show_borders) {
- base::ScopedCFTypeRef<CGColorRef> color;
- if (!layer_needs_update) {
- // Green represents contents that are unchanged across frames.
- color.reset(CGColorCreateGenericRGB(0, 1, 0, 1));
- } else {
- // Red represents damaged contents.
- color.reset(CGColorCreateGenericRGB(1, 0, 0, 1));
- }
- [ca_layer setBorderWidth:1];
- [ca_layer setBorderColor:color];
- }
- layer_needs_update = false;
- }
-
- private:
-};
-
-void CALayerPartialDamageTree::UpdatePartialDamagePlanes(
- CALayerPartialDamageTree* old_tree,
- const gfx::Rect& pixel_damage_rect) {
- // Don't create partial damage layers if partial swap is disabled.
- if (!allow_partial_swap_)
- return;
- // Only create partial damage layers when building on top of an existing tree.
- if (!old_tree)
- return;
- // If the frame size has changed, discard all of the old partial damage
- // layers.
- if (old_tree->root_plane_->pixel_frame_rect != root_plane_->pixel_frame_rect)
- return;
- // If there is full damage, discard all of the old partial damage layers.
- if (pixel_damage_rect == root_plane_->pixel_frame_rect)
- return;
-
- // If there is no damage, don't change anything.
- if (pixel_damage_rect.IsEmpty()) {
- std::swap(partial_damage_planes_, old_tree->partial_damage_planes_);
- return;
- }
-
- // Find the last partial damage plane to re-use the CALayer from. Grow the
- // new rect for this layer to include this damage, and all nearby partial
- // damage layers.
- scoped_ptr<OverlayPlane> plane_for_swap;
- {
- auto plane_to_reuse_iter = old_tree->partial_damage_planes_.end();
- gfx::Rect plane_to_reuse_enlarged_pixel_damage_rect;
-
- for (auto old_plane_iter = old_tree->partial_damage_planes_.begin();
- old_plane_iter != old_tree->partial_damage_planes_.end();
- ++old_plane_iter) {
- gfx::Rect enlarged_pixel_damage_rect =
- (*old_plane_iter)->pixel_frame_rect;
- enlarged_pixel_damage_rect.Union(pixel_damage_rect);
-
- // Compute the fraction of the pixels that would not be updated by this
- // swap. If it is too big, try another layer.
- float waste_fraction = enlarged_pixel_damage_rect.size().GetArea() * 1.f /
- pixel_damage_rect.size().GetArea();
- if (waste_fraction > kMaximumPartialDamageWasteFraction)
- continue;
-
- plane_to_reuse_iter = old_plane_iter;
- plane_to_reuse_enlarged_pixel_damage_rect.Union(
- enlarged_pixel_damage_rect);
- }
- if (plane_to_reuse_iter != old_tree->partial_damage_planes_.end()) {
- gfx::RectF enlarged_contents_rect =
- gfx::RectF(plane_to_reuse_enlarged_pixel_damage_rect);
- enlarged_contents_rect.Scale(1. / root_plane_->pixel_frame_rect.width(),
- 1. / root_plane_->pixel_frame_rect.height());
-
- plane_for_swap.reset(new OverlayPlane(
- root_plane_->io_surface, plane_to_reuse_enlarged_pixel_damage_rect,
- enlarged_contents_rect));
-
- plane_for_swap->TakeCALayerFrom((*plane_to_reuse_iter).get());
- if (*plane_to_reuse_iter != old_tree->partial_damage_planes_.back()) {
- CALayer* superlayer = [plane_for_swap->ca_layer superlayer];
- [plane_for_swap->ca_layer removeFromSuperlayer];
- [superlayer addSublayer:plane_for_swap->ca_layer];
- }
- }
- }
-
- // If we haven't found an appropriate layer to re-use, create a new one, if
- // we haven't already created too many.
- if (!plane_for_swap.get() &&
- old_tree->partial_damage_planes_.size() < kMaximumPartialDamageLayers) {
- gfx::RectF contents_rect = gfx::RectF(pixel_damage_rect);
- contents_rect.Scale(1. / root_plane_->pixel_frame_rect.width(),
- 1. / root_plane_->pixel_frame_rect.height());
- plane_for_swap.reset(new OverlayPlane(root_plane_->io_surface,
- pixel_damage_rect, contents_rect));
- }
-
- // And if we still don't have a layer, do full damage.
- if (!plane_for_swap.get())
- return;
-
- // Walk all old partial damage planes. Remove anything that is now completely
- // covered, and move everything else into the new |partial_damage_planes_|.
- for (auto& old_plane : old_tree->partial_damage_planes_) {
- if (!old_plane.get())
- continue;
- // Intersect the planes' frames with the new root plane to ensure that
- // they don't get kept alive inappropriately.
- gfx::Rect old_plane_frame_rect = old_plane->pixel_frame_rect;
- old_plane_frame_rect.Intersect(root_plane_->pixel_frame_rect);
-
- bool old_plane_covered_by_swap = false;
- if (plane_for_swap.get() &&
- plane_for_swap->pixel_frame_rect.Contains(old_plane_frame_rect)) {
- old_plane_covered_by_swap = true;
- }
- if (!old_plane_covered_by_swap) {
- DCHECK(old_plane->ca_layer);
- partial_damage_planes_.push_back(std::move(old_plane));
- }
- }
-
- partial_damage_planes_.push_back(std::move(plane_for_swap));
-}
-
-void CALayerPartialDamageTree::UpdateRootAndPartialDamagePlanes(
- scoped_ptr<CALayerPartialDamageTree> old_tree,
- const gfx::Rect& pixel_damage_rect) {
- // First update the partial damage tree.
- UpdatePartialDamagePlanes(old_tree.get(), pixel_damage_rect);
- if (old_tree) {
- if (partial_damage_planes_.empty()) {
- // If there are no partial damage planes, then we will be updating the
- // root layer. Take the CALayer from the old tree.
- root_plane_->TakeCALayerFrom(old_tree->root_plane_.get());
- } else {
- // If there is a partial damage tree, then just take the old plane
- // from the previous frame, so that there is no update to it.
- root_plane_.swap(old_tree->root_plane_);
- }
- }
-}
-
-void CALayerPartialDamageTree::UpdateCALayers(CALayer* superlayer,
- float scale_factor) {
- if (!allow_partial_swap_) {
- DCHECK(partial_damage_planes_.empty());
- return;
- }
-
- // Allocate and update CALayers for the backbuffer and partial damage layers.
- if (!root_plane_->ca_layer) {
- DCHECK(partial_damage_planes_.empty());
- root_plane_->ca_layer.reset([[CALayer alloc] init]);
- [superlayer setSublayers:nil];
- [superlayer addSublayer:root_plane_->ca_layer];
- }
- // Excessive logging to debug white screens (crbug.com/583805).
- // TODO(ccameron): change this back to a DLOG.
- if ([root_plane_->ca_layer superlayer] != superlayer) {
- LOG(ERROR) << "CALayerPartialDamageTree root layer not attached to tree.";
- }
- for (auto& plane : partial_damage_planes_) {
- if (!plane->ca_layer) {
- DCHECK(plane == partial_damage_planes_.back());
- plane->ca_layer.reset([[CALayer alloc] init]);
- }
- if (![plane->ca_layer superlayer]) {
- DCHECK(plane == partial_damage_planes_.back());
- [superlayer addSublayer:plane->ca_layer];
- }
- }
- root_plane_->UpdateProperties(scale_factor);
- for (auto& plane : partial_damage_planes_)
- plane->UpdateProperties(scale_factor);
-}
-
-CALayerPartialDamageTree::CALayerPartialDamageTree(
- bool allow_partial_swap,
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::Rect& pixel_frame_rect)
- : allow_partial_swap_(allow_partial_swap) {
- root_plane_.reset(
- new OverlayPlane(io_surface, pixel_frame_rect, gfx::RectF(0, 0, 1, 1)));
-}
-
-CALayerPartialDamageTree::~CALayerPartialDamageTree() {}
-
-base::ScopedCFTypeRef<IOSurfaceRef>
-CALayerPartialDamageTree::RootLayerIOSurface() {
- return root_plane_->io_surface;
-}
-
-void CALayerPartialDamageTree::CommitCALayers(
- CALayer* superlayer,
- scoped_ptr<CALayerPartialDamageTree> old_tree,
- float scale_factor,
- const gfx::Rect& pixel_damage_rect) {
- TRACE_EVENT0("gpu", "CALayerPartialDamageTree::CommitCALayers");
- UpdateRootAndPartialDamagePlanes(std::move(old_tree), pixel_damage_rect);
- UpdateCALayers(superlayer, scale_factor);
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/ca_layer_tree_mac.h b/chromium/content/common/gpu/ca_layer_tree_mac.h
deleted file mode 100644
index d20bdb0d8be..00000000000
--- a/chromium/content/common/gpu/ca_layer_tree_mac.h
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CA_LAYER_TREE_MAC_H_
-#define CONTENT_COMMON_GPU_CA_LAYER_TREE_MAC_H_
-
-#include <IOSurface/IOSurface.h>
-#include <QuartzCore/QuartzCore.h>
-#include <deque>
-#include <vector>
-
-#include "base/mac/scoped_cftyperef.h"
-#include "base/mac/scoped_nsobject.h"
-#include "base/memory/scoped_ptr.h"
-#include "ui/gfx/geometry/rect.h"
-#include "ui/gfx/geometry/rect_f.h"
-#include "ui/gfx/transform.h"
-
-namespace content {
-
-// The CALayerTree will construct a hierarchy of CALayers from a linear list,
-// using the algorithm and structure described in
-// https://docs.google.com/document/d/1DtSN9zzvCF44_FQPM7ie01UxGHagQ66zfF5L9HnigQY/edit?usp=sharing
-class CALayerTree {
- public:
- CALayerTree();
-
- // This will remove all CALayers from this tree from their superlayer.
- ~CALayerTree();
-
- // Append the description of a new CALayer to the tree. This will not
- // create any new CALayers until CommitScheduledCALayers is called. This
- // cannot be called anymore after CommitScheduledCALayers has been called.
- bool ScheduleCALayer(bool is_clipped,
- const gfx::Rect& clip_rect,
- unsigned sorting_context_id,
- const gfx::Transform& transform,
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::RectF& contents_rect,
- const gfx::Rect& rect,
- unsigned background_color,
- unsigned edge_aa_mask,
- float opacity);
-
- // Create a CALayer tree for the scheduled layers, and set |superlayer| to
- // have only this tree as its sublayers. If |old_tree| is non-null, then try
- // to re-use the CALayers of |old_tree| as much as possible. |old_tree| will
- // be destroyed at the end of the function, and any CALayers in it which were
- // not re-used by |this| will be removed from the CALayer hierarchy.
- void CommitScheduledCALayers(CALayer* superlayer,
- scoped_ptr<CALayerTree> old_tree,
- float scale_factor);
-
- private:
- struct RootLayer;
- struct ClipAndSortingLayer;
- struct TransformLayer;
- struct ContentLayer;
-
- struct RootLayer {
- RootLayer();
-
- // This will remove |ca_layer| from its superlayer, if |ca_layer| is
- // non-nil.
- ~RootLayer();
-
- // Append a new content layer, without modifying the actual CALayer
- // structure.
- bool AddContentLayer(bool is_clipped,
- const gfx::Rect& clip_rect,
- unsigned sorting_context_id,
- const gfx::Transform& transform,
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::RectF& contents_rect,
- const gfx::Rect& rect,
- unsigned background_color,
- unsigned edge_aa_mask,
- float opacity);
-
- // Allocate CALayers for this layer and its children, and set their
- // properties appropriately. Re-use the CALayers from |old_layer| if
- // possible. If re-using a CALayer from |old_layer|, reset its |ca_layer|
- // to nil, so that its destructor will not remove an active CALayer.
- void CommitToCA(CALayer* superlayer,
- RootLayer* old_layer,
- float scale_factor);
-
- std::vector<ClipAndSortingLayer> clip_and_sorting_layers;
- base::scoped_nsobject<CALayer> ca_layer;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(RootLayer);
- };
- struct ClipAndSortingLayer {
- ClipAndSortingLayer(bool is_clipped,
- gfx::Rect clip_rect,
- unsigned sorting_context_id,
- bool is_singleton_sorting_context);
- ClipAndSortingLayer(ClipAndSortingLayer&& layer);
-
- // See the behavior of RootLayer for the effects of these functions on the
- // |ca_layer| member and |old_layer| argument.
- ~ClipAndSortingLayer();
- void AddContentLayer(const gfx::Transform& transform,
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::RectF& contents_rect,
- const gfx::Rect& rect,
- unsigned background_color,
- unsigned edge_aa_mask,
- float opacity);
- void CommitToCA(CALayer* superlayer,
- ClipAndSortingLayer* old_layer,
- float scale_factor);
-
- std::vector<TransformLayer> transform_layers;
- bool is_clipped = false;
- gfx::Rect clip_rect;
- unsigned sorting_context_id = 0;
- bool is_singleton_sorting_context = false;
- base::scoped_nsobject<CALayer> ca_layer;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ClipAndSortingLayer);
- };
- struct TransformLayer {
- TransformLayer(const gfx::Transform& transform);
- TransformLayer(TransformLayer&& layer);
-
- // See the behavior of RootLayer for the effects of these functions on the
- // |ca_layer| member and |old_layer| argument.
- ~TransformLayer();
- void AddContentLayer(base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::RectF& contents_rect,
- const gfx::Rect& rect,
- unsigned background_color,
- unsigned edge_aa_mask,
- float opacity);
- void CommitToCA(CALayer* superlayer,
- TransformLayer* old_layer,
- float scale_factor);
-
- gfx::Transform transform;
- std::vector<ContentLayer> content_layers;
- base::scoped_nsobject<CALayer> ca_layer;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TransformLayer);
- };
- struct ContentLayer {
- ContentLayer(base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::RectF& contents_rect,
- const gfx::Rect& rect,
- unsigned background_color,
- unsigned edge_aa_mask,
- float opacity);
- ContentLayer(ContentLayer&& layer);
-
- // See the behavior of RootLayer for the effects of these functions on the
- // |ca_layer| member and |old_layer| argument.
- ~ContentLayer();
- void CommitToCA(CALayer* parent,
- ContentLayer* old_layer,
- float scale_factor);
-
- const base::ScopedCFTypeRef<IOSurfaceRef> io_surface;
- gfx::RectF contents_rect;
- gfx::Rect rect;
- unsigned background_color = 0;
- // Note that the CoreAnimation edge antialiasing mask is not the same as
- // the edge antialiasing mask passed to the constructor.
- CAEdgeAntialiasingMask ca_edge_aa_mask = 0;
- float opacity = 1;
- base::scoped_nsobject<CALayer> ca_layer;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ContentLayer);
- };
-
- RootLayer root_layer_;
- float scale_factor_ = 1;
- bool has_committed_ = false;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CALayerTree);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CA_LAYER_TREE_MAC_H_
diff --git a/chromium/content/common/gpu/ca_layer_tree_mac.mm b/chromium/content/common/gpu/ca_layer_tree_mac.mm
deleted file mode 100644
index 8c7d93886a5..00000000000
--- a/chromium/content/common/gpu/ca_layer_tree_mac.mm
+++ /dev/null
@@ -1,455 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/ca_layer_tree_mac.h"
-
-#include "base/command_line.h"
-#include "base/mac/sdk_forward_declarations.h"
-#include "base/trace_event/trace_event.h"
-#include "gpu/GLES2/gl2extchromium.h"
-#include "third_party/skia/include/core/SkColor.h"
-#include "ui/base/cocoa/animation_utils.h"
-#include "ui/base/ui_base_switches.h"
-#include "ui/gfx/geometry/dip_util.h"
-
-namespace content {
-
-CALayerTree::CALayerTree() {}
-CALayerTree::~CALayerTree() {}
-
-bool CALayerTree::ScheduleCALayer(
- bool is_clipped,
- const gfx::Rect& clip_rect,
- unsigned sorting_context_id,
- const gfx::Transform& transform,
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::RectF& contents_rect,
- const gfx::Rect& rect,
- unsigned background_color,
- unsigned edge_aa_mask,
- float opacity) {
- // Excessive logging to debug white screens (crbug.com/583805).
- // TODO(ccameron): change this back to a DLOG.
- if (has_committed_) {
- LOG(ERROR) << "ScheduleCALayer called after CommitScheduledCALayers.";
- return false;
- }
- return root_layer_.AddContentLayer(is_clipped, clip_rect, sorting_context_id,
- transform, io_surface, contents_rect, rect,
- background_color, edge_aa_mask, opacity);
-}
-
-void CALayerTree::CommitScheduledCALayers(CALayer* superlayer,
- scoped_ptr<CALayerTree> old_tree,
- float scale_factor) {
- TRACE_EVENT0("gpu", "CALayerTree::CommitScheduledCALayers");
- RootLayer* old_root_layer = nullptr;
- if (old_tree) {
- DCHECK(old_tree->has_committed_);
- if (old_tree->scale_factor_ == scale_factor)
- old_root_layer = &old_tree->root_layer_;
- }
-
- root_layer_.CommitToCA(superlayer, old_root_layer, scale_factor);
- // If there are any extra CALayers in |old_tree| that were not stolen by this
- // tree, they will be removed from the CALayer tree in this deallocation.
- old_tree.reset();
- has_committed_ = true;
- scale_factor_ = scale_factor;
-}
-
-CALayerTree::RootLayer::RootLayer() {}
-
-// Note that for all destructors, the CALayer will have been reset to nil if
-// another layer has taken it.
-CALayerTree::RootLayer::~RootLayer() {
- [ca_layer removeFromSuperlayer];
-}
-
-CALayerTree::ClipAndSortingLayer::ClipAndSortingLayer(
- bool is_clipped,
- gfx::Rect clip_rect,
- unsigned sorting_context_id,
- bool is_singleton_sorting_context)
- : is_clipped(is_clipped),
- clip_rect(clip_rect),
- sorting_context_id(sorting_context_id),
- is_singleton_sorting_context(is_singleton_sorting_context) {}
-
-CALayerTree::ClipAndSortingLayer::ClipAndSortingLayer(
- ClipAndSortingLayer&& layer)
- : transform_layers(std::move(layer.transform_layers)),
- is_clipped(layer.is_clipped),
- clip_rect(layer.clip_rect),
- sorting_context_id(layer.sorting_context_id),
- is_singleton_sorting_context(
- layer.is_singleton_sorting_context),
- ca_layer(layer.ca_layer) {
- // Ensure that the ca_layer be reset, so that when the destructor is called,
- // the layer hierarchy is unaffected.
- // TODO(ccameron): Add a move constructor for scoped_nsobject to do this
- // automatically.
- layer.ca_layer.reset();
-}
-
-CALayerTree::ClipAndSortingLayer::~ClipAndSortingLayer() {
- [ca_layer removeFromSuperlayer];
-}
-
-CALayerTree::TransformLayer::TransformLayer(const gfx::Transform& transform)
- : transform(transform) {}
-
-CALayerTree::TransformLayer::TransformLayer(TransformLayer&& layer)
- : transform(layer.transform),
- content_layers(std::move(layer.content_layers)),
- ca_layer(layer.ca_layer) {
- layer.ca_layer.reset();
-}
-
-CALayerTree::TransformLayer::~TransformLayer() {
- [ca_layer removeFromSuperlayer];
-}
-
-CALayerTree::ContentLayer::ContentLayer(
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::RectF& contents_rect,
- const gfx::Rect& rect,
- unsigned background_color,
- unsigned edge_aa_mask,
- float opacity)
- : io_surface(io_surface),
- contents_rect(contents_rect),
- rect(rect),
- background_color(background_color),
- ca_edge_aa_mask(0),
- opacity(opacity) {
- // Because the root layer has setGeometryFlipped:YES, there is some ambiguity
- // about what exactly top and bottom mean. This ambiguity is resolved in
- // different ways for solid color CALayers and for CALayers that have content
- // (surprise!). For CALayers with IOSurface content, the top edge in the AA
- // mask refers to what appears as the bottom edge on-screen. For CALayers
- // without content (solid color layers), the top edge in the AA mask is the
- // top edge on-screen.
- // http://crbug.com/567946
- if (edge_aa_mask & GL_CA_LAYER_EDGE_LEFT_CHROMIUM)
- ca_edge_aa_mask |= kCALayerLeftEdge;
- if (edge_aa_mask & GL_CA_LAYER_EDGE_RIGHT_CHROMIUM)
- ca_edge_aa_mask |= kCALayerRightEdge;
- if (io_surface) {
- if (edge_aa_mask & GL_CA_LAYER_EDGE_TOP_CHROMIUM)
- ca_edge_aa_mask |= kCALayerBottomEdge;
- if (edge_aa_mask & GL_CA_LAYER_EDGE_BOTTOM_CHROMIUM)
- ca_edge_aa_mask |= kCALayerTopEdge;
- } else {
- if (edge_aa_mask & GL_CA_LAYER_EDGE_TOP_CHROMIUM)
- ca_edge_aa_mask |= kCALayerTopEdge;
- if (edge_aa_mask & GL_CA_LAYER_EDGE_BOTTOM_CHROMIUM)
- ca_edge_aa_mask |= kCALayerBottomEdge;
- }
-
- // Ensure that the IOSurface be in use as soon as it is added to a
- // ContentLayer, so that, by the time that the call to SwapBuffers completes,
- // all IOSurfaces that can be used as CALayer contents in the future will be
- // marked as InUse.
- if (io_surface)
- IOSurfaceIncrementUseCount(io_surface);
-}
-
-CALayerTree::ContentLayer::ContentLayer(ContentLayer&& layer)
- : io_surface(layer.io_surface),
- contents_rect(layer.contents_rect),
- rect(layer.rect),
- background_color(layer.background_color),
- ca_edge_aa_mask(layer.ca_edge_aa_mask),
- opacity(layer.opacity),
- ca_layer(layer.ca_layer) {
- DCHECK(!layer.ca_layer);
- layer.ca_layer.reset();
- // See remarks in the non-move constructor.
- if (io_surface)
- IOSurfaceIncrementUseCount(io_surface);
-}
-
-CALayerTree::ContentLayer::~ContentLayer() {
- [ca_layer removeFromSuperlayer];
- // By the time the destructor is called, the IOSurface will have been passed
- // to the WindowServer, and will remain InUse by the WindowServer as long as
- // is needed to avoid recycling bugs.
- if (io_surface)
- IOSurfaceDecrementUseCount(io_surface);
-}
-
-bool CALayerTree::RootLayer::AddContentLayer(
- bool is_clipped,
- const gfx::Rect& clip_rect,
- unsigned sorting_context_id,
- const gfx::Transform& transform,
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::RectF& contents_rect,
- const gfx::Rect& rect,
- unsigned background_color,
- unsigned edge_aa_mask,
- float opacity) {
- bool needs_new_clip_and_sorting_layer = true;
-
- // In sorting_context_id 0, all quads are listed in back-to-front order.
- // This is accomplished by having the CALayers be siblings of each other.
- // If a quad has a 3D transform, it is necessary to put it in its own sorting
- // context, so that it will not intersect with quads before and after it.
- bool is_singleton_sorting_context =
- !sorting_context_id && !transform.IsFlat();
-
- if (!clip_and_sorting_layers.empty()) {
- ClipAndSortingLayer& current_layer = clip_and_sorting_layers.back();
- // It is in error to change the clipping settings within a non-zero sorting
- // context. The result will be incorrect layering and intersection.
- if (sorting_context_id &&
- current_layer.sorting_context_id == sorting_context_id &&
- (current_layer.is_clipped != is_clipped ||
- current_layer.clip_rect != clip_rect)) {
- // Excessive logging to debug white screens (crbug.com/583805).
- // TODO(ccameron): change this back to a DLOG.
- LOG(ERROR) << "CALayer changed clip inside non-zero sorting context.";
- return false;
- }
- if (!is_singleton_sorting_context &&
- !current_layer.is_singleton_sorting_context &&
- current_layer.is_clipped == is_clipped &&
- current_layer.clip_rect == clip_rect &&
- current_layer.sorting_context_id == sorting_context_id) {
- needs_new_clip_and_sorting_layer = false;
- }
- }
- if (needs_new_clip_and_sorting_layer) {
- clip_and_sorting_layers.push_back(
- ClipAndSortingLayer(is_clipped, clip_rect, sorting_context_id,
- is_singleton_sorting_context));
- }
- clip_and_sorting_layers.back().AddContentLayer(
- transform, io_surface, contents_rect, rect, background_color,
- edge_aa_mask, opacity);
- return true;
-}
-
-void CALayerTree::ClipAndSortingLayer::AddContentLayer(
- const gfx::Transform& transform,
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::RectF& contents_rect,
- const gfx::Rect& rect,
- unsigned background_color,
- unsigned edge_aa_mask,
- float opacity) {
- bool needs_new_transform_layer = true;
- if (!transform_layers.empty()) {
- const TransformLayer& current_layer = transform_layers.back();
- if (current_layer.transform == transform)
- needs_new_transform_layer = false;
- }
- if (needs_new_transform_layer)
- transform_layers.push_back(TransformLayer(transform));
- transform_layers.back().AddContentLayer(
- io_surface, contents_rect, rect, background_color, edge_aa_mask, opacity);
-}
-
-void CALayerTree::TransformLayer::AddContentLayer(
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
- const gfx::RectF& contents_rect,
- const gfx::Rect& rect,
- unsigned background_color,
- unsigned edge_aa_mask,
- float opacity) {
- content_layers.push_back(ContentLayer(io_surface, contents_rect, rect,
- background_color, edge_aa_mask,
- opacity));
-}
-
-void CALayerTree::RootLayer::CommitToCA(CALayer* superlayer,
- RootLayer* old_layer,
- float scale_factor) {
- if (old_layer) {
- DCHECK(old_layer->ca_layer);
- std::swap(ca_layer, old_layer->ca_layer);
- } else {
- ca_layer.reset([[CALayer alloc] init]);
- [ca_layer setAnchorPoint:CGPointZero];
- [superlayer setSublayers:nil];
- [superlayer addSublayer:ca_layer];
- [superlayer setBorderWidth:0];
- }
- // Excessive logging to debug white screens (crbug.com/583805).
- // TODO(ccameron): change this back to a DCHECK.
- if ([ca_layer superlayer] != superlayer) {
- LOG(ERROR) << "CALayerTree root layer not attached to tree.";
- }
-
- for (size_t i = 0; i < clip_and_sorting_layers.size(); ++i) {
- ClipAndSortingLayer* old_clip_and_sorting_layer = nullptr;
- if (old_layer && i < old_layer->clip_and_sorting_layers.size()) {
- old_clip_and_sorting_layer = &old_layer->clip_and_sorting_layers[i];
- }
- clip_and_sorting_layers[i].CommitToCA(
- ca_layer.get(), old_clip_and_sorting_layer, scale_factor);
- }
-}
-
-void CALayerTree::ClipAndSortingLayer::CommitToCA(
- CALayer* superlayer,
- ClipAndSortingLayer* old_layer,
- float scale_factor) {
- bool update_is_clipped = true;
- bool update_clip_rect = true;
- if (old_layer) {
- DCHECK(old_layer->ca_layer);
- std::swap(ca_layer, old_layer->ca_layer);
- update_is_clipped = old_layer->is_clipped != is_clipped;
- update_clip_rect = old_layer->clip_rect != clip_rect;
- } else {
- ca_layer.reset([[CALayer alloc] init]);
- [ca_layer setAnchorPoint:CGPointZero];
- [superlayer addSublayer:ca_layer];
- }
- // Excessive logging to debug white screens (crbug.com/583805).
- // TODO(ccameron): change this back to a DCHECK.
- if ([ca_layer superlayer] != superlayer) {
- LOG(ERROR) << "CALayerTree root layer not attached to tree.";
- }
-
- if (update_is_clipped)
- [ca_layer setMasksToBounds:is_clipped];
-
- if (update_clip_rect) {
- if (is_clipped) {
- gfx::RectF dip_clip_rect = gfx::RectF(clip_rect);
- dip_clip_rect.Scale(1 / scale_factor);
- [ca_layer setPosition:CGPointMake(dip_clip_rect.x(), dip_clip_rect.y())];
- [ca_layer setBounds:CGRectMake(0, 0, dip_clip_rect.width(),
- dip_clip_rect.height())];
- [ca_layer
- setSublayerTransform:CATransform3DMakeTranslation(
- -dip_clip_rect.x(), -dip_clip_rect.y(), 0)];
- } else {
- [ca_layer setPosition:CGPointZero];
- [ca_layer setBounds:CGRectZero];
- [ca_layer setSublayerTransform:CATransform3DIdentity];
- }
- }
-
- for (size_t i = 0; i < transform_layers.size(); ++i) {
- TransformLayer* old_transform_layer = nullptr;
- if (old_layer && i < old_layer->transform_layers.size())
- old_transform_layer = &old_layer->transform_layers[i];
- transform_layers[i].CommitToCA(ca_layer.get(), old_transform_layer,
- scale_factor);
- }
-}
-
-void CALayerTree::TransformLayer::CommitToCA(CALayer* superlayer,
- TransformLayer* old_layer,
- float scale_factor) {
- bool update_transform = true;
- if (old_layer) {
- DCHECK(old_layer->ca_layer);
- std::swap(ca_layer, old_layer->ca_layer);
- update_transform = old_layer->transform != transform;
- } else {
- ca_layer.reset([[CATransformLayer alloc] init]);
- [superlayer addSublayer:ca_layer];
- }
- DCHECK_EQ([ca_layer superlayer], superlayer);
-
- if (update_transform) {
- gfx::Transform pre_scale;
- gfx::Transform post_scale;
- pre_scale.Scale(1 / scale_factor, 1 / scale_factor);
- post_scale.Scale(scale_factor, scale_factor);
- gfx::Transform conjugated_transform = pre_scale * transform * post_scale;
-
- CATransform3D ca_transform;
- conjugated_transform.matrix().asColMajord(&ca_transform.m11);
- [ca_layer setTransform:ca_transform];
- }
-
- for (size_t i = 0; i < content_layers.size(); ++i) {
- ContentLayer* old_content_layer = nullptr;
- if (old_layer && i < old_layer->content_layers.size())
- old_content_layer = &old_layer->content_layers[i];
- content_layers[i].CommitToCA(ca_layer.get(), old_content_layer,
- scale_factor);
- }
-}
-
-void CALayerTree::ContentLayer::CommitToCA(CALayer* superlayer,
- ContentLayer* old_layer,
- float scale_factor) {
- bool update_contents = true;
- bool update_contents_rect = true;
- bool update_rect = true;
- bool update_background_color = true;
- bool update_ca_edge_aa_mask = true;
- bool update_opacity = true;
- if (old_layer) {
- DCHECK(old_layer->ca_layer);
- std::swap(ca_layer, old_layer->ca_layer);
- update_contents = old_layer->io_surface != io_surface;
- update_contents_rect = old_layer->contents_rect != contents_rect;
- update_rect = old_layer->rect != rect;
- update_background_color = old_layer->background_color != background_color;
- update_ca_edge_aa_mask = old_layer->ca_edge_aa_mask != ca_edge_aa_mask;
- update_opacity = old_layer->opacity != opacity;
- } else {
- ca_layer.reset([[CALayer alloc] init]);
- [ca_layer setAnchorPoint:CGPointZero];
- [superlayer addSublayer:ca_layer];
- }
- DCHECK_EQ([ca_layer superlayer], superlayer);
- bool update_anything = update_contents || update_contents_rect ||
- update_rect || update_background_color ||
- update_ca_edge_aa_mask || update_opacity;
-
- if (update_contents) {
- [ca_layer setContents:static_cast<id>(io_surface.get())];
- if ([ca_layer respondsToSelector:(@selector(setContentsScale:))])
- [ca_layer setContentsScale:scale_factor];
- }
- if (update_contents_rect)
- [ca_layer setContentsRect:contents_rect.ToCGRect()];
- if (update_rect) {
- gfx::RectF dip_rect = gfx::RectF(rect);
- dip_rect.Scale(1 / scale_factor);
- [ca_layer setPosition:CGPointMake(dip_rect.x(), dip_rect.y())];
- [ca_layer setBounds:CGRectMake(0, 0, dip_rect.width(), dip_rect.height())];
- }
- if (update_background_color) {
- CGFloat rgba_color_components[4] = {
- SkColorGetR(background_color) / 255.,
- SkColorGetG(background_color) / 255.,
- SkColorGetB(background_color) / 255.,
- SkColorGetA(background_color) / 255.,
- };
- base::ScopedCFTypeRef<CGColorRef> srgb_background_color(CGColorCreate(
- CGColorSpaceCreateWithName(kCGColorSpaceSRGB), rgba_color_components));
- [ca_layer setBackgroundColor:srgb_background_color];
- }
- if (update_ca_edge_aa_mask)
- [ca_layer setEdgeAntialiasingMask:ca_edge_aa_mask];
- if (update_opacity)
- [ca_layer setOpacity:opacity];
-
- static bool show_borders = base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kShowMacOverlayBorders);
- if (show_borders) {
- base::ScopedCFTypeRef<CGColorRef> color;
- if (update_anything) {
- // Pink represents a CALayer that changed this frame.
- color.reset(CGColorCreateGenericRGB(1, 0, 1, 1));
- } else {
- // Grey represents a CALayer that has not changed.
- color.reset(CGColorCreateGenericRGB(0, 0, 0, 0.1));
- }
- [ca_layer setBorderWidth:1];
- [ca_layer setBorderColor:color];
- }
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/child_window_surface_win.cc b/chromium/content/common/gpu/child_window_surface_win.cc
deleted file mode 100644
index 738caea87ad..00000000000
--- a/chromium/content/common/gpu/child_window_surface_win.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/child_window_surface_win.h"
-
-#include "base/compiler_specific.h"
-#include "base/win/scoped_hdc.h"
-#include "base/win/wrapped_window_proc.h"
-#include "content/common/gpu/gpu_channel_manager.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "ui/base/win/hidden_window.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gfx/win/hwnd_util.h"
-#include "ui/gl/egl_util.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_surface_egl.h"
-#include "ui/gl/scoped_make_current.h"
-
-namespace content {
-
-namespace {
-
-ATOM g_window_class;
-
-LRESULT CALLBACK IntermediateWindowProc(HWND window,
- UINT message,
- WPARAM w_param,
- LPARAM l_param) {
- switch (message) {
- case WM_ERASEBKGND:
- // Prevent windows from erasing the background.
- return 1;
- case WM_PAINT:
- PAINTSTRUCT paint;
- if (BeginPaint(window, &paint)) {
- ChildWindowSurfaceWin* window_surface =
- reinterpret_cast<ChildWindowSurfaceWin*>(
- gfx::GetWindowUserData(window));
- DCHECK(window_surface);
-
- // Wait to clear the contents until a GL draw occurs, as otherwise an
- // unsightly black flash may happen if the GL contents are still
- // transparent.
- window_surface->InvalidateWindowRect(gfx::Rect(paint.rcPaint));
- EndPaint(window, &paint);
- }
- return 0;
- default:
- return DefWindowProc(window, message, w_param, l_param);
- }
-}
-
-void InitializeWindowClass() {
- if (g_window_class)
- return;
-
- WNDCLASSEX intermediate_class;
- base::win::InitializeWindowClass(
- L"Intermediate D3D Window",
- &base::win::WrappedWindowProc<IntermediateWindowProc>, CS_OWNDC, 0, 0,
- nullptr, reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)), nullptr,
- nullptr, nullptr, &intermediate_class);
- g_window_class = RegisterClassEx(&intermediate_class);
- if (!g_window_class) {
- LOG(ERROR) << "RegisterClass failed.";
- return;
- }
-}
-}
-
-ChildWindowSurfaceWin::ChildWindowSurfaceWin(GpuChannelManager* manager,
- HWND parent_window)
- : gfx::NativeViewGLSurfaceEGL(0),
- parent_window_(parent_window),
- manager_(manager) {
- // Don't use EGL_ANGLE_window_fixed_size so that we can avoid recreating the
- // window surface, which can cause flicker on DirectComposition.
- enable_fixed_size_angle_ = false;
-}
-
-EGLConfig ChildWindowSurfaceWin::GetConfig() {
- if (!config_) {
- int alpha_size = alpha_ ? 8 : EGL_DONT_CARE;
-
- EGLint config_attribs[] = {EGL_ALPHA_SIZE,
- alpha_size,
- EGL_BLUE_SIZE,
- 8,
- EGL_GREEN_SIZE,
- 8,
- EGL_RED_SIZE,
- 8,
- EGL_RENDERABLE_TYPE,
- EGL_OPENGL_ES2_BIT,
- EGL_SURFACE_TYPE,
- EGL_WINDOW_BIT | EGL_PBUFFER_BIT,
- EGL_NONE};
-
- EGLDisplay display = GetHardwareDisplay();
- EGLint num_configs;
- if (!eglChooseConfig(display, config_attribs, &config_, 1, &num_configs)) {
- LOG(ERROR) << "eglChooseConfig failed with error "
- << ui::GetLastEGLErrorString();
- return NULL;
- }
- }
-
- return config_;
-}
-
-bool ChildWindowSurfaceWin::InitializeNativeWindow() {
- if (window_)
- return true;
- InitializeWindowClass();
- DCHECK(g_window_class);
-
- RECT windowRect;
- GetClientRect(parent_window_, &windowRect);
-
- window_ = CreateWindowEx(
- WS_EX_NOPARENTNOTIFY, reinterpret_cast<wchar_t*>(g_window_class), L"",
- WS_CHILDWINDOW | WS_DISABLED | WS_VISIBLE, 0, 0,
- windowRect.right - windowRect.left, windowRect.bottom - windowRect.top,
- ui::GetHiddenWindow(), NULL, NULL, NULL);
- gfx::SetWindowUserData(window_, this);
- manager_->Send(new GpuHostMsg_AcceleratedSurfaceCreatedChildWindow(
- parent_window_, window_));
- return true;
-}
-
-bool ChildWindowSurfaceWin::Resize(const gfx::Size& size,
- float scale_factor,
- bool has_alpha) {
- if (!SupportsPostSubBuffer()) {
- if (!MoveWindow(window_, 0, 0, size.width(), size.height(), FALSE)) {
- return false;
- }
- return gfx::NativeViewGLSurfaceEGL::Resize(size, scale_factor, has_alpha);
- } else {
- if (size == GetSize() && has_alpha == alpha_)
- return true;
-
- if (!MoveWindow(window_, 0, 0, size.width(), size.height(), FALSE)) {
- return false;
- }
- size_ = size;
- if (has_alpha == alpha_) {
- // A 0-size PostSubBuffer doesn't swap but forces the swap chain to resize
- // to match the window.
- PostSubBuffer(0, 0, 0, 0);
- } else {
- alpha_ = has_alpha;
- config_ = nullptr;
-
- scoped_ptr<ui::ScopedMakeCurrent> scoped_make_current;
- gfx::GLContext* current_context = gfx::GLContext::GetCurrent();
- bool was_current = current_context && current_context->IsCurrent(this);
- if (was_current) {
- scoped_make_current.reset(
- new ui::ScopedMakeCurrent(current_context, this));
- current_context->ReleaseCurrent(this);
- }
-
- Destroy();
-
- if (!Initialize()) {
- LOG(ERROR) << "Failed to resize window.";
- return false;
- }
- }
- return true;
- }
-}
-
-gfx::SwapResult ChildWindowSurfaceWin::SwapBuffers() {
- gfx::SwapResult result = NativeViewGLSurfaceEGL::SwapBuffers();
- ClearInvalidContents();
- return result;
-}
-
-gfx::SwapResult ChildWindowSurfaceWin::PostSubBuffer(int x,
- int y,
- int width,
- int height) {
- gfx::SwapResult result =
- NativeViewGLSurfaceEGL::PostSubBuffer(x, y, width, height);
- ClearInvalidContents();
- return result;
-}
-
-void ChildWindowSurfaceWin::InvalidateWindowRect(const gfx::Rect& rect) {
- rect_to_clear_.Union(rect);
-}
-
-void ChildWindowSurfaceWin::ClearInvalidContents() {
- if (!rect_to_clear_.IsEmpty()) {
- base::win::ScopedGetDC dc(window_);
-
- RECT rect = rect_to_clear_.ToRECT();
-
- // DirectComposition composites with the contents under the SwapChain,
- // so ensure that's cleared. GDI treats black as transparent.
- FillRect(dc, &rect, reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)));
- rect_to_clear_ = gfx::Rect();
- }
-}
-
-ChildWindowSurfaceWin::~ChildWindowSurfaceWin() {
- gfx::SetWindowUserData(window_, nullptr);
- DestroyWindow(window_);
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/child_window_surface_win.h b/chromium/content/common/gpu/child_window_surface_win.h
deleted file mode 100644
index 83acd88ae49..00000000000
--- a/chromium/content/common/gpu/child_window_surface_win.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CHILD_WINDOW_SURFACE_WIN_H_
-#define CONTENT_COMMON_GPU_CHILD_WINDOW_SURFACE_WIN_H_
-#include "ui/gl/gl_surface_egl.h"
-
-#include <windows.h>
-
-namespace content {
-
-class GpuChannelManager;
-
-class ChildWindowSurfaceWin : public gfx::NativeViewGLSurfaceEGL {
- public:
- ChildWindowSurfaceWin(GpuChannelManager* manager, HWND parent_window);
-
- // GLSurface implementation.
- EGLConfig GetConfig() override;
- bool Resize(const gfx::Size& size,
- float scale_factor,
- bool has_alpha) override;
- bool InitializeNativeWindow() override;
- gfx::SwapResult SwapBuffers() override;
- gfx::SwapResult PostSubBuffer(int x, int y, int width, int height) override;
-
- void InvalidateWindowRect(const gfx::Rect& rect);
-
- protected:
- ~ChildWindowSurfaceWin() override;
-
- private:
- void ClearInvalidContents();
-
- HWND parent_window_;
- GpuChannelManager* manager_;
- gfx::Rect rect_to_clear_;
-
- DISALLOW_COPY_AND_ASSIGN(ChildWindowSurfaceWin);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CHILD_WINDOW_SURFACE_WIN_H_
diff --git a/chromium/content/common/gpu/client/DEPS b/chromium/content/common/gpu/client/DEPS
index 1aea6635f14..409909c2df0 100644
--- a/chromium/content/common/gpu/client/DEPS
+++ b/chromium/content/common/gpu/client/DEPS
@@ -1,10 +1,3 @@
include_rules = [
"+cc/blink",
]
-
-specific_include_rules = {
- # Tests can make use of content/browser/ infrastructure.
- ".*browsertest\.cc": [
- "+content/browser"
- ]
-}
diff --git a/chromium/content/common/gpu/client/command_buffer_metrics.cc b/chromium/content/common/gpu/client/command_buffer_metrics.cc
index 38590c36c08..2ff84f8b1f8 100644
--- a/chromium/content/common/gpu/client/command_buffer_metrics.cc
+++ b/chromium/content/common/gpu/client/command_buffer_metrics.cc
@@ -75,7 +75,7 @@ CommandBufferContextLostReason GetContextLostReason(
void RecordContextLost(CommandBufferContextType type,
CommandBufferContextLostReason reason) {
switch (type) {
- case BROWSER_COMPOSITOR_ONSCREEN_CONTEXT:
+ case DISPLAY_COMPOSITOR_ONSCREEN_CONTEXT:
UMA_HISTOGRAM_ENUMERATION("GPU.ContextLost.BrowserCompositor", reason,
CONTEXT_LOST_REASON_MAX_ENUM);
break;
@@ -124,8 +124,8 @@ std::string CommandBufferContextTypeToString(CommandBufferContextType type) {
switch (type) {
case OFFSCREEN_CONTEXT_FOR_TESTING:
return "Context-For-Testing";
- case BROWSER_COMPOSITOR_ONSCREEN_CONTEXT:
- return "Compositor";
+ case DISPLAY_COMPOSITOR_ONSCREEN_CONTEXT:
+ return "DisplayCompositor";
case BROWSER_OFFSCREEN_MAINTHREAD_CONTEXT:
return "Offscreen-MainThread";
case BROWSER_WORKER_CONTEXT:
diff --git a/chromium/content/common/gpu/client/command_buffer_metrics.h b/chromium/content/common/gpu/client/command_buffer_metrics.h
index 0b4790cd4b0..0cd62bb2209 100644
--- a/chromium/content/common/gpu/client/command_buffer_metrics.h
+++ b/chromium/content/common/gpu/client/command_buffer_metrics.h
@@ -12,7 +12,7 @@
namespace content {
enum CommandBufferContextType {
- BROWSER_COMPOSITOR_ONSCREEN_CONTEXT,
+ DISPLAY_COMPOSITOR_ONSCREEN_CONTEXT,
BROWSER_OFFSCREEN_MAINTHREAD_CONTEXT,
BROWSER_WORKER_CONTEXT,
RENDER_COMPOSITOR_CONTEXT,
diff --git a/chromium/content/common/gpu/client/command_buffer_proxy_impl.cc b/chromium/content/common/gpu/client/command_buffer_proxy_impl.cc
deleted file mode 100644
index 1b5d14b2ff4..00000000000
--- a/chromium/content/common/gpu/client/command_buffer_proxy_impl.cc
+++ /dev/null
@@ -1,820 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/command_buffer_proxy_impl.h"
-
-#include <utility>
-#include <vector>
-
-#include "base/callback.h"
-#include "base/logging.h"
-#include "base/memory/shared_memory.h"
-#include "base/stl_util.h"
-#include "base/trace_event/trace_event.h"
-#include "content/common/child_process_messages.h"
-#include "content/common/gpu/client/gpu_channel_host.h"
-#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
-#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/view_messages.h"
-#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
-#include "gpu/command_buffer/common/cmd_buffer_common.h"
-#include "gpu/command_buffer/common/command_buffer_shared.h"
-#include "gpu/command_buffer/common/gpu_memory_allocation.h"
-#include "gpu/command_buffer/common/sync_token.h"
-#include "gpu/command_buffer/service/image_factory.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gl/gl_bindings.h"
-
-namespace content {
-
-namespace {
-
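-// Packs the channel id into the high 32 bits and the route id into the low
-// 32 bits. CanWaitUnverifiedSyncToken() below relies on this layout to
-// recover the channel id from a sync token's command buffer id.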
-uint64_t CommandBufferProxyID(int channel_id, int32_t route_id) {
- return (static_cast<uint64_t>(channel_id) << 32) | route_id;
-}
-
-} // namespace
-
-CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost* channel,
- int32_t route_id,
- int32_t stream_id)
- : lock_(nullptr),
- channel_(channel),
- command_buffer_id_(CommandBufferProxyID(channel->channel_id(), route_id)),
- route_id_(route_id),
- stream_id_(stream_id),
- flush_count_(0),
- last_put_offset_(-1),
- last_barrier_put_offset_(-1),
- next_fence_sync_release_(1),
- flushed_fence_sync_release_(0),
- verified_fence_sync_release_(0),
- next_signal_id_(0),
- weak_this_(AsWeakPtr()),
- callback_thread_(base::ThreadTaskRunnerHandle::Get()) {
- DCHECK(channel);
- DCHECK(stream_id);
-}
-
-CommandBufferProxyImpl::~CommandBufferProxyImpl() {
- FOR_EACH_OBSERVER(DeletionObserver,
- deletion_observers_,
- OnWillDeleteImpl());
- if (channel_) {
- channel_->DestroyCommandBuffer(this);
- channel_ = nullptr;
- }
-}
-
-bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
- scoped_ptr<base::AutoLock> lock;
- if (lock_)
- lock.reset(new base::AutoLock(*lock_));
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalAck,
- OnSignalAck);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted,
- OnSwapBuffersCompleted);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateVSyncParameters,
- OnUpdateVSyncParameters);
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
-
- if (!handled) {
- DLOG(ERROR) << "Gpu process sent invalid message.";
- InvalidGpuMessage();
- }
- return handled;
-}
-
-void CommandBufferProxyImpl::OnChannelError() {
- scoped_ptr<base::AutoLock> lock;
- if (lock_)
- lock.reset(new base::AutoLock(*lock_));
-
- gpu::error::ContextLostReason context_lost_reason =
- gpu::error::kGpuChannelLost;
- if (shared_state_shm_ && shared_state_shm_->memory()) {
- TryUpdateState();
- // The GPU process might have intentionally been crashed
- // (exit_on_context_lost), so try to find out the original reason.
- if (last_state_.error == gpu::error::kLostContext)
- context_lost_reason = last_state_.context_lost_reason;
- }
- OnDestroyed(context_lost_reason, gpu::error::kLostContext);
-}
-
-void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason,
- gpu::error::Error error) {
- CheckLock();
- // Prevent any further messages from being sent.
- if (channel_) {
- channel_->DestroyCommandBuffer(this);
- channel_ = nullptr;
- }
-
- // When the client sees that the context is lost, they should delete this
- // CommandBufferProxyImpl and create a new one.
- last_state_.error = error;
- last_state_.context_lost_reason = reason;
-
- if (!context_lost_callback_.is_null()) {
- context_lost_callback_.Run();
- // Avoid calling the error callback more than once.
- context_lost_callback_.Reset();
- }
-}
-
-void CommandBufferProxyImpl::OnConsoleMessage(
- const GPUCommandBufferConsoleMessage& message) {
- if (!console_message_callback_.is_null()) {
- console_message_callback_.Run(message.message, message.id);
- }
-}
-
-void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
- scoped_ptr<base::AutoLock> lock;
- if (lock_)
- lock.reset(new base::AutoLock(*lock_));
- deletion_observers_.AddObserver(observer);
-}
-
-void CommandBufferProxyImpl::RemoveDeletionObserver(
- DeletionObserver* observer) {
- scoped_ptr<base::AutoLock> lock;
- if (lock_)
- lock.reset(new base::AutoLock(*lock_));
- deletion_observers_.RemoveObserver(observer);
-}
-
-void CommandBufferProxyImpl::OnSignalAck(uint32_t id) {
- SignalTaskMap::iterator it = signal_tasks_.find(id);
- if (it == signal_tasks_.end()) {
- DLOG(ERROR) << "Gpu process sent invalid SignalAck.";
- InvalidGpuMessage();
- return;
- }
- base::Closure callback = it->second;
- signal_tasks_.erase(it);
- callback.Run();
-}
-
-void CommandBufferProxyImpl::SetContextLostCallback(
- const base::Closure& callback) {
- CheckLock();
- context_lost_callback_ = callback;
-}
-
-bool CommandBufferProxyImpl::Initialize() {
- TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
- shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
- sizeof(*shared_state())).release());
- if (!shared_state_shm_)
- return false;
-
- if (!shared_state_shm_->Map(sizeof(*shared_state())))
- return false;
-
- shared_state()->Initialize();
-
- // This handle is owned by the GPU process and must be passed to it or it
- // will leak. In other words, do not early out on error between here and the
- // sending of the Initialize IPC below.
- base::SharedMemoryHandle handle =
- channel_->ShareToGpuProcess(shared_state_shm_->handle());
- if (!base::SharedMemory::IsHandleValid(handle))
- return false;
-
- bool result = false;
- if (!Send(new GpuCommandBufferMsg_Initialize(
- route_id_, handle, &result, &capabilities_))) {
- LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
- return false;
- }
-
- if (!result) {
- LOG(ERROR) << "Failed to initialize command buffer service.";
- return false;
- }
-
- capabilities_.image = true;
-
- return true;
-}
-
-gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
- return last_state_;
-}
-
-int32_t CommandBufferProxyImpl::GetLastToken() {
- TryUpdateState();
- return last_state_.token;
-}
-
-void CommandBufferProxyImpl::Flush(int32_t put_offset) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return;
-
- TRACE_EVENT1("gpu",
- "CommandBufferProxyImpl::Flush",
- "put_offset",
- put_offset);
-
- bool put_offset_changed = last_put_offset_ != put_offset;
- last_put_offset_ = put_offset;
- last_barrier_put_offset_ = put_offset;
-
- if (channel_) {
- const uint32_t flush_id = channel_->OrderingBarrier(
- route_id_, stream_id_, put_offset, ++flush_count_, latency_info_,
- put_offset_changed, true);
- if (put_offset_changed) {
- DCHECK(flush_id);
- const uint64_t fence_sync_release = next_fence_sync_release_ - 1;
- if (fence_sync_release > flushed_fence_sync_release_) {
- flushed_fence_sync_release_ = fence_sync_release;
- flushed_release_flush_id_.push(
- std::make_pair(fence_sync_release, flush_id));
- }
- }
- }
-
- if (put_offset_changed)
- latency_info_.clear();
-}
-
-void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) {
- if (last_state_.error != gpu::error::kNoError)
- return;
-
- TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset",
- put_offset);
-
- bool put_offset_changed = last_barrier_put_offset_ != put_offset;
- last_barrier_put_offset_ = put_offset;
-
- if (channel_) {
- const uint32_t flush_id = channel_->OrderingBarrier(
- route_id_, stream_id_, put_offset, ++flush_count_, latency_info_,
- put_offset_changed, false);
- if (put_offset_changed) {
- DCHECK(flush_id);
- const uint64_t fence_sync_release = next_fence_sync_release_ - 1;
- if (fence_sync_release > flushed_fence_sync_release_) {
- flushed_fence_sync_release_ = fence_sync_release;
- flushed_release_flush_id_.push(
- std::make_pair(fence_sync_release, flush_id));
- }
- }
- }
-
- if (put_offset_changed)
- latency_info_.clear();
-}
-
-void CommandBufferProxyImpl::SetLatencyInfo(
- const std::vector<ui::LatencyInfo>& latency_info) {
- CheckLock();
- for (size_t i = 0; i < latency_info.size(); i++)
- latency_info_.push_back(latency_info[i]);
-}
-
-void CommandBufferProxyImpl::SetSwapBuffersCompletionCallback(
- const SwapBuffersCompletionCallback& callback) {
- CheckLock();
- swap_buffers_completion_callback_ = callback;
-}
-
-void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback(
- const UpdateVSyncParametersCallback& callback) {
- CheckLock();
- update_vsync_parameters_completion_callback_ = callback;
-}
-
-void CommandBufferProxyImpl::WaitForTokenInRange(int32_t start, int32_t end) {
- CheckLock();
- TRACE_EVENT2("gpu",
- "CommandBufferProxyImpl::WaitForToken",
- "start",
- start,
- "end",
- end);
- TryUpdateState();
- if (!InRange(start, end, last_state_.token) &&
- last_state_.error == gpu::error::kNoError) {
- gpu::CommandBuffer::State state;
- if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
- route_id_, start, end, &state)))
- OnUpdateState(state);
- }
- if (!InRange(start, end, last_state_.token) &&
- last_state_.error == gpu::error::kNoError) {
- DLOG(ERROR) << "GPU state invalid after WaitForTokenInRange.";
- InvalidGpuReply();
- }
-}
-
-void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start,
- int32_t end) {
- CheckLock();
- TRACE_EVENT2("gpu",
- "CommandBufferProxyImpl::WaitForGetOffset",
- "start",
- start,
- "end",
- end);
- TryUpdateState();
- if (!InRange(start, end, last_state_.get_offset) &&
- last_state_.error == gpu::error::kNoError) {
- gpu::CommandBuffer::State state;
- if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
- route_id_, start, end, &state)))
- OnUpdateState(state);
- }
- if (!InRange(start, end, last_state_.get_offset) &&
- last_state_.error == gpu::error::kNoError) {
- DLOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange.";
- InvalidGpuReply();
- }
-}
-
-void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return;
-
- Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
- last_put_offset_ = -1;
-}
-
-scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
- size_t size,
- int32_t* id) {
- CheckLock();
- *id = -1;
-
- if (last_state_.error != gpu::error::kNoError)
- return NULL;
-
- int32_t new_id = channel_->ReserveTransferBufferId();
-
- scoped_ptr<base::SharedMemory> shared_memory(
- channel_->factory()->AllocateSharedMemory(size));
- if (!shared_memory) {
- if (last_state_.error == gpu::error::kNoError)
- last_state_.error = gpu::error::kOutOfBounds;
- return NULL;
- }
-
- DCHECK(!shared_memory->memory());
- if (!shared_memory->Map(size)) {
- if (last_state_.error == gpu::error::kNoError)
- last_state_.error = gpu::error::kOutOfBounds;
- return NULL;
- }
-
- // This handle is owned by the GPU process and must be passed to it or it
- // will leak. In other words, do not early out on error between here and the
- // sending of the RegisterTransferBuffer IPC below.
- base::SharedMemoryHandle handle =
- channel_->ShareToGpuProcess(shared_memory->handle());
- if (!base::SharedMemory::IsHandleValid(handle)) {
- if (last_state_.error == gpu::error::kNoError)
- last_state_.error = gpu::error::kLostContext;
- return NULL;
- }
-
- if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
- new_id,
- handle,
- size))) {
- return NULL;
- }
-
- *id = new_id;
- scoped_refptr<gpu::Buffer> buffer(
- gpu::MakeBufferFromSharedMemory(std::move(shared_memory), size));
- return buffer;
-}
-
-void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return;
-
- Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
-}
-
-gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
- return capabilities_;
-}
-
-int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
- size_t width,
- size_t height,
- unsigned internal_format) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return -1;
-
- int32_t new_id = channel_->ReserveImageId();
-
- gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager =
- channel_->gpu_memory_buffer_manager();
- gfx::GpuMemoryBuffer* gpu_memory_buffer =
- gpu_memory_buffer_manager->GpuMemoryBufferFromClientBuffer(buffer);
- DCHECK(gpu_memory_buffer);
-
- // This handle is owned by the GPU process and must be passed to it or it
- // will leak. In other words, do not early out on error between here and the
- // sending of the CreateImage IPC below.
- bool requires_sync_token = false;
- gfx::GpuMemoryBufferHandle handle =
- channel_->ShareGpuMemoryBufferToGpuProcess(gpu_memory_buffer->GetHandle(),
- &requires_sync_token);
-
- uint64_t image_fence_sync = 0;
- if (requires_sync_token) {
- image_fence_sync = GenerateFenceSyncRelease();
-
- // Make sure fence syncs were flushed before CreateImage() was called.
- DCHECK_LE(image_fence_sync - 1, flushed_fence_sync_release_);
- }
-
- DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
- gpu_memory_buffer->GetFormat(), capabilities_));
- DCHECK(gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(
- gfx::Size(width, height), gpu_memory_buffer->GetFormat()));
- DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
- internal_format, gpu_memory_buffer->GetFormat()));
-
- GpuCommandBufferMsg_CreateImage_Params params;
- params.id = new_id;
- params.gpu_memory_buffer = handle;
- params.size = gfx::Size(width, height);
- params.format = gpu_memory_buffer->GetFormat();
- params.internal_format = internal_format;
- params.image_release_count = image_fence_sync;
-
- if (!Send(new GpuCommandBufferMsg_CreateImage(route_id_, params)))
- return -1;
-
- if (image_fence_sync) {
- gpu::SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(),
- GetCommandBufferID(), image_fence_sync);
-
- // Force a synchronous IPC to validate sync token.
- EnsureWorkVisible();
- sync_token.SetVerifyFlush();
-
- gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer,
- sync_token);
- }
-
- return new_id;
-}
-
-void CommandBufferProxyImpl::DestroyImage(int32_t id) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return;
-
- Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id));
-}
-
-int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage(
- size_t width,
- size_t height,
- unsigned internal_format,
- unsigned usage) {
- CheckLock();
- scoped_ptr<gfx::GpuMemoryBuffer> buffer(
- channel_->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer(
- gfx::Size(width, height),
- gpu::ImageFactory::DefaultBufferFormatForImageFormat(internal_format),
- gfx::BufferUsage::SCANOUT));
- if (!buffer)
- return -1;
-
- return CreateImage(buffer->AsClientBuffer(), width, height, internal_format);
-}
-
-uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return 0;
-
- int32_t stream_id = channel_->GenerateRouteID();
- bool succeeded = false;
- Send(new GpuCommandBufferMsg_CreateStreamTexture(
- route_id_, texture_id, stream_id, &succeeded));
- if (!succeeded) {
- DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
- return 0;
- }
- return stream_id;
-}
-
-void CommandBufferProxyImpl::SetLock(base::Lock* lock) {
- lock_ = lock;
-}
-
-bool CommandBufferProxyImpl::IsGpuChannelLost() {
- return !channel_ || channel_->IsLost();
-}
-
-void CommandBufferProxyImpl::EnsureWorkVisible() {
- if (channel_)
- channel_->ValidateFlushIDReachedServer(stream_id_, true);
-}
-
-gpu::CommandBufferNamespace CommandBufferProxyImpl::GetNamespaceID() const {
- return gpu::CommandBufferNamespace::GPU_IO;
-}
-
-uint64_t CommandBufferProxyImpl::GetCommandBufferID() const {
- return command_buffer_id_;
-}
-
-int32_t CommandBufferProxyImpl::GetExtraCommandBufferData() const {
- return stream_id_;
-}
-
-uint64_t CommandBufferProxyImpl::GenerateFenceSyncRelease() {
- return next_fence_sync_release_++;
-}
-
-bool CommandBufferProxyImpl::IsFenceSyncRelease(uint64_t release) {
- return release != 0 && release < next_fence_sync_release_;
-}
-
-bool CommandBufferProxyImpl::IsFenceSyncFlushed(uint64_t release) {
- return release != 0 && release <= flushed_fence_sync_release_;
-}
-
-bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return false;
-
- if (release <= verified_fence_sync_release_)
- return true;
-
- // Check if we have actually flushed the fence sync release.
- if (release <= flushed_fence_sync_release_) {
- DCHECK(!flushed_release_flush_id_.empty());
- // Check if it has already been validated by another context.
- UpdateVerifiedReleases(channel_->GetHighestValidatedFlushID(stream_id_));
- if (release <= verified_fence_sync_release_)
- return true;
-
- // Has not been validated, validate it now.
- UpdateVerifiedReleases(
- channel_->ValidateFlushIDReachedServer(stream_id_, false));
- return release <= verified_fence_sync_release_;
- }
-
- return false;
-}
-
-void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token,
- const base::Closure& callback) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return;
-
- uint32_t signal_id = next_signal_id_++;
- if (!Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_,
- sync_token,
- signal_id))) {
- return;
- }
-
- signal_tasks_.insert(std::make_pair(signal_id, callback));
-}
-
-bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken(
- const gpu::SyncToken* sync_token) {
- // Can only wait on an unverified sync token if it is from the same channel.
- const uint64_t token_channel = sync_token->command_buffer_id() >> 32;
- const uint64_t channel = command_buffer_id_ >> 32;
- if (sync_token->namespace_id() != gpu::CommandBufferNamespace::GPU_IO ||
- token_channel != channel) {
- return false;
- }
-
- // If waiting on a different stream, flush pending commands on that stream.
- const int32_t release_stream_id = sync_token->extra_data_field();
- if (release_stream_id == 0)
- return false;
-
- if (release_stream_id != stream_id_)
- channel_->FlushPendingStream(release_stream_id);
-
- return true;
-}
-
-uint32_t CommandBufferProxyImpl::InsertSyncPoint() {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return 0;
-
- uint32_t sync_point = 0;
- Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, true, &sync_point));
- return sync_point;
-}
-
-uint32_t CommandBufferProxyImpl::InsertFutureSyncPoint() {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return 0;
-
- uint32_t sync_point = 0;
- Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, false, &sync_point));
- return sync_point;
-}
-
-void CommandBufferProxyImpl::RetireSyncPoint(uint32_t sync_point) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return;
-
- Send(new GpuCommandBufferMsg_RetireSyncPoint(route_id_, sync_point));
-}
-
-void CommandBufferProxyImpl::SignalSyncPoint(uint32_t sync_point,
- const base::Closure& callback) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return;
-
- uint32_t signal_id = next_signal_id_++;
- if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
- sync_point,
- signal_id))) {
- return;
- }
-
- signal_tasks_.insert(std::make_pair(signal_id, callback));
-}
-
-void CommandBufferProxyImpl::SignalQuery(uint32_t query,
- const base::Closure& callback) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return;
-
- // Signal identifiers are hidden, so nobody outside of this class will see
- // them. (And thus, they cannot save them.) The IDs themselves only last
- // until the callback is invoked, which will happen as soon as the GPU
- // catches up with the command buffer.
- // A malicious caller trying to create a collision by making next_signal_id
- // wrap would have to make calls at an astounding rate (300B/s), and even if
- // they could do that, all they would do is prevent some callbacks from
- // getting called, leading to stalled threads and/or memory leaks.
- uint32_t signal_id = next_signal_id_++;
- if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
- query,
- signal_id))) {
- return;
- }
-
- signal_tasks_.insert(std::make_pair(signal_id, callback));
-}
-
-bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
- CheckLock();
- if (last_state_.error != gpu::error::kNoError)
- return false;
-
- return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-CommandBufferProxyImpl::CreateVideoDecoder() {
- if (!channel_)
- return scoped_ptr<media::VideoDecodeAccelerator>();
- return scoped_ptr<media::VideoDecodeAccelerator>(
- new GpuVideoDecodeAcceleratorHost(channel_, this));
-}
-
-scoped_ptr<media::VideoEncodeAccelerator>
-CommandBufferProxyImpl::CreateVideoEncoder() {
- if (!channel_)
- return scoped_ptr<media::VideoEncodeAccelerator>();
- return scoped_ptr<media::VideoEncodeAccelerator>(
- new GpuVideoEncodeAcceleratorHost(channel_, this));
-}
-
-gpu::error::Error CommandBufferProxyImpl::GetLastError() {
- return last_state_.error;
-}
-
-bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
- // Caller should not intentionally send a message if the context is lost.
- DCHECK(last_state_.error == gpu::error::kNoError);
-
- if (channel_) {
- if (channel_->Send(msg)) {
- return true;
- } else {
- // Flag the command buffer as lost. Defer deleting the channel until
- // OnChannelError is called after returning to the message loop in case
- // it is referenced elsewhere.
- DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
- last_state_.error = gpu::error::kLostContext;
- return false;
- }
- }
-
- // Callee takes ownership of message, regardless of whether Send is
- // successful. See IPC::Sender.
- delete msg;
- return false;
-}
-
-void CommandBufferProxyImpl::OnUpdateState(
- const gpu::CommandBuffer::State& state) {
- // Handle wraparound. It works as long as we don't have more than 2B state
- // updates in flight across which reordering occurs.
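- // For example, an update whose generation moves from 0xFFFFFFFE to
- // 0x00000001 yields a small unsigned difference and is applied, while a
- // stale update that goes backwards yields a huge difference and is dropped.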
- if (state.generation - last_state_.generation < 0x80000000U)
- last_state_ = state;
-}
-
-void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
- const GpuConsoleMessageCallback& callback) {
- CheckLock();
- console_message_callback_ = callback;
-}
-
-void CommandBufferProxyImpl::TryUpdateState() {
- if (last_state_.error == gpu::error::kNoError)
- shared_state()->Read(&last_state_);
-}
-
-void CommandBufferProxyImpl::UpdateVerifiedReleases(uint32_t verified_flush) {
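- // Fence sync releases and flush ids are queued in increasing order, so pop
- // entries off the front of the queue until we reach one whose flush id
- // exceeds the highest verified flush.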
- while (!flushed_release_flush_id_.empty()) {
- const std::pair<uint64_t, uint32_t>& front_item =
- flushed_release_flush_id_.front();
- if (front_item.second > verified_flush)
- break;
- verified_fence_sync_release_ = front_item.first;
- flushed_release_flush_id_.pop();
- }
-}
-
-gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
- return reinterpret_cast<gpu::CommandBufferSharedState*>(
- shared_state_shm_->memory());
-}
-
-void CommandBufferProxyImpl::OnSwapBuffersCompleted(
- const std::vector<ui::LatencyInfo>& latency_info,
- gfx::SwapResult result) {
- if (!swap_buffers_completion_callback_.is_null()) {
- if (!ui::LatencyInfo::Verify(
- latency_info, "CommandBufferProxyImpl::OnSwapBuffersCompleted")) {
- swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(),
- result);
- return;
- }
- swap_buffers_completion_callback_.Run(latency_info, result);
- }
-}
-
-void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase,
- base::TimeDelta interval) {
- if (!update_vsync_parameters_completion_callback_.is_null())
- update_vsync_parameters_completion_callback_.Run(timebase, interval);
-}
-
-void CommandBufferProxyImpl::InvalidGpuMessage() {
- LOG(ERROR) << "Received invalid message from the GPU process.";
- OnDestroyed(gpu::error::kInvalidGpuMessage, gpu::error::kLostContext);
-}
-
-void CommandBufferProxyImpl::InvalidGpuReply() {
- CheckLock();
- LOG(ERROR) << "Received invalid reply from the GPU process.";
- last_state_.error = gpu::error::kLostContext;
- last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage;
- callback_thread_->PostTask(
- FROM_HERE,
- base::Bind(&CommandBufferProxyImpl::InvalidGpuReplyOnClientThread,
- weak_this_));
-}
-
-void CommandBufferProxyImpl::InvalidGpuReplyOnClientThread() {
- scoped_ptr<base::AutoLock> lock;
- if (lock_)
- lock.reset(new base::AutoLock(*lock_));
- OnDestroyed(gpu::error::kInvalidGpuMessage, gpu::error::kLostContext);
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/client/command_buffer_proxy_impl.h b/chromium/content/common/gpu/client/command_buffer_proxy_impl.h
deleted file mode 100644
index 17589a7538e..00000000000
--- a/chromium/content/common/gpu/client/command_buffer_proxy_impl.h
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_
-#define CONTENT_COMMON_GPU_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <map>
-#include <queue>
-#include <string>
-
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/containers/hash_tables.h"
-#include "base/containers/scoped_ptr_hash_map.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/weak_ptr.h"
-#include "base/observer_list.h"
-#include "gpu/command_buffer/client/gpu_control.h"
-#include "gpu/command_buffer/common/command_buffer.h"
-#include "gpu/command_buffer/common/command_buffer_shared.h"
-#include "gpu/command_buffer/common/gpu_memory_allocation.h"
-#include "ipc/ipc_listener.h"
-#include "ui/events/latency_info.h"
-#include "ui/gfx/swap_result.h"
-
-struct GPUCommandBufferConsoleMessage;
-
-namespace base {
-class SharedMemory;
-}
-
-namespace gpu {
-struct Mailbox;
-struct SyncToken;
-}
-
-namespace media {
-class VideoDecodeAccelerator;
-class VideoEncodeAccelerator;
-}
-
-namespace content {
-class GpuChannelHost;
-
-// Client side proxy that forwards messages synchronously to a
-// CommandBufferStub.
-class CommandBufferProxyImpl
- : public gpu::CommandBuffer,
- public gpu::GpuControl,
- public IPC::Listener,
- public base::SupportsWeakPtr<CommandBufferProxyImpl> {
- public:
- class DeletionObserver {
- public:
- // Called during the destruction of the CommandBufferProxyImpl.
- virtual void OnWillDeleteImpl() = 0;
-
- protected:
- virtual ~DeletionObserver() {}
- };
-
- typedef base::Callback<void(
- const std::string& msg, int id)> GpuConsoleMessageCallback;
-
- CommandBufferProxyImpl(GpuChannelHost* channel,
- int32_t route_id,
- int32_t stream_id);
- ~CommandBufferProxyImpl() override;
-
- // Sends an IPC message to create a GpuVideoDecodeAccelerator. Creates and
- // returns it as an owned pointer to a media::VideoDecodeAccelerator. Returns
- // NULL on failure to create the GpuVideoDecodeAcceleratorHost.
- // Note that the GpuVideoDecodeAccelerator may still fail to be created in
- // the GPU process, even if this returns non-NULL. In this case the VDA client
- // is notified of an error later, after Initialize().
- scoped_ptr<media::VideoDecodeAccelerator> CreateVideoDecoder();
-
- // Sends an IPC message to create a GpuVideoEncodeAccelerator. Creates and
- // returns it as an owned pointer to a media::VideoEncodeAccelerator. Returns
- // NULL on failure to create the GpuVideoEncodeAcceleratorHost.
- // Note that the GpuVideoEncodeAccelerator may still fail to be created in
- // the GPU process, even if this returns non-NULL. In this case the VEA client
- // is notified of an error later, after Initialize().
- scoped_ptr<media::VideoEncodeAccelerator> CreateVideoEncoder();
-
- // IPC::Listener implementation:
- bool OnMessageReceived(const IPC::Message& message) override;
- void OnChannelError() override;
-
- // CommandBuffer implementation:
- bool Initialize() override;
- State GetLastState() override;
- int32_t GetLastToken() override;
- void Flush(int32_t put_offset) override;
- void OrderingBarrier(int32_t put_offset) override;
- void WaitForTokenInRange(int32_t start, int32_t end) override;
- void WaitForGetOffsetInRange(int32_t start, int32_t end) override;
- void SetGetBuffer(int32_t shm_id) override;
- scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
- int32_t* id) override;
- void DestroyTransferBuffer(int32_t id) override;
-
- // gpu::GpuControl implementation:
- gpu::Capabilities GetCapabilities() override;
- int32_t CreateImage(ClientBuffer buffer,
- size_t width,
- size_t height,
- unsigned internal_format) override;
- void DestroyImage(int32_t id) override;
- int32_t CreateGpuMemoryBufferImage(size_t width,
- size_t height,
- unsigned internal_format,
- unsigned usage) override;
- uint32_t InsertSyncPoint() override;
- uint32_t InsertFutureSyncPoint() override;
- void RetireSyncPoint(uint32_t sync_point) override;
- void SignalSyncPoint(uint32_t sync_point,
- const base::Closure& callback) override;
- void SignalQuery(uint32_t query, const base::Closure& callback) override;
- void SetLock(base::Lock* lock) override;
- bool IsGpuChannelLost() override;
- void EnsureWorkVisible() override;
- gpu::CommandBufferNamespace GetNamespaceID() const override;
- uint64_t GetCommandBufferID() const override;
- int32_t GetExtraCommandBufferData() const override;
- uint64_t GenerateFenceSyncRelease() override;
- bool IsFenceSyncRelease(uint64_t release) override;
- bool IsFenceSyncFlushed(uint64_t release) override;
- bool IsFenceSyncFlushReceived(uint64_t release) override;
- void SignalSyncToken(const gpu::SyncToken& sync_token,
- const base::Closure& callback) override;
- bool CanWaitUnverifiedSyncToken(const gpu::SyncToken* sync_token) override;
-
- bool ProduceFrontBuffer(const gpu::Mailbox& mailbox);
- void SetContextLostCallback(const base::Closure& callback);
-
- void AddDeletionObserver(DeletionObserver* observer);
- void RemoveDeletionObserver(DeletionObserver* observer);
-
- bool EnsureBackbuffer();
-
- void SetOnConsoleMessageCallback(
- const GpuConsoleMessageCallback& callback);
-
- void SetLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info);
- using SwapBuffersCompletionCallback =
- base::Callback<void(const std::vector<ui::LatencyInfo>& latency_info,
- gfx::SwapResult result)>;
- void SetSwapBuffersCompletionCallback(
- const SwapBuffersCompletionCallback& callback);
-
- using UpdateVSyncParametersCallback =
- base::Callback<void(base::TimeTicks timebase, base::TimeDelta interval)>;
- void SetUpdateVSyncParametersCallback(
- const UpdateVSyncParametersCallback& callback);
-
- // TODO(apatrick): this is a temporary optimization while skia is calling
- // ContentGLContext::MakeCurrent prior to every GL call. It saves returning 6
- // ints redundantly when only the error is needed for the
- // CommandBufferProxyImpl implementation.
- gpu::error::Error GetLastError() override;
-
- int32_t route_id() const { return route_id_; }
-
- int32_t stream_id() const { return stream_id_; }
-
- GpuChannelHost* channel() const { return channel_; }
-
- base::SharedMemoryHandle GetSharedStateHandle() const {
- return shared_state_shm_->handle();
- }
- uint32_t CreateStreamTexture(uint32_t texture_id);
-
- private:
- typedef std::map<int32_t, scoped_refptr<gpu::Buffer>> TransferBufferMap;
- typedef base::hash_map<uint32_t, base::Closure> SignalTaskMap;
-
- void CheckLock() {
- if (lock_)
- lock_->AssertAcquired();
- }
-
- // Send an IPC message over the GPU channel. This is private to fully
- // encapsulate the channel; all callers of this function must explicitly
- // verify that the context has not been lost.
- bool Send(IPC::Message* msg);
-
- // Message handlers:
- void OnUpdateState(const gpu::CommandBuffer::State& state);
- void OnDestroyed(gpu::error::ContextLostReason reason,
- gpu::error::Error error);
- void OnConsoleMessage(const GPUCommandBufferConsoleMessage& message);
- void OnSignalAck(uint32_t id);
- void OnSwapBuffersCompleted(const std::vector<ui::LatencyInfo>& latency_info,
- gfx::SwapResult result);
- void OnUpdateVSyncParameters(base::TimeTicks timebase,
- base::TimeDelta interval);
-
- // Try to read an updated copy of the state from shared memory.
- void TryUpdateState();
-
- // Updates the highest verified release fence sync.
- void UpdateVerifiedReleases(uint32_t verified_flush);
-
- // Loses the context after we received an invalid message from the GPU
- // process. Will call the lost context callback reentrantly if any.
- void InvalidGpuMessage();
-
- // Loses the context after we received an invalid reply from the GPU
- // process. Will post a task to call the lost context callback if any.
- void InvalidGpuReply();
-
- void InvalidGpuReplyOnClientThread();
-
- // The shared memory area used to update state.
- gpu::CommandBufferSharedState* shared_state() const;
-
- base::Lock* lock_;
-
- // Unowned list of DeletionObservers.
- base::ObserverList<DeletionObserver> deletion_observers_;
-
- // The last cached state received from the service.
- State last_state_;
-
- // The shared memory area used to update state.
- scoped_ptr<base::SharedMemory> shared_state_shm_;
-
- // |*this| is owned by |*channel_| and so is always outlived by it, so using a
- // raw pointer is ok.
- GpuChannelHost* channel_;
- const uint64_t command_buffer_id_;
- const int32_t route_id_;
- const int32_t stream_id_;
- uint32_t flush_count_;
- int32_t last_put_offset_;
- int32_t last_barrier_put_offset_;
-
- // Next generated fence sync.
- uint64_t next_fence_sync_release_;
-
- // Unverified flushed fence syncs with their corresponding flush id.
- std::queue<std::pair<uint64_t, uint32_t>> flushed_release_flush_id_;
-
- // Last flushed fence sync release, same as last item in queue if not empty.
- uint64_t flushed_fence_sync_release_;
-
- // Last verified fence sync.
- uint64_t verified_fence_sync_release_;
-
- base::Closure context_lost_callback_;
-
- GpuConsoleMessageCallback console_message_callback_;
-
- // Tasks to be invoked in SignalSyncPoint responses.
- uint32_t next_signal_id_;
- SignalTaskMap signal_tasks_;
-
- gpu::Capabilities capabilities_;
-
- std::vector<ui::LatencyInfo> latency_info_;
-
- SwapBuffersCompletionCallback swap_buffers_completion_callback_;
- UpdateVSyncParametersCallback update_vsync_parameters_completion_callback_;
-
- base::WeakPtr<CommandBufferProxyImpl> weak_this_;
- scoped_refptr<base::SequencedTaskRunner> callback_thread_;
-
- DISALLOW_COPY_AND_ASSIGN(CommandBufferProxyImpl);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_
diff --git a/chromium/content/common/gpu/client/context_provider_command_buffer.cc b/chromium/content/common/gpu/client/context_provider_command_buffer.cc
index 2560d78067b..7ef9e73294e 100644
--- a/chromium/content/common/gpu/client/context_provider_command_buffer.cc
+++ b/chromium/content/common/gpu/client/context_provider_command_buffer.cc
@@ -12,7 +12,7 @@
#include "base/callback_helpers.h"
#include "base/strings/stringprintf.h"
#include "cc/output/managed_memory_policy.h"
-#include "content/common/gpu/client/grcontext_for_webgraphicscontext3d.h"
+#include "content/common/gpu/client/grcontext_for_gles2_interface.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "third_party/skia/include/gpu/GrContext.h"
@@ -23,11 +23,11 @@ class ContextProviderCommandBuffer::LostContextCallbackProxy
public:
explicit LostContextCallbackProxy(ContextProviderCommandBuffer* provider)
: provider_(provider) {
- provider_->WebContext3DNoChecks()->setContextLostCallback(this);
+ provider_->context3d_->setContextLostCallback(this);
}
~LostContextCallbackProxy() override {
- provider_->WebContext3DNoChecks()->setContextLostCallback(NULL);
+ provider_->context3d_->setContextLostCallback(NULL);
}
void onContextLost() override { provider_->OnLostContext(); }
@@ -49,12 +49,11 @@ ContextProviderCommandBuffer::Create(
ContextProviderCommandBuffer::ContextProviderCommandBuffer(
scoped_ptr<WebGraphicsContext3DCommandBufferImpl> context3d,
CommandBufferContextType type)
- : context_type_(type),
+ : context3d_(std::move(context3d)),
+ context_type_(type),
debug_name_(CommandBufferContextTypeToString(type)) {
- gr_interface_ = skia::AdoptRef(
- new GrGLInterfaceForWebGraphicsContext3D(std::move(context3d)));
DCHECK(main_thread_checker_.CalledOnValidThread());
- DCHECK(gr_interface_->WebContext3D());
+ DCHECK(context3d_);
context_thread_checker_.DetachFromThread();
}
@@ -63,52 +62,42 @@ ContextProviderCommandBuffer::~ContextProviderCommandBuffer() {
context_thread_checker_.CalledOnValidThread());
// Destroy references to the context3d_ before leaking it.
- if (WebContext3DNoChecks()->GetCommandBufferProxy())
- WebContext3DNoChecks()->GetCommandBufferProxy()->SetLock(nullptr);
+ if (context3d_->GetCommandBufferProxy())
+ context3d_->GetCommandBufferProxy()->SetLock(nullptr);
lost_context_callback_proxy_.reset();
}
-
-CommandBufferProxyImpl* ContextProviderCommandBuffer::GetCommandBufferProxy() {
- return WebContext3DNoChecks()->GetCommandBufferProxy();
+gpu::CommandBufferProxyImpl*
+ContextProviderCommandBuffer::GetCommandBufferProxy() {
+ return context3d_->GetCommandBufferProxy();
}
WebGraphicsContext3DCommandBufferImpl*
ContextProviderCommandBuffer::WebContext3D() {
- DCHECK(gr_interface_);
- DCHECK(gr_interface_->WebContext3D());
+ DCHECK(context3d_);
DCHECK(lost_context_callback_proxy_); // Is bound to thread.
DCHECK(context_thread_checker_.CalledOnValidThread());
- return WebContext3DNoChecks();
-}
-
-WebGraphicsContext3DCommandBufferImpl*
- ContextProviderCommandBuffer::WebContext3DNoChecks() {
- DCHECK(gr_interface_);
- return static_cast<WebGraphicsContext3DCommandBufferImpl*>(
- gr_interface_->WebContext3D());
+ return context3d_.get();
}
bool ContextProviderCommandBuffer::BindToCurrentThread() {
// This is called on the thread the context will be used.
DCHECK(context_thread_checker_.CalledOnValidThread());
- DCHECK(gr_interface_ && gr_interface_->WebContext3D());
if (lost_context_callback_proxy_)
return true;
- WebContext3DNoChecks()->SetContextType(context_type_);
- if (!WebContext3DNoChecks()->InitializeOnCurrentThread())
+ context3d_->SetContextType(context_type_);
+ if (!context3d_->InitializeOnCurrentThread())
return false;
- gr_interface_->BindToCurrentThread();
InitializeCapabilities();
std::string unique_context_name =
- base::StringPrintf("%s-%p", debug_name_.c_str(), WebContext3DNoChecks());
- WebContext3DNoChecks()->traceBeginCHROMIUM("gpu_toplevel",
- unique_context_name.c_str());
+ base::StringPrintf("%s-%p", debug_name_.c_str(), context3d_.get());
+ context3d_->GetGLInterface()->TraceBeginCHROMIUM("gpu_toplevel",
+ unique_context_name.c_str());
lost_context_callback_proxy_.reset(new LostContextCallbackProxy(this));
return true;
@@ -119,13 +108,15 @@ void ContextProviderCommandBuffer::DetachFromThread() {
}
gpu::gles2::GLES2Interface* ContextProviderCommandBuffer::ContextGL() {
+ DCHECK(context3d_);
DCHECK(lost_context_callback_proxy_); // Is bound to thread.
+ DCHECK(context_thread_checker_.CalledOnValidThread());
- return WebContext3D()->GetImplementation();
+ return context3d_->GetImplementation();
}
gpu::ContextSupport* ContextProviderCommandBuffer::ContextSupport() {
- return WebContext3DNoChecks()->GetContextSupport();
+ return context3d_->GetContextSupport();
}
class GrContext* ContextProviderCommandBuffer::GrContext() {
@@ -135,7 +126,8 @@ class GrContext* ContextProviderCommandBuffer::GrContext() {
if (gr_context_)
return gr_context_->get();
- gr_context_.reset(new GrContextForWebGraphicsContext3D(gr_interface_));
+ gr_context_.reset(
+ new GrContextForGLES2Interface(context3d_->GetGLInterface()));
// If GlContext is already lost, also abandon the new GrContext.
if (gr_context_->get() &&
@@ -154,7 +146,8 @@ void ContextProviderCommandBuffer::InvalidateGrContext(uint32_t state) {
}
void ContextProviderCommandBuffer::SetupLock() {
- WebContext3D()->GetCommandBufferProxy()->SetLock(&context_lock_);
+ DCHECK(context3d_);
+ context3d_->GetCommandBufferProxy()->SetLock(&context_lock_);
}
base::Lock* ContextProviderCommandBuffer::GetLock() {
@@ -187,9 +180,9 @@ void ContextProviderCommandBuffer::OnLostContext() {
void ContextProviderCommandBuffer::InitializeCapabilities() {
Capabilities caps;
- caps.gpu = WebContext3DNoChecks()->GetImplementation()->capabilities();
+ caps.gpu = context3d_->GetImplementation()->capabilities();
- size_t mapped_memory_limit = WebContext3DNoChecks()->GetMappedMemoryLimit();
+ size_t mapped_memory_limit = context3d_->GetMappedMemoryLimit();
caps.max_transfer_buffer_usage_bytes =
mapped_memory_limit == WebGraphicsContext3DCommandBufferImpl::kNoLimit
? std::numeric_limits<size_t>::max() : mapped_memory_limit;
diff --git a/chromium/content/common/gpu/client/context_provider_command_buffer.h b/chromium/content/common/gpu/client/context_provider_command_buffer.h
index 564e76c4040..65b0e50dc7e 100644
--- a/chromium/content/common/gpu/client/context_provider_command_buffer.h
+++ b/chromium/content/common/gpu/client/context_provider_command_buffer.h
@@ -16,12 +16,10 @@
#include "content/common/content_export.h"
#include "content/common/gpu/client/command_buffer_metrics.h"
#include "content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h"
-#include "skia/ext/refptr.h"
namespace content {
-class GrContextForWebGraphicsContext3D;
-class GrGLInterfaceForWebGraphicsContext3D;
+class GrContextForGLES2Interface;
// Implementation of cc::ContextProvider that provides a
// WebGraphicsContext3DCommandBufferImpl context and a GrContext.
@@ -32,7 +30,7 @@ class CONTENT_EXPORT ContextProviderCommandBuffer
scoped_ptr<WebGraphicsContext3DCommandBufferImpl> context3d,
CommandBufferContextType type);
- CommandBufferProxyImpl* GetCommandBufferProxy();
+ gpu::CommandBufferProxyImpl* GetCommandBufferProxy();
// cc_blink::ContextProviderWebContext implementation.
WebGraphicsContext3DCommandBufferImpl* WebContext3D() override;
@@ -60,14 +58,13 @@ class CONTENT_EXPORT ContextProviderCommandBuffer
void OnLostContext();
private:
- WebGraphicsContext3DCommandBufferImpl* WebContext3DNoChecks();
void InitializeCapabilities();
base::ThreadChecker main_thread_checker_;
base::ThreadChecker context_thread_checker_;
- skia::RefPtr<GrGLInterfaceForWebGraphicsContext3D> gr_interface_;
- scoped_ptr<GrContextForWebGraphicsContext3D> gr_context_;
+ scoped_ptr<WebGraphicsContext3DCommandBufferImpl> context3d_;
+ scoped_ptr<GrContextForGLES2Interface> gr_context_;
cc::ContextProvider::Capabilities capabilities_;
CommandBufferContextType context_type_;
@@ -78,7 +75,6 @@ class CONTENT_EXPORT ContextProviderCommandBuffer
base::Lock context_lock_;
class LostContextCallbackProxy;
- friend class LostContextCallbackProxy;
scoped_ptr<LostContextCallbackProxy> lost_context_callback_proxy_;
};
diff --git a/chromium/content/common/gpu/client/gl_helper.cc b/chromium/content/common/gpu/client/gl_helper.cc
deleted file mode 100644
index 5549caaacb8..00000000000
--- a/chromium/content/common/gpu/client/gl_helper.cc
+++ /dev/null
@@ -1,1391 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gl_helper.h"
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <queue>
-#include <string>
-
-#include "base/bind.h"
-#include "base/lazy_instance.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop.h"
-#include "base/strings/string_util.h"
-#include "base/time/time.h"
-#include "base/trace_event/trace_event.h"
-#include "content/common/gpu/client/gl_helper_readback_support.h"
-#include "content/common/gpu/client/gl_helper_scaling.h"
-#include "gpu/GLES2/gl2extchromium.h"
-#include "gpu/command_buffer/client/context_support.h"
-#include "gpu/command_buffer/common/mailbox.h"
-#include "gpu/command_buffer/common/mailbox_holder.h"
-#include "media/base/video_frame.h"
-#include "media/base/video_util.h"
-#include "third_party/skia/include/core/SkRegion.h"
-#include "ui/gfx/geometry/point.h"
-#include "ui/gfx/geometry/rect.h"
-#include "ui/gfx/geometry/size.h"
-
-using gpu::gles2::GLES2Interface;
-
-namespace {
-
-class ScopedFlush {
- public:
- explicit ScopedFlush(gpu::gles2::GLES2Interface* gl) : gl_(gl) {}
-
- ~ScopedFlush() { gl_->Flush(); }
-
- private:
- gpu::gles2::GLES2Interface* gl_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedFlush);
-};
-
-// Helper class for allocating and holding an RGBA texture of a given
-// size and an associated framebuffer.
-class TextureFrameBufferPair {
- public:
- TextureFrameBufferPair(GLES2Interface* gl, gfx::Size size)
- : texture_(gl), framebuffer_(gl), size_(size) {
- content::ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl, texture_);
- gl->TexImage2D(GL_TEXTURE_2D,
- 0,
- GL_RGBA,
- size.width(),
- size.height(),
- 0,
- GL_RGBA,
- GL_UNSIGNED_BYTE,
- NULL);
- content::ScopedFramebufferBinder<GL_FRAMEBUFFER> framebuffer_binder(
- gl, framebuffer_);
- gl->FramebufferTexture2D(
- GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture_, 0);
- }
-
- GLuint texture() const { return texture_.id(); }
- GLuint framebuffer() const { return framebuffer_.id(); }
- gfx::Size size() const { return size_; }
-
- private:
- content::ScopedTexture texture_;
- content::ScopedFramebuffer framebuffer_;
- gfx::Size size_;
-
- DISALLOW_COPY_AND_ASSIGN(TextureFrameBufferPair);
-};
-
-// Helper class for holding a scaler, a texture for the output of that
-// scaler and an associated frame buffer. This is inteded to be used
-// when the output of a scaler is to be sent to a readback.
-class ScalerHolder {
- public:
- ScalerHolder(GLES2Interface* gl, content::GLHelper::ScalerInterface* scaler)
- : texture_and_framebuffer_(gl, scaler->DstSize()), scaler_(scaler) {}
-
- void Scale(GLuint src_texture) {
- scaler_->Scale(src_texture, texture_and_framebuffer_.texture());
- }
-
- content::GLHelper::ScalerInterface* scaler() const { return scaler_.get(); }
- TextureFrameBufferPair* texture_and_framebuffer() {
- return &texture_and_framebuffer_;
- }
- GLuint texture() const { return texture_and_framebuffer_.texture(); }
-
- private:
- TextureFrameBufferPair texture_and_framebuffer_;
- scoped_ptr<content::GLHelper::ScalerInterface> scaler_;
-
- DISALLOW_COPY_AND_ASSIGN(ScalerHolder);
-};
-
-} // namespace
-
-namespace content {
-typedef GLHelperReadbackSupport::FormatSupport FormatSupport;
-
-// Implements GLHelper::CropScaleReadbackAndCleanTexture and encapsulates
-// the data needed for it.
-class GLHelper::CopyTextureToImpl
- : public base::SupportsWeakPtr<GLHelper::CopyTextureToImpl> {
- public:
- CopyTextureToImpl(GLES2Interface* gl,
- gpu::ContextSupport* context_support,
- GLHelper* helper)
- : gl_(gl),
- context_support_(context_support),
- helper_(helper),
- flush_(gl),
- max_draw_buffers_(0) {
- const GLubyte* extensions = gl_->GetString(GL_EXTENSIONS);
- if (!extensions)
- return;
- std::string extensions_string =
- " " + std::string(reinterpret_cast<const char*>(extensions)) + " ";
- if (extensions_string.find(" GL_EXT_draw_buffers ") != std::string::npos) {
- gl_->GetIntegerv(GL_MAX_DRAW_BUFFERS_EXT, &max_draw_buffers_);
- }
- }
- ~CopyTextureToImpl() { CancelRequests(); }
-
- GLuint ConsumeMailboxToTexture(const gpu::Mailbox& mailbox,
- const gpu::SyncToken& sync_token) {
- return helper_->ConsumeMailboxToTexture(mailbox, sync_token);
- }
-
- void CropScaleReadbackAndCleanTexture(
- GLuint src_texture,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- unsigned char* out,
- const SkColorType out_color_type,
- const base::Callback<void(bool)>& callback,
- GLHelper::ScalerQuality quality);
-
- void ReadbackTextureSync(GLuint texture,
- const gfx::Rect& src_rect,
- unsigned char* out,
- SkColorType format);
-
- void ReadbackTextureAsync(GLuint texture,
- const gfx::Size& dst_size,
- unsigned char* out,
- SkColorType color_type,
- const base::Callback<void(bool)>& callback);
-
- // Reads back bytes from the currently bound frame buffer.
- // Note that dst_size is specified in bytes, not pixels.
- void ReadbackAsync(
- const gfx::Size& dst_size,
- int32_t bytes_per_row, // generally dst_size.width() * 4
- int32_t row_stride_bytes, // generally dst_size.width() * 4
- unsigned char* out,
- GLenum format,
- GLenum type,
- size_t bytes_per_pixel,
- const base::Callback<void(bool)>& callback);
-
- void ReadbackPlane(TextureFrameBufferPair* source,
- const scoped_refptr<media::VideoFrame>& target,
- int plane,
- int size_shift,
- const gfx::Rect& paste_rect,
- ReadbackSwizzle swizzle,
- const base::Callback<void(bool)>& callback);
-
- GLuint CopyAndScaleTexture(GLuint texture,
- const gfx::Size& src_size,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- GLHelper::ScalerQuality quality);
-
- ReadbackYUVInterface* CreateReadbackPipelineYUV(
- GLHelper::ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool flip_vertically,
- bool use_mrt);
-
- // Returns the maximum number of draw buffers available,
- // 0 if GL_EXT_draw_buffers is not available.
- GLint MaxDrawBuffers() const { return max_draw_buffers_; }
-
- FormatSupport GetReadbackConfig(SkColorType color_type,
- bool can_swizzle,
- GLenum* format,
- GLenum* type,
- size_t* bytes_per_pixel);
-
- private:
- // A single request to CropScaleReadbackAndCleanTexture.
- // The main thread can cancel the request, before it's handled by the helper
- // thread, by resetting the texture and pixels fields. Alternatively, the
- // thread marks that it handles the request by resetting the pixels field
- // (meaning it guarantees that the callback will be called).
- // In either case, the callback must be called exactly once, and the texture
- // must be deleted by the main thread gl.
- struct Request {
- Request(const gfx::Size& size_,
- int32_t bytes_per_row_,
- int32_t row_stride_bytes_,
- unsigned char* pixels_,
- const base::Callback<void(bool)>& callback_)
- : done(false),
- size(size_),
- bytes_per_row(bytes_per_row_),
- row_stride_bytes(row_stride_bytes_),
- pixels(pixels_),
- callback(callback_),
- buffer(0),
- query(0) {}
-
- bool done;
- bool result;
- gfx::Size size;
- int bytes_per_row;
- int row_stride_bytes;
- unsigned char* pixels;
- base::Callback<void(bool)> callback;
- GLuint buffer;
- GLuint query;
- };
-
- // We must take care to call the callbacks last, as they may
- // end up destroying the gl_helper and make *this invalid.
- // We stick the finished requests in a stack object that calls
- // the callbacks when it goes out of scope.
- class FinishRequestHelper {
- public:
- FinishRequestHelper() {}
- ~FinishRequestHelper() {
- while (!requests_.empty()) {
- Request* request = requests_.front();
- requests_.pop();
- request->callback.Run(request->result);
- delete request;
- }
- }
- void Add(Request* r) {
- requests_.push(r);
- }
- private:
- std::queue<Request*> requests_;
- DISALLOW_COPY_AND_ASSIGN(FinishRequestHelper);
- };
-
- // A readback pipeline that also converts the data to YUV before
- // reading it back.
- class ReadbackYUVImpl : public ReadbackYUVInterface {
- public:
- ReadbackYUVImpl(GLES2Interface* gl,
- CopyTextureToImpl* copy_impl,
- GLHelperScaling* scaler_impl,
- GLHelper::ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool flip_vertically,
- ReadbackSwizzle swizzle);
-
- void ReadbackYUV(const gpu::Mailbox& mailbox,
- const gpu::SyncToken& sync_token,
- const scoped_refptr<media::VideoFrame>& target,
- const gfx::Point& paste_location,
- const base::Callback<void(bool)>& callback) override;
-
- ScalerInterface* scaler() override { return scaler_.scaler(); }
-
- private:
- GLES2Interface* gl_;
- CopyTextureToImpl* copy_impl_;
- gfx::Size dst_size_;
- ReadbackSwizzle swizzle_;
- ScalerHolder scaler_;
- ScalerHolder y_;
- ScalerHolder u_;
- ScalerHolder v_;
-
- DISALLOW_COPY_AND_ASSIGN(ReadbackYUVImpl);
- };
-
- // A readback pipeline that also converts the data to YUV before
- // reading it back. This one uses Multiple Render Targets, which
- // may not be supported on all platforms.
- class ReadbackYUV_MRT : public ReadbackYUVInterface {
- public:
- ReadbackYUV_MRT(GLES2Interface* gl,
- CopyTextureToImpl* copy_impl,
- GLHelperScaling* scaler_impl,
- GLHelper::ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool flip_vertically,
- ReadbackSwizzle swizzle);
-
- void ReadbackYUV(const gpu::Mailbox& mailbox,
- const gpu::SyncToken& sync_token,
- const scoped_refptr<media::VideoFrame>& target,
- const gfx::Point& paste_location,
- const base::Callback<void(bool)>& callback) override;
-
- ScalerInterface* scaler() override { return scaler_.scaler(); }
-
- private:
- GLES2Interface* gl_;
- CopyTextureToImpl* copy_impl_;
- gfx::Size dst_size_;
- GLHelper::ScalerQuality quality_;
- ReadbackSwizzle swizzle_;
- ScalerHolder scaler_;
- scoped_ptr<content::GLHelperScaling::ShaderInterface> pass1_shader_;
- scoped_ptr<content::GLHelperScaling::ShaderInterface> pass2_shader_;
- TextureFrameBufferPair y_;
- ScopedTexture uv_;
- TextureFrameBufferPair u_;
- TextureFrameBufferPair v_;
-
- DISALLOW_COPY_AND_ASSIGN(ReadbackYUV_MRT);
- };
-
- // Copies the block of pixels specified with |src_subrect| from |src_texture|,
- // scales it to |dst_size|, writes it into a texture, and returns its ID.
- // |src_size| is the size of |src_texture|.
- GLuint ScaleTexture(GLuint src_texture,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle,
- SkColorType color_type,
- GLHelper::ScalerQuality quality);
-
- // Converts each four consecutive pixels of the source texture into one pixel
- // in the result texture with each pixel channel representing the grayscale
- // color of one of the four original pixels:
- // R1G1B1A1 R2G2B2A2 R3G3B3A3 R4G4B4A4 -> X1X2X3X4
- // The resulting texture is still an RGBA texture (which is ~4 times narrower
- // than the original). If rendered directly, it wouldn't show anything useful,
- // but the data in it can be used to construct a grayscale image.
- // |encoded_texture_size| is the exact size of the resulting RGBA texture. It
- // is equal to src_size.width()/4 rounded upwards. Some channels in the last
- // pixel ((-src_size.width()) % 4, to be exact) are padding and don't contain
- // useful data.
- // If swizzle is set to true, the transformed pixels are reordered:
- // R1G1B1A1 R2G2B2A2 R3G3B3A3 R4G4B4A4 -> X3X2X1X4.
- GLuint EncodeTextureAsGrayscale(GLuint src_texture,
- const gfx::Size& src_size,
- gfx::Size* const encoded_texture_size,
- bool vertically_flip_texture,
- bool swizzle);
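A quick standalone sketch (not part of the original file) of the packing arithmetic described in the comment above: a hypothetical 10-pixel-wide source packs into an RGBA texture 3 pixels wide, with 2 padding channels in the last pixel of each row.

#include <cstdio>

int main() {
  const int src_width = 10;                              // hypothetical source width
  const int encoded_width = (src_width + 3) / 4;         // width/4, rounded up -> 3
  const int padding_channels = (4 - src_width % 4) % 4;  // unused channels -> 2
  std::printf("encoded width: %d, padding channels: %d\n", encoded_width,
              padding_channels);
  return 0;
}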
-
- static void nullcallback(bool success) {}
- void ReadbackDone(Request *request, int bytes_per_pixel);
- void FinishRequest(Request* request,
- bool result,
- FinishRequestHelper* helper);
- void CancelRequests();
-
- static const float kRGBtoYColorWeights[];
- static const float kRGBtoUColorWeights[];
- static const float kRGBtoVColorWeights[];
- static const float kRGBtoGrayscaleColorWeights[];
-
- GLES2Interface* gl_;
- gpu::ContextSupport* context_support_;
- GLHelper* helper_;
-
- // A scoped flush that will ensure all resource deletions are flushed when
- // this object is destroyed. Must be declared before other Scoped* fields.
- ScopedFlush flush_;
-
- std::queue<Request*> request_queue_;
- GLint max_draw_buffers_;
-};
-
-GLHelper::ScalerInterface* GLHelper::CreateScaler(ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle) {
- InitScalerImpl();
- return scaler_impl_->CreateScaler(quality,
- src_size,
- src_subrect,
- dst_size,
- vertically_flip_texture,
- swizzle);
-}
-
-GLuint GLHelper::CopyTextureToImpl::ScaleTexture(
- GLuint src_texture,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle,
- SkColorType color_type,
- GLHelper::ScalerQuality quality) {
- GLuint dst_texture = 0u;
- gl_->GenTextures(1, &dst_texture);
- {
- GLenum format = GL_RGBA, type = GL_UNSIGNED_BYTE;
- ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, dst_texture);
-
- // Use GL_RGBA for destination/temporary texture unless we're working with
- // 16-bit data
- if (color_type == kRGB_565_SkColorType) {
- format = GL_RGB;
- type = GL_UNSIGNED_SHORT_5_6_5;
- }
-
- gl_->TexImage2D(GL_TEXTURE_2D,
- 0,
- format,
- dst_size.width(),
- dst_size.height(),
- 0,
- format,
- type,
- NULL);
- }
- scoped_ptr<ScalerInterface> scaler(
- helper_->CreateScaler(quality,
- src_size,
- src_subrect,
- dst_size,
- vertically_flip_texture,
- swizzle));
- scaler->Scale(src_texture, dst_texture);
- return dst_texture;
-}
-
-GLuint GLHelper::CopyTextureToImpl::EncodeTextureAsGrayscale(
- GLuint src_texture,
- const gfx::Size& src_size,
- gfx::Size* const encoded_texture_size,
- bool vertically_flip_texture,
- bool swizzle) {
- GLuint dst_texture = 0u;
- gl_->GenTextures(1, &dst_texture);
- // The size of the encoded texture.
- *encoded_texture_size =
- gfx::Size((src_size.width() + 3) / 4, src_size.height());
- {
- ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, dst_texture);
- gl_->TexImage2D(GL_TEXTURE_2D,
- 0,
- GL_RGBA,
- encoded_texture_size->width(),
- encoded_texture_size->height(),
- 0,
- GL_RGBA,
- GL_UNSIGNED_BYTE,
- NULL);
- }
-
- helper_->InitScalerImpl();
- scoped_ptr<ScalerInterface> grayscale_scaler(
- helper_->scaler_impl_.get()->CreatePlanarScaler(
- src_size,
- gfx::Rect(0, 0, (src_size.width() + 3) & ~3, src_size.height()),
- *encoded_texture_size,
- vertically_flip_texture,
- swizzle,
- kRGBtoGrayscaleColorWeights));
- grayscale_scaler->Scale(src_texture, dst_texture);
- return dst_texture;
-}
-
-void GLHelper::CopyTextureToImpl::ReadbackAsync(
- const gfx::Size& dst_size,
- int32_t bytes_per_row,
- int32_t row_stride_bytes,
- unsigned char* out,
- GLenum format,
- GLenum type,
- size_t bytes_per_pixel,
- const base::Callback<void(bool)>& callback) {
- TRACE_EVENT0("gpu.capture", "GLHelper::CopyTextureToImpl::ReadbackAsync");
- Request* request =
- new Request(dst_size, bytes_per_row, row_stride_bytes, out, callback);
- request_queue_.push(request);
- request->buffer = 0u;
-
- gl_->GenBuffers(1, &request->buffer);
- gl_->BindBuffer(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, request->buffer);
- gl_->BufferData(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM,
- bytes_per_pixel * dst_size.GetArea(),
- NULL,
- GL_STREAM_READ);
-
- request->query = 0u;
- gl_->GenQueriesEXT(1, &request->query);
- gl_->BeginQueryEXT(GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM, request->query);
- gl_->ReadPixels(0,
- 0,
- dst_size.width(),
- dst_size.height(),
- format,
- type,
- NULL);
- gl_->EndQueryEXT(GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM);
- gl_->BindBuffer(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, 0);
- context_support_->SignalQuery(
- request->query,
- base::Bind(&CopyTextureToImpl::ReadbackDone, AsWeakPtr(),
- request, bytes_per_pixel));
-}
-
-void GLHelper::CopyTextureToImpl::CropScaleReadbackAndCleanTexture(
- GLuint src_texture,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- unsigned char* out,
- const SkColorType out_color_type,
- const base::Callback<void(bool)>& callback,
- GLHelper::ScalerQuality quality) {
- GLenum format, type;
- size_t bytes_per_pixel;
- SkColorType readback_color_type = out_color_type;
- // Single-component textures are not supported by all GPUs, so we implement
- // kAlpha_8_SkColorType support here via a special encoding (see below) using
- // a 32-bit texture to represent an 8-bit image.
- // Thus we use generic 32-bit readback in this case.
- if (out_color_type == kAlpha_8_SkColorType) {
- readback_color_type = kRGBA_8888_SkColorType;
- }
-
- FormatSupport supported = GetReadbackConfig(
- readback_color_type, true, &format, &type, &bytes_per_pixel);
-
- if (supported == GLHelperReadbackSupport::NOT_SUPPORTED) {
- callback.Run(false);
- return;
- }
-
- GLuint texture = src_texture;
-
- // Scale texture if needed
- // Optimization: SCALER_QUALITY_FAST is just a single bilinear pass, which we
- // can do just as well in EncodeTextureAsGrayscale, which we will do if
- // out_color_type is kAlpha_8_SkColorType, so let's skip the scaling step
- // in that case.
- bool scale_texture = out_color_type != kAlpha_8_SkColorType ||
- quality != GLHelper::SCALER_QUALITY_FAST;
- if (scale_texture) {
- // Don't swizzle during the scale step for kAlpha_8_SkColorType.
- // We will swizzle in the encode step below if needed.
- bool scale_swizzle = out_color_type == kAlpha_8_SkColorType
- ? false
- : supported == GLHelperReadbackSupport::SWIZZLE;
- texture =
- ScaleTexture(src_texture,
- src_size,
- src_subrect,
- dst_size,
- true,
- scale_swizzle,
- out_color_type == kAlpha_8_SkColorType ? kN32_SkColorType
- : out_color_type,
- quality);
- DCHECK(texture);
- }
-
- gfx::Size readback_texture_size = dst_size;
- // Encode texture to grayscale if needed.
- if (out_color_type == kAlpha_8_SkColorType) {
- // Do the vertical flip here if we haven't already done it when we scaled
- // the texture.
- bool encode_as_grayscale_vertical_flip = !scale_texture;
- // EncodeTextureAsGrayscale by default creates a texture which should be
-    // read back as RGBA, so we need to swizzle if the readback format is BGRA.
- bool encode_as_grayscale_swizzle = format == GL_BGRA_EXT;
- GLuint tmp_texture =
- EncodeTextureAsGrayscale(texture,
- dst_size,
- &readback_texture_size,
- encode_as_grayscale_vertical_flip,
- encode_as_grayscale_swizzle);
-    // If a scaled texture was created, delete it.
- if (scale_texture)
- gl_->DeleteTextures(1, &texture);
- texture = tmp_texture;
- DCHECK(texture);
- }
-
-  // Read back the pixels of the resulting texture.
- ScopedFramebuffer dst_framebuffer(gl_);
- ScopedFramebufferBinder<GL_FRAMEBUFFER> framebuffer_binder(gl_,
- dst_framebuffer);
- ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, texture);
- gl_->FramebufferTexture2D(GL_FRAMEBUFFER,
- GL_COLOR_ATTACHMENT0,
- GL_TEXTURE_2D,
- texture,
- 0);
-
- int32_t bytes_per_row = out_color_type == kAlpha_8_SkColorType
- ? dst_size.width()
- : dst_size.width() * bytes_per_pixel;
-
- ReadbackAsync(readback_texture_size,
- bytes_per_row,
- bytes_per_row,
- out,
- format,
- type,
- bytes_per_pixel,
- callback);
- gl_->DeleteTextures(1, &texture);
-}
-
-void GLHelper::CopyTextureToImpl::ReadbackTextureSync(
- GLuint texture,
- const gfx::Rect& src_rect,
- unsigned char* out,
- SkColorType color_type) {
- GLenum format, type;
- size_t bytes_per_pixel;
- FormatSupport supported =
- GetReadbackConfig(color_type, false, &format, &type, &bytes_per_pixel);
- if (supported == GLHelperReadbackSupport::NOT_SUPPORTED) {
- return;
- }
-
- ScopedFramebuffer dst_framebuffer(gl_);
- ScopedFramebufferBinder<GL_FRAMEBUFFER> framebuffer_binder(gl_,
- dst_framebuffer);
- ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, texture);
- gl_->FramebufferTexture2D(
- GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
- gl_->ReadPixels(src_rect.x(),
- src_rect.y(),
- src_rect.width(),
- src_rect.height(),
- format,
- type,
- out);
-}
-
-void GLHelper::CopyTextureToImpl::ReadbackTextureAsync(
- GLuint texture,
- const gfx::Size& dst_size,
- unsigned char* out,
- SkColorType color_type,
- const base::Callback<void(bool)>& callback) {
- GLenum format, type;
- size_t bytes_per_pixel;
- FormatSupport supported =
- GetReadbackConfig(color_type, false, &format, &type, &bytes_per_pixel);
- if (supported == GLHelperReadbackSupport::NOT_SUPPORTED) {
- callback.Run(false);
- return;
- }
-
- ScopedFramebuffer dst_framebuffer(gl_);
- ScopedFramebufferBinder<GL_FRAMEBUFFER> framebuffer_binder(gl_,
- dst_framebuffer);
- ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, texture);
- gl_->FramebufferTexture2D(GL_FRAMEBUFFER,
- GL_COLOR_ATTACHMENT0,
- GL_TEXTURE_2D,
- texture,
- 0);
- ReadbackAsync(dst_size,
- dst_size.width() * bytes_per_pixel,
- dst_size.width() * bytes_per_pixel,
- out,
- format,
- type,
- bytes_per_pixel,
- callback);
-}
-
-GLuint GLHelper::CopyTextureToImpl::CopyAndScaleTexture(
- GLuint src_texture,
- const gfx::Size& src_size,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- GLHelper::ScalerQuality quality) {
- return ScaleTexture(src_texture,
- src_size,
- gfx::Rect(src_size),
- dst_size,
- vertically_flip_texture,
- false,
- kRGBA_8888_SkColorType, // GL_RGBA
- quality);
-}
-
-void GLHelper::CopyTextureToImpl::ReadbackDone(Request* finished_request,
- int bytes_per_pixel) {
- TRACE_EVENT0("gpu.capture",
- "GLHelper::CopyTextureToImpl::CheckReadbackFramebufferComplete");
- finished_request->done = true;
-
- FinishRequestHelper finish_request_helper;
-
- // We process transfer requests in the order they were received, regardless
- // of the order we get the callbacks in.
- while (!request_queue_.empty()) {
- Request* request = request_queue_.front();
- if (!request->done) {
- break;
- }
-
- bool result = false;
- if (request->buffer != 0) {
- gl_->BindBuffer(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, request->buffer);
- unsigned char* data = static_cast<unsigned char*>(gl_->MapBufferCHROMIUM(
- GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, GL_READ_ONLY));
- if (data) {
- result = true;
- if (request->bytes_per_row == request->size.width() * bytes_per_pixel &&
- request->bytes_per_row == request->row_stride_bytes) {
- memcpy(request->pixels, data,
- request->size.GetArea() * bytes_per_pixel);
- } else {
- unsigned char* out = request->pixels;
- for (int y = 0; y < request->size.height(); y++) {
- memcpy(out, data, request->bytes_per_row);
- out += request->row_stride_bytes;
- data += request->size.width() * bytes_per_pixel;
- }
- }
- gl_->UnmapBufferCHROMIUM(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM);
- }
- gl_->BindBuffer(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, 0);
- }
- FinishRequest(request, result, &finish_request_helper);
- }
-}
-
-void GLHelper::CopyTextureToImpl::FinishRequest(
- Request* request,
- bool result,
- FinishRequestHelper* finish_request_helper) {
- TRACE_EVENT0("gpu.capture", "GLHelper::CopyTextureToImpl::FinishRequest");
- DCHECK(request_queue_.front() == request);
- request_queue_.pop();
- request->result = result;
- ScopedFlush flush(gl_);
- if (request->query != 0) {
- gl_->DeleteQueriesEXT(1, &request->query);
- request->query = 0;
- }
- if (request->buffer != 0) {
- gl_->DeleteBuffers(1, &request->buffer);
- request->buffer = 0;
- }
- finish_request_helper->Add(request);
-}
-
-void GLHelper::CopyTextureToImpl::CancelRequests() {
- FinishRequestHelper finish_request_helper;
- while (!request_queue_.empty()) {
- Request* request = request_queue_.front();
- FinishRequest(request, false, &finish_request_helper);
- }
-}
-
-FormatSupport GLHelper::CopyTextureToImpl::GetReadbackConfig(
- SkColorType color_type,
- bool can_swizzle,
- GLenum* format,
- GLenum* type,
- size_t* bytes_per_pixel) {
- return helper_->readback_support_->GetReadbackConfig(
- color_type, can_swizzle, format, type, bytes_per_pixel);
-}
-
-GLHelper::GLHelper(GLES2Interface* gl, gpu::ContextSupport* context_support)
- : gl_(gl),
- context_support_(context_support),
- readback_support_(new GLHelperReadbackSupport(gl)) {}
-
-GLHelper::~GLHelper() {}
-
-void GLHelper::CropScaleReadbackAndCleanTexture(
- GLuint src_texture,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- unsigned char* out,
- const SkColorType out_color_type,
- const base::Callback<void(bool)>& callback,
- GLHelper::ScalerQuality quality) {
- InitCopyTextToImpl();
- copy_texture_to_impl_->CropScaleReadbackAndCleanTexture(src_texture,
- src_size,
- src_subrect,
- dst_size,
- out,
- out_color_type,
- callback,
- quality);
-}
-
-void GLHelper::CropScaleReadbackAndCleanMailbox(
- const gpu::Mailbox& src_mailbox,
- const gpu::SyncToken& sync_token,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- unsigned char* out,
- const SkColorType out_color_type,
- const base::Callback<void(bool)>& callback,
- GLHelper::ScalerQuality quality) {
- GLuint mailbox_texture = ConsumeMailboxToTexture(src_mailbox, sync_token);
- CropScaleReadbackAndCleanTexture(mailbox_texture,
- src_size,
- src_subrect,
- dst_size,
- out,
- out_color_type,
- callback,
- quality);
- gl_->DeleteTextures(1, &mailbox_texture);
-}
-
-void GLHelper::ReadbackTextureSync(GLuint texture,
- const gfx::Rect& src_rect,
- unsigned char* out,
- SkColorType format) {
- InitCopyTextToImpl();
- copy_texture_to_impl_->ReadbackTextureSync(texture, src_rect, out, format);
-}
-
-void GLHelper::ReadbackTextureAsync(
- GLuint texture,
- const gfx::Size& dst_size,
- unsigned char* out,
- SkColorType color_type,
- const base::Callback<void(bool)>& callback) {
- InitCopyTextToImpl();
- copy_texture_to_impl_->ReadbackTextureAsync(texture,
- dst_size,
- out,
- color_type,
- callback);
-}
-
-GLuint GLHelper::CopyTexture(GLuint texture, const gfx::Size& size) {
- InitCopyTextToImpl();
- return copy_texture_to_impl_->CopyAndScaleTexture(
- texture, size, size, false, GLHelper::SCALER_QUALITY_FAST);
-}
-
-GLuint GLHelper::CopyAndScaleTexture(GLuint texture,
- const gfx::Size& src_size,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- ScalerQuality quality) {
- InitCopyTextToImpl();
- return copy_texture_to_impl_->CopyAndScaleTexture(
- texture, src_size, dst_size, vertically_flip_texture, quality);
-}
-
-GLuint GLHelper::CompileShaderFromSource(const GLchar* source, GLenum type) {
- GLuint shader = gl_->CreateShader(type);
- GLint length = strlen(source);
- gl_->ShaderSource(shader, 1, &source, &length);
- gl_->CompileShader(shader);
- GLint compile_status = 0;
- gl_->GetShaderiv(shader, GL_COMPILE_STATUS, &compile_status);
- if (!compile_status) {
- GLint log_length = 0;
- gl_->GetShaderiv(shader, GL_INFO_LOG_LENGTH, &log_length);
- if (log_length) {
- scoped_ptr<GLchar[]> log(new GLchar[log_length]);
- GLsizei returned_log_length = 0;
- gl_->GetShaderInfoLog(
- shader, log_length, &returned_log_length, log.get());
- LOG(ERROR) << std::string(log.get(), returned_log_length);
- }
- gl_->DeleteShader(shader);
- return 0;
- }
- return shader;
-}
-
-void GLHelper::InitCopyTextToImpl() {
- // Lazily initialize |copy_texture_to_impl_|
- if (!copy_texture_to_impl_)
- copy_texture_to_impl_.reset(
- new CopyTextureToImpl(gl_, context_support_, this));
-}
-
-void GLHelper::InitScalerImpl() {
- // Lazily initialize |scaler_impl_|
- if (!scaler_impl_)
- scaler_impl_.reset(new GLHelperScaling(gl_, this));
-}
-
-GLint GLHelper::MaxDrawBuffers() {
- InitCopyTextToImpl();
- return copy_texture_to_impl_->MaxDrawBuffers();
-}
-
-void GLHelper::CopySubBufferDamage(GLenum target,
- GLuint texture,
- GLuint previous_texture,
- const SkRegion& new_damage,
- const SkRegion& old_damage) {
- SkRegion region(old_damage);
- if (region.op(new_damage, SkRegion::kDifference_Op)) {
- ScopedFramebuffer dst_framebuffer(gl_);
- ScopedFramebufferBinder<GL_FRAMEBUFFER> framebuffer_binder(gl_,
- dst_framebuffer);
- gl_->BindTexture(target, texture);
- gl_->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target,
- previous_texture, 0);
- for (SkRegion::Iterator it(region); !it.done(); it.next()) {
- const SkIRect& rect = it.rect();
- gl_->CopyTexSubImage2D(target, 0, rect.x(), rect.y(), rect.x(), rect.y(),
- rect.width(), rect.height());
- }
- gl_->BindTexture(target, 0);
- gl_->Flush();
- }
-}
-
-GLuint GLHelper::CreateTexture() {
- GLuint texture = 0u;
- gl_->GenTextures(1, &texture);
- content::ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, texture);
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- return texture;
-}
-
-void GLHelper::DeleteTexture(GLuint texture_id) {
- gl_->DeleteTextures(1, &texture_id);
-}
-
-void GLHelper::GenerateSyncToken(gpu::SyncToken* sync_token) {
- const uint64_t fence_sync = gl_->InsertFenceSyncCHROMIUM();
- gl_->ShallowFlushCHROMIUM();
- gl_->GenSyncTokenCHROMIUM(fence_sync, sync_token->GetData());
-}
-
-void GLHelper::WaitSyncToken(const gpu::SyncToken& sync_token) {
- gl_->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
-}
-
-gpu::MailboxHolder GLHelper::ProduceMailboxHolderFromTexture(
- GLuint texture_id) {
- gpu::Mailbox mailbox;
- gl_->GenMailboxCHROMIUM(mailbox.name);
- gl_->ProduceTextureDirectCHROMIUM(texture_id, GL_TEXTURE_2D, mailbox.name);
-
- gpu::SyncToken sync_token;
- GenerateSyncToken(&sync_token);
-
- return gpu::MailboxHolder(mailbox, sync_token, GL_TEXTURE_2D);
-}
-
-GLuint GLHelper::ConsumeMailboxToTexture(const gpu::Mailbox& mailbox,
- const gpu::SyncToken& sync_token) {
- if (mailbox.IsZero())
- return 0;
- if (sync_token.HasData())
- WaitSyncToken(sync_token);
- GLuint texture =
- gl_->CreateAndConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
- return texture;
-}
-
-void GLHelper::ResizeTexture(GLuint texture, const gfx::Size& size) {
- content::ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, texture);
- gl_->TexImage2D(GL_TEXTURE_2D,
- 0,
- GL_RGB,
- size.width(),
- size.height(),
- 0,
- GL_RGB,
- GL_UNSIGNED_BYTE,
- NULL);
-}
-
-void GLHelper::CopyTextureSubImage(GLuint texture, const gfx::Rect& rect) {
- content::ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, texture);
- gl_->CopyTexSubImage2D(GL_TEXTURE_2D,
- 0,
- rect.x(),
- rect.y(),
- rect.x(),
- rect.y(),
- rect.width(),
- rect.height());
-}
-
-void GLHelper::CopyTextureFullImage(GLuint texture, const gfx::Size& size) {
- content::ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, texture);
- gl_->CopyTexImage2D(
- GL_TEXTURE_2D, 0, GL_RGB, 0, 0, size.width(), size.height(), 0);
-}
-
-void GLHelper::Flush() {
- gl_->Flush();
-}
-
-void GLHelper::InsertOrderingBarrier() {
- gl_->OrderingBarrierCHROMIUM();
-}
-
-void GLHelper::CopyTextureToImpl::ReadbackPlane(
- TextureFrameBufferPair* source,
- const scoped_refptr<media::VideoFrame>& target,
- int plane,
- int size_shift,
- const gfx::Rect& paste_rect,
- ReadbackSwizzle swizzle,
- const base::Callback<void(bool)>& callback) {
- gl_->BindFramebuffer(GL_FRAMEBUFFER, source->framebuffer());
- const size_t offset = target->stride(plane) * (paste_rect.y() >> size_shift) +
- (paste_rect.x() >> size_shift);
- ReadbackAsync(source->size(),
- paste_rect.width() >> size_shift,
- target->stride(plane),
- target->data(plane) + offset,
- (swizzle == kSwizzleBGRA) ? GL_BGRA_EXT : GL_RGBA,
- GL_UNSIGNED_BYTE,
- 4,
- callback);
-}
-
-const float GLHelper::CopyTextureToImpl::kRGBtoYColorWeights[] = {
- 0.257f, 0.504f, 0.098f, 0.0625f};
-const float GLHelper::CopyTextureToImpl::kRGBtoUColorWeights[] = {
- -0.148f, -0.291f, 0.439f, 0.5f};
-const float GLHelper::CopyTextureToImpl::kRGBtoVColorWeights[] = {
- 0.439f, -0.368f, -0.071f, 0.5f};
-const float GLHelper::CopyTextureToImpl::kRGBtoGrayscaleColorWeights[] = {
- 0.213f, 0.715f, 0.072f, 0.0f};
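To make these constants concrete, here is a standalone sketch (not from the original file) applying the Y and U weights to a normalized RGB pixel. It assumes the fourth component is applied as an additive offset, which the 0.0625 (16/256) and 0.5 (128/256) values suggest; under that reading these are the familiar video-range BT.601 coefficients, so pure white maps to roughly 235/255 luma and neutral 128/255 chroma.

#include <cstdio>

int main() {
  const float r = 1.0f, g = 1.0f, b = 1.0f;  // hypothetical pixel: pure white
  const float y_w[] = {0.257f, 0.504f, 0.098f, 0.0625f};
  const float u_w[] = {-0.148f, -0.291f, 0.439f, 0.5f};
  const float y = y_w[0] * r + y_w[1] * g + y_w[2] * b + y_w[3];  // ~0.9215
  const float u = u_w[0] * r + u_w[1] * g + u_w[2] * b + u_w[3];  // 0.5
  std::printf("Y ~= %d/255, U ~= %d/255\n",
              static_cast<int>(y * 255.0f + 0.5f),   // ~235
              static_cast<int>(u * 255.0f + 0.5f));  // 128
  return 0;
}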
-
-// YUV readback constructor. Initializes the main scaler pipeline and one
-// planar scaler for each of the Y, U and V planes.
-GLHelper::CopyTextureToImpl::ReadbackYUVImpl::ReadbackYUVImpl(
- GLES2Interface* gl,
- CopyTextureToImpl* copy_impl,
- GLHelperScaling* scaler_impl,
- GLHelper::ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool flip_vertically,
- ReadbackSwizzle swizzle)
- : gl_(gl),
- copy_impl_(copy_impl),
- dst_size_(dst_size),
- swizzle_(swizzle),
- scaler_(gl,
- scaler_impl->CreateScaler(quality,
- src_size,
- src_subrect,
- dst_size,
- flip_vertically,
- false)),
- y_(gl,
- scaler_impl->CreatePlanarScaler(
- dst_size,
- gfx::Rect(0,
- 0,
- (dst_size.width() + 3) & ~3,
- dst_size.height()),
- gfx::Size((dst_size.width() + 3) / 4, dst_size.height()),
- false,
- (swizzle == kSwizzleBGRA),
- kRGBtoYColorWeights)),
- u_(gl,
- scaler_impl->CreatePlanarScaler(
- dst_size,
- gfx::Rect(0,
- 0,
- (dst_size.width() + 7) & ~7,
- (dst_size.height() + 1) & ~1),
- gfx::Size((dst_size.width() + 7) / 8,
- (dst_size.height() + 1) / 2),
- false,
- (swizzle == kSwizzleBGRA),
- kRGBtoUColorWeights)),
- v_(gl,
- scaler_impl->CreatePlanarScaler(
- dst_size,
- gfx::Rect(0,
- 0,
- (dst_size.width() + 7) & ~7,
- (dst_size.height() + 1) & ~1),
- gfx::Size((dst_size.width() + 7) / 8,
- (dst_size.height() + 1) / 2),
- false,
- (swizzle == kSwizzleBGRA),
- kRGBtoVColorWeights)) {
- DCHECK(!(dst_size.width() & 1));
- DCHECK(!(dst_size.height() & 1));
-}
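A standalone sketch (not part of the original file) of the intermediate texture sizes this constructor sets up, assuming a hypothetical 1280x720 destination: each RGBA pixel packs four plane samples horizontally, so the Y-plane texture is 320x720 and the 2x2-subsampled U and V plane textures are 160x360.

#include <cstdio>

int main() {
  const int w = 1280, h = 720;  // hypothetical dst_size (must be even)
  std::printf("Y plane texture:   %dx%d\n", (w + 3) / 4, h);            // 320x720
  std::printf("U/V plane texture: %dx%d\n", (w + 7) / 8, (h + 1) / 2);  // 160x360
  return 0;
}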
-
-static void CallbackKeepingVideoFrameAlive(
- scoped_refptr<media::VideoFrame> video_frame,
- const base::Callback<void(bool)>& callback,
- bool success) {
- callback.Run(success);
-}
-
-void GLHelper::CopyTextureToImpl::ReadbackYUVImpl::ReadbackYUV(
- const gpu::Mailbox& mailbox,
- const gpu::SyncToken& sync_token,
- const scoped_refptr<media::VideoFrame>& target,
- const gfx::Point& paste_location,
- const base::Callback<void(bool)>& callback) {
- DCHECK(!(paste_location.x() & 1));
- DCHECK(!(paste_location.y() & 1));
-
- GLuint mailbox_texture =
- copy_impl_->ConsumeMailboxToTexture(mailbox, sync_token);
-
- // Scale texture to right size.
- scaler_.Scale(mailbox_texture);
- gl_->DeleteTextures(1, &mailbox_texture);
-
-  // Convert the scaled texture into Y, U and V planes.
- y_.Scale(scaler_.texture());
- u_.Scale(scaler_.texture());
- v_.Scale(scaler_.texture());
-
- const gfx::Rect paste_rect(paste_location, dst_size_);
- if (!target->visible_rect().Contains(paste_rect)) {
- LOG(DFATAL) << "Paste rect not inside VideoFrame's visible rect!";
- callback.Run(false);
- return;
- }
-
- // Read back planes, one at a time. Keep the video frame alive while doing the
- // readback.
- copy_impl_->ReadbackPlane(y_.texture_and_framebuffer(),
- target,
- media::VideoFrame::kYPlane,
- 0,
- paste_rect,
- swizzle_,
- base::Bind(&nullcallback));
- copy_impl_->ReadbackPlane(u_.texture_and_framebuffer(),
- target,
- media::VideoFrame::kUPlane,
- 1,
- paste_rect,
- swizzle_,
- base::Bind(&nullcallback));
- copy_impl_->ReadbackPlane(
- v_.texture_and_framebuffer(),
- target,
- media::VideoFrame::kVPlane,
- 1,
- paste_rect,
- swizzle_,
- base::Bind(&CallbackKeepingVideoFrameAlive, target, callback));
- gl_->BindFramebuffer(GL_FRAMEBUFFER, 0);
- media::LetterboxYUV(target.get(), paste_rect);
-}
-
-// YUV readback constructor (MRT variant). Initializes the main scaler
-// pipeline and the two multi-render-target passes that produce the Y, U and
-// V planes.
-GLHelper::CopyTextureToImpl::ReadbackYUV_MRT::ReadbackYUV_MRT(
- GLES2Interface* gl,
- CopyTextureToImpl* copy_impl,
- GLHelperScaling* scaler_impl,
- GLHelper::ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool flip_vertically,
- ReadbackSwizzle swizzle)
- : gl_(gl),
- copy_impl_(copy_impl),
- dst_size_(dst_size),
- quality_(quality),
- swizzle_(swizzle),
- scaler_(gl,
- scaler_impl->CreateScaler(quality,
- src_size,
- src_subrect,
- dst_size,
- false,
- false)),
- pass1_shader_(scaler_impl->CreateYuvMrtShader(
- dst_size,
- gfx::Rect(0, 0, (dst_size.width() + 3) & ~3, dst_size.height()),
- gfx::Size((dst_size.width() + 3) / 4, dst_size.height()),
- flip_vertically,
- (swizzle == kSwizzleBGRA),
- GLHelperScaling::SHADER_YUV_MRT_PASS1)),
- pass2_shader_(scaler_impl->CreateYuvMrtShader(
- gfx::Size((dst_size.width() + 3) / 4, dst_size.height()),
- gfx::Rect(0,
- 0,
- (dst_size.width() + 7) / 8 * 2,
- dst_size.height()),
- gfx::Size((dst_size.width() + 7) / 8,
- (dst_size.height() + 1) / 2),
- false,
- (swizzle == kSwizzleBGRA),
- GLHelperScaling::SHADER_YUV_MRT_PASS2)),
- y_(gl, gfx::Size((dst_size.width() + 3) / 4, dst_size.height())),
- uv_(gl),
- u_(gl,
- gfx::Size((dst_size.width() + 7) / 8,
- (dst_size.height() + 1) / 2)),
- v_(gl,
- gfx::Size((dst_size.width() + 7) / 8,
- (dst_size.height() + 1) / 2)) {
- DCHECK(!(dst_size.width() & 1));
- DCHECK(!(dst_size.height() & 1));
-
- content::ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl, uv_);
- gl->TexImage2D(GL_TEXTURE_2D,
- 0,
- GL_RGBA,
- (dst_size.width() + 3) / 4,
- dst_size.height(),
- 0,
- GL_RGBA,
- GL_UNSIGNED_BYTE,
- NULL);
-}
-
-void GLHelper::CopyTextureToImpl::ReadbackYUV_MRT::ReadbackYUV(
- const gpu::Mailbox& mailbox,
- const gpu::SyncToken& sync_token,
- const scoped_refptr<media::VideoFrame>& target,
- const gfx::Point& paste_location,
- const base::Callback<void(bool)>& callback) {
- DCHECK(!(paste_location.x() & 1));
- DCHECK(!(paste_location.y() & 1));
-
- GLuint mailbox_texture =
- copy_impl_->ConsumeMailboxToTexture(mailbox, sync_token);
-
- GLuint texture;
- if (quality_ == GLHelper::SCALER_QUALITY_FAST) {
- // Optimization: SCALER_QUALITY_FAST is just a single bilinear
- // pass, which pass1_shader_ can do just as well, so let's skip
- // the actual scaling in that case.
- texture = mailbox_texture;
- } else {
- // Scale texture to right size.
- scaler_.Scale(mailbox_texture);
- texture = scaler_.texture();
- }
-
- std::vector<GLuint> outputs(2);
-  // Convert the scaled texture into Y, U and V planes.
- outputs[0] = y_.texture();
- outputs[1] = uv_;
- pass1_shader_->Execute(texture, outputs);
-
- gl_->DeleteTextures(1, &mailbox_texture);
-
- outputs[0] = u_.texture();
- outputs[1] = v_.texture();
- pass2_shader_->Execute(uv_, outputs);
-
- const gfx::Rect paste_rect(paste_location, dst_size_);
- if (!target->visible_rect().Contains(paste_rect)) {
- LOG(DFATAL) << "Paste rect not inside VideoFrame's visible rect!";
- callback.Run(false);
- return;
- }
-
- // Read back planes, one at a time.
- copy_impl_->ReadbackPlane(&y_,
- target,
- media::VideoFrame::kYPlane,
- 0,
- paste_rect,
- swizzle_,
- base::Bind(&nullcallback));
- copy_impl_->ReadbackPlane(&u_,
- target,
- media::VideoFrame::kUPlane,
- 1,
- paste_rect,
- swizzle_,
- base::Bind(&nullcallback));
- copy_impl_->ReadbackPlane(
- &v_,
- target,
- media::VideoFrame::kVPlane,
- 1,
- paste_rect,
- swizzle_,
- base::Bind(&CallbackKeepingVideoFrameAlive, target, callback));
- gl_->BindFramebuffer(GL_FRAMEBUFFER, 0);
- media::LetterboxYUV(target.get(), paste_rect);
-}
-
-bool GLHelper::IsReadbackConfigSupported(SkColorType color_type) {
- DCHECK(readback_support_.get());
- GLenum format, type;
- size_t bytes_per_pixel;
- FormatSupport support = readback_support_->GetReadbackConfig(
- color_type, false, &format, &type, &bytes_per_pixel);
-
- return (support == GLHelperReadbackSupport::SUPPORTED);
-}
-
-ReadbackYUVInterface* GLHelper::CopyTextureToImpl::CreateReadbackPipelineYUV(
- GLHelper::ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool flip_vertically,
- bool use_mrt) {
- helper_->InitScalerImpl();
-  // Just query whether the best readback configuration needs a swizzle. In
-  // ReadbackPlane() we will choose GL_RGBA/GL_BGRA_EXT based on the swizzle.
- GLenum format, type;
- size_t bytes_per_pixel;
- FormatSupport supported = GetReadbackConfig(
- kRGBA_8888_SkColorType, true, &format, &type, &bytes_per_pixel);
- DCHECK((format == GL_RGBA || format == GL_BGRA_EXT) &&
- type == GL_UNSIGNED_BYTE);
-
- ReadbackSwizzle swizzle = kSwizzleNone;
- if (supported == GLHelperReadbackSupport::SWIZZLE)
- swizzle = kSwizzleBGRA;
-
- if (max_draw_buffers_ >= 2 && use_mrt) {
- return new ReadbackYUV_MRT(gl_,
- this,
- helper_->scaler_impl_.get(),
- quality,
- src_size,
- src_subrect,
- dst_size,
- flip_vertically,
- swizzle);
- }
- return new ReadbackYUVImpl(gl_,
- this,
- helper_->scaler_impl_.get(),
- quality,
- src_size,
- src_subrect,
- dst_size,
- flip_vertically,
- swizzle);
-}
-
-ReadbackYUVInterface* GLHelper::CreateReadbackPipelineYUV(
- ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool flip_vertically,
- bool use_mrt) {
- InitCopyTextToImpl();
- return copy_texture_to_impl_->CreateReadbackPipelineYUV(quality,
- src_size,
- src_subrect,
- dst_size,
- flip_vertically,
- use_mrt);
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/client/gl_helper.h b/chromium/content/common/gpu/client/gl_helper.h
deleted file mode 100644
index 2be3f5ae210..00000000000
--- a/chromium/content/common/gpu/client/gl_helper.h
+++ /dev/null
@@ -1,382 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GL_HELPER_H_
-#define CONTENT_COMMON_GPU_CLIENT_GL_HELPER_H_
-
-#include "base/atomicops.h"
-#include "base/callback.h"
-#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "content/common/content_export.h"
-#include "gpu/command_buffer/client/gles2_interface.h"
-#include "gpu/command_buffer/common/mailbox_holder.h"
-#include "third_party/skia/include/core/SkBitmap.h"
-
-namespace gfx {
-class Point;
-class Rect;
-class Size;
-}
-
-namespace gpu {
-class ContextSupport;
-struct Mailbox;
-}
-
-namespace media {
-class VideoFrame;
-};
-
-class SkRegion;
-
-namespace content {
-
-class GLHelperScaling;
-
-class ScopedGLuint {
- public:
- typedef void (gpu::gles2::GLES2Interface::*GenFunc)(GLsizei n, GLuint* ids);
- typedef void (gpu::gles2::GLES2Interface::*DeleteFunc)(GLsizei n,
- const GLuint* ids);
- ScopedGLuint(gpu::gles2::GLES2Interface* gl,
- GenFunc gen_func,
- DeleteFunc delete_func)
- : gl_(gl), id_(0u), delete_func_(delete_func) {
- (gl_->*gen_func)(1, &id_);
- }
-
- operator GLuint() const { return id_; }
-
- GLuint id() const { return id_; }
-
- ~ScopedGLuint() {
- if (id_ != 0) {
- (gl_->*delete_func_)(1, &id_);
- }
- }
-
- private:
- gpu::gles2::GLES2Interface* gl_;
- GLuint id_;
- DeleteFunc delete_func_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedGLuint);
-};
-
-class ScopedBuffer : public ScopedGLuint {
- public:
- explicit ScopedBuffer(gpu::gles2::GLES2Interface* gl)
- : ScopedGLuint(gl,
- &gpu::gles2::GLES2Interface::GenBuffers,
- &gpu::gles2::GLES2Interface::DeleteBuffers) {}
-};
-
-class ScopedFramebuffer : public ScopedGLuint {
- public:
- explicit ScopedFramebuffer(gpu::gles2::GLES2Interface* gl)
- : ScopedGLuint(gl,
- &gpu::gles2::GLES2Interface::GenFramebuffers,
- &gpu::gles2::GLES2Interface::DeleteFramebuffers) {}
-};
-
-class ScopedTexture : public ScopedGLuint {
- public:
- explicit ScopedTexture(gpu::gles2::GLES2Interface* gl)
- : ScopedGLuint(gl,
- &gpu::gles2::GLES2Interface::GenTextures,
- &gpu::gles2::GLES2Interface::DeleteTextures) {}
-};
-
-template <GLenum Target>
-class ScopedBinder {
- public:
- typedef void (gpu::gles2::GLES2Interface::*BindFunc)(GLenum target,
- GLuint id);
- ScopedBinder(gpu::gles2::GLES2Interface* gl, GLuint id, BindFunc bind_func)
- : gl_(gl), bind_func_(bind_func) {
- (gl_->*bind_func_)(Target, id);
- }
-
- virtual ~ScopedBinder() { (gl_->*bind_func_)(Target, 0); }
-
- private:
- gpu::gles2::GLES2Interface* gl_;
- BindFunc bind_func_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedBinder);
-};
-
-template <GLenum Target>
-class ScopedBufferBinder : ScopedBinder<Target> {
- public:
- ScopedBufferBinder(gpu::gles2::GLES2Interface* gl, GLuint id)
- : ScopedBinder<Target>(gl, id, &gpu::gles2::GLES2Interface::BindBuffer) {}
-};
-
-template <GLenum Target>
-class ScopedFramebufferBinder : ScopedBinder<Target> {
- public:
- ScopedFramebufferBinder(gpu::gles2::GLES2Interface* gl, GLuint id)
- : ScopedBinder<Target>(gl,
- id,
- &gpu::gles2::GLES2Interface::BindFramebuffer) {}
-};
-
-template <GLenum Target>
-class ScopedTextureBinder : ScopedBinder<Target> {
- public:
- ScopedTextureBinder(gpu::gles2::GLES2Interface* gl, GLuint id)
- : ScopedBinder<Target>(gl, id, &gpu::gles2::GLES2Interface::BindTexture) {
- }
-};
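The scoped classes above are plain RAII wrappers: the binder binds the given id on construction and binds 0 (not the previously bound id) on destruction. A minimal usage sketch, assuming the declarations from this header plus a valid GLES2Interface pointer and texture id supplied by the caller:

// Sketch only; |gl| and |texture| are assumed to be valid.
void UploadPixels(gpu::gles2::GLES2Interface* gl,
                  GLuint texture,
                  const void* pixels,
                  int width,
                  int height) {
  // Binds |texture| to GL_TEXTURE_2D here; rebinds 0 when the scope ends.
  content::ScopedTextureBinder<GL_TEXTURE_2D> binder(gl, texture);
  gl->TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA,
                 GL_UNSIGNED_BYTE, pixels);
}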
-
-class ReadbackYUVInterface;
-class GLHelperReadbackSupport;
-
-// Provides higher level operations on top of the gpu::gles2::GLES2Interface
-// interfaces.
-class CONTENT_EXPORT GLHelper {
- public:
- GLHelper(gpu::gles2::GLES2Interface* gl,
- gpu::ContextSupport* context_support);
- ~GLHelper();
-
- enum ScalerQuality {
- // Bilinear single pass, fastest possible.
- SCALER_QUALITY_FAST = 1,
-
- // Bilinear upscale + N * 50% bilinear downscales.
-    // This is still fast enough for most purposes, and image quality is
-    // nearly as good as the BEST option.
- SCALER_QUALITY_GOOD = 2,
-
- // Bicubic upscale + N * 50% bicubic downscales.
- // Produces very good quality scaled images, but it's
- // 2-8x slower than the "GOOD" quality, so it's not always
- // worth it.
- SCALER_QUALITY_BEST = 3,
- };
-
- // Copies the block of pixels specified with |src_subrect| from |src_texture|,
- // scales it to |dst_size|, and writes it into |out|.
- // |src_size| is the size of |src_texture|. The result is in |out_color_type|
- // format and is potentially flipped vertically to make it a correct image
- // representation. |callback| is invoked with the copy result when the copy
- // operation has completed.
- // Note that the src_texture will have the min/mag filter set to GL_LINEAR
- // and wrap_s/t set to CLAMP_TO_EDGE in this call.
- void CropScaleReadbackAndCleanTexture(
- GLuint src_texture,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- unsigned char* out,
- const SkColorType out_color_type,
- const base::Callback<void(bool)>& callback,
- GLHelper::ScalerQuality quality);
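A usage sketch with hypothetical names and sizes (not from the original file): asynchronously crop-scale a 1080p texture down to a 480x270 RGBA thumbnail. The output buffer must hold dst_size.GetArea() * 4 bytes and stay alive until |done| runs.

// Sketch only; |helper|, |src_texture|, |out_rgba| and |done| come from the caller.
void RequestThumbnailReadback(content::GLHelper* helper,
                              GLuint src_texture,
                              unsigned char* out_rgba,  // 480 * 270 * 4 bytes
                              const base::Callback<void(bool)>& done) {
  const gfx::Size src_size(1920, 1080);
  const gfx::Rect src_subrect(src_size);  // read back the whole source
  const gfx::Size dst_size(480, 270);
  helper->CropScaleReadbackAndCleanTexture(
      src_texture, src_size, src_subrect, dst_size, out_rgba,
      kRGBA_8888_SkColorType, done, content::GLHelper::SCALER_QUALITY_GOOD);
}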
-
- // Copies the block of pixels specified with |src_subrect| from |src_mailbox|,
- // scales it to |dst_size|, and writes it into |out|.
- // |src_size| is the size of |src_mailbox|. The result is in |out_color_type|
- // format and is potentially flipped vertically to make it a correct image
- // representation. |callback| is invoked with the copy result when the copy
- // operation has completed.
- // Note that the texture bound to src_mailbox will have the min/mag filter set
- // to GL_LINEAR and wrap_s/t set to CLAMP_TO_EDGE in this call. src_mailbox is
- // assumed to be GL_TEXTURE_2D.
- void CropScaleReadbackAndCleanMailbox(
- const gpu::Mailbox& src_mailbox,
- const gpu::SyncToken& sync_token,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- unsigned char* out,
- const SkColorType out_color_type,
- const base::Callback<void(bool)>& callback,
- GLHelper::ScalerQuality quality);
-
- // Copies the texture data out of |texture| into |out|. |size| is the
- // size of the texture. No post processing is applied to the pixels. The
- // texture is assumed to have a format of GL_RGBA with a pixel type of
- // GL_UNSIGNED_BYTE. This is a blocking call that calls glReadPixels on the
- // current OpenGL context.
- void ReadbackTextureSync(GLuint texture,
- const gfx::Rect& src_rect,
- unsigned char* out,
- SkColorType format);
-
- void ReadbackTextureAsync(GLuint texture,
- const gfx::Size& dst_size,
- unsigned char* out,
- SkColorType color_type,
- const base::Callback<void(bool)>& callback);
-
- // Creates a copy of the specified texture. |size| is the size of the texture.
- // Note that the src_texture will have the min/mag filter set to GL_LINEAR
- // and wrap_s/t set to CLAMP_TO_EDGE in this call.
- GLuint CopyTexture(GLuint texture, const gfx::Size& size);
-
- // Creates a scaled copy of the specified texture. |src_size| is the size of
- // the texture and |dst_size| is the size of the resulting copy.
- // Note that the src_texture will have the min/mag filter set to GL_LINEAR
- // and wrap_s/t set to CLAMP_TO_EDGE in this call.
- GLuint CopyAndScaleTexture(GLuint texture,
- const gfx::Size& src_size,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- ScalerQuality quality);
-
- // Returns the shader compiled from the source.
- GLuint CompileShaderFromSource(const GLchar* source, GLenum type);
-
- // Copies all pixels from |previous_texture| into |texture| that are
- // inside the region covered by |old_damage| but not part of |new_damage|.
- void CopySubBufferDamage(GLenum target,
- GLuint texture,
- GLuint previous_texture,
- const SkRegion& new_damage,
- const SkRegion& old_damage);
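A usage sketch with hypothetical damage regions (not from the original file): after a buffer swap, carry over the pixels that were valid in the previous buffer but were not redrawn this frame.

// Sketch only; the texture ids and rectangles are made up.
void PreserveUndamagedPixels(content::GLHelper* helper,
                             GLuint current_texture,
                             GLuint previous_texture) {
  SkRegion previous_damage(SkIRect::MakeXYWH(0, 0, 800, 600));
  SkRegion current_damage(SkIRect::MakeXYWH(100, 100, 200, 200));
  // Copies previous_damage minus current_damage from |previous_texture| into
  // |current_texture|.
  helper->CopySubBufferDamage(GL_TEXTURE_2D, current_texture, previous_texture,
                              current_damage, previous_damage);
}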
-
- // Simply creates a texture.
- GLuint CreateTexture();
- // Deletes a texture.
- void DeleteTexture(GLuint texture_id);
-
- // Inserts a fence sync, flushes, and generates a sync token.
- void GenerateSyncToken(gpu::SyncToken* sync_token);
-
- // Wait for the sync token before executing further GL commands.
- void WaitSyncToken(const gpu::SyncToken& sync_token);
-
- // Creates a mailbox holder that is attached to the given texture id, with a
- // sync point to wait on before using the mailbox. Returns a holder with an
- // empty mailbox on failure.
- // Note the texture is assumed to be GL_TEXTURE_2D.
- gpu::MailboxHolder ProduceMailboxHolderFromTexture(GLuint texture_id);
-
- // Creates a texture and consumes a mailbox into it. Returns 0 on failure.
- // Note the mailbox is assumed to be GL_TEXTURE_2D.
- GLuint ConsumeMailboxToTexture(const gpu::Mailbox& mailbox,
- const gpu::SyncToken& sync_token);
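A sketch of how the mailbox helpers pair up; the two GLHelper pointers stand in for helpers on different contexts and are hypothetical:

// Sketch only.
void ShareTexture(content::GLHelper* producer,
                  content::GLHelper* consumer,
                  GLuint texture) {
  // Producer side: wrap |texture| in a mailbox plus a sync token to wait on.
  gpu::MailboxHolder holder = producer->ProduceMailboxHolderFromTexture(texture);
  // Consumer side: wait on the sync token and get a local texture id (0 on failure).
  GLuint consumed = consumer->ConsumeMailboxToTexture(holder.mailbox,
                                                      holder.sync_token);
  if (consumed)
    consumer->DeleteTexture(consumed);  // ...after the consumer is done with it.
}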
-
- // Resizes the texture's size to |size|.
- void ResizeTexture(GLuint texture, const gfx::Size& size);
-
- // Copies the framebuffer data given in |rect| to |texture|.
- void CopyTextureSubImage(GLuint texture, const gfx::Rect& rect);
-
- // Copies the all framebuffer data to |texture|. |size| specifies the
- // size of the framebuffer.
- void CopyTextureFullImage(GLuint texture, const gfx::Size& size);
-
- // Flushes GL commands.
- void Flush();
-
- // Force commands in the current command buffer to be executed before commands
- // in other command buffers from the same process (ie channel to the GPU
- // process).
- void InsertOrderingBarrier();
-
- // A scaler will cache all intermediate textures and programs
- // needed to scale from a specified size to a destination size.
- // If the source or destination sizes changes, you must create
- // a new scaler.
- class CONTENT_EXPORT ScalerInterface {
- public:
- ScalerInterface() {}
- virtual ~ScalerInterface() {}
-
- // Note that the src_texture will have the min/mag filter set to GL_LINEAR
- // and wrap_s/t set to CLAMP_TO_EDGE in this call.
- virtual void Scale(GLuint source_texture, GLuint dest_texture) = 0;
- virtual const gfx::Size& SrcSize() = 0;
- virtual const gfx::Rect& SrcSubrect() = 0;
- virtual const gfx::Size& DstSize() = 0;
- };
-
- // Note that the quality may be adjusted down if texture
-  // allocations fail or hardware doesn't support the requested
-  // quality. Note that the ScalerQuality enum is arranged in
- // numerical order for simplicity.
- ScalerInterface* CreateScaler(ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle);
-
- // Create a readback pipeline that will scale a subsection of the source
-  // texture, convert it to planar YUV (YV12) form, and then read that back.
- // This reduces the amount of memory read from GPU to CPU memory by a factor
- // 2.6, which can be quite handy since readbacks have very limited speed
- // on some platforms. All values in |dst_size| must be a multiple of two. If
- // |use_mrt| is true, the pipeline will try to optimize the YUV conversion
- // using the multi-render-target extension. |use_mrt| should only be set to
- // false for testing.
- ReadbackYUVInterface* CreateReadbackPipelineYUV(ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool flip_vertically,
- bool use_mrt);
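The 'factor 2.6' above is just plane arithmetic; a standalone sketch (not from the original file) of where the number comes from:

#include <cstdio>

int main() {
  const double rgba_bytes_per_pixel = 4.0;
  // Planar YUV with 2x2-subsampled chroma (the YV12 layout ReadbackYUV fills):
  // 1 byte Y + 0.25 byte U + 0.25 byte V per output pixel.
  const double yuv_bytes_per_pixel = 1.0 + 0.25 + 0.25;
  std::printf("readback reduction: %.2fx\n",
              rgba_bytes_per_pixel / yuv_bytes_per_pixel);  // ~2.67x
  return 0;
}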
-
- // Returns the maximum number of draw buffers available,
- // 0 if GL_EXT_draw_buffers is not available.
- GLint MaxDrawBuffers();
-
-  // Checks whether readback is supported for a texture with the matching
-  // config. This doesn't check for cross-format readbacks.
- bool IsReadbackConfigSupported(SkColorType texture_format);
-
- protected:
- class CopyTextureToImpl;
-
- // Creates |copy_texture_to_impl_| if NULL.
- void InitCopyTextToImpl();
- // Creates |scaler_impl_| if NULL.
- void InitScalerImpl();
-
- enum ReadbackSwizzle {
- kSwizzleNone = 0,
- kSwizzleBGRA
- };
-
- gpu::gles2::GLES2Interface* gl_;
- gpu::ContextSupport* context_support_;
- scoped_ptr<CopyTextureToImpl> copy_texture_to_impl_;
- scoped_ptr<GLHelperScaling> scaler_impl_;
- scoped_ptr<GLHelperReadbackSupport> readback_support_;
-
- DISALLOW_COPY_AND_ASSIGN(GLHelper);
-};
-
-// Similar to a ScalerInterface, a yuv readback pipeline will
-// cache a scaler and all intermediate textures and frame buffers
-// needed to scale, crop, letterbox and read back a texture from
-// the GPU into CPU-accessible RAM. A single readback pipeline
-// can handle multiple outstanding readbacks at the same time, but
-// if the source or destination sizes change, you'll need to create
-// a new readback pipeline.
-class CONTENT_EXPORT ReadbackYUVInterface {
- public:
- ReadbackYUVInterface() {}
- virtual ~ReadbackYUVInterface() {}
-
- // Note that |target| must use YV12 format. |paste_location| specifies where
- // the captured pixels that are read back will be placed in the video frame.
- // The region defined by the |paste_location| and the |dst_size| specified in
- // the call to CreateReadbackPipelineYUV() must be fully contained within
- // |target->visible_rect()|.
- virtual void ReadbackYUV(const gpu::Mailbox& mailbox,
- const gpu::SyncToken& sync_token,
- const scoped_refptr<media::VideoFrame>& target,
- const gfx::Point& paste_location,
- const base::Callback<void(bool)>& callback) = 0;
- virtual GLHelper::ScalerInterface* scaler() = 0;
-};
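A usage sketch tying the YUV pipeline together, with hypothetical names and sizes; a real caller would cache the pipeline across frames rather than recreate it, and keep it alive while readbacks are outstanding:

// Sketch only.
void ReadbackOneFrame(content::GLHelper* helper,
                      const gpu::Mailbox& mailbox,
                      const gpu::SyncToken& sync_token,
                      const scoped_refptr<media::VideoFrame>& yv12_frame,
                      const base::Callback<void(bool)>& done) {
  const gfx::Size src_size(1920, 1080);  // hypothetical source texture size
  const gfx::Size dst_size(1280, 720);   // both dimensions must be even
  scoped_ptr<content::ReadbackYUVInterface> pipeline(
      helper->CreateReadbackPipelineYUV(content::GLHelper::SCALER_QUALITY_GOOD,
                                        src_size, gfx::Rect(src_size), dst_size,
                                        true /* flip_vertically */,
                                        true /* use_mrt */));
  pipeline->ReadbackYUV(mailbox, sync_token, yv12_frame, gfx::Point(0, 0), done);
}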
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GL_HELPER_H_
diff --git a/chromium/content/common/gpu/client/gl_helper_benchmark.cc b/chromium/content/common/gpu/client/gl_helper_benchmark.cc
deleted file mode 100644
index 9e751b37852..00000000000
--- a/chromium/content/common/gpu/client/gl_helper_benchmark.cc
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file looks like a unit test, but it contains benchmarks and test
-// utilities intended for manual evaluation of the scalers in
-// gl_helper*. These tests produce output in the form of files and printouts,
-// but cannot really "fail". There is no point in making these tests part
-// of any test automation run.
-
-#include <stddef.h>
-#include <stdio.h>
-#include <cmath>
-#include <string>
-#include <vector>
-
-#include <GLES2/gl2.h>
-#include <GLES2/gl2ext.h>
-#include <GLES2/gl2extchromium.h>
-
-#include "base/at_exit.h"
-#include "base/command_line.h"
-#include "base/files/file_util.h"
-#include "base/macros.h"
-#include "base/strings/stringprintf.h"
-#include "base/time/time.h"
-#include "build/build_config.h"
-#include "content/common/gpu/client/gl_helper.h"
-#include "content/common/gpu/client/gl_helper_scaling.h"
-#include "content/public/test/unittest_test_suite.h"
-#include "content/test/content_test_suite.h"
-#include "gpu/blink/webgraphicscontext3d_in_process_command_buffer_impl.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/skia/include/core/SkBitmap.h"
-#include "third_party/skia/include/core/SkTypes.h"
-#include "ui/gfx/codec/png_codec.h"
-#include "ui/gl/gl_surface.h"
-
-#if defined(OS_MACOSX)
-#include "base/mac/scoped_nsautorelease_pool.h"
-#endif
-
-namespace content {
-
-using blink::WebGLId;
-using blink::WebGraphicsContext3D;
-
-content::GLHelper::ScalerQuality kQualities[] = {
- content::GLHelper::SCALER_QUALITY_BEST,
- content::GLHelper::SCALER_QUALITY_GOOD,
- content::GLHelper::SCALER_QUALITY_FAST,
-};
-
-const char *kQualityNames[] = {
- "best",
- "good",
- "fast",
-};
-
-class GLHelperTest : public testing::Test {
- protected:
- void SetUp() override {
- WebGraphicsContext3D::Attributes attributes;
- bool lose_context_when_out_of_memory = false;
- context_ = gpu_blink::WebGraphicsContext3DInProcessCommandBufferImpl::
- CreateOffscreenContext(attributes, lose_context_when_out_of_memory);
- context_->InitializeOnCurrentThread();
-
- helper_.reset(
- new content::GLHelper(context_->GetGLInterface(),
- context_->GetContextSupport()));
- helper_scaling_.reset(new content::GLHelperScaling(
- context_->GetGLInterface(),
- helper_.get()));
- }
-
- void TearDown() override {
- helper_scaling_.reset(NULL);
- helper_.reset(NULL);
- context_.reset(NULL);
- }
-
-
- void LoadPngFileToSkBitmap(const base::FilePath& filename,
- SkBitmap* bitmap) {
- std::string compressed;
- base::ReadFileToString(base::MakeAbsoluteFilePath(filename), &compressed);
- ASSERT_TRUE(compressed.size());
- ASSERT_TRUE(gfx::PNGCodec::Decode(
- reinterpret_cast<const unsigned char*>(compressed.data()),
- compressed.size(), bitmap));
- }
-
- // Save the image to a png file. Used to create the initial test files.
- void SaveToFile(SkBitmap* bitmap, const base::FilePath& filename) {
- std::vector<unsigned char> compressed;
- ASSERT_TRUE(gfx::PNGCodec::Encode(
- static_cast<unsigned char*>(bitmap->getPixels()),
- gfx::PNGCodec::FORMAT_BGRA,
- gfx::Size(bitmap->width(), bitmap->height()),
- static_cast<int>(bitmap->rowBytes()),
- true,
- std::vector<gfx::PNGCodec::Comment>(),
- &compressed));
- ASSERT_TRUE(compressed.size());
- FILE* f = base::OpenFile(filename, "wb");
- ASSERT_TRUE(f);
- ASSERT_EQ(fwrite(&*compressed.begin(), 1, compressed.size(), f),
- compressed.size());
- base::CloseFile(f);
- }
-
- scoped_ptr<gpu_blink::WebGraphicsContext3DInProcessCommandBufferImpl>
- context_;
- scoped_ptr<content::GLHelper> helper_;
- scoped_ptr<content::GLHelperScaling> helper_scaling_;
- std::deque<GLHelperScaling::ScaleOp> x_ops_, y_ops_;
-};
-
-
-TEST_F(GLHelperTest, ScaleBenchmark) {
- int output_sizes[] = { 1920, 1080,
- 1249, 720, // Output size on pixel
- 256, 144 };
- int input_sizes[] = { 3200, 2040,
- 2560, 1476, // Pixel tab size
- 1920, 1080,
- 1280, 720,
- 800, 480,
- 256, 144 };
-
- for (size_t q = 0; q < arraysize(kQualities); q++) {
- for (size_t outsize = 0;
- outsize < arraysize(output_sizes);
- outsize += 2) {
- for (size_t insize = 0;
- insize < arraysize(input_sizes);
- insize += 2) {
- WebGLId src_texture = context_->createTexture();
- WebGLId dst_texture = context_->createTexture();
- WebGLId framebuffer = context_->createFramebuffer();
- const gfx::Size src_size(input_sizes[insize],
- input_sizes[insize + 1]);
- const gfx::Size dst_size(output_sizes[outsize],
- output_sizes[outsize + 1]);
- SkBitmap input;
- input.allocN32Pixels(src_size.width(), src_size.height());
-
- SkBitmap output_pixels;
- output_pixels.allocN32Pixels(dst_size.width(), dst_size.height());
-
- context_->bindFramebuffer(GL_FRAMEBUFFER, framebuffer);
- context_->bindTexture(GL_TEXTURE_2D, dst_texture);
- context_->texImage2D(GL_TEXTURE_2D,
- 0,
- GL_RGBA,
- dst_size.width(),
- dst_size.height(),
- 0,
- GL_RGBA,
- GL_UNSIGNED_BYTE,
- 0);
- context_->bindTexture(GL_TEXTURE_2D, src_texture);
- context_->texImage2D(GL_TEXTURE_2D,
- 0,
- GL_RGBA,
- src_size.width(),
- src_size.height(),
- 0,
- GL_RGBA,
- GL_UNSIGNED_BYTE,
- input.getPixels());
-
- gfx::Rect src_subrect(0, 0,
- src_size.width(), src_size.height());
- scoped_ptr<content::GLHelper::ScalerInterface> scaler(
- helper_->CreateScaler(kQualities[q],
- src_size,
- src_subrect,
- dst_size,
- false,
- false));
- // Scale once beforehand before we start measuring.
- scaler->Scale(src_texture, dst_texture);
- context_->finish();
-
- base::TimeTicks start_time = base::TimeTicks::Now();
- int iterations = 0;
- base::TimeTicks end_time;
- while (true) {
- for (int i = 0; i < 50; i++) {
- iterations++;
- scaler->Scale(src_texture, dst_texture);
- context_->flush();
- }
- context_->finish();
- end_time = base::TimeTicks::Now();
- if (iterations > 2000) {
- break;
- }
- if ((end_time - start_time).InMillisecondsF() > 1000) {
- break;
- }
- }
- context_->deleteTexture(dst_texture);
- context_->deleteTexture(src_texture);
- context_->deleteFramebuffer(framebuffer);
-
- std::string name;
- name = base::StringPrintf("scale_%dx%d_to_%dx%d_%s",
- src_size.width(),
- src_size.height(),
- dst_size.width(),
- dst_size.height(),
- kQualityNames[q]);
-
- float ms = (end_time - start_time).InMillisecondsF() / iterations;
- printf("*RESULT gpu_scale_time: %s=%.2f ms\n", name.c_str(), ms);
- }
- }
- }
-}
-
-// This is more of a test utility than a test.
-// Put an PNG image called "testimage.png" in your
-// current directory, then run this test. It will
-// create testoutput_Q_P.png, where Q is the scaling
-// mode and P is the scaling percentage taken from
-// the table below.
-TEST_F(GLHelperTest, DISABLED_ScaleTestImage) {
- int percents[] = {
- 230,
- 180,
- 150,
- 110,
- 90,
- 70,
- 50,
- 49,
- 40,
- 20,
- 10,
- };
-
- SkBitmap input;
- LoadPngFileToSkBitmap(base::FilePath(
- FILE_PATH_LITERAL("testimage.png")), &input);
-
- WebGLId framebuffer = context_->createFramebuffer();
- WebGLId src_texture = context_->createTexture();
- const gfx::Size src_size(input.width(), input.height());
- context_->bindFramebuffer(GL_FRAMEBUFFER, framebuffer);
- context_->bindTexture(GL_TEXTURE_2D, src_texture);
- context_->texImage2D(GL_TEXTURE_2D,
- 0,
- GL_RGBA,
- src_size.width(),
- src_size.height(),
- 0,
- GL_RGBA,
- GL_UNSIGNED_BYTE,
- input.getPixels());
-
- for (size_t q = 0; q < arraysize(kQualities); q++) {
- for (size_t p = 0; p < arraysize(percents); p++) {
- const gfx::Size dst_size(input.width() * percents[p] / 100,
- input.height() * percents[p] / 100);
- WebGLId dst_texture = helper_->CopyAndScaleTexture(
- src_texture,
- src_size,
- dst_size,
- false,
- kQualities[q]);
-
- SkBitmap output_pixels;
- output_pixels.allocN32Pixels(dst_size.width(), dst_size.height());
-
- helper_->ReadbackTextureSync(
- dst_texture,
- gfx::Rect(0, 0,
- dst_size.width(),
- dst_size.height()),
- static_cast<unsigned char *>(output_pixels.getPixels()),
- kN32_SkColorType);
- context_->deleteTexture(dst_texture);
- std::string filename = base::StringPrintf("testoutput_%s_%d.ppm",
- kQualityNames[q],
- percents[p]);
- VLOG(0) << "Writing " << filename;
- SaveToFile(&output_pixels, base::FilePath::FromUTF8Unsafe(filename));
- }
- }
- context_->deleteTexture(src_texture);
- context_->deleteFramebuffer(framebuffer);
-}
-
-}  // namespace content
-
-// These tests need to run against a proper GL environment, so we
-// need to set it up before we can run the tests.
-int main(int argc, char** argv) {
- base::CommandLine::Init(argc, argv);
- base::TestSuite* suite = new content::ContentTestSuite(argc, argv);
-#if defined(OS_MACOSX)
- base::mac::ScopedNSAutoreleasePool pool;
-#endif
- gfx::GLSurface::InitializeOneOff();
-
- return content::UnitTestTestSuite(suite).Run();
-}
diff --git a/chromium/content/common/gpu/client/gl_helper_readback_support.cc b/chromium/content/common/gpu/client/gl_helper_readback_support.cc
deleted file mode 100644
index e773ca90782..00000000000
--- a/chromium/content/common/gpu/client/gl_helper_readback_support.cc
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gl_helper_readback_support.h"
-#include "base/logging.h"
-#include "gpu/GLES2/gl2extchromium.h"
-#include "third_party/skia/include/core/SkImageInfo.h"
-
-namespace content {
-
-GLHelperReadbackSupport::GLHelperReadbackSupport(gpu::gles2::GLES2Interface* gl)
- : gl_(gl) {
- InitializeReadbackSupport();
-}
-
-GLHelperReadbackSupport::~GLHelperReadbackSupport() {}
-
-void GLHelperReadbackSupport::InitializeReadbackSupport() {
-  // We are concerned with 16- and 32-bit formats only; the ones below are the
-  // most commonly used. If support for any new format is needed in the future,
-  // it should be added here. Initialize the array with
-  // GLHelperReadbackSupport::NOT_SUPPORTED as we don't know the supported
-  // formats yet.
- for (int i = 0; i <= kLastEnum_SkColorType; ++i) {
- format_support_table_[i] = GLHelperReadbackSupport::NOT_SUPPORTED;
- }
- // TODO(sikugu): kAlpha_8_SkColorType support check is failing on mesa.
- // See crbug.com/415667.
- CheckForReadbackSupport(kRGB_565_SkColorType);
- CheckForReadbackSupport(kARGB_4444_SkColorType);
- CheckForReadbackSupport(kRGBA_8888_SkColorType);
- CheckForReadbackSupport(kBGRA_8888_SkColorType);
-  // Support for any further formats should be checked here.
-}
-
-void GLHelperReadbackSupport::CheckForReadbackSupport(
- SkColorType texture_format) {
- bool supports_format = false;
- switch (texture_format) {
- case kRGB_565_SkColorType:
- supports_format = SupportsFormat(GL_RGB, GL_UNSIGNED_SHORT_5_6_5);
- break;
- case kRGBA_8888_SkColorType:
- // This is the baseline, assume always true.
- supports_format = true;
- break;
- case kBGRA_8888_SkColorType:
- supports_format = SupportsFormat(GL_BGRA_EXT, GL_UNSIGNED_BYTE);
- break;
- case kARGB_4444_SkColorType:
- supports_format = false;
- break;
- default:
- NOTREACHED();
- supports_format = false;
- break;
- }
- DCHECK((int)texture_format <= (int)kLastEnum_SkColorType);
- format_support_table_[texture_format] =
- supports_format ? GLHelperReadbackSupport::SUPPORTED
- : GLHelperReadbackSupport::NOT_SUPPORTED;
-}
-
-void GLHelperReadbackSupport::GetAdditionalFormat(GLenum format,
- GLenum type,
- GLenum* format_out,
- GLenum* type_out) {
- for (unsigned int i = 0; i < format_cache_.size(); i++) {
- if (format_cache_[i].format == format && format_cache_[i].type == type) {
- *format_out = format_cache_[i].read_format;
- *type_out = format_cache_[i].read_type;
- return;
- }
- }
-
- const int kTestSize = 64;
- content::ScopedTexture dst_texture(gl_);
- ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, dst_texture);
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- gl_->TexImage2D(
- GL_TEXTURE_2D, 0, format, kTestSize, kTestSize, 0, format, type, NULL);
- ScopedFramebuffer dst_framebuffer(gl_);
- ScopedFramebufferBinder<GL_FRAMEBUFFER> framebuffer_binder(gl_,
- dst_framebuffer);
- gl_->FramebufferTexture2D(
- GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, dst_texture, 0);
- GLint format_tmp = 0, type_tmp = 0;
- gl_->GetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &format_tmp);
- gl_->GetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &type_tmp);
- *format_out = format_tmp;
- *type_out = type_tmp;
-
- struct FormatCacheEntry entry = { format, type, *format_out, *type_out };
- format_cache_.push_back(entry);
-}
-
-bool GLHelperReadbackSupport::SupportsFormat(GLenum format, GLenum type) {
-  // The GLES 2.0 specification says this pairing is always supported;
-  // additional supported formats are reported via
-  // GL_IMPLEMENTATION_COLOR_READ_FORMAT/TYPE.
- if (format == GL_RGBA && type == GL_UNSIGNED_BYTE)
- return true;
-
- if (format == GL_BGRA_EXT && type == GL_UNSIGNED_BYTE) {
- const GLubyte* tmp = gl_->GetString(GL_EXTENSIONS);
- if (tmp) {
- std::string extensions =
- " " + std::string(reinterpret_cast<const char*>(tmp)) + " ";
- if (extensions.find(" GL_EXT_read_format_bgra ") != std::string::npos) {
- return true;
- }
- }
- }
-
- bool supports_format = false;
- GLenum ext_format = 0, ext_type = 0;
- GetAdditionalFormat(format, type, &ext_format, &ext_type);
- if ((ext_format == format) && (ext_type == type)) {
- supports_format = true;
- }
- return supports_format;
-}
-
-GLHelperReadbackSupport::FormatSupport
-GLHelperReadbackSupport::GetReadbackConfig(SkColorType color_type,
- bool can_swizzle,
- GLenum* format,
- GLenum* type,
- size_t* bytes_per_pixel) {
- DCHECK(format && type && bytes_per_pixel);
- *bytes_per_pixel = 4;
- *type = GL_UNSIGNED_BYTE;
- GLenum new_format = 0, new_type = 0;
- switch (color_type) {
- case kRGB_565_SkColorType:
- if (format_support_table_[color_type] ==
- GLHelperReadbackSupport::SUPPORTED) {
- *format = GL_RGB;
- *type = GL_UNSIGNED_SHORT_5_6_5;
- *bytes_per_pixel = 2;
- return GLHelperReadbackSupport::SUPPORTED;
- }
- break;
- case kRGBA_8888_SkColorType:
- *format = GL_RGBA;
- if (can_swizzle) {
- // If GL_BGRA_EXT is advertised as the readback format through
- // GL_IMPLEMENTATION_COLOR_READ_FORMAT then assume it is preferred by
- // the implementation for performance.
- GetAdditionalFormat(*format, *type, &new_format, &new_type);
-
- if (new_format == GL_BGRA_EXT && new_type == GL_UNSIGNED_BYTE) {
- *format = GL_BGRA_EXT;
- return GLHelperReadbackSupport::SWIZZLE;
- }
- }
- return GLHelperReadbackSupport::SUPPORTED;
- case kBGRA_8888_SkColorType:
- *format = GL_BGRA_EXT;
- if (format_support_table_[color_type] ==
- GLHelperReadbackSupport::SUPPORTED)
- return GLHelperReadbackSupport::SUPPORTED;
-
- if (can_swizzle) {
- *format = GL_RGBA;
- return GLHelperReadbackSupport::SWIZZLE;
- }
-
- break;
- case kARGB_4444_SkColorType:
- return GLHelperReadbackSupport::NOT_SUPPORTED;
- default:
- NOTREACHED();
- break;
- }
-
- return GLHelperReadbackSupport::NOT_SUPPORTED;
-}
-
-} // namespace content
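A caller would typically query GetReadbackConfig() once per readback and feed the resulting format/type pair straight into glReadPixels, swizzling on the CPU only when SWIZZLE is returned. A minimal sketch of that call pattern (illustrative only; it assumes a valid gpu::gles2::GLES2Interface* gl, a GLHelperReadbackSupport instance named support, integer width/height, and a framebuffer already bound for reading):

  GLenum format = 0, type = 0;
  size_t bytes_per_pixel = 0;
  GLHelperReadbackSupport::FormatSupport result = support.GetReadbackConfig(
      kRGBA_8888_SkColorType, true /* can_swizzle */, &format, &type,
      &bytes_per_pixel);
  if (result != GLHelperReadbackSupport::NOT_SUPPORTED) {
    std::vector<uint8_t> pixels(width * height * bytes_per_pixel);
    gl->ReadPixels(0, 0, width, height, format, type, &pixels[0]);
    if (result == GLHelperReadbackSupport::SWIZZLE) {
      // The red and blue channels must be swapped on the CPU afterwards.
    }
  }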
diff --git a/chromium/content/common/gpu/client/gl_helper_readback_support.h b/chromium/content/common/gpu/client/gl_helper_readback_support.h
deleted file mode 100644
index f9329e9912c..00000000000
--- a/chromium/content/common/gpu/client/gl_helper_readback_support.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GL_HELPER_READBACK_SUPPORT_H_
-#define CONTENT_COMMON_GPU_CLIENT_GL_HELPER_READBACK_SUPPORT_H_
-
-#include <stddef.h>
-
-#include <vector>
-
-#include "content/common/gpu/client/gl_helper.h"
-
-namespace content {
-
-class CONTENT_EXPORT GLHelperReadbackSupport {
- public:
- enum FormatSupport { SUPPORTED, SWIZZLE, NOT_SUPPORTED };
-
- GLHelperReadbackSupport(gpu::gles2::GLES2Interface* gl);
-
- ~GLHelperReadbackSupport();
-
- // For a given color type, determines whether readback is supported and, if
- // so, how it should be performed. |format|, |type| and |bytes_per_pixel| are
- // the values that should be used with glReadPixels to perform the readback.
- // If |can_swizzle| is true, this method returns SWIZZLE when the data needs
- // to be swizzled before using the returned |format|; it returns SUPPORTED
- // when readback of this color type is possible as-is, and NOT_SUPPORTED
- // when readback is not possible at all. This method always overwrites the
- // out values irrespective of the return value.
- FormatSupport GetReadbackConfig(SkColorType color_type,
- bool can_swizzle,
- GLenum* format,
- GLenum* type,
- size_t* bytes_per_pixel);
- // Provides the additional readback format/type pairing for a render target
- // of a given format/type pairing
- void GetAdditionalFormat(GLenum format, GLenum type, GLenum *format_out,
- GLenum *type_out);
- private:
- struct FormatCacheEntry {
- GLenum format;
- GLenum type;
- GLenum read_format;
- GLenum read_type;
- };
-
- // This populates the format_support_table with the list of supported
- // formats.
- void InitializeReadbackSupport();
-
- // This is called once per format from InitializeReadbackSupport() and
- // should not be used anywhere else; calling it elsewhere can disturb the
- // state of normal GL operations.
- void CheckForReadbackSupport(SkColorType texture_format);
-
- // Helper function for checking the supported texture formats.
- // Avoid using this API between texture operations, as it performs some
- // texture operations (bind, attach) internally.
- bool SupportsFormat(GLenum format, GLenum type);
-
- FormatSupport format_support_table_[kLastEnum_SkColorType + 1];
-
- gpu::gles2::GLES2Interface* gl_;
- std::vector<struct FormatCacheEntry> format_cache_;
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GL_HELPER_READBACK_SUPPORT_H_
diff --git a/chromium/content/common/gpu/client/gl_helper_scaling.cc b/chromium/content/common/gpu/client/gl_helper_scaling.cc
deleted file mode 100644
index f4cd6b3eb52..00000000000
--- a/chromium/content/common/gpu/client/gl_helper_scaling.cc
+++ /dev/null
@@ -1,934 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gl_helper_scaling.h"
-
-#include <stddef.h>
-
-#include <deque>
-#include <string>
-#include <vector>
-
-#include "base/bind.h"
-#include "base/lazy_instance.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop.h"
-#include "base/time/time.h"
-#include "base/trace_event/trace_event.h"
-#include "gpu/command_buffer/client/gles2_interface.h"
-#include "third_party/skia/include/core/SkRegion.h"
-#include "ui/gfx/geometry/rect.h"
-#include "ui/gfx/geometry/size.h"
-
-using gpu::gles2::GLES2Interface;
-
-namespace content {
-
-GLHelperScaling::GLHelperScaling(GLES2Interface* gl, GLHelper* helper)
- : gl_(gl), helper_(helper), vertex_attributes_buffer_(gl_) {
- InitBuffer();
-}
-
-GLHelperScaling::~GLHelperScaling() {}
-
-// Used to keep track of a generated shader program. The program
-// is passed in as text through Setup and is used by calling
-// UseProgram() with the right parameters. Note that |gl_|
-// and |helper_| are assumed to live longer than this program.
-class ShaderProgram : public base::RefCounted<ShaderProgram> {
- public:
- ShaderProgram(GLES2Interface* gl, GLHelper* helper)
- : gl_(gl),
- helper_(helper),
- program_(gl_->CreateProgram()),
- position_location_(-1),
- texcoord_location_(-1),
- src_subrect_location_(-1),
- src_pixelsize_location_(-1),
- dst_pixelsize_location_(-1),
- scaling_vector_location_(-1),
- color_weights_location_(-1) {}
-
- // Compile shader program.
- void Setup(const GLchar* vertex_shader_text,
- const GLchar* fragment_shader_text);
-
- // UseProgram must be called with GL_TEXTURE_2D bound to the
- // source texture and GL_ARRAY_BUFFER bound to a vertex
- // attribute buffer.
- void UseProgram(const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool scale_x,
- bool flip_y,
- GLfloat color_weights[4]);
-
- bool Initialized() const { return position_location_ != -1; }
-
- private:
- friend class base::RefCounted<ShaderProgram>;
- ~ShaderProgram() { gl_->DeleteProgram(program_); }
-
- GLES2Interface* gl_;
- GLHelper* helper_;
-
- // A program for copying a source texture into a destination texture.
- GLuint program_;
-
- // The location of the position in the program.
- GLint position_location_;
- // The location of the texture coordinate in the program.
- GLint texcoord_location_;
- // The location of the source texture in the program.
- GLint texture_location_;
- // The location of the texture coordinate of
- // the sub-rectangle in the program.
- GLint src_subrect_location_;
- // Location of size of source image in pixels.
- GLint src_pixelsize_location_;
- // Location of size of destination image in pixels.
- GLint dst_pixelsize_location_;
- // Location of vector for scaling direction.
- GLint scaling_vector_location_;
- // Location of color weights.
- GLint color_weights_location_;
-
- DISALLOW_COPY_AND_ASSIGN(ShaderProgram);
-};
-
-// Implementation of a single stage in a scaler pipeline. If the pipeline has
-// multiple stages, it calls Scale() on the subscaler, then further scales the
-// output. Caches textures and framebuffers to avoid allocating/deleting
-// them once per frame, which can be expensive on some drivers.
-class ScalerImpl : public GLHelper::ScalerInterface,
- public GLHelperScaling::ShaderInterface {
- public:
- // |gl| and |copy_impl| are expected to live longer than this object.
- // |src_size| is the size of the input texture in pixels.
- // |dst_size| is the size of the output texture in pixels.
- // |src_subrect| is the portion of the src to copy to the output texture.
- // If |scale_x| is true, we are scaling along the X axis, otherwise Y.
- // If we are scaling in both X and Y, |scale_x| is ignored.
- // If |vertically_flip_texture| is true, output will be upside-down.
- // If |swizzle| is true, RGBA will be transformed into BGRA.
- // |color_weights| are only used together with SHADER_PLANAR to specify
- // how to convert RGB colors into a single value.
- ScalerImpl(GLES2Interface* gl,
- GLHelperScaling* scaler_helper,
- const GLHelperScaling::ScalerStage& scaler_stage,
- ScalerImpl* subscaler,
- const float* color_weights)
- : gl_(gl),
- scaler_helper_(scaler_helper),
- spec_(scaler_stage),
- intermediate_texture_(0),
- dst_framebuffer_(gl),
- subscaler_(subscaler) {
- if (color_weights) {
- color_weights_[0] = color_weights[0];
- color_weights_[1] = color_weights[1];
- color_weights_[2] = color_weights[2];
- color_weights_[3] = color_weights[3];
- } else {
- color_weights_[0] = 0.0;
- color_weights_[1] = 0.0;
- color_weights_[2] = 0.0;
- color_weights_[3] = 0.0;
- }
- shader_program_ =
- scaler_helper_->GetShaderProgram(spec_.shader, spec_.swizzle);
-
- if (subscaler_) {
- intermediate_texture_ = 0u;
- gl_->GenTextures(1, &intermediate_texture_);
- ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_,
- intermediate_texture_);
- gl_->TexImage2D(GL_TEXTURE_2D,
- 0,
- GL_RGBA,
- spec_.src_size.width(),
- spec_.src_size.height(),
- 0,
- GL_RGBA,
- GL_UNSIGNED_BYTE,
- NULL);
- }
- }
-
- ~ScalerImpl() override {
- if (intermediate_texture_) {
- gl_->DeleteTextures(1, &intermediate_texture_);
- }
- }
-
- // GLHelperShader::ShaderInterface implementation.
- void Execute(GLuint source_texture,
- const std::vector<GLuint>& dest_textures) override {
- if (subscaler_) {
- subscaler_->Scale(source_texture, intermediate_texture_);
- source_texture = intermediate_texture_;
- }
-
- ScopedFramebufferBinder<GL_FRAMEBUFFER> framebuffer_binder(
- gl_, dst_framebuffer_);
- DCHECK_GT(dest_textures.size(), 0U);
- scoped_ptr<GLenum[]> buffers(new GLenum[dest_textures.size()]);
- for (size_t t = 0; t < dest_textures.size(); t++) {
- ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, dest_textures[t]);
- gl_->FramebufferTexture2D(GL_FRAMEBUFFER,
- GL_COLOR_ATTACHMENT0 + t,
- GL_TEXTURE_2D,
- dest_textures[t],
- 0);
- buffers[t] = GL_COLOR_ATTACHMENT0 + t;
- }
- ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, source_texture);
-
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
-
- ScopedBufferBinder<GL_ARRAY_BUFFER> buffer_binder(
- gl_, scaler_helper_->vertex_attributes_buffer_);
- shader_program_->UseProgram(spec_.src_size,
- spec_.src_subrect,
- spec_.dst_size,
- spec_.scale_x,
- spec_.vertically_flip_texture,
- color_weights_);
- gl_->Viewport(0, 0, spec_.dst_size.width(), spec_.dst_size.height());
-
- if (dest_textures.size() > 1) {
- DCHECK_LE(static_cast<int>(dest_textures.size()),
- scaler_helper_->helper_->MaxDrawBuffers());
- gl_->DrawBuffersEXT(dest_textures.size(), buffers.get());
- }
- // Conduct texture mapping by drawing a quad composed of two triangles.
- gl_->DrawArrays(GL_TRIANGLE_STRIP, 0, 4);
- if (dest_textures.size() > 1) {
- // Set the draw buffers back to not confuse others.
- gl_->DrawBuffersEXT(1, &buffers[0]);
- }
- }
-
- // GLHelper::ScalerInterface implementation.
- void Scale(GLuint source_texture, GLuint dest_texture) override {
- std::vector<GLuint> tmp(1);
- tmp[0] = dest_texture;
- Execute(source_texture, tmp);
- }
-
- const gfx::Size& SrcSize() override {
- if (subscaler_) {
- return subscaler_->SrcSize();
- }
- return spec_.src_size;
- }
- const gfx::Rect& SrcSubrect() override {
- if (subscaler_) {
- return subscaler_->SrcSubrect();
- }
- return spec_.src_subrect;
- }
- const gfx::Size& DstSize() override { return spec_.dst_size; }
-
- private:
- GLES2Interface* gl_;
- GLHelperScaling* scaler_helper_;
- GLHelperScaling::ScalerStage spec_;
- GLfloat color_weights_[4];
- GLuint intermediate_texture_;
- scoped_refptr<ShaderProgram> shader_program_;
- ScopedFramebuffer dst_framebuffer_;
- scoped_ptr<ScalerImpl> subscaler_;
-};
-
-GLHelperScaling::ScalerStage::ScalerStage(ShaderType shader_,
- gfx::Size src_size_,
- gfx::Rect src_subrect_,
- gfx::Size dst_size_,
- bool scale_x_,
- bool vertically_flip_texture_,
- bool swizzle_)
- : shader(shader_),
- src_size(src_size_),
- src_subrect(src_subrect_),
- dst_size(dst_size_),
- scale_x(scale_x_),
- vertically_flip_texture(vertically_flip_texture_),
- swizzle(swizzle_) {}
-
-// The important inputs to this function are |x_ops| and
-// |y_ops|. They represent scaling operations to be done
-// on an image of size |src_size|. If |quality| is SCALER_QUALITY_BEST,
-// then we interpret these scale operations literally and
-// create one scaler stage for each ScaleOp. However, if |quality|
-// is SCALER_QUALITY_GOOD, then we can do a whole bunch of optimizations
-// by combining two or more ScaleOps into a single scaler stage.
-// Normally we process ScaleOps from |y_ops| first and |x_ops| after
-// all |y_ops| are processed, but sometimes we can combine one or more
-// operations from both queues essentially for free. This is the reason
-// why |x_ops| and |y_ops| aren't just one single queue.
-void GLHelperScaling::ConvertScalerOpsToScalerStages(
- GLHelper::ScalerQuality quality,
- gfx::Size src_size,
- gfx::Rect src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle,
- std::deque<GLHelperScaling::ScaleOp>* x_ops,
- std::deque<GLHelperScaling::ScaleOp>* y_ops,
- std::vector<ScalerStage>* scaler_stages) {
- while (!x_ops->empty() || !y_ops->empty()) {
- gfx::Size intermediate_size = src_subrect.size();
- std::deque<ScaleOp>* current_queue = NULL;
-
- if (!y_ops->empty()) {
- current_queue = y_ops;
- } else {
- current_queue = x_ops;
- }
-
- ShaderType current_shader = SHADER_BILINEAR;
- switch (current_queue->front().scale_factor) {
- case 0:
- if (quality == GLHelper::SCALER_QUALITY_BEST) {
- current_shader = SHADER_BICUBIC_UPSCALE;
- }
- break;
- case 2:
- if (quality == GLHelper::SCALER_QUALITY_BEST) {
- current_shader = SHADER_BICUBIC_HALF_1D;
- }
- break;
- case 3:
- DCHECK(quality != GLHelper::SCALER_QUALITY_BEST);
- current_shader = SHADER_BILINEAR3;
- break;
- default:
- NOTREACHED();
- }
- bool scale_x = current_queue->front().scale_x;
- current_queue->front().UpdateSize(&intermediate_size);
- current_queue->pop_front();
-
- // Optimization: Sometimes we can combine 2-4 scaling operations into
- // one operation.
- if (quality == GLHelper::SCALER_QUALITY_GOOD) {
- if (!current_queue->empty() && current_shader == SHADER_BILINEAR) {
- // Combine two steps in the same dimension.
- current_queue->front().UpdateSize(&intermediate_size);
- current_queue->pop_front();
- current_shader = SHADER_BILINEAR2;
- if (!current_queue->empty()) {
- // Combine three steps in the same dimension.
- current_queue->front().UpdateSize(&intermediate_size);
- current_queue->pop_front();
- current_shader = SHADER_BILINEAR4;
- }
- }
- // Check if we can combine some steps in the other dimension as well.
- // Since all shaders currently use GL_LINEAR, we can easily scale up
- // or scale down by exactly 2x at the same time as we do another
- // operation. Currently, the following mergers are supported:
- // * 1 bilinear Y-pass with 1 bilinear X-pass (up or down)
- // * 2 bilinear Y-passes with 2 bilinear X-passes
- // * 1 bilinear Y-pass with N bilinear X-passes
- // * N bilinear Y-passes with 1 bilinear X-pass (down only)
- // Measurements indicate that generalizing this for 3x3 and 4x4
- // makes it slower on some platforms, such as the Pixel.
- if (!scale_x && x_ops->size() > 0 && x_ops->front().scale_factor <= 2) {
- int x_passes = 0;
- if (current_shader == SHADER_BILINEAR2 && x_ops->size() >= 2) {
- // 2y + 2x passes
- x_passes = 2;
- current_shader = SHADER_BILINEAR2X2;
- } else if (current_shader == SHADER_BILINEAR) {
- // 1y + Nx passes
- scale_x = true;
- switch (x_ops->size()) {
- case 0:
- NOTREACHED();
- case 1:
- if (x_ops->front().scale_factor == 3) {
- current_shader = SHADER_BILINEAR3;
- }
- x_passes = 1;
- break;
- case 2:
- x_passes = 2;
- current_shader = SHADER_BILINEAR2;
- break;
- default:
- x_passes = 3;
- current_shader = SHADER_BILINEAR4;
- break;
- }
- } else if (x_ops->front().scale_factor == 2) {
- // Ny + 1x-downscale
- x_passes = 1;
- }
-
- for (int i = 0; i < x_passes; i++) {
- x_ops->front().UpdateSize(&intermediate_size);
- x_ops->pop_front();
- }
- }
- }
-
- scaler_stages->push_back(ScalerStage(current_shader,
- src_size,
- src_subrect,
- intermediate_size,
- scale_x,
- vertically_flip_texture,
- swizzle));
- src_size = intermediate_size;
- src_subrect = gfx::Rect(intermediate_size);
- vertically_flip_texture = false;
- swizzle = false;
- }
-}
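To make the merging concrete, consider scaling 1920x1080 down to 480x270 with SCALER_QUALITY_GOOD (a hand-traced walkthrough, so treat the exact staging as illustrative): AddOps produces two 2x downscale ScaleOps per axis; the two Y ops collapse into SHADER_BILINEAR2, and since the pending X ops are also plain 2x downscales they fold into the same stage, giving a single SHADER_BILINEAR2X2 pass from 1920x1080 to 480x270 instead of four separate bilinear passes.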
-
-void GLHelperScaling::ComputeScalerStages(
- GLHelper::ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle,
- std::vector<ScalerStage>* scaler_stages) {
- if (quality == GLHelper::SCALER_QUALITY_FAST ||
- src_subrect.size() == dst_size) {
- scaler_stages->push_back(ScalerStage(SHADER_BILINEAR,
- src_size,
- src_subrect,
- dst_size,
- false,
- vertically_flip_texture,
- swizzle));
- return;
- }
-
- std::deque<GLHelperScaling::ScaleOp> x_ops, y_ops;
- GLHelperScaling::ScaleOp::AddOps(src_subrect.width(),
- dst_size.width(),
- true,
- quality == GLHelper::SCALER_QUALITY_GOOD,
- &x_ops);
- GLHelperScaling::ScaleOp::AddOps(src_subrect.height(),
- dst_size.height(),
- false,
- quality == GLHelper::SCALER_QUALITY_GOOD,
- &y_ops);
-
- ConvertScalerOpsToScalerStages(quality,
- src_size,
- src_subrect,
- dst_size,
- vertically_flip_texture,
- swizzle,
- &x_ops,
- &y_ops,
- scaler_stages);
-}
-
-GLHelper::ScalerInterface* GLHelperScaling::CreateScaler(
- GLHelper::ScalerQuality quality,
- gfx::Size src_size,
- gfx::Rect src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle) {
- std::vector<ScalerStage> scaler_stages;
- ComputeScalerStages(quality,
- src_size,
- src_subrect,
- dst_size,
- vertically_flip_texture,
- swizzle,
- &scaler_stages);
-
- ScalerImpl* ret = NULL;
- for (unsigned int i = 0; i < scaler_stages.size(); i++) {
- ret = new ScalerImpl(gl_, this, scaler_stages[i], ret, NULL);
- }
- return ret;
-}
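A caller would typically build one scaler per source/destination size pair and reuse it across frames. A minimal sketch (assuming a GLHelperScaling instance named scaling and two already-allocated RGBA textures src_texture and dst_texture of the stated sizes; the sizes and names are illustrative):

  scoped_ptr<GLHelper::ScalerInterface> scaler(scaling.CreateScaler(
      GLHelper::SCALER_QUALITY_GOOD, gfx::Size(1920, 1080),
      gfx::Rect(0, 0, 1920, 1080), gfx::Size(480, 270),
      false /* vertically_flip_texture */, false /* swizzle */));
  if (scaler)
    scaler->Scale(src_texture, dst_texture);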
-
-GLHelper::ScalerInterface* GLHelperScaling::CreatePlanarScaler(
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle,
- const float color_weights[4]) {
- ScalerStage stage(SHADER_PLANAR,
- src_size,
- src_subrect,
- dst_size,
- true,
- vertically_flip_texture,
- swizzle);
- return new ScalerImpl(gl_, this, stage, NULL, color_weights);
-}
-
-GLHelperScaling::ShaderInterface* GLHelperScaling::CreateYuvMrtShader(
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle,
- ShaderType shader) {
- DCHECK(shader == SHADER_YUV_MRT_PASS1 || shader == SHADER_YUV_MRT_PASS2);
- ScalerStage stage(shader,
- src_size,
- src_subrect,
- dst_size,
- true,
- vertically_flip_texture,
- swizzle);
- return new ScalerImpl(gl_, this, stage, NULL, NULL);
-}
-
-const GLfloat GLHelperScaling::kVertexAttributes[] = {
- -1.0f, -1.0f, 0.0f, 0.0f, // vertex 0
- 1.0f, -1.0f, 1.0f, 0.0f, // vertex 1
- -1.0f, 1.0f, 0.0f, 1.0f, // vertex 2
- 1.0f, 1.0f, 1.0f, 1.0f, }; // vertex 3
-
-void GLHelperScaling::InitBuffer() {
- ScopedBufferBinder<GL_ARRAY_BUFFER> buffer_binder(gl_,
- vertex_attributes_buffer_);
- gl_->BufferData(GL_ARRAY_BUFFER,
- sizeof(kVertexAttributes),
- kVertexAttributes,
- GL_STATIC_DRAW);
-}
-
-scoped_refptr<ShaderProgram> GLHelperScaling::GetShaderProgram(ShaderType type,
- bool swizzle) {
- ShaderProgramKeyType key(type, swizzle);
- scoped_refptr<ShaderProgram>& cache_entry(shader_programs_[key]);
- if (!cache_entry.get()) {
- cache_entry = new ShaderProgram(gl_, helper_);
- std::basic_string<GLchar> vertex_program;
- std::basic_string<GLchar> fragment_program;
- std::basic_string<GLchar> vertex_header;
- std::basic_string<GLchar> fragment_directives;
- std::basic_string<GLchar> fragment_header;
- std::basic_string<GLchar> shared_variables;
-
- vertex_header.append(
- "precision highp float;\n"
- "attribute vec2 a_position;\n"
- "attribute vec2 a_texcoord;\n"
- "uniform vec4 src_subrect;\n");
-
- fragment_header.append(
- "precision mediump float;\n"
- "uniform sampler2D s_texture;\n");
-
- vertex_program.append(
- " gl_Position = vec4(a_position, 0.0, 1.0);\n"
- " vec2 texcoord = src_subrect.xy + a_texcoord * src_subrect.zw;\n");
-
- switch (type) {
- case SHADER_BILINEAR:
- shared_variables.append("varying vec2 v_texcoord;\n");
- vertex_program.append(" v_texcoord = texcoord;\n");
- fragment_program.append(
- " gl_FragColor = texture2D(s_texture, v_texcoord);\n");
- break;
-
- case SHADER_BILINEAR2:
- // This is equivalent to two passes of the BILINEAR shader above.
- // It can be used to scale an image down 1.0x-2.0x in either dimension,
- // or exactly 4x.
- shared_variables.append(
- "varying vec4 v_texcoords;\n"); // 2 texcoords packed in one quad
- vertex_header.append(
- "uniform vec2 scaling_vector;\n"
- "uniform vec2 dst_pixelsize;\n");
- vertex_program.append(
- " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
- " step /= 4.0;\n"
- " v_texcoords.xy = texcoord + step;\n"
- " v_texcoords.zw = texcoord - step;\n");
-
- fragment_program.append(
- " gl_FragColor = (texture2D(s_texture, v_texcoords.xy) +\n"
- " texture2D(s_texture, v_texcoords.zw)) / 2.0;\n");
- break;
-
- case SHADER_BILINEAR3:
- // This is kind of like doing 1.5 passes of the BILINEAR shader.
- // It can be used to scale an image down 1.5x-3.0x, or exactly 6x.
- shared_variables.append(
- "varying vec4 v_texcoords1;\n" // 2 texcoords packed in one quad
- "varying vec2 v_texcoords2;\n");
- vertex_header.append(
- "uniform vec2 scaling_vector;\n"
- "uniform vec2 dst_pixelsize;\n");
- vertex_program.append(
- " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
- " step /= 3.0;\n"
- " v_texcoords1.xy = texcoord + step;\n"
- " v_texcoords1.zw = texcoord;\n"
- " v_texcoords2 = texcoord - step;\n");
- fragment_program.append(
- " gl_FragColor = (texture2D(s_texture, v_texcoords1.xy) +\n"
- " texture2D(s_texture, v_texcoords1.zw) +\n"
- " texture2D(s_texture, v_texcoords2)) / 3.0;\n");
- break;
-
- case SHADER_BILINEAR4:
- // This is equivalent to three passes of the BILINEAR shader above.
- // It can be used to scale an image down 2.0x-4.0x or exactly 8x.
- shared_variables.append("varying vec4 v_texcoords[2];\n");
- vertex_header.append(
- "uniform vec2 scaling_vector;\n"
- "uniform vec2 dst_pixelsize;\n");
- vertex_program.append(
- " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
- " step /= 8.0;\n"
- " v_texcoords[0].xy = texcoord - step * 3.0;\n"
- " v_texcoords[0].zw = texcoord - step;\n"
- " v_texcoords[1].xy = texcoord + step;\n"
- " v_texcoords[1].zw = texcoord + step * 3.0;\n");
- fragment_program.append(
- " gl_FragColor = (\n"
- " texture2D(s_texture, v_texcoords[0].xy) +\n"
- " texture2D(s_texture, v_texcoords[0].zw) +\n"
- " texture2D(s_texture, v_texcoords[1].xy) +\n"
- " texture2D(s_texture, v_texcoords[1].zw)) / 4.0;\n");
- break;
-
- case SHADER_BILINEAR2X2:
- // This is equivalent to four passes of the BILINEAR shader above.
- // Two in each dimension. It can be used to scale an image down
- // 1.0x-2.0x in both X and Y directions. Or, it could be used to
- // scale an image down by exactly 4x in both dimensions.
- shared_variables.append("varying vec4 v_texcoords[2];\n");
- vertex_header.append("uniform vec2 dst_pixelsize;\n");
- vertex_program.append(
- " vec2 step = src_subrect.zw / 4.0 / dst_pixelsize;\n"
- " v_texcoords[0].xy = texcoord + vec2(step.x, step.y);\n"
- " v_texcoords[0].zw = texcoord + vec2(step.x, -step.y);\n"
- " v_texcoords[1].xy = texcoord + vec2(-step.x, step.y);\n"
- " v_texcoords[1].zw = texcoord + vec2(-step.x, -step.y);\n");
- fragment_program.append(
- " gl_FragColor = (\n"
- " texture2D(s_texture, v_texcoords[0].xy) +\n"
- " texture2D(s_texture, v_texcoords[0].zw) +\n"
- " texture2D(s_texture, v_texcoords[1].xy) +\n"
- " texture2D(s_texture, v_texcoords[1].zw)) / 4.0;\n");
- break;
-
- case SHADER_BICUBIC_HALF_1D:
- // This scales down the texture by exactly half in one dimension in a
- // single pass. We use bilinear lookups to reduce the number of texture
- // reads from 8 to 4.
- shared_variables.append(
- "const float CenterDist = 99.0 / 140.0;\n"
- "const float LobeDist = 11.0 / 4.0;\n"
- "const float CenterWeight = 35.0 / 64.0;\n"
- "const float LobeWeight = -3.0 / 64.0;\n"
- "varying vec4 v_texcoords[2];\n");
- vertex_header.append(
- "uniform vec2 scaling_vector;\n"
- "uniform vec2 src_pixelsize;\n");
- vertex_program.append(
- " vec2 step = src_subrect.zw * scaling_vector / src_pixelsize;\n"
- " v_texcoords[0].xy = texcoord - LobeDist * step;\n"
- " v_texcoords[0].zw = texcoord - CenterDist * step;\n"
- " v_texcoords[1].xy = texcoord + CenterDist * step;\n"
- " v_texcoords[1].zw = texcoord + LobeDist * step;\n");
- fragment_program.append(
- " gl_FragColor = \n"
- // Lobe pixels
- " (texture2D(s_texture, v_texcoords[0].xy) +\n"
- " texture2D(s_texture, v_texcoords[1].zw)) *\n"
- " LobeWeight +\n"
- // Center pixels
- " (texture2D(s_texture, v_texcoords[0].zw) +\n"
- " texture2D(s_texture, v_texcoords[1].xy)) *\n"
- " CenterWeight;\n");
- break;
-
- case SHADER_BICUBIC_UPSCALE:
- // When scaling up, we need 4 texture reads, but we can
- // save some instructions because we know in which range of
- // the bicubic function each call to it will fall.
- // Also, when sampling the bicubic function like this, the sum
- // is always exactly one, so we can skip normalization as well.
- shared_variables.append("varying vec2 v_texcoord;\n");
- vertex_program.append(" v_texcoord = texcoord;\n");
- fragment_header.append(
- "uniform vec2 src_pixelsize;\n"
- "uniform vec2 scaling_vector;\n"
- "const float a = -0.5;\n"
- // This function is equivalent to calling the bicubic
- // function with x-1, x, 1-x and 2-x
- // (assuming 0 <= x < 1)
- "vec4 filt4(float x) {\n"
- " return vec4(x * x * x, x * x, x, 1) *\n"
- " mat4( a, -2.0 * a, a, 0.0,\n"
- " a + 2.0, -a - 3.0, 0.0, 1.0,\n"
- " -a - 2.0, 3.0 + 2.0 * a, -a, 0.0,\n"
- " -a, a, 0.0, 0.0);\n"
- "}\n"
- "mat4 pixels_x(vec2 pos, vec2 step) {\n"
- " return mat4(\n"
- " texture2D(s_texture, pos - step),\n"
- " texture2D(s_texture, pos),\n"
- " texture2D(s_texture, pos + step),\n"
- " texture2D(s_texture, pos + step * 2.0));\n"
- "}\n");
- fragment_program.append(
- " vec2 pixel_pos = v_texcoord * src_pixelsize - \n"
- " scaling_vector / 2.0;\n"
- " float frac = fract(dot(pixel_pos, scaling_vector));\n"
- " vec2 base = (floor(pixel_pos) + vec2(0.5)) / src_pixelsize;\n"
- " vec2 step = scaling_vector / src_pixelsize;\n"
- " gl_FragColor = pixels_x(base, step) * filt4(frac);\n");
- break;
-
- case SHADER_PLANAR:
- // Converts four RGBA pixels into one pixel. Each RGBA
- // pixel will be dot-multiplied with the color weights and
- // then placed into a component of the output. This is used to
- // convert RGBA textures into Y, U and V textures. We do this
- // because single-component textures are not renderable on all
- // architectures.
- shared_variables.append("varying vec4 v_texcoords[2];\n");
- vertex_header.append(
- "uniform vec2 scaling_vector;\n"
- "uniform vec2 dst_pixelsize;\n");
- vertex_program.append(
- " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
- " step /= 4.0;\n"
- " v_texcoords[0].xy = texcoord - step * 1.5;\n"
- " v_texcoords[0].zw = texcoord - step * 0.5;\n"
- " v_texcoords[1].xy = texcoord + step * 0.5;\n"
- " v_texcoords[1].zw = texcoord + step * 1.5;\n");
- fragment_header.append("uniform vec4 color_weights;\n");
- fragment_program.append(
- " gl_FragColor = color_weights * mat4(\n"
- " vec4(texture2D(s_texture, v_texcoords[0].xy).rgb, 1.0),\n"
- " vec4(texture2D(s_texture, v_texcoords[0].zw).rgb, 1.0),\n"
- " vec4(texture2D(s_texture, v_texcoords[1].xy).rgb, 1.0),\n"
- " vec4(texture2D(s_texture, v_texcoords[1].zw).rgb, 1.0));\n");
- break;
-
- case SHADER_YUV_MRT_PASS1:
- // RGB24 to YV12 in two passes; writing two 8888 targets each pass.
- //
- // YV12 is full-resolution luma and half-resolution blue/red chroma.
- //
- // (original)
- // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
- // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
- // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
- // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
- // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
- // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
- // |
- // | (y plane) (temporary)
- // | YYYY YYYY UUVV UUVV
- // +--> { YYYY YYYY + UUVV UUVV }
- // YYYY YYYY UUVV UUVV
- // First YYYY YYYY UUVV UUVV
- // pass YYYY YYYY UUVV UUVV
- // YYYY YYYY UUVV UUVV
- // |
- // | (u plane) (v plane)
- // Second | UUUU VVVV
- // pass +--> { UUUU + VVVV }
- // UUUU VVVV
- //
- shared_variables.append("varying vec4 v_texcoords[2];\n");
- vertex_header.append(
- "uniform vec2 scaling_vector;\n"
- "uniform vec2 dst_pixelsize;\n");
- vertex_program.append(
- " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
- " step /= 4.0;\n"
- " v_texcoords[0].xy = texcoord - step * 1.5;\n"
- " v_texcoords[0].zw = texcoord - step * 0.5;\n"
- " v_texcoords[1].xy = texcoord + step * 0.5;\n"
- " v_texcoords[1].zw = texcoord + step * 1.5;\n");
- fragment_directives.append("#extension GL_EXT_draw_buffers : enable\n");
- fragment_header.append(
- "const vec3 kRGBtoY = vec3(0.257, 0.504, 0.098);\n"
- "const float kYBias = 0.0625;\n"
- // Divide U and V by two to compensate for averaging below.
- "const vec3 kRGBtoU = vec3(-0.148, -0.291, 0.439) / 2.0;\n"
- "const vec3 kRGBtoV = vec3(0.439, -0.368, -0.071) / 2.0;\n"
- "const float kUVBias = 0.5;\n");
- fragment_program.append(
- " vec3 pixel1 = texture2D(s_texture, v_texcoords[0].xy).rgb;\n"
- " vec3 pixel2 = texture2D(s_texture, v_texcoords[0].zw).rgb;\n"
- " vec3 pixel3 = texture2D(s_texture, v_texcoords[1].xy).rgb;\n"
- " vec3 pixel4 = texture2D(s_texture, v_texcoords[1].zw).rgb;\n"
- " vec3 pixel12 = pixel1 + pixel2;\n"
- " vec3 pixel34 = pixel3 + pixel4;\n"
- " gl_FragData[0] = vec4(dot(pixel1, kRGBtoY),\n"
- " dot(pixel2, kRGBtoY),\n"
- " dot(pixel3, kRGBtoY),\n"
- " dot(pixel4, kRGBtoY)) + kYBias;\n"
- " gl_FragData[1] = vec4(dot(pixel12, kRGBtoU),\n"
- " dot(pixel34, kRGBtoU),\n"
- " dot(pixel12, kRGBtoV),\n"
- " dot(pixel34, kRGBtoV)) + kUVBias;\n");
- break;
-
- case SHADER_YUV_MRT_PASS2:
- // We're just sampling two pixels and unswizzling them. There's
- // no need to do vertical scaling with math, since bilinear
- // interpolation in the sampler takes care of that.
- shared_variables.append("varying vec4 v_texcoords;\n");
- vertex_header.append(
- "uniform vec2 scaling_vector;\n"
- "uniform vec2 dst_pixelsize;\n");
- vertex_program.append(
- " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
- " step /= 2.0;\n"
- " v_texcoords.xy = texcoord - step * 0.5;\n"
- " v_texcoords.zw = texcoord + step * 0.5;\n");
- fragment_directives.append("#extension GL_EXT_draw_buffers : enable\n");
- fragment_program.append(
- " vec4 lo_uuvv = texture2D(s_texture, v_texcoords.xy);\n"
- " vec4 hi_uuvv = texture2D(s_texture, v_texcoords.zw);\n"
- " gl_FragData[0] = vec4(lo_uuvv.rg, hi_uuvv.rg);\n"
- " gl_FragData[1] = vec4(lo_uuvv.ba, hi_uuvv.ba);\n");
- break;
- }
- if (swizzle) {
- switch(type) {
- case SHADER_YUV_MRT_PASS1:
- fragment_program.append(" gl_FragData[0] = gl_FragData[0].bgra;\n");
- break;
- case SHADER_YUV_MRT_PASS2:
- fragment_program.append(" gl_FragData[0] = gl_FragData[0].bgra;\n");
- fragment_program.append(" gl_FragData[1] = gl_FragData[1].bgra;\n");
- break;
- default:
- fragment_program.append(" gl_FragColor = gl_FragColor.bgra;\n");
- break;
- }
- }
-
- vertex_program = vertex_header + shared_variables + "void main() {\n" +
- vertex_program + "}\n";
-
- fragment_program = fragment_directives + fragment_header +
- shared_variables + "void main() {\n" + fragment_program +
- "}\n";
-
- cache_entry->Setup(vertex_program.c_str(), fragment_program.c_str());
- }
- return cache_entry;
-}
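The kRGBtoY/kRGBtoU/kRGBtoV constants baked into SHADER_YUV_MRT_PASS1 above are the BT.601 limited-range ("video range") coefficients, so full-scale RGB lands in Y values of roughly 16-235 with U/V centered on 0.5. A quick sanity check of the luma row:

  Y(white) = 0.257 + 0.504 + 0.098 + 0.0625 = 0.9215  ->  ~235/255
  Y(black) = 0.0625                                   ->   16/255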
-
-void ShaderProgram::Setup(const GLchar* vertex_shader_text,
- const GLchar* fragment_shader_text) {
- // Compile the vertex and fragment shaders and link them into |program_|.
- GLuint vertex_shader =
- helper_->CompileShaderFromSource(vertex_shader_text, GL_VERTEX_SHADER);
- if (vertex_shader == 0)
- return;
-
- gl_->AttachShader(program_, vertex_shader);
- gl_->DeleteShader(vertex_shader);
-
- GLuint fragment_shader = helper_->CompileShaderFromSource(
- fragment_shader_text, GL_FRAGMENT_SHADER);
- if (fragment_shader == 0)
- return;
- gl_->AttachShader(program_, fragment_shader);
- gl_->DeleteShader(fragment_shader);
-
- gl_->LinkProgram(program_);
-
- GLint link_status = 0;
- gl_->GetProgramiv(program_, GL_LINK_STATUS, &link_status);
- if (!link_status)
- return;
-
- position_location_ = gl_->GetAttribLocation(program_, "a_position");
- texcoord_location_ = gl_->GetAttribLocation(program_, "a_texcoord");
- texture_location_ = gl_->GetUniformLocation(program_, "s_texture");
- src_subrect_location_ = gl_->GetUniformLocation(program_, "src_subrect");
- src_pixelsize_location_ = gl_->GetUniformLocation(program_, "src_pixelsize");
- dst_pixelsize_location_ = gl_->GetUniformLocation(program_, "dst_pixelsize");
- scaling_vector_location_ =
- gl_->GetUniformLocation(program_, "scaling_vector");
- color_weights_location_ = gl_->GetUniformLocation(program_, "color_weights");
- // The only reason fetching these attribute locations should fail is
- // if the context was spontaneously lost (i.e., because the GPU
- // process crashed, perhaps deliberately for testing).
- DCHECK(Initialized() || gl_->GetGraphicsResetStatusKHR() != GL_NO_ERROR);
-}
-
-void ShaderProgram::UseProgram(const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool scale_x,
- bool flip_y,
- GLfloat color_weights[4]) {
- gl_->UseProgram(program_);
-
- // OpenGL defines the last parameter to VertexAttribPointer as type
- // "const GLvoid*" even though it is actually an offset into the buffer
- // object's data store and not a pointer to the client's address space.
- const void* offsets[2] = {
- 0, reinterpret_cast<const void*>(2 * sizeof(GLfloat))
- };
-
- gl_->VertexAttribPointer(position_location_,
- 2,
- GL_FLOAT,
- GL_FALSE,
- 4 * sizeof(GLfloat),
- offsets[0]);
- gl_->EnableVertexAttribArray(position_location_);
-
- gl_->VertexAttribPointer(texcoord_location_,
- 2,
- GL_FLOAT,
- GL_FALSE,
- 4 * sizeof(GLfloat),
- offsets[1]);
- gl_->EnableVertexAttribArray(texcoord_location_);
-
- gl_->Uniform1i(texture_location_, 0);
-
- // Convert |src_subrect| to texture coordinates.
- GLfloat src_subrect_texcoord[] = {
- static_cast<float>(src_subrect.x()) / src_size.width(),
- static_cast<float>(src_subrect.y()) / src_size.height(),
- static_cast<float>(src_subrect.width()) / src_size.width(),
- static_cast<float>(src_subrect.height()) / src_size.height(), };
- if (flip_y) {
- src_subrect_texcoord[1] += src_subrect_texcoord[3];
- src_subrect_texcoord[3] *= -1.0;
- }
- gl_->Uniform4fv(src_subrect_location_, 1, src_subrect_texcoord);
-
- gl_->Uniform2f(src_pixelsize_location_, src_size.width(), src_size.height());
- gl_->Uniform2f(dst_pixelsize_location_,
- static_cast<float>(dst_size.width()),
- static_cast<float>(dst_size.height()));
-
- gl_->Uniform2f(
- scaling_vector_location_, scale_x ? 1.0 : 0.0, scale_x ? 0.0 : 1.0);
- gl_->Uniform4fv(color_weights_location_, 1, color_weights);
-}
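For reference, the two VertexAttribPointer calls above match the kVertexAttributes layout defined earlier: each vertex occupies 16 bytes (stride 4 * sizeof(GLfloat)), with the position (x, y) at byte offset 0 and the texture coordinate (s, t) at byte offset 2 * sizeof(GLfloat) = 8.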
-
-} // namespace content
diff --git a/chromium/content/common/gpu/client/gl_helper_scaling.h b/chromium/content/common/gpu/client/gl_helper_scaling.h
deleted file mode 100644
index 6157d8e0d9b..00000000000
--- a/chromium/content/common/gpu/client/gl_helper_scaling.h
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GL_HELPER_SCALING_H_
-#define CONTENT_COMMON_GPU_CLIENT_GL_HELPER_SCALING_H_
-
-#include <deque>
-#include <map>
-#include <vector>
-
-#include "base/macros.h"
-#include "content/common/gpu/client/gl_helper.h"
-#include "ui/gfx/geometry/rect.h"
-#include "ui/gfx/geometry/size.h"
-
-namespace content {
-
-class ShaderProgram;
-class ScalerImpl;
-class GLHelperTest;
-
-// Implements GPU texture scaling methods.
-// Note that you should probably not use this class directly.
-// See gl_helper.cc::CreateScaler instead.
-class CONTENT_EXPORT GLHelperScaling {
- public:
- enum ShaderType {
- SHADER_BILINEAR,
- SHADER_BILINEAR2,
- SHADER_BILINEAR3,
- SHADER_BILINEAR4,
- SHADER_BILINEAR2X2,
- SHADER_BICUBIC_UPSCALE,
- SHADER_BICUBIC_HALF_1D,
- SHADER_PLANAR,
- SHADER_YUV_MRT_PASS1,
- SHADER_YUV_MRT_PASS2,
- };
-
- // Similar to ScalerInterface, but can generate multiple outputs.
- // Used for YUV conversion in gl_helper.cc.
- class CONTENT_EXPORT ShaderInterface {
- public:
- ShaderInterface() {}
- virtual ~ShaderInterface() {}
- // Note that the src_texture will have the min/mag filter set to GL_LINEAR
- // and wrap_s/t set to CLAMP_TO_EDGE in this call.
- virtual void Execute(GLuint source_texture,
- const std::vector<GLuint>& dest_textures) = 0;
- };
-
- typedef std::pair<ShaderType, bool> ShaderProgramKeyType;
-
- GLHelperScaling(gpu::gles2::GLES2Interface* gl,
- GLHelper* helper);
- ~GLHelperScaling();
- void InitBuffer();
-
- GLHelper::ScalerInterface* CreateScaler(
- GLHelper::ScalerQuality quality,
- gfx::Size src_size,
- gfx::Rect src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle);
-
- GLHelper::ScalerInterface* CreatePlanarScaler(
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle,
- const float color_weights[4]);
-
- ShaderInterface* CreateYuvMrtShader(
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle,
- ShaderType shader);
-
- private:
- // A ScaleOp represents a pass in a scaler pipeline, in one dimension.
- // Note that when quality is GOOD, multiple scaler passes will be
- // combined into one operation for increased performance.
- // Exposed in the header file for testing purposes.
- struct ScaleOp {
- ScaleOp(int factor, bool x, int size)
- : scale_factor(factor), scale_x(x), scale_size(size) {
- }
-
- // Calculate the set of ScaleOps needed to convert an image of size
- // |src| into an image of size |dst|. If |scale_x| is true, then
- // the calculations are for the X axis of the image, otherwise Y.
- // If |allow3| is true, we can use a SHADER_BILINEAR3 to replace
- // a scale up and scale down with a 3-tap bilinear scale.
- // The calculated ScaleOps are added to |ops|.
- static void AddOps(int src,
- int dst,
- bool scale_x,
- bool allow3,
- std::deque<ScaleOp>* ops) {
- int num_downscales = 0;
- if (allow3 && dst * 3 >= src && dst * 2 < src) {
- // Technically, this should be a scale up and then a
- // scale down, but it makes the optimization code more
- // complicated.
- ops->push_back(ScaleOp(3, scale_x, dst));
- return;
- }
- while ((dst << num_downscales) < src) {
- num_downscales++;
- }
- if ((dst << num_downscales) != src) {
- ops->push_back(ScaleOp(0, scale_x, dst << num_downscales));
- }
- while (num_downscales) {
- num_downscales--;
- ops->push_back(ScaleOp(2, scale_x, dst << num_downscales));
- }
- }
-
- // Update |subrect| to its new size. Before calling this function
- // |subrect| should be the size of the input image. After calling it,
- // |subrect| will be the size of the image after this particular
- // scaling operation.
- void UpdateSize(gfx::Size* subrect) {
- if (scale_x) {
- subrect->set_width(scale_size);
- } else {
- subrect->set_height(scale_size);
- }
- }
-
- // A scale factor of 0 means upscale
- // 2 means 50% scale
- // 3 means 33% scale, etc.
- int scale_factor;
- bool scale_x; // Otherwise y
- int scale_size; // Size to scale to.
- };
-
- // Full specification for a single scaling stage.
- struct ScalerStage {
- ScalerStage(ShaderType shader_,
- gfx::Size src_size_,
- gfx::Rect src_subrect_,
- gfx::Size dst_size_,
- bool scale_x_,
- bool vertically_flip_texture_,
- bool swizzle_);
- ShaderType shader;
- gfx::Size src_size;
- gfx::Rect src_subrect;
- gfx::Size dst_size;
- bool scale_x;
- bool vertically_flip_texture;
- bool swizzle;
- };
-
- // Compute a vector of scaler stages for a particular
- // set of input/output parameters.
- void ComputeScalerStages(GLHelper::ScalerQuality quality,
- const gfx::Size& src_size,
- const gfx::Rect& src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle,
- std::vector<ScalerStage> *scaler_stages);
-
- // Take two queues of ScaleOp structs and generate a
- // vector of scaler stages. This is the second half of
- // ComputeScalerStages.
- void ConvertScalerOpsToScalerStages(
- GLHelper::ScalerQuality quality,
- gfx::Size src_size,
- gfx::Rect src_subrect,
- const gfx::Size& dst_size,
- bool vertically_flip_texture,
- bool swizzle,
- std::deque<GLHelperScaling::ScaleOp>* x_ops,
- std::deque<GLHelperScaling::ScaleOp>* y_ops,
- std::vector<ScalerStage> *scaler_stages);
-
-
- scoped_refptr<ShaderProgram> GetShaderProgram(ShaderType type, bool swizzle);
-
- // Interleaved array of 2-dimensional vertex positions (x, y) and
- // 2-dimensional texture coordinates (s, t).
- static const GLfloat kVertexAttributes[];
-
- gpu::gles2::GLES2Interface* gl_;
- GLHelper* helper_;
-
- // The buffer that holds the vertices and the texture coordinates data for
- // drawing a quad.
- ScopedBuffer vertex_attributes_buffer_;
-
- std::map<ShaderProgramKeyType,
- scoped_refptr<ShaderProgram> > shader_programs_;
-
- friend class ShaderProgram;
- friend class ScalerImpl;
- friend class GLHelperTest;
- DISALLOW_COPY_AND_ASSIGN(GLHelperScaling);
-};
-
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GL_HELPER_SCALING_H_
diff --git a/chromium/content/common/gpu/client/gl_helper_unittest.cc b/chromium/content/common/gpu/client/gl_helper_unittest.cc
deleted file mode 100644
index 57022351c51..00000000000
--- a/chromium/content/common/gpu/client/gl_helper_unittest.cc
+++ /dev/null
@@ -1,2016 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <string.h>
-#include <cmath>
-#include <string>
-#include <vector>
-
-#include <GLES2/gl2.h>
-#include <GLES2/gl2ext.h>
-#include <GLES2/gl2extchromium.h>
-
-#include "base/at_exit.h"
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/files/file_util.h"
-#include "base/json/json_reader.h"
-#include "base/macros.h"
-#include "base/message_loop/message_loop.h"
-#include "base/run_loop.h"
-#include "base/strings/stringprintf.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/test/launcher/unit_test_launcher.h"
-#include "base/test/test_suite.h"
-#include "base/time/time.h"
-#include "base/trace_event/trace_event.h"
-#include "build/build_config.h"
-#include "content/common/gpu/client/gl_helper.h"
-#include "content/common/gpu/client/gl_helper_readback_support.h"
-#include "content/common/gpu/client/gl_helper_scaling.h"
-#include "content/public/test/unittest_test_suite.h"
-#include "content/test/content_test_suite.h"
-#include "gpu/blink/webgraphicscontext3d_in_process_command_buffer_impl.h"
-#include "media/base/video_frame.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/skia/include/core/SkBitmap.h"
-#include "third_party/skia/include/core/SkTypes.h"
-#include "ui/gl/gl_implementation.h"
-
-#if defined(OS_MACOSX)
-#include "base/mac/scoped_nsautorelease_pool.h"
-#endif
-
-namespace content {
-
-using blink::WebGLId;
-using blink::WebGraphicsContext3D;
-using gpu_blink::WebGraphicsContext3DInProcessCommandBufferImpl;
-
-content::GLHelper::ScalerQuality kQualities[] = {
- content::GLHelper::SCALER_QUALITY_BEST,
- content::GLHelper::SCALER_QUALITY_GOOD,
- content::GLHelper::SCALER_QUALITY_FAST, };
-
-const char* kQualityNames[] = {"best", "good", "fast", };
-
-class GLHelperTest : public testing::Test {
- protected:
- void SetUp() override {
- WebGraphicsContext3D::Attributes attributes;
- bool lose_context_when_out_of_memory = false;
- context_ =
- WebGraphicsContext3DInProcessCommandBufferImpl::CreateOffscreenContext(
- attributes, lose_context_when_out_of_memory);
- context_->InitializeOnCurrentThread();
- context_support_ = context_->GetContextSupport();
- helper_.reset(
- new content::GLHelper(context_->GetGLInterface(), context_support_));
- helper_scaling_.reset(new content::GLHelperScaling(
- context_->GetGLInterface(), helper_.get()));
- }
-
- void TearDown() override {
- helper_scaling_.reset(NULL);
- helper_.reset(NULL);
- context_.reset(NULL);
- }
-
- void StartTracing(const std::string& filter) {
- base::trace_event::TraceLog::GetInstance()->SetEnabled(
- base::trace_event::TraceConfig(filter,
- base::trace_event::RECORD_UNTIL_FULL),
- base::trace_event::TraceLog::RECORDING_MODE);
- }
-
- static void TraceDataCB(
- const base::Callback<void()>& callback,
- std::string* output,
- const scoped_refptr<base::RefCountedString>& json_events_str,
- bool has_more_events) {
- if (output->size() > 1 && !json_events_str->data().empty()) {
- output->append(",");
- }
- output->append(json_events_str->data());
- if (!has_more_events) {
- callback.Run();
- }
- }
-
- // End tracing, return tracing data in a simple map
- // of event name->counts.
- void EndTracing(std::map<std::string, int>* event_counts) {
- std::string json_data = "[";
- base::trace_event::TraceLog::GetInstance()->SetDisabled();
- base::RunLoop run_loop;
- base::trace_event::TraceLog::GetInstance()->Flush(
- base::Bind(&GLHelperTest::TraceDataCB,
- run_loop.QuitClosure(),
- base::Unretained(&json_data)));
- run_loop.Run();
- json_data.append("]");
-
- std::string error_msg;
- scoped_ptr<base::Value> trace_data =
- base::JSONReader::ReadAndReturnError(json_data, 0, NULL, &error_msg);
- CHECK(trace_data)
- << "JSON parsing failed (" << error_msg << ") JSON data:" << std::endl
- << json_data;
-
- base::ListValue* list;
- CHECK(trace_data->GetAsList(&list));
- for (size_t i = 0; i < list->GetSize(); i++) {
- base::Value* item = NULL;
- if (list->Get(i, &item)) {
- base::DictionaryValue* dict;
- CHECK(item->GetAsDictionary(&dict));
- std::string name;
- CHECK(dict->GetString("name", &name));
- std::string trace_type;
- CHECK(dict->GetString("ph", &trace_type));
- // Count all except END traces, as they come in BEGIN/END pairs.
- if (trace_type != "E" && trace_type != "e")
- (*event_counts)[name]++;
- VLOG(1) << "trace name: " << name;
- }
- }
- }
-
- // Bicubic filter kernel function.
- static float Bicubic(float x) {
- const float a = -0.5;
- x = std::abs(x);
- float x2 = x * x;
- float x3 = x2 * x;
- if (x <= 1) {
- return (a + 2) * x3 - (a + 3) * x2 + 1;
- } else if (x < 2) {
- return a * x3 - 5 * a * x2 + 8 * a * x - 4 * a;
- } else {
- return 0.0f;
- }
- }
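The reference kernel above is the standard cubic-convolution (Catmull-Rom) kernel with a = -0.5; at integer offsets it reproduces the input exactly, so it interpolates rather than smooths. A quick check:

  Bicubic(0) = (a + 2)*0 - (a + 3)*0 + 1 = 1
  Bicubic(1) = (a + 2) - (a + 3) + 1 = 0
  Bicubic(2) = 8a - 20a + 16a - 4a = 0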
-
- // Look up a single channel value. Works for 4-channel and single channel
- // bitmaps. Clamp x/y.
- int Channel(SkBitmap* pixels, int x, int y, int c) {
- if (pixels->bytesPerPixel() == 4) {
- uint32_t* data =
- pixels->getAddr32(std::max(0, std::min(x, pixels->width() - 1)),
- std::max(0, std::min(y, pixels->height() - 1)));
- return (*data) >> (c * 8) & 0xff;
- } else {
- DCHECK_EQ(pixels->bytesPerPixel(), 1);
- DCHECK_EQ(c, 0);
- return *pixels->getAddr8(std::max(0, std::min(x, pixels->width() - 1)),
- std::max(0, std::min(y, pixels->height() - 1)));
- }
- }
-
- // Set a single channel value. Works for 4-channel and single channel
- // bitmaps. Clamp x/y.
- void SetChannel(SkBitmap* pixels, int x, int y, int c, int v) {
- DCHECK_GE(x, 0);
- DCHECK_GE(y, 0);
- DCHECK_LT(x, pixels->width());
- DCHECK_LT(y, pixels->height());
- if (pixels->bytesPerPixel() == 4) {
- uint32_t* data = pixels->getAddr32(x, y);
- v = std::max(0, std::min(v, 255));
- *data = (*data & ~(0xffu << (c * 8))) | (v << (c * 8));
- } else {
- DCHECK_EQ(pixels->bytesPerPixel(), 1);
- DCHECK_EQ(c, 0);
- uint8_t* data = pixels->getAddr8(x, y);
- v = std::max(0, std::min(v, 255));
- *data = v;
- }
- }
-
- // Print all the R, G, B or A values from an SkBitmap in a
- // human-readable format.
- void PrintChannel(SkBitmap* pixels, int c) {
- for (int y = 0; y < pixels->height(); y++) {
- std::string formatted;
- for (int x = 0; x < pixels->width(); x++) {
- formatted.append(base::StringPrintf("%3d, ", Channel(pixels, x, y, c)));
- }
- LOG(ERROR) << formatted;
- }
- }
-
- // Print out the individual steps of a scaler pipeline.
- std::string PrintStages(
- const std::vector<GLHelperScaling::ScalerStage>& scaler_stages) {
- std::string ret;
- for (size_t i = 0; i < scaler_stages.size(); i++) {
- ret.append(base::StringPrintf("%dx%d -> %dx%d ",
- scaler_stages[i].src_size.width(),
- scaler_stages[i].src_size.height(),
- scaler_stages[i].dst_size.width(),
- scaler_stages[i].dst_size.height()));
- bool xy_matters = false;
- switch (scaler_stages[i].shader) {
- case GLHelperScaling::SHADER_BILINEAR:
- ret.append("bilinear");
- break;
- case GLHelperScaling::SHADER_BILINEAR2:
- ret.append("bilinear2");
- xy_matters = true;
- break;
- case GLHelperScaling::SHADER_BILINEAR3:
- ret.append("bilinear3");
- xy_matters = true;
- break;
- case GLHelperScaling::SHADER_BILINEAR4:
- ret.append("bilinear4");
- xy_matters = true;
- break;
- case GLHelperScaling::SHADER_BILINEAR2X2:
- ret.append("bilinear2x2");
- break;
- case GLHelperScaling::SHADER_BICUBIC_UPSCALE:
- ret.append("bicubic upscale");
- xy_matters = true;
- break;
- case GLHelperScaling::SHADER_BICUBIC_HALF_1D:
- ret.append("bicubic 1/2");
- xy_matters = true;
- break;
- case GLHelperScaling::SHADER_PLANAR:
- ret.append("planar");
- break;
- case GLHelperScaling::SHADER_YUV_MRT_PASS1:
- ret.append("rgb2yuv pass 1");
- break;
- case GLHelperScaling::SHADER_YUV_MRT_PASS2:
- ret.append("rgb2yuv pass 2");
- break;
- }
-
- if (xy_matters) {
- if (scaler_stages[i].scale_x) {
- ret.append(" X");
- } else {
- ret.append(" Y");
- }
- }
- ret.append("\n");
- }
- return ret;
- }
-
- bool CheckScale(double scale, int samples, bool already_scaled) {
- // 1:1 is valid if there is one sample.
- if (samples == 1 && scale == 1.0) {
- return true;
- }
- // Is it an exact down-scale (50%, 25%, etc.?)
- if (scale == 2.0 * samples) {
- return true;
- }
- // Upscales, only valid if we haven't already scaled in this dimension.
- if (!already_scaled) {
- // Is it a valid bilinear upscale?
- if (samples == 1 && scale <= 1.0) {
- return true;
- }
- // Multi-sample upscale-downscale combination?
- if (scale > samples / 2.0 && scale < samples) {
- return true;
- }
- }
- return false;
- }
-
- // Make sure that the stages of the scaler pipeline are sane.
- void ValidateScalerStages(
- content::GLHelper::ScalerQuality quality,
- const std::vector<GLHelperScaling::ScalerStage>& scaler_stages,
- const gfx::Size& dst_size,
- const std::string& message) {
- bool previous_error = HasFailure();
- // First, check that the input size for each stage is equal to
- // the output size of the previous stage.
- for (size_t i = 1; i < scaler_stages.size(); i++) {
- EXPECT_EQ(scaler_stages[i - 1].dst_size.width(),
- scaler_stages[i].src_size.width());
- EXPECT_EQ(scaler_stages[i - 1].dst_size.height(),
- scaler_stages[i].src_size.height());
- EXPECT_EQ(scaler_stages[i].src_subrect.x(), 0);
- EXPECT_EQ(scaler_stages[i].src_subrect.y(), 0);
- EXPECT_EQ(scaler_stages[i].src_subrect.width(),
- scaler_stages[i].src_size.width());
- EXPECT_EQ(scaler_stages[i].src_subrect.height(),
- scaler_stages[i].src_size.height());
- }
-
- // Check the output size matches the destination of the last stage
- EXPECT_EQ(scaler_stages[scaler_stages.size() - 1].dst_size.width(),
- dst_size.width());
- EXPECT_EQ(scaler_stages[scaler_stages.size() - 1].dst_size.height(),
- dst_size.height());
-
- // Used to verify that up-scales are not attempted after some
- // other scale.
- bool scaled_x = false;
- bool scaled_y = false;
-
- for (size_t i = 0; i < scaler_stages.size(); i++) {
- // Note: 2.0 means scaling down by 50%
- double x_scale =
- static_cast<double>(scaler_stages[i].src_subrect.width()) /
- static_cast<double>(scaler_stages[i].dst_size.width());
- double y_scale =
- static_cast<double>(scaler_stages[i].src_subrect.height()) /
- static_cast<double>(scaler_stages[i].dst_size.height());
-
- int x_samples = 0;
- int y_samples = 0;
-
- // Codify valid scale operations.
- switch (scaler_stages[i].shader) {
- case GLHelperScaling::SHADER_PLANAR:
- case GLHelperScaling::SHADER_YUV_MRT_PASS1:
- case GLHelperScaling::SHADER_YUV_MRT_PASS2:
- EXPECT_TRUE(false) << "Invalid shader.";
- break;
-
- case GLHelperScaling::SHADER_BILINEAR:
- if (quality != content::GLHelper::SCALER_QUALITY_FAST) {
- x_samples = 1;
- y_samples = 1;
- }
- break;
- case GLHelperScaling::SHADER_BILINEAR2:
- x_samples = 2;
- y_samples = 1;
- break;
- case GLHelperScaling::SHADER_BILINEAR3:
- x_samples = 3;
- y_samples = 1;
- break;
- case GLHelperScaling::SHADER_BILINEAR4:
- x_samples = 4;
- y_samples = 1;
- break;
- case GLHelperScaling::SHADER_BILINEAR2X2:
- x_samples = 2;
- y_samples = 2;
- break;
- case GLHelperScaling::SHADER_BICUBIC_UPSCALE:
- if (scaler_stages[i].scale_x) {
- EXPECT_LT(x_scale, 1.0);
- EXPECT_EQ(y_scale, 1.0);
- } else {
- EXPECT_EQ(x_scale, 1.0);
- EXPECT_LT(y_scale, 1.0);
- }
- break;
- case GLHelperScaling::SHADER_BICUBIC_HALF_1D:
- if (scaler_stages[i].scale_x) {
- EXPECT_EQ(x_scale, 2.0);
- EXPECT_EQ(y_scale, 1.0);
- } else {
- EXPECT_EQ(x_scale, 1.0);
- EXPECT_EQ(y_scale, 2.0);
- }
- break;
- }
-
- if (!scaler_stages[i].scale_x) {
- std::swap(x_samples, y_samples);
- }
-
- if (x_samples) {
- EXPECT_TRUE(CheckScale(x_scale, x_samples, scaled_x))
- << "x_scale = " << x_scale;
- }
- if (y_samples) {
- EXPECT_TRUE(CheckScale(y_scale, y_samples, scaled_y))
- << "y_scale = " << y_scale;
- }
-
- if (x_scale != 1.0) {
- scaled_x = true;
- }
- if (y_scale != 1.0) {
- scaled_y = true;
- }
- }
-
- if (HasFailure() && !previous_error) {
- LOG(ERROR) << "Invalid scaler stages: " << message;
- LOG(ERROR) << "Scaler stages:";
- LOG(ERROR) << PrintStages(scaler_stages);
- }
- }
-
- // Compares two bitmaps taking color types into account. Checks whether each
- // component of each pixel is no more than |maxdiff| apart. If bitmaps are not
- // similar enough, prints out |truth|, |other|, |source|, |scaler_stages|
- // and |message|.
- void Compare(SkBitmap* truth,
- SkBitmap* other,
- int maxdiff,
- SkBitmap* source,
- const std::vector<GLHelperScaling::ScalerStage>& scaler_stages,
- std::string message) {
- EXPECT_EQ(truth->width(), other->width());
- EXPECT_EQ(truth->height(), other->height());
- bool swizzle = (truth->colorType() == kRGBA_8888_SkColorType &&
- other->colorType() == kBGRA_8888_SkColorType) ||
- (truth->colorType() == kBGRA_8888_SkColorType &&
- other->colorType() == kRGBA_8888_SkColorType);
- EXPECT_TRUE(swizzle || truth->colorType() == other->colorType());
- int bpp = truth->bytesPerPixel();
- for (int x = 0; x < truth->width(); x++) {
- for (int y = 0; y < truth->height(); y++) {
- for (int c = 0; c < bpp; c++) {
- int a = Channel(truth, x, y, c);
- // swizzle when comparing if needed
- int b = swizzle && (c == 0 || c == 2)
- ? Channel(other, x, y, (c + 2) & 2)
- : Channel(other, x, y, c);
- EXPECT_NEAR(a, b, maxdiff) << " x=" << x << " y=" << y << " c=" << c
- << " " << message;
- if (std::abs(a - b) > maxdiff) {
- LOG(ERROR) << "-------expected--------";
- for (int i = 0; i < bpp; i++) {
- LOG(ERROR) << "Channel " << i << ":";
- PrintChannel(truth, i);
- }
- LOG(ERROR) << "-------actual--------";
- for (int i = 0; i < bpp; i++) {
- LOG(ERROR) << "Channel " << i << ":";
- PrintChannel(other, i);
- }
- if (source) {
- LOG(ERROR) << "-------original--------";
- for (int i = 0; i < source->bytesPerPixel(); i++) {
- LOG(ERROR) << "Channel " << i << ":";
- PrintChannel(source, i);
- }
- }
- LOG(ERROR) << "-----Scaler stages------";
- LOG(ERROR) << PrintStages(scaler_stages);
- return;
- }
- }
- }
- }
- }
-
- // Get a single R, G, B or A value as a float.
- float ChannelAsFloat(SkBitmap* pixels, int x, int y, int c) {
- return Channel(pixels, x, y, c) / 255.0;
- }
-
- // Works like a GL_LINEAR lookup on an SkBitmap.
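-  // Texel centers sit at half-integer coordinates, so the 0.5 offsets below
-  // center the sample point before the four surrounding texels are blended.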
- float Bilinear(SkBitmap* pixels, float x, float y, int c) {
- x -= 0.5;
- y -= 0.5;
- int base_x = static_cast<int>(floorf(x));
- int base_y = static_cast<int>(floorf(y));
- x -= base_x;
- y -= base_y;
- return (ChannelAsFloat(pixels, base_x, base_y, c) * (1 - x) * (1 - y) +
- ChannelAsFloat(pixels, base_x + 1, base_y, c) * x * (1 - y) +
- ChannelAsFloat(pixels, base_x, base_y + 1, c) * (1 - x) * y +
- ChannelAsFloat(pixels, base_x + 1, base_y + 1, c) * x * y);
- }
-
- // Encodes an RGBA bitmap to grayscale.
- // Reference implementation for
- // GLHelper::CopyToTextureImpl::EncodeTextureAsGrayscale.
- void EncodeToGrayscaleSlow(SkBitmap* input, SkBitmap* output) {
- const float kRGBtoGrayscaleColorWeights[3] = {0.213f, 0.715f, 0.072f};
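-    // These approximate the BT.709 luma coefficients (0.2126, 0.7152, 0.0722).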
- CHECK_EQ(kAlpha_8_SkColorType, output->colorType());
- CHECK_EQ(input->width(), output->width());
- CHECK_EQ(input->height(), output->height());
- CHECK_EQ(input->colorType(), kRGBA_8888_SkColorType);
-
- for (int dst_y = 0; dst_y < output->height(); dst_y++) {
- for (int dst_x = 0; dst_x < output->width(); dst_x++) {
- float c0 = ChannelAsFloat(input, dst_x, dst_y, 0);
- float c1 = ChannelAsFloat(input, dst_x, dst_y, 1);
- float c2 = ChannelAsFloat(input, dst_x, dst_y, 2);
- float value = c0 * kRGBtoGrayscaleColorWeights[0] +
- c1 * kRGBtoGrayscaleColorWeights[1] +
- c2 * kRGBtoGrayscaleColorWeights[2];
- SetChannel(
- output, dst_x, dst_y, 0, static_cast<int>(value * 255.0f + 0.5f));
- }
- }
- }
-
- // Very slow bicubic / bilinear scaler for reference.
- void ScaleSlow(SkBitmap* input,
- SkBitmap* output,
- content::GLHelper::ScalerQuality quality) {
- float xscale = static_cast<float>(input->width()) / output->width();
- float yscale = static_cast<float>(input->height()) / output->height();
- float clamped_xscale = xscale < 1.0 ? 1.0 : 1.0 / xscale;
- float clamped_yscale = yscale < 1.0 ? 1.0 : 1.0 / yscale;
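-    // The clamped scales are used by the bicubic (BEST) path below: a value
-    // below 1 widens the filter footprint when downscaling, while upscaling
-    // keeps it at 1.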
- for (int dst_y = 0; dst_y < output->height(); dst_y++) {
- for (int dst_x = 0; dst_x < output->width(); dst_x++) {
- for (int channel = 0; channel < 4; channel++) {
- float dst_x_in_src = (dst_x + 0.5f) * xscale;
- float dst_y_in_src = (dst_y + 0.5f) * yscale;
-
- float value = 0.0f;
- float sum = 0.0f;
- switch (quality) {
- case content::GLHelper::SCALER_QUALITY_BEST:
- for (int src_y = -10; src_y < input->height() + 10; ++src_y) {
- float coeff_y =
- Bicubic((src_y + 0.5f - dst_y_in_src) * clamped_yscale);
- if (coeff_y == 0.0f) {
- continue;
- }
- for (int src_x = -10; src_x < input->width() + 10; ++src_x) {
- float coeff =
- coeff_y *
- Bicubic((src_x + 0.5f - dst_x_in_src) * clamped_xscale);
- if (coeff == 0.0f) {
- continue;
- }
- sum += coeff;
- float c = ChannelAsFloat(input, src_x, src_y, channel);
- value += c * coeff;
- }
- }
- break;
-
- case content::GLHelper::SCALER_QUALITY_GOOD: {
- int xshift = 0, yshift = 0;
- while ((output->width() << xshift) < input->width()) {
- xshift++;
- }
- while ((output->height() << yshift) < input->height()) {
- yshift++;
- }
- int xmag = 1 << xshift;
- int ymag = 1 << yshift;
- if (xmag == 4 && output->width() * 3 >= input->width()) {
- xmag = 3;
- }
- if (ymag == 4 && output->height() * 3 >= input->height()) {
- ymag = 3;
- }
- for (int x = 0; x < xmag; x++) {
- for (int y = 0; y < ymag; y++) {
- value += Bilinear(input,
- (dst_x * xmag + x + 0.5) * xscale / xmag,
- (dst_y * ymag + y + 0.5) * yscale / ymag,
- channel);
- sum += 1.0;
- }
- }
- break;
- }
-
- case content::GLHelper::SCALER_QUALITY_FAST:
- value = Bilinear(input, dst_x_in_src, dst_y_in_src, channel);
- sum = 1.0;
- }
- value /= sum;
- SetChannel(output,
- dst_x,
- dst_y,
- channel,
- static_cast<int>(value * 255.0f + 0.5f));
- }
- }
- }
- }
-
- void FlipSKBitmap(SkBitmap* bitmap) {
- int bpp = bitmap->bytesPerPixel();
- DCHECK(bpp == 4 || bpp == 1);
- int top_line = 0;
- int bottom_line = bitmap->height() - 1;
- while (top_line < bottom_line) {
- for (int x = 0; x < bitmap->width(); x++) {
- bpp == 4 ? std::swap(*bitmap->getAddr32(x, top_line),
- *bitmap->getAddr32(x, bottom_line))
- : std::swap(*bitmap->getAddr8(x, top_line),
- *bitmap->getAddr8(x, bottom_line));
- }
- top_line++;
- bottom_line--;
- }
- }
-
- // Swaps red and blue channels in each pixel in a 32-bit bitmap.
- void SwizzleSKBitmap(SkBitmap* bitmap) {
- int bpp = bitmap->bytesPerPixel();
- DCHECK(bpp == 4);
- for (int y = 0; y < bitmap->height(); y++) {
- for (int x = 0; x < bitmap->width(); x++) {
- // Swap channels 0 and 2 (red and blue)
- int c0 = Channel(bitmap, x, y, 0);
- int c2 = Channel(bitmap, x, y, 2);
- SetChannel(bitmap, x, y, 2, c0);
- SetChannel(bitmap, x, y, 0, c2);
- }
- }
- }
-
- // gl_helper scales recursively, so we'll need to do that
- // in the reference implementation too.
- void ScaleSlowRecursive(SkBitmap* input,
- SkBitmap* output,
- content::GLHelper::ScalerQuality quality) {
- if (quality == content::GLHelper::SCALER_QUALITY_FAST ||
- quality == content::GLHelper::SCALER_QUALITY_GOOD) {
- ScaleSlow(input, output, quality);
- return;
- }
-
- float xscale = static_cast<float>(output->width()) / input->width();
-
- // This corresponds to all the operations we can do directly.
- float yscale = static_cast<float>(output->height()) / input->height();
- if ((xscale == 1.0f && yscale == 1.0f) ||
- (xscale == 0.5f && yscale == 1.0f) ||
- (xscale == 1.0f && yscale == 0.5f) ||
- (xscale >= 1.0f && yscale == 1.0f) ||
- (xscale == 1.0f && yscale >= 1.0f)) {
- ScaleSlow(input, output, quality);
- return;
- }
-
- // Now we break the problem down into smaller pieces, using the
- // operations available.
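-    // The intermediate size is the output size doubled until it reaches the
-    // input size (or exactly half of it), so every direct step is either an
-    // upscale or an exact 50% reduction in one dimension.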
- int xtmp = input->width();
- int ytmp = input->height();
-
- if (output->height() != input->height()) {
- ytmp = output->height();
- while (ytmp < input->height() && ytmp * 2 != input->height()) {
- ytmp += ytmp;
- }
- } else {
- xtmp = output->width();
- while (xtmp < input->width() && xtmp * 2 != input->width()) {
- xtmp += xtmp;
- }
- }
-
- SkBitmap tmp;
- tmp.allocN32Pixels(xtmp, ytmp);
-
- ScaleSlowRecursive(input, &tmp, quality);
- ScaleSlowRecursive(&tmp, output, quality);
- }
-
- // Creates an RGBA SkBitmap
- scoped_ptr<SkBitmap> CreateTestBitmap(int width,
- int height,
- int test_pattern) {
- scoped_ptr<SkBitmap> bitmap(new SkBitmap);
- bitmap->allocPixels(SkImageInfo::Make(
- width, height, kRGBA_8888_SkColorType, kPremul_SkAlphaType));
-
- for (int x = 0; x < width; ++x) {
- for (int y = 0; y < height; ++y) {
- switch (test_pattern) {
- case 0: // Smooth test pattern
-          // Row 0 gets a distinct value, presumably so that a missing
-          // vertical flip is noticed by the comparisons.
-          SetChannel(bitmap.get(), x, y, 0, y == 0 ? x * 50 : x * 10);
- SetChannel(bitmap.get(), x, y, 1, y * 10);
- SetChannel(bitmap.get(), x, y, 2, (x + y) * 10);
- SetChannel(bitmap.get(), x, y, 3, 255);
- break;
- case 1: // Small blocks
- SetChannel(bitmap.get(), x, y, 0, x & 1 ? 255 : 0);
- SetChannel(bitmap.get(), x, y, 1, y & 1 ? 255 : 0);
- SetChannel(bitmap.get(), x, y, 2, (x + y) & 1 ? 255 : 0);
- SetChannel(bitmap.get(), x, y, 3, 255);
- break;
- case 2: // Medium blocks
- SetChannel(bitmap.get(), x, y, 0, 10 + x / 2 * 50);
- SetChannel(bitmap.get(), x, y, 1, 10 + y / 3 * 50);
- SetChannel(bitmap.get(), x, y, 2, (x + y) / 5 * 50 + 5);
- SetChannel(bitmap.get(), x, y, 3, 255);
- break;
- }
- }
- }
- return bitmap;
- }
-
- // Binds texture and framebuffer and loads the bitmap pixels into the texture.
- void BindTextureAndFrameBuffer(WebGLId texture,
- WebGLId framebuffer,
- SkBitmap* bitmap,
- int width,
- int height) {
- context_->bindFramebuffer(GL_FRAMEBUFFER, framebuffer);
- context_->bindTexture(GL_TEXTURE_2D, texture);
- context_->texImage2D(GL_TEXTURE_2D,
- 0,
- GL_RGBA,
- width,
- height,
- 0,
- GL_RGBA,
- GL_UNSIGNED_BYTE,
- bitmap->getPixels());
- }
-
- // Create a test image, transform it using
- // GLHelper::CropScaleReadbackAndCleanTexture and a reference implementation
- // and compare the results.
- void TestCropScaleReadbackAndCleanTexture(int xsize,
- int ysize,
- int scaled_xsize,
- int scaled_ysize,
- int test_pattern,
- SkColorType out_color_type,
- bool swizzle,
- size_t quality_index) {
- DCHECK(out_color_type == kAlpha_8_SkColorType ||
- out_color_type == kRGBA_8888_SkColorType ||
- out_color_type == kBGRA_8888_SkColorType);
- WebGLId src_texture = context_->createTexture();
- WebGLId framebuffer = context_->createFramebuffer();
- scoped_ptr<SkBitmap> input_pixels =
- CreateTestBitmap(xsize, ysize, test_pattern);
- BindTextureAndFrameBuffer(
- src_texture, framebuffer, input_pixels.get(), xsize, ysize);
-
- std::string message = base::StringPrintf(
- "input size: %dx%d "
- "output size: %dx%d "
- "pattern: %d , quality: %s, "
- "out_color_type: %d",
- xsize,
- ysize,
- scaled_xsize,
- scaled_ysize,
- test_pattern,
- kQualityNames[quality_index],
- out_color_type);
-
- // Transform the bitmap using GLHelper::CropScaleReadbackAndCleanTexture.
- SkBitmap output_pixels;
- output_pixels.allocPixels(SkImageInfo::Make(
- scaled_xsize, scaled_ysize, out_color_type, kPremul_SkAlphaType));
- base::RunLoop run_loop;
- gfx::Size encoded_texture_size;
- helper_->CropScaleReadbackAndCleanTexture(
- src_texture,
- gfx::Size(xsize, ysize),
- gfx::Rect(xsize, ysize),
- gfx::Size(scaled_xsize, scaled_ysize),
- static_cast<unsigned char*>(output_pixels.getPixels()),
- out_color_type,
- base::Bind(&callcallback, run_loop.QuitClosure()),
- kQualities[quality_index]);
- run_loop.Run();
- // CropScaleReadbackAndCleanTexture flips the pixels. Flip them back.
- FlipSKBitmap(&output_pixels);
-
-    // If the bitmap shouldn't have changed, compare against the input.
- if (xsize == scaled_xsize && ysize == scaled_ysize &&
- out_color_type != kAlpha_8_SkColorType) {
- const std::vector<GLHelperScaling::ScalerStage> dummy_stages;
- Compare(input_pixels.get(),
- &output_pixels,
- 0,
- NULL,
- dummy_stages,
- message + " comparing against input");
- return;
- }
-
- // Now transform the bitmap using the reference implementation.
- SkBitmap scaled_pixels;
- scaled_pixels.allocPixels(SkImageInfo::Make(scaled_xsize,
- scaled_ysize,
- kRGBA_8888_SkColorType,
- kPremul_SkAlphaType));
- SkBitmap truth_pixels;
- // Step 1: Scale
- ScaleSlowRecursive(
- input_pixels.get(), &scaled_pixels, kQualities[quality_index]);
- // Step 2: Encode to grayscale if needed.
- if (out_color_type == kAlpha_8_SkColorType) {
- truth_pixels.allocPixels(SkImageInfo::Make(
- scaled_xsize, scaled_ysize, out_color_type, kPremul_SkAlphaType));
- EncodeToGrayscaleSlow(&scaled_pixels, &truth_pixels);
- } else {
- truth_pixels = scaled_pixels;
- }
-
- // Now compare the results.
- SkAutoLockPixels lock_input(truth_pixels);
- const std::vector<GLHelperScaling::ScalerStage> dummy_stages;
- Compare(&truth_pixels,
- &output_pixels,
- 2,
- input_pixels.get(),
- dummy_stages,
- message + " comparing against transformed/scaled");
-
- context_->deleteTexture(src_texture);
- context_->deleteFramebuffer(framebuffer);
- }
-
- // Scaling test: Create a test image, scale it using GLHelperScaling
- // and a reference implementation and compare the results.
- void TestScale(int xsize,
- int ysize,
- int scaled_xsize,
- int scaled_ysize,
- int test_pattern,
- size_t quality_index,
- bool flip) {
- WebGLId src_texture = context_->createTexture();
- WebGLId framebuffer = context_->createFramebuffer();
- scoped_ptr<SkBitmap> input_pixels =
- CreateTestBitmap(xsize, ysize, test_pattern);
- BindTextureAndFrameBuffer(
- src_texture, framebuffer, input_pixels.get(), xsize, ysize);
-
- std::string message = base::StringPrintf(
- "input size: %dx%d "
- "output size: %dx%d "
- "pattern: %d quality: %s",
- xsize,
- ysize,
- scaled_xsize,
- scaled_ysize,
- test_pattern,
- kQualityNames[quality_index]);
-
- std::vector<GLHelperScaling::ScalerStage> stages;
- helper_scaling_->ComputeScalerStages(kQualities[quality_index],
- gfx::Size(xsize, ysize),
- gfx::Rect(0, 0, xsize, ysize),
- gfx::Size(scaled_xsize, scaled_ysize),
- flip,
- false,
- &stages);
- ValidateScalerStages(kQualities[quality_index],
- stages,
- gfx::Size(scaled_xsize, scaled_ysize),
- message);
-
- WebGLId dst_texture =
- helper_->CopyAndScaleTexture(src_texture,
- gfx::Size(xsize, ysize),
- gfx::Size(scaled_xsize, scaled_ysize),
- flip,
- kQualities[quality_index]);
-
- SkBitmap output_pixels;
- output_pixels.allocPixels(SkImageInfo::Make(scaled_xsize,
- scaled_ysize,
- kRGBA_8888_SkColorType,
- kPremul_SkAlphaType));
-
- helper_->ReadbackTextureSync(
- dst_texture,
- gfx::Rect(0, 0, scaled_xsize, scaled_ysize),
- static_cast<unsigned char*>(output_pixels.getPixels()),
- kRGBA_8888_SkColorType);
- if (flip) {
- // Flip the pixels back.
- FlipSKBitmap(&output_pixels);
- }
-
-    // If the bitmap shouldn't have changed, compare against the input.
- if (xsize == scaled_xsize && ysize == scaled_ysize) {
- Compare(input_pixels.get(),
- &output_pixels,
- 0,
- NULL,
- stages,
- message + " comparing against input");
- return;
- }
-
- // Now scale the bitmap using the reference implementation.
- SkBitmap truth_pixels;
- truth_pixels.allocPixels(SkImageInfo::Make(scaled_xsize,
- scaled_ysize,
- kRGBA_8888_SkColorType,
- kPremul_SkAlphaType));
- ScaleSlowRecursive(
- input_pixels.get(), &truth_pixels, kQualities[quality_index]);
- Compare(&truth_pixels,
- &output_pixels,
- 2,
- input_pixels.get(),
- stages,
- message + " comparing against scaled");
-
- context_->deleteTexture(src_texture);
- context_->deleteTexture(dst_texture);
- context_->deleteFramebuffer(framebuffer);
- }
-
- // Create a scaling pipeline and check that it is made up of
- // valid scaling operations.
- void TestScalerPipeline(size_t quality,
- int xsize,
- int ysize,
- int dst_xsize,
- int dst_ysize) {
- std::vector<GLHelperScaling::ScalerStage> stages;
- helper_scaling_->ComputeScalerStages(kQualities[quality],
- gfx::Size(xsize, ysize),
- gfx::Rect(0, 0, xsize, ysize),
- gfx::Size(dst_xsize, dst_ysize),
- false,
- false,
- &stages);
- ValidateScalerStages(kQualities[quality],
- stages,
- gfx::Size(dst_xsize, dst_ysize),
- base::StringPrintf(
- "input size: %dx%d "
- "output size: %dx%d "
- "quality: %s",
- xsize,
- ysize,
- dst_xsize,
- dst_ysize,
- kQualityNames[quality]));
- }
-
- // Create a scaling pipeline and make sure that the steps
- // are exactly the steps we expect.
- void CheckPipeline(content::GLHelper::ScalerQuality quality,
- int xsize,
- int ysize,
- int dst_xsize,
- int dst_ysize,
- const std::string& description) {
- std::vector<GLHelperScaling::ScalerStage> stages;
- helper_scaling_->ComputeScalerStages(quality,
- gfx::Size(xsize, ysize),
- gfx::Rect(0, 0, xsize, ysize),
- gfx::Size(dst_xsize, dst_ysize),
- false,
- false,
- &stages);
- ValidateScalerStages(content::GLHelper::SCALER_QUALITY_GOOD,
- stages,
- gfx::Size(dst_xsize, dst_ysize),
- "");
- EXPECT_EQ(PrintStages(stages), description);
- }
-
- // Note: Left/Right means Top/Bottom when used for Y dimension.
- enum Margin {
- MarginLeft,
- MarginMiddle,
- MarginRight,
- MarginInvalid,
- };
-
- static Margin NextMargin(Margin m) {
- switch (m) {
- case MarginLeft:
- return MarginMiddle;
- case MarginMiddle:
- return MarginRight;
- case MarginRight:
- return MarginInvalid;
- default:
- return MarginInvalid;
- }
- }
-
- int compute_margin(int insize, int outsize, Margin m) {
- int available = outsize - insize;
- switch (m) {
- default:
- EXPECT_TRUE(false) << "This should not happen.";
- return 0;
- case MarginLeft:
- return 0;
- case MarginMiddle:
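-        // The & ~1 rounds down to an even offset, which keeps the halved
-        // chroma-plane margins used in TestYUVReadback exact.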
- return (available / 2) & ~1;
- case MarginRight:
- return available;
- }
- }
-
- // Convert 0.0 - 1.0 to 0 - 255
- int float_to_byte(float v) {
- int ret = static_cast<int>(floorf(v * 255.0f + 0.5f));
- if (ret < 0) {
- return 0;
- }
- if (ret > 255) {
- return 255;
- }
- return ret;
- }
-
- static void callcallback(const base::Callback<void()>& callback,
- bool result) {
- callback.Run();
- }
-
- void PrintPlane(unsigned char* plane, int xsize, int stride, int ysize) {
- for (int y = 0; y < ysize; y++) {
- std::string formatted;
- for (int x = 0; x < xsize; x++) {
- formatted.append(base::StringPrintf("%3d, ", plane[y * stride + x]));
- }
- LOG(ERROR) << formatted << " (" << (plane + y * stride) << ")";
- }
- }
-
-  // Compares two planes and makes sure that each component of each pixel
-  // is no more than |maxdiff| apart.
- void ComparePlane(unsigned char* truth,
- int truth_stride,
- unsigned char* other,
- int other_stride,
- int maxdiff,
- int xsize,
- int ysize,
- SkBitmap* source,
- std::string message) {
- for (int x = 0; x < xsize; x++) {
- for (int y = 0; y < ysize; y++) {
- int a = other[y * other_stride + x];
- int b = truth[y * truth_stride + x];
- EXPECT_NEAR(a, b, maxdiff) << " x=" << x << " y=" << y << " "
- << message;
- if (std::abs(a - b) > maxdiff) {
- LOG(ERROR) << "-------expected--------";
- PrintPlane(truth, xsize, truth_stride, ysize);
- LOG(ERROR) << "-------actual--------";
- PrintPlane(other, xsize, other_stride, ysize);
- if (source) {
- LOG(ERROR) << "-------before yuv conversion: red--------";
- PrintChannel(source, 0);
- LOG(ERROR) << "-------before yuv conversion: green------";
- PrintChannel(source, 1);
- LOG(ERROR) << "-------before yuv conversion: blue-------";
- PrintChannel(source, 2);
- }
- return;
- }
- }
- }
- }
-
- void DrawGridToBitmap(int w, int h,
- SkColor background_color,
- SkColor grid_color,
- int grid_pitch,
- int grid_width,
- SkBitmap& bmp) {
- ASSERT_GT(grid_pitch, 0);
- ASSERT_GT(grid_width, 0);
- ASSERT_NE(background_color, grid_color);
-
- for (int y = 0; y < h; ++y) {
- bool y_on_grid = ((y % grid_pitch) < grid_width);
-
- for (int x = 0; x < w; ++x) {
- bool on_grid = (y_on_grid || ((x % grid_pitch) < grid_width));
-
- if (bmp.colorType() == kRGBA_8888_SkColorType ||
- bmp.colorType() == kBGRA_8888_SkColorType) {
- *bmp.getAddr32(x, y) = (on_grid ? grid_color : background_color);
- } else if (bmp.colorType() == kRGB_565_SkColorType) {
- *bmp.getAddr16(x, y) = (on_grid ? grid_color : background_color);
- }
- }
- }
- }
-
- void DrawCheckerToBitmap(int w, int h,
- SkColor color1, SkColor color2,
- int rect_w, int rect_h,
- SkBitmap& bmp) {
- ASSERT_GT(rect_w, 0);
- ASSERT_GT(rect_h, 0);
- ASSERT_NE(color1, color2);
-
- for (int y = 0; y < h; ++y) {
- bool y_bit = (((y / rect_h) & 0x1) == 0);
-
- for (int x = 0; x < w; ++x) {
- bool x_bit = (((x / rect_w) & 0x1) == 0);
-
- bool use_color2 = (x_bit != y_bit); // xor
- if (bmp.colorType() == kRGBA_8888_SkColorType ||
- bmp.colorType() == kBGRA_8888_SkColorType) {
- *bmp.getAddr32(x, y) = (use_color2 ? color2 : color1);
- } else if (bmp.colorType() == kRGB_565_SkColorType) {
- *bmp.getAddr16(x, y) = (use_color2 ? color2 : color1);
- }
- }
- }
- }
-
- bool ColorComponentsClose(SkColor component1,
- SkColor component2,
- SkColorType color_type) {
- int c1 = static_cast<int>(component1);
- int c2 = static_cast<int>(component2);
- bool result = false;
- switch (color_type) {
- case kRGBA_8888_SkColorType:
- case kBGRA_8888_SkColorType:
- result = (std::abs(c1 - c2) == 0);
- break;
- case kRGB_565_SkColorType:
- result = (std::abs(c1 - c2) <= 7);
- break;
- default:
- break;
- }
- return result;
- }
-
- bool ColorsClose(SkColor color1, SkColor color2, SkColorType color_type) {
- bool red = ColorComponentsClose(SkColorGetR(color1),
- SkColorGetR(color2), color_type);
- bool green = ColorComponentsClose(SkColorGetG(color1),
- SkColorGetG(color2), color_type);
- bool blue = ColorComponentsClose(SkColorGetB(color1),
- SkColorGetB(color2), color_type);
- bool alpha = ColorComponentsClose(SkColorGetA(color1),
- SkColorGetA(color2), color_type);
- if (color_type == kRGB_565_SkColorType) {
- return red && blue && green;
- }
- return red && blue && green && alpha;
- }
-
- bool IsEqual(const SkBitmap& bmp1, const SkBitmap& bmp2) {
- if (bmp1.isNull() && bmp2.isNull())
- return true;
- if (bmp1.width() != bmp2.width() ||
- bmp1.height() != bmp2.height()) {
- LOG(ERROR) << "Bitmap geometry check failure";
- return false;
- }
- if (bmp1.colorType() != bmp2.colorType())
- return false;
-
- SkAutoLockPixels lock1(bmp1);
- SkAutoLockPixels lock2(bmp2);
- if (!bmp1.getPixels() || !bmp2.getPixels()) {
- LOG(ERROR) << "Empty Bitmap!";
- return false;
- }
- for (int y = 0; y < bmp1.height(); ++y) {
- for (int x = 0; x < bmp1.width(); ++x) {
- if (!ColorsClose(bmp1.getColor(x,y),
- bmp2.getColor(x,y),
- bmp1.colorType())) {
- LOG(ERROR) << "Bitmap color comparision failure";
- return false;
- }
- }
- }
- return true;
- }
-
- void BindAndAttachTextureWithPixels(GLuint src_texture,
- SkColorType color_type,
- const gfx::Size& src_size,
- const SkBitmap& input_pixels) {
- context_->bindTexture(GL_TEXTURE_2D, src_texture);
- GLenum format = 0;
- switch (color_type) {
- case kBGRA_8888_SkColorType:
- format = GL_BGRA_EXT;
- break;
- case kRGBA_8888_SkColorType:
- format = GL_RGBA;
- break;
- case kRGB_565_SkColorType:
- format = GL_RGB;
- break;
- default:
- NOTREACHED();
- }
- GLenum type = (color_type == kRGB_565_SkColorType) ?
- GL_UNSIGNED_SHORT_5_6_5 : GL_UNSIGNED_BYTE;
- context_->texImage2D(GL_TEXTURE_2D,
- 0,
- format,
- src_size.width(),
- src_size.height(),
- 0,
- format,
- type,
- input_pixels.getPixels());
- }
-
- void ReadBackTexture(GLuint src_texture,
- const gfx::Size& src_size,
- unsigned char* pixels,
- SkColorType color_type,
- bool async) {
- if (async) {
- base::RunLoop run_loop;
- helper_->ReadbackTextureAsync(src_texture,
- src_size,
- pixels,
- color_type,
- base::Bind(&callcallback,
- run_loop.QuitClosure()));
- run_loop.Run();
- } else {
- helper_->ReadbackTextureSync(src_texture,
- gfx::Rect(src_size),
- pixels,
- color_type);
- }
- }
- // Test basic format readback.
- bool TestTextureFormatReadback(const gfx::Size& src_size,
- SkColorType color_type,
- bool async) {
- SkImageInfo info =
- SkImageInfo::Make(src_size.width(),
- src_size.height(),
- color_type,
- kPremul_SkAlphaType);
- if (!helper_->IsReadbackConfigSupported(color_type)) {
- LOG(INFO) << "Skipping test format not supported" << color_type;
- return true;
- }
- WebGLId src_texture = context_->createTexture();
- SkBitmap input_pixels;
- input_pixels.allocPixels(info);
-    // Test Pattern-1: fill with a plain color.
-    // Erase the input bitmap with red.
- input_pixels.eraseColor(SK_ColorRED);
- BindAndAttachTextureWithPixels(src_texture,
- color_type,
- src_size,
- input_pixels);
- SkBitmap output_pixels;
- output_pixels.allocPixels(info);
-    // Initialize the output bitmap with green. When the readback is done,
-    // the output bitmap should contain red.
- output_pixels.eraseColor(SK_ColorGREEN);
- uint8_t* pixels = static_cast<uint8_t*>(output_pixels.getPixels());
- ReadBackTexture(src_texture, src_size, pixels, color_type, async);
- bool result = IsEqual(input_pixels, output_pixels);
- if (!result) {
- LOG(ERROR) << "Bitmap comparision failure Pattern-1";
- return false;
- }
- const int rect_w = 10, rect_h = 4, src_grid_pitch = 10, src_grid_width = 4;
- const SkColor color1 = SK_ColorRED, color2 = SK_ColorBLUE;
-    // Test Pattern-2: fill with a grid pattern.
- DrawGridToBitmap(src_size.width(), src_size.height(),
- color2, color1,
- src_grid_pitch, src_grid_width,
- input_pixels);
- BindAndAttachTextureWithPixels(src_texture,
- color_type,
- src_size,
- input_pixels);
- ReadBackTexture(src_texture, src_size, pixels, color_type, async);
- result = IsEqual(input_pixels, output_pixels);
- if (!result) {
- LOG(ERROR) << "Bitmap comparision failure Pattern-2";
- return false;
- }
-    // Test Pattern-3: fill with a checkerboard pattern.
- DrawCheckerToBitmap(src_size.width(),
- src_size.height(),
- color1,
- color2, rect_w, rect_h, input_pixels);
- BindAndAttachTextureWithPixels(src_texture,
- color_type,
- src_size,
- input_pixels);
- ReadBackTexture(src_texture, src_size, pixels, color_type, async);
- result = IsEqual(input_pixels, output_pixels);
- if (!result) {
- LOG(ERROR) << "Bitmap comparision failure Pattern-3";
- return false;
- }
- context_->deleteTexture(src_texture);
- if (HasFailure()) {
- return false;
- }
- return true;
- }
-
- // YUV readback test. Create a test pattern, convert to YUV
- // with reference implementation and compare to what gl_helper
- // returns.
- void TestYUVReadback(int xsize,
- int ysize,
- int output_xsize,
- int output_ysize,
- int xmargin,
- int ymargin,
- int test_pattern,
- bool flip,
- bool use_mrt,
- content::GLHelper::ScalerQuality quality) {
- WebGLId src_texture = context_->createTexture();
- SkBitmap input_pixels;
- input_pixels.allocN32Pixels(xsize, ysize);
-
- for (int x = 0; x < xsize; ++x) {
- for (int y = 0; y < ysize; ++y) {
- switch (test_pattern) {
- case 0: // Smooth test pattern
- SetChannel(&input_pixels, x, y, 0, x * 10);
- SetChannel(&input_pixels, x, y, 1, y * 10);
- SetChannel(&input_pixels, x, y, 2, (x + y) * 10);
- SetChannel(&input_pixels, x, y, 3, 255);
- break;
- case 1: // Small blocks
- SetChannel(&input_pixels, x, y, 0, x & 1 ? 255 : 0);
- SetChannel(&input_pixels, x, y, 1, y & 1 ? 255 : 0);
- SetChannel(&input_pixels, x, y, 2, (x + y) & 1 ? 255 : 0);
- SetChannel(&input_pixels, x, y, 3, 255);
- break;
- case 2: // Medium blocks
- SetChannel(&input_pixels, x, y, 0, 10 + x / 2 * 50);
- SetChannel(&input_pixels, x, y, 1, 10 + y / 3 * 50);
- SetChannel(&input_pixels, x, y, 2, (x + y) / 5 * 50 + 5);
- SetChannel(&input_pixels, x, y, 3, 255);
- break;
- }
- }
- }
-
- context_->bindTexture(GL_TEXTURE_2D, src_texture);
- context_->texImage2D(GL_TEXTURE_2D,
- 0,
- GL_RGBA,
- xsize,
- ysize,
- 0,
- GL_RGBA,
- GL_UNSIGNED_BYTE,
- input_pixels.getPixels());
-
- gpu::Mailbox mailbox;
- context_->genMailboxCHROMIUM(mailbox.name);
- EXPECT_FALSE(mailbox.IsZero());
- context_->produceTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
- const blink::WGC3Duint64 fence_sync = context_->insertFenceSyncCHROMIUM();
- context_->shallowFlushCHROMIUM();
-
- gpu::SyncToken sync_token;
- ASSERT_TRUE(context_->genSyncTokenCHROMIUM(fence_sync,
- sync_token.GetData()));
-
- std::string message = base::StringPrintf(
- "input size: %dx%d "
- "output size: %dx%d "
- "margin: %dx%d "
- "pattern: %d %s %s",
- xsize,
- ysize,
- output_xsize,
- output_ysize,
- xmargin,
- ymargin,
- test_pattern,
- flip ? "flip" : "noflip",
- flip ? "mrt" : "nomrt");
- scoped_ptr<ReadbackYUVInterface> yuv_reader(
- helper_->CreateReadbackPipelineYUV(
- quality,
- gfx::Size(xsize, ysize),
- gfx::Rect(0, 0, xsize, ysize),
- gfx::Size(xsize, ysize),
- flip,
- use_mrt));
-
- scoped_refptr<media::VideoFrame> output_frame =
- media::VideoFrame::CreateFrame(
- media::PIXEL_FORMAT_YV12,
-            // The coded size of the output frame is rounded up to the next
-            // multiple of 16. This tests that the readback is being
- // positioned inside the frame's visible region, and not dependent
- // on its coded size.
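-            // For example, 800x400 stays 800x400 while 801x401 is padded to
-            // 816x416.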
- gfx::Size((output_xsize + 15) & ~15, (output_ysize + 15) & ~15),
- gfx::Rect(0, 0, output_xsize, output_ysize),
- gfx::Size(output_xsize, output_ysize),
- base::TimeDelta::FromSeconds(0));
- scoped_refptr<media::VideoFrame> truth_frame =
- media::VideoFrame::CreateFrame(
- media::PIXEL_FORMAT_YV12, gfx::Size(output_xsize, output_ysize),
- gfx::Rect(0, 0, output_xsize, output_ysize),
- gfx::Size(output_xsize, output_ysize),
- base::TimeDelta::FromSeconds(0));
-
- base::RunLoop run_loop;
- yuv_reader->ReadbackYUV(mailbox, sync_token, output_frame.get(),
- gfx::Point(xmargin, ymargin),
- base::Bind(&callcallback, run_loop.QuitClosure()));
- run_loop.Run();
-
- if (flip) {
- FlipSKBitmap(&input_pixels);
- }
-
- unsigned char* Y = truth_frame->visible_data(media::VideoFrame::kYPlane);
- unsigned char* U = truth_frame->visible_data(media::VideoFrame::kUPlane);
- unsigned char* V = truth_frame->visible_data(media::VideoFrame::kVPlane);
- int32_t y_stride = truth_frame->stride(media::VideoFrame::kYPlane);
- int32_t u_stride = truth_frame->stride(media::VideoFrame::kUPlane);
- int32_t v_stride = truth_frame->stride(media::VideoFrame::kVPlane);
- memset(Y, 0x00, y_stride * output_ysize);
- memset(U, 0x80, u_stride * output_ysize / 2);
- memset(V, 0x80, v_stride * output_ysize / 2);
-
- const float kRGBtoYColorWeights[] = {0.257f, 0.504f, 0.098f, 0.0625f};
- const float kRGBtoUColorWeights[] = {-0.148f, -0.291f, 0.439f, 0.5f};
- const float kRGBtoVColorWeights[] = {0.439f, -0.368f, -0.071f, 0.5f};
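-    // These are the usual BT.601 limited-range weights: Y is offset by
-    // 16/256 (0.0625) and U/V are centered at 128/256 (0.5).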
-
- for (int y = 0; y < ysize; y++) {
- for (int x = 0; x < xsize; x++) {
- Y[(y + ymargin) * y_stride + x + xmargin] = float_to_byte(
- ChannelAsFloat(&input_pixels, x, y, 0) * kRGBtoYColorWeights[0] +
- ChannelAsFloat(&input_pixels, x, y, 1) * kRGBtoYColorWeights[1] +
- ChannelAsFloat(&input_pixels, x, y, 2) * kRGBtoYColorWeights[2] +
- kRGBtoYColorWeights[3]);
- }
- }
-
- for (int y = 0; y < ysize / 2; y++) {
- for (int x = 0; x < xsize / 2; x++) {
- U[(y + ymargin / 2) * u_stride + x + xmargin / 2] =
- float_to_byte(Bilinear(&input_pixels, x * 2 + 1.0, y * 2 + 1.0, 0) *
- kRGBtoUColorWeights[0] +
- Bilinear(&input_pixels, x * 2 + 1.0, y * 2 + 1.0, 1) *
- kRGBtoUColorWeights[1] +
- Bilinear(&input_pixels, x * 2 + 1.0, y * 2 + 1.0, 2) *
- kRGBtoUColorWeights[2] +
- kRGBtoUColorWeights[3]);
- V[(y + ymargin / 2) * v_stride + x + xmargin / 2] =
- float_to_byte(Bilinear(&input_pixels, x * 2 + 1.0, y * 2 + 1.0, 0) *
- kRGBtoVColorWeights[0] +
- Bilinear(&input_pixels, x * 2 + 1.0, y * 2 + 1.0, 1) *
- kRGBtoVColorWeights[1] +
- Bilinear(&input_pixels, x * 2 + 1.0, y * 2 + 1.0, 2) *
- kRGBtoVColorWeights[2] +
- kRGBtoVColorWeights[3]);
- }
- }
-
- ComparePlane(Y,
- y_stride,
- output_frame->visible_data(media::VideoFrame::kYPlane),
- output_frame->stride(media::VideoFrame::kYPlane),
- 2,
- output_xsize,
- output_ysize,
- &input_pixels,
- message + " Y plane");
- ComparePlane(U,
- u_stride,
- output_frame->visible_data(media::VideoFrame::kUPlane),
- output_frame->stride(media::VideoFrame::kUPlane),
- 2,
- output_xsize / 2,
- output_ysize / 2,
- &input_pixels,
- message + " U plane");
- ComparePlane(V,
- v_stride,
- output_frame->visible_data(media::VideoFrame::kVPlane),
- output_frame->stride(media::VideoFrame::kVPlane),
- 2,
- output_xsize / 2,
- output_ysize / 2,
- &input_pixels,
- message + " V plane");
-
- context_->deleteTexture(src_texture);
- }
-
- void TestAddOps(int src, int dst, bool scale_x, bool allow3) {
- std::deque<GLHelperScaling::ScaleOp> ops;
- GLHelperScaling::ScaleOp::AddOps(src, dst, scale_x, allow3, &ops);
- // Scale factor 3 is a special case.
- // It is currently only allowed by itself.
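-    // For example, src = 100, dst = 40 qualifies (40 * 3 >= 100 and
-    // 40 * 2 < 100) and should yield a single scale-by-3 op.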
- if (allow3 && dst * 3 >= src && dst * 2 < src) {
- EXPECT_EQ(ops[0].scale_factor, 3);
- EXPECT_EQ(ops.size(), 1U);
- EXPECT_EQ(ops[0].scale_x, scale_x);
- EXPECT_EQ(ops[0].scale_size, dst);
- return;
- }
-
- for (size_t i = 0; i < ops.size(); i++) {
- EXPECT_EQ(ops[i].scale_x, scale_x);
- if (i == 0) {
- // Only the first op is allowed to be a scale up.
- // (Scaling up *after* scaling down would make it fuzzy.)
- EXPECT_TRUE(ops[0].scale_factor == 0 || ops[0].scale_factor == 2);
- } else {
- // All other operations must be 50% downscales.
- EXPECT_EQ(ops[i].scale_factor, 2);
- }
- }
- // Check that the scale factors make sense and add up.
- int tmp = dst;
- for (int i = static_cast<int>(ops.size() - 1); i >= 0; i--) {
- EXPECT_EQ(tmp, ops[i].scale_size);
- if (ops[i].scale_factor == 0) {
- EXPECT_EQ(i, 0);
- EXPECT_GT(tmp, src);
- tmp = src;
- } else {
- tmp *= ops[i].scale_factor;
- }
- }
- EXPECT_EQ(tmp, src);
- }
-
- void CheckPipeline2(int xsize,
- int ysize,
- int dst_xsize,
- int dst_ysize,
- const std::string& description) {
- std::vector<GLHelperScaling::ScalerStage> stages;
- helper_scaling_->ConvertScalerOpsToScalerStages(
- content::GLHelper::SCALER_QUALITY_GOOD,
- gfx::Size(xsize, ysize),
- gfx::Rect(0, 0, xsize, ysize),
- gfx::Size(dst_xsize, dst_ysize),
- false,
- false,
- &x_ops_,
- &y_ops_,
- &stages);
- EXPECT_EQ(x_ops_.size(), 0U);
- EXPECT_EQ(y_ops_.size(), 0U);
- ValidateScalerStages(content::GLHelper::SCALER_QUALITY_GOOD,
- stages,
- gfx::Size(dst_xsize, dst_ysize),
- "");
- EXPECT_EQ(PrintStages(stages), description);
- }
-
- void CheckOptimizationsTest() {
- // Basic upscale. X and Y should be combined into one pass.
- x_ops_.push_back(GLHelperScaling::ScaleOp(0, true, 2000));
- y_ops_.push_back(GLHelperScaling::ScaleOp(0, false, 2000));
- CheckPipeline2(1024, 768, 2000, 2000, "1024x768 -> 2000x2000 bilinear\n");
-
- // X scaled 1/2, Y upscaled, should still be one pass.
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 512));
- y_ops_.push_back(GLHelperScaling::ScaleOp(0, false, 2000));
- CheckPipeline2(1024, 768, 512, 2000, "1024x768 -> 512x2000 bilinear\n");
-
- // X upscaled, Y scaled 1/2, one bilinear pass
- x_ops_.push_back(GLHelperScaling::ScaleOp(0, true, 2000));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 384));
- CheckPipeline2(1024, 768, 2000, 384, "1024x768 -> 2000x384 bilinear\n");
-
- // X scaled 1/2, Y scaled 1/2, one bilinear pass
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 512));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 384));
- CheckPipeline2(1024, 768, 512, 384, "1024x768 -> 512x384 bilinear\n");
-
- // X scaled 1/2, Y scaled to 60%, one bilinear2 pass.
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 50));
- y_ops_.push_back(GLHelperScaling::ScaleOp(0, false, 120));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 60));
- CheckPipeline2(100, 100, 50, 60, "100x100 -> 50x60 bilinear2 Y\n");
-
- // X scaled to 60%, Y scaled 1/2, one bilinear2 pass.
- x_ops_.push_back(GLHelperScaling::ScaleOp(0, true, 120));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 60));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 50));
- CheckPipeline2(100, 100, 60, 50, "100x100 -> 60x50 bilinear2 X\n");
-
- // X scaled to 60%, Y scaled 60%, one bilinear2x2 pass.
- x_ops_.push_back(GLHelperScaling::ScaleOp(0, true, 120));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 60));
- y_ops_.push_back(GLHelperScaling::ScaleOp(0, false, 120));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 60));
- CheckPipeline2(100, 100, 60, 60, "100x100 -> 60x60 bilinear2x2\n");
-
- // X scaled to 40%, Y scaled 40%, two bilinear3 passes.
- x_ops_.push_back(GLHelperScaling::ScaleOp(3, true, 40));
- y_ops_.push_back(GLHelperScaling::ScaleOp(3, false, 40));
- CheckPipeline2(100,
- 100,
- 40,
- 40,
- "100x100 -> 100x40 bilinear3 Y\n"
- "100x40 -> 40x40 bilinear3 X\n");
-
- // X scaled to 60%, Y scaled 40%
- x_ops_.push_back(GLHelperScaling::ScaleOp(0, true, 120));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 60));
- y_ops_.push_back(GLHelperScaling::ScaleOp(3, false, 40));
- CheckPipeline2(100,
- 100,
- 60,
- 40,
- "100x100 -> 100x40 bilinear3 Y\n"
- "100x40 -> 60x40 bilinear2 X\n");
-
- // X scaled to 40%, Y scaled 60%
- x_ops_.push_back(GLHelperScaling::ScaleOp(3, true, 40));
- y_ops_.push_back(GLHelperScaling::ScaleOp(0, false, 120));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 60));
- CheckPipeline2(100,
- 100,
- 40,
- 60,
- "100x100 -> 100x60 bilinear2 Y\n"
- "100x60 -> 40x60 bilinear3 X\n");
-
- // X scaled to 30%, Y scaled 30%
- x_ops_.push_back(GLHelperScaling::ScaleOp(0, true, 120));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 60));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 30));
- y_ops_.push_back(GLHelperScaling::ScaleOp(0, false, 120));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 60));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 30));
- CheckPipeline2(100,
- 100,
- 30,
- 30,
- "100x100 -> 100x30 bilinear4 Y\n"
- "100x30 -> 30x30 bilinear4 X\n");
-
- // X scaled to 50%, Y scaled 30%
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 50));
- y_ops_.push_back(GLHelperScaling::ScaleOp(0, false, 120));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 60));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 30));
- CheckPipeline2(100, 100, 50, 30, "100x100 -> 50x30 bilinear4 Y\n");
-
- // X scaled to 150%, Y scaled 30%
-    // Note that we avoid combining X and Y passes
- // as that would probably be LESS efficient here.
- x_ops_.push_back(GLHelperScaling::ScaleOp(0, true, 150));
- y_ops_.push_back(GLHelperScaling::ScaleOp(0, false, 120));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 60));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 30));
- CheckPipeline2(100,
- 100,
- 150,
- 30,
- "100x100 -> 100x30 bilinear4 Y\n"
- "100x30 -> 150x30 bilinear\n");
-
- // X scaled to 1%, Y scaled 1%
- x_ops_.push_back(GLHelperScaling::ScaleOp(0, true, 128));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 64));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 32));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 16));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 8));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 4));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 2));
- x_ops_.push_back(GLHelperScaling::ScaleOp(2, true, 1));
- y_ops_.push_back(GLHelperScaling::ScaleOp(0, false, 128));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 64));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 32));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 16));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 8));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 4));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 2));
- y_ops_.push_back(GLHelperScaling::ScaleOp(2, false, 1));
- CheckPipeline2(100,
- 100,
- 1,
- 1,
- "100x100 -> 100x32 bilinear4 Y\n"
- "100x32 -> 100x4 bilinear4 Y\n"
- "100x4 -> 64x1 bilinear2x2\n"
- "64x1 -> 8x1 bilinear4 X\n"
- "8x1 -> 1x1 bilinear4 X\n");
- }
-
- scoped_ptr<WebGraphicsContext3DInProcessCommandBufferImpl> context_;
- gpu::ContextSupport* context_support_;
- scoped_ptr<content::GLHelper> helper_;
- scoped_ptr<content::GLHelperScaling> helper_scaling_;
- std::deque<GLHelperScaling::ScaleOp> x_ops_, y_ops_;
-};
-
-class GLHelperPixelTest : public GLHelperTest {
- private:
- gfx::DisableNullDrawGLBindings enable_pixel_output_;
-};
-
-TEST_F(GLHelperTest, RGBASyncReadbackTest) {
- const int kTestSize = 64;
- bool result = TestTextureFormatReadback(gfx::Size(kTestSize,kTestSize),
- kRGBA_8888_SkColorType,
- false);
- EXPECT_EQ(result, true);
-}
-
-
-TEST_F(GLHelperTest, BGRASyncReadbackTest) {
- const int kTestSize = 64;
- bool result = TestTextureFormatReadback(gfx::Size(kTestSize,kTestSize),
- kBGRA_8888_SkColorType,
- false);
- EXPECT_EQ(result, true);
-}
-
-TEST_F(GLHelperTest, RGB565SyncReadbackTest) {
- const int kTestSize = 64;
- bool result = TestTextureFormatReadback(gfx::Size(kTestSize,kTestSize),
- kRGB_565_SkColorType,
- false);
- EXPECT_EQ(result, true);
-}
-
-TEST_F(GLHelperTest, RGBAASyncReadbackTest) {
- const int kTestSize = 64;
- bool result = TestTextureFormatReadback(gfx::Size(kTestSize,kTestSize),
- kRGBA_8888_SkColorType,
- true);
- EXPECT_EQ(result, true);
-}
-
-TEST_F(GLHelperTest, BGRAASyncReadbackTest) {
- const int kTestSize = 64;
- bool result = TestTextureFormatReadback(gfx::Size(kTestSize,kTestSize),
- kBGRA_8888_SkColorType,
- true);
- EXPECT_EQ(result, true);
-}
-
-TEST_F(GLHelperTest, RGB565ASyncReadbackTest) {
- const int kTestSize = 64;
- bool result = TestTextureFormatReadback(gfx::Size(kTestSize,kTestSize),
- kRGB_565_SkColorType,
- true);
- EXPECT_EQ(result, true);
-}
-
-TEST_F(GLHelperPixelTest, YUVReadbackOptTest) {
- // This test uses the gpu.service/gpu_decoder tracing events to detect how
- // many scaling passes are actually performed by the YUV readback pipeline.
- StartTracing(TRACE_DISABLED_BY_DEFAULT("gpu.service") ","
- TRACE_DISABLED_BY_DEFAULT("gpu_decoder"));
-
- TestYUVReadback(800,
- 400,
- 800,
- 400,
- 0,
- 0,
- 1,
- false,
- true,
- content::GLHelper::SCALER_QUALITY_FAST);
-
- std::map<std::string, int> event_counts;
- EndTracing(&event_counts);
- int draw_buffer_calls = event_counts["kDrawBuffersEXTImmediate"];
- int draw_arrays_calls = event_counts["kDrawArrays"];
- VLOG(1) << "Draw buffer calls: " << draw_buffer_calls;
- VLOG(1) << "DrawArrays calls: " << draw_arrays_calls;
-
- if (draw_buffer_calls) {
- // When using MRT, the YUV readback code should only
- // execute two draw arrays, and scaling should be integrated
-    // into those two calls since we are using the FAST scaling
- // quality.
- EXPECT_EQ(2, draw_arrays_calls);
- } else {
- // When not using MRT, there are three passes for the YUV,
- // and one for the scaling.
- EXPECT_EQ(4, draw_arrays_calls);
- }
-}
-
-class GLHelperPixelYuvReadback :
- public GLHelperPixelTest,
- public ::testing::WithParamInterface<
- std::tr1::tuple<bool, bool, unsigned int, unsigned int>> {};
-
-int kYUVReadBackSizes[] = {2, 4, 14};
-
-TEST_P(GLHelperPixelYuvReadback, Test) {
- bool flip = std::tr1::get<0>(GetParam());
- bool use_mrt = std::tr1::get<1>(GetParam());
- unsigned int x = std::tr1::get<2>(GetParam());
- unsigned int y = std::tr1::get<3>(GetParam());
-
- for (unsigned int ox = x; ox < arraysize(kYUVReadBackSizes); ox++) {
- for (unsigned int oy = y; oy < arraysize(kYUVReadBackSizes); oy++) {
-      // If the output is a subsection of the destination frame (letterbox),
-      // then try different variations of where the subsection goes.
- for (Margin xm = x < ox ? MarginLeft : MarginRight;
- xm <= MarginRight;
- xm = NextMargin(xm)) {
- for (Margin ym = y < oy ? MarginLeft : MarginRight;
- ym <= MarginRight;
- ym = NextMargin(ym)) {
- for (int pattern = 0; pattern < 3; pattern++) {
- TestYUVReadback(kYUVReadBackSizes[x],
- kYUVReadBackSizes[y],
- kYUVReadBackSizes[ox],
- kYUVReadBackSizes[oy],
- compute_margin(kYUVReadBackSizes[x],
- kYUVReadBackSizes[ox], xm),
- compute_margin(kYUVReadBackSizes[y],
- kYUVReadBackSizes[oy], ym),
- pattern,
- flip,
- use_mrt,
- content::GLHelper::SCALER_QUALITY_GOOD);
- if (HasFailure()) {
- return;
- }
- }
- }
- }
- }
- }
-}
-
-// First argument is intentionally empty.
-INSTANTIATE_TEST_CASE_P(
- ,
- GLHelperPixelYuvReadback,
- ::testing::Combine(
- ::testing::Bool(),
- ::testing::Bool(),
- ::testing::Range<unsigned int>(0, arraysize(kYUVReadBackSizes)),
- ::testing::Range<unsigned int>(0, arraysize(kYUVReadBackSizes))));
-
-
-// Per-pixel tests. All sizes are small so that we can print out the
-// generated bitmaps.
-TEST_F(GLHelperPixelTest, ScaleTest) {
- int sizes[] = {3, 6, 16};
- for (int flip = 0; flip <= 1; flip++) {
- for (size_t q_index = 0; q_index < arraysize(kQualities); q_index++) {
- for (int x = 0; x < 3; x++) {
- for (int y = 0; y < 3; y++) {
- for (int dst_x = 0; dst_x < 3; dst_x++) {
- for (int dst_y = 0; dst_y < 3; dst_y++) {
- for (int pattern = 0; pattern < 3; pattern++) {
- TestScale(sizes[x],
- sizes[y],
- sizes[dst_x],
- sizes[dst_y],
- pattern,
- q_index,
- flip == 1);
- if (HasFailure()) {
- return;
- }
- }
- }
- }
- }
- }
- }
- }
-}
-
-// Per-pixel tests. All sizes are small so that we can print out the
-// generated bitmaps.
-TEST_F(GLHelperPixelTest, CropScaleReadbackAndCleanTextureTest) {
- const int kSizes[] = {3, 6, 16};
- const SkColorType kColorTypes[] = {
- kAlpha_8_SkColorType, kRGBA_8888_SkColorType, kBGRA_8888_SkColorType};
- for (size_t color_type = 0; color_type < arraysize(kColorTypes);
- color_type++) {
- // Test BEST and FAST qualities, skip GOOD
- for (size_t q_index = 0; q_index < arraysize(kQualities); q_index += 2) {
- for (size_t x = 0; x < arraysize(kSizes); x++) {
- for (size_t y = 0; y < arraysize(kSizes); y++) {
- for (size_t dst_x = 0; dst_x < arraysize(kSizes); dst_x++) {
- for (size_t dst_y = 0; dst_y < arraysize(kSizes); dst_y++) {
- for (int pattern = 0; pattern < 3; pattern++) {
- TestCropScaleReadbackAndCleanTexture(kSizes[x],
- kSizes[y],
- kSizes[dst_x],
- kSizes[dst_y],
- pattern,
- kColorTypes[color_type],
- false,
- q_index);
- if (HasFailure())
- return;
- }
- }
- }
- }
- }
- }
- }
-}
-
-// Validate that all scaling generates valid pipelines.
-TEST_F(GLHelperTest, ValidateScalerPipelines) {
- int sizes[] = {7, 99, 128, 256, 512, 719, 720, 721, 1920, 2011, 3217, 4096};
- for (size_t q = 0; q < arraysize(kQualities); q++) {
- for (size_t x = 0; x < arraysize(sizes); x++) {
- for (size_t y = 0; y < arraysize(sizes); y++) {
- for (size_t dst_x = 0; dst_x < arraysize(sizes); dst_x++) {
- for (size_t dst_y = 0; dst_y < arraysize(sizes); dst_y++) {
- TestScalerPipeline(
- q, sizes[x], sizes[y], sizes[dst_x], sizes[dst_y]);
- if (HasFailure()) {
- return;
- }
- }
- }
- }
- }
- }
-}
-
-// Make sure we don't create overly complicated pipelines
-// for a few common use cases.
-TEST_F(GLHelperTest, CheckSpecificPipelines) {
- // Upscale should be single pass.
- CheckPipeline(content::GLHelper::SCALER_QUALITY_GOOD,
- 1024,
- 700,
- 1280,
- 720,
- "1024x700 -> 1280x720 bilinear\n");
- // Slight downscale should use BILINEAR2X2.
- CheckPipeline(content::GLHelper::SCALER_QUALITY_GOOD,
- 1280,
- 720,
- 1024,
- 700,
- "1280x720 -> 1024x700 bilinear2x2\n");
- // Most common tab capture pipeline on the Pixel.
- // Should be using two BILINEAR3 passes.
- CheckPipeline(content::GLHelper::SCALER_QUALITY_GOOD,
- 2560,
- 1476,
- 1249,
- 720,
- "2560x1476 -> 2560x720 bilinear3 Y\n"
- "2560x720 -> 1249x720 bilinear3 X\n");
-}
-
-TEST_F(GLHelperTest, ScalerOpTest) {
- for (int allow3 = 0; allow3 <= 1; allow3++) {
- for (int dst = 1; dst < 2049; dst += 1 + (dst >> 3)) {
- for (int src = 1; src < 2049; src++) {
- TestAddOps(src, dst, allow3 == 1, (src & 1) == 1);
- if (HasFailure()) {
- LOG(ERROR) << "Failed for src=" << src << " dst=" << dst
- << " allow3=" << allow3;
- return;
- }
- }
- }
- }
-}
-
-TEST_F(GLHelperTest, CheckOptimizations) {
-  // Run the test from the base class since it is a friend of GLHelperScaling.
- CheckOptimizationsTest();
-}
-
-} // namespace content
-
-namespace {
-
-int RunHelper(base::TestSuite* test_suite) {
- content::UnitTestTestSuite runner(test_suite);
- base::MessageLoopForIO message_loop;
- return runner.Run();
-}
-
-} // namespace
-
-// These tests need to run against a proper GL environment, so we
-// need to set it up before we can run the tests.
-int main(int argc, char** argv) {
- base::CommandLine::Init(argc, argv);
- base::TestSuite* suite = new content::ContentTestSuite(argc, argv);
-#if defined(OS_MACOSX)
- base::mac::ScopedNSAutoreleasePool pool;
-#endif
-
- return base::LaunchUnitTestsSerially(
- argc,
- argv,
- base::Bind(&RunHelper, base::Unretained(suite)));
-}
diff --git a/chromium/content/common/gpu/client/gpu_channel_host.cc b/chromium/content/common/gpu/client/gpu_channel_host.cc
deleted file mode 100644
index 25328f45399..00000000000
--- a/chromium/content/common/gpu/client/gpu_channel_host.cc
+++ /dev/null
@@ -1,552 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_channel_host.h"
-
-#include <algorithm>
-#include <utility>
-
-#include "base/atomic_sequence_num.h"
-#include "base/bind.h"
-#include "base/location.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
-#include "base/threading/thread_restrictions.h"
-#include "base/trace_event/trace_event.h"
-#include "build/build_config.h"
-#include "content/common/gpu/client/command_buffer_proxy_impl.h"
-#include "content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "ipc/ipc_sync_message_filter.h"
-#include "url/gurl.h"
-
-#if defined(OS_WIN) || defined(OS_MACOSX)
-#include "content/public/common/sandbox_init.h"
-#endif
-
-using base::AutoLock;
-
-namespace content {
-namespace {
-
-// Global atomic to generate unique transfer buffer IDs.
-base::StaticAtomicSequenceNumber g_next_transfer_buffer_id;
-
-} // namespace
-
-GpuChannelHost::StreamFlushInfo::StreamFlushInfo()
- : next_stream_flush_id(1),
- flushed_stream_flush_id(0),
- verified_stream_flush_id(0),
- flush_pending(false),
- route_id(MSG_ROUTING_NONE),
- put_offset(0),
- flush_count(0),
- flush_id(0) {}
-
-GpuChannelHost::StreamFlushInfo::~StreamFlushInfo() {}
-
-// static
-scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
- GpuChannelHostFactory* factory,
- int channel_id,
- const gpu::GPUInfo& gpu_info,
- const IPC::ChannelHandle& channel_handle,
- base::WaitableEvent* shutdown_event,
- gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) {
- DCHECK(factory->IsMainThread());
- scoped_refptr<GpuChannelHost> host =
- new GpuChannelHost(factory, channel_id, gpu_info,
- gpu_memory_buffer_manager);
- host->Connect(channel_handle, shutdown_event);
- return host;
-}
-
-GpuChannelHost::GpuChannelHost(
- GpuChannelHostFactory* factory,
- int channel_id,
- const gpu::GPUInfo& gpu_info,
- gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager)
- : factory_(factory),
- channel_id_(channel_id),
- gpu_info_(gpu_info),
- gpu_memory_buffer_manager_(gpu_memory_buffer_manager) {
- next_image_id_.GetNext();
- next_route_id_.GetNext();
- next_stream_id_.GetNext();
-}
-
-void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
- base::WaitableEvent* shutdown_event) {
- DCHECK(factory_->IsMainThread());
- // Open a channel to the GPU process. We pass NULL as the main listener here
- // since we need to filter everything to route it to the right thread.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
- factory_->GetIOThreadTaskRunner();
- channel_ =
- IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_CLIENT, NULL,
- io_task_runner.get(), true, shutdown_event);
-
- sync_filter_ = channel_->CreateSyncMessageFilter();
-
- channel_filter_ = new MessageFilter();
-
- // Install the filter last, because we intercept all leftover
- // messages.
- channel_->AddFilter(channel_filter_.get());
-}
-
-bool GpuChannelHost::Send(IPC::Message* msg) {
- // Callee takes ownership of message, regardless of whether Send is
- // successful. See IPC::Sender.
- scoped_ptr<IPC::Message> message(msg);
- // The GPU process never sends synchronous IPCs so clear the unblock flag to
- // preserve order.
- message->set_unblock(false);
-
- // Currently we need to choose between two different mechanisms for sending.
- // On the main thread we use the regular channel Send() method, on another
- // thread we use SyncMessageFilter. We also have to be careful interpreting
-  // IsMainThread() since it might return false during shutdown even though
-  // we are actually calling from the main thread (in that case the message
-  // is simply discarded).
- //
-  // TODO: Can we just always use sync_filter_ since we set up the channel
- // without a main listener?
- if (factory_->IsMainThread()) {
- // channel_ is only modified on the main thread, so we don't need to take a
- // lock here.
- if (!channel_) {
- DVLOG(1) << "GpuChannelHost::Send failed: Channel already destroyed";
- return false;
- }
- // http://crbug.com/125264
- base::ThreadRestrictions::ScopedAllowWait allow_wait;
- bool result = channel_->Send(message.release());
- if (!result)
- DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
- return result;
- }
-
- bool result = sync_filter_->Send(message.release());
- return result;
-}
-
-uint32_t GpuChannelHost::OrderingBarrier(
- int32_t route_id,
- int32_t stream_id,
- int32_t put_offset,
- uint32_t flush_count,
- const std::vector<ui::LatencyInfo>& latency_info,
- bool put_offset_changed,
- bool do_flush) {
- AutoLock lock(context_lock_);
- StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
- if (flush_info.flush_pending && flush_info.route_id != route_id)
- InternalFlush(&flush_info);
-
- if (put_offset_changed) {
- const uint32_t flush_id = flush_info.next_stream_flush_id++;
- flush_info.flush_pending = true;
- flush_info.route_id = route_id;
- flush_info.put_offset = put_offset;
- flush_info.flush_count = flush_count;
- flush_info.flush_id = flush_id;
- flush_info.latency_info.insert(flush_info.latency_info.end(),
- latency_info.begin(), latency_info.end());
-
- if (do_flush)
- InternalFlush(&flush_info);
-
- return flush_id;
- }
- return 0;
-}
-
-void GpuChannelHost::FlushPendingStream(int32_t stream_id) {
- AutoLock lock(context_lock_);
- auto flush_info_iter = stream_flush_info_.find(stream_id);
- if (flush_info_iter == stream_flush_info_.end())
- return;
-
- StreamFlushInfo& flush_info = flush_info_iter->second;
- if (flush_info.flush_pending)
- InternalFlush(&flush_info);
-}
-
-void GpuChannelHost::InternalFlush(StreamFlushInfo* flush_info) {
- context_lock_.AssertAcquired();
- DCHECK(flush_info);
- DCHECK(flush_info->flush_pending);
- DCHECK_LT(flush_info->flushed_stream_flush_id, flush_info->flush_id);
- Send(new GpuCommandBufferMsg_AsyncFlush(
- flush_info->route_id, flush_info->put_offset, flush_info->flush_count,
- flush_info->latency_info));
- flush_info->latency_info.clear();
- flush_info->flush_pending = false;
-
- flush_info->flushed_stream_flush_id = flush_info->flush_id;
-}
-
-scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateViewCommandBuffer(
- int32_t surface_id,
- CommandBufferProxyImpl* share_group,
- int32_t stream_id,
- GpuStreamPriority stream_priority,
- const std::vector<int32_t>& attribs,
- const GURL& active_url,
- gfx::GpuPreference gpu_preference) {
- DCHECK(!share_group || (stream_id == share_group->stream_id()));
- TRACE_EVENT1("gpu",
- "GpuChannelHost::CreateViewCommandBuffer",
- "surface_id",
- surface_id);
-
- GPUCreateCommandBufferConfig init_params;
- init_params.share_group_id =
- share_group ? share_group->route_id() : MSG_ROUTING_NONE;
- init_params.stream_id = stream_id;
- init_params.stream_priority = stream_priority;
- init_params.attribs = attribs;
- init_params.active_url = active_url;
- init_params.gpu_preference = gpu_preference;
-
- int32_t route_id = GenerateRouteID();
-
- CreateCommandBufferResult result = factory_->CreateViewCommandBuffer(
- surface_id, init_params, route_id);
- if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
- LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";
-
- if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
- // The GPU channel needs to be considered lost. The caller will
- // then set up a new connection, and the GPU channel and any
- // view command buffers will all be associated with the same GPU
- // process.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
- factory_->GetIOThreadTaskRunner();
- io_task_runner->PostTask(
- FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
- channel_filter_.get()));
- }
-
- return NULL;
- }
-
- scoped_ptr<CommandBufferProxyImpl> command_buffer =
- make_scoped_ptr(new CommandBufferProxyImpl(this, route_id, stream_id));
- AddRoute(route_id, command_buffer->AsWeakPtr());
-
- return command_buffer;
-}
-
-scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateOffscreenCommandBuffer(
- const gfx::Size& size,
- CommandBufferProxyImpl* share_group,
- int32_t stream_id,
- GpuStreamPriority stream_priority,
- const std::vector<int32_t>& attribs,
- const GURL& active_url,
- gfx::GpuPreference gpu_preference) {
- DCHECK(!share_group || (stream_id == share_group->stream_id()));
- TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");
-
- GPUCreateCommandBufferConfig init_params;
- init_params.share_group_id =
- share_group ? share_group->route_id() : MSG_ROUTING_NONE;
- init_params.stream_id = stream_id;
- init_params.stream_priority = stream_priority;
- init_params.attribs = attribs;
- init_params.active_url = active_url;
- init_params.gpu_preference = gpu_preference;
-
- int32_t route_id = GenerateRouteID();
-
- bool succeeded = false;
- if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(
- size, init_params, route_id, &succeeded))) {
- LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
- return NULL;
- }
-
- if (!succeeded) {
- LOG(ERROR)
- << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
- return NULL;
- }
-
- scoped_ptr<CommandBufferProxyImpl> command_buffer =
- make_scoped_ptr(new CommandBufferProxyImpl(this, route_id, stream_id));
- AddRoute(route_id, command_buffer->AsWeakPtr());
-
- return command_buffer;
-}
-
-scoped_ptr<media::JpegDecodeAccelerator> GpuChannelHost::CreateJpegDecoder(
- media::JpegDecodeAccelerator::Client* client) {
- TRACE_EVENT0("gpu", "GpuChannelHost::CreateJpegDecoder");
-
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
- factory_->GetIOThreadTaskRunner();
- int32_t route_id = GenerateRouteID();
- scoped_ptr<GpuJpegDecodeAcceleratorHost> decoder(
- new GpuJpegDecodeAcceleratorHost(this, route_id, io_task_runner));
- if (!decoder->Initialize(client)) {
- return nullptr;
- }
-
-  // Reply messages from the JPEG decoder should be handled on the IO thread.
- io_task_runner->PostTask(FROM_HERE,
- base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
- channel_filter_.get(), route_id,
- decoder->GetReceiver(), io_task_runner));
-
- return std::move(decoder);
-}
-
-void GpuChannelHost::DestroyCommandBuffer(
- CommandBufferProxyImpl* command_buffer) {
- TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");
-
- int32_t route_id = command_buffer->route_id();
- int32_t stream_id = command_buffer->stream_id();
- Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
- RemoveRoute(route_id);
-
- AutoLock lock(context_lock_);
- StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
- if (flush_info.flush_pending && flush_info.route_id == route_id)
- flush_info.flush_pending = false;
-}
-
-void GpuChannelHost::DestroyChannel() {
- DCHECK(factory_->IsMainThread());
- AutoLock lock(context_lock_);
- channel_.reset();
-}
-
-void GpuChannelHost::AddRoute(
- int route_id, base::WeakPtr<IPC::Listener> listener) {
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
- factory_->GetIOThreadTaskRunner();
- io_task_runner->PostTask(FROM_HERE,
- base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
- channel_filter_.get(), route_id, listener,
- base::ThreadTaskRunnerHandle::Get()));
-}
-
-void GpuChannelHost::RemoveRoute(int route_id) {
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
- factory_->GetIOThreadTaskRunner();
- io_task_runner->PostTask(
- FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
- channel_filter_.get(), route_id));
-}
-
-base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
- base::SharedMemoryHandle source_handle) {
- if (IsLost())
- return base::SharedMemory::NULLHandle();
-
-#if defined(OS_WIN) || defined(OS_MACOSX)
- // Windows and Mac need to explicitly duplicate the handle out to another
- // process.
- base::SharedMemoryHandle target_handle;
- base::ProcessId peer_pid;
- {
- AutoLock lock(context_lock_);
- if (!channel_)
- return base::SharedMemory::NULLHandle();
- peer_pid = channel_->GetPeerPID();
- }
- bool success = BrokerDuplicateSharedMemoryHandle(source_handle, peer_pid,
- &target_handle);
- if (!success)
- return base::SharedMemory::NULLHandle();
-
- return target_handle;
-#else
- return base::SharedMemory::DuplicateHandle(source_handle);
-#endif // defined(OS_WIN) || defined(OS_MACOSX)
-}
-
-int32_t GpuChannelHost::ReserveTransferBufferId() {
- // 0 is a reserved value.
- return g_next_transfer_buffer_id.GetNext() + 1;
-}
-
-gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
- const gfx::GpuMemoryBufferHandle& source_handle,
- bool* requires_sync_point) {
- switch (source_handle.type) {
- case gfx::SHARED_MEMORY_BUFFER: {
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::SHARED_MEMORY_BUFFER;
- handle.handle = ShareToGpuProcess(source_handle.handle);
- handle.offset = source_handle.offset;
- handle.stride = source_handle.stride;
- *requires_sync_point = false;
- return handle;
- }
- case gfx::IO_SURFACE_BUFFER:
- case gfx::SURFACE_TEXTURE_BUFFER:
- case gfx::OZONE_NATIVE_PIXMAP:
- *requires_sync_point = true;
- return source_handle;
- default:
- NOTREACHED();
- return gfx::GpuMemoryBufferHandle();
- }
-}
-
-int32_t GpuChannelHost::ReserveImageId() {
- return next_image_id_.GetNext();
-}
-
-int32_t GpuChannelHost::GenerateRouteID() {
- return next_route_id_.GetNext();
-}
-
-int32_t GpuChannelHost::GenerateStreamID() {
- const int32_t stream_id = next_stream_id_.GetNext();
- DCHECK_NE(0, stream_id);
- DCHECK_NE(kDefaultStreamId, stream_id);
- return stream_id;
-}
-
-uint32_t GpuChannelHost::ValidateFlushIDReachedServer(int32_t stream_id,
- bool force_validate) {
- // Store what flush ids we will be validating for all streams.
- base::hash_map<int32_t, uint32_t> validate_flushes;
- uint32_t flushed_stream_flush_id = 0;
- uint32_t verified_stream_flush_id = 0;
- {
- AutoLock lock(context_lock_);
- for (const auto& iter : stream_flush_info_) {
- const int32_t iter_stream_id = iter.first;
- const StreamFlushInfo& flush_info = iter.second;
- if (iter_stream_id == stream_id) {
- flushed_stream_flush_id = flush_info.flushed_stream_flush_id;
- verified_stream_flush_id = flush_info.verified_stream_flush_id;
- }
-
- if (flush_info.flushed_stream_flush_id >
- flush_info.verified_stream_flush_id) {
- validate_flushes.insert(
- std::make_pair(iter_stream_id, flush_info.flushed_stream_flush_id));
- }
- }
- }
-
- if (!force_validate && flushed_stream_flush_id == verified_stream_flush_id) {
- // Current stream has no unverified flushes.
- return verified_stream_flush_id;
- }
-
- if (Send(new GpuChannelMsg_Nop())) {
- // Update verified flush id for all streams.
- uint32_t highest_flush_id = 0;
- AutoLock lock(context_lock_);
- for (const auto& iter : validate_flushes) {
- const int32_t validated_stream_id = iter.first;
- const uint32_t validated_flush_id = iter.second;
- StreamFlushInfo& flush_info = stream_flush_info_[validated_stream_id];
- if (flush_info.verified_stream_flush_id < validated_flush_id) {
- flush_info.verified_stream_flush_id = validated_flush_id;
- }
-
- if (validated_stream_id == stream_id)
- highest_flush_id = flush_info.verified_stream_flush_id;
- }
-
- return highest_flush_id;
- }
-
- return 0;
-}
-
-uint32_t GpuChannelHost::GetHighestValidatedFlushID(int32_t stream_id) {
- AutoLock lock(context_lock_);
- StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
- return flush_info.verified_stream_flush_id;
-}
-
-GpuChannelHost::~GpuChannelHost() {
-#if DCHECK_IS_ON()
- AutoLock lock(context_lock_);
- DCHECK(!channel_)
- << "GpuChannelHost::DestroyChannel must be called before destruction.";
-#endif
-}
-
-GpuChannelHost::MessageFilter::ListenerInfo::ListenerInfo() {}
-
-GpuChannelHost::MessageFilter::ListenerInfo::~ListenerInfo() {}
-
-GpuChannelHost::MessageFilter::MessageFilter()
- : lost_(false) {
-}
-
-GpuChannelHost::MessageFilter::~MessageFilter() {}
-
-void GpuChannelHost::MessageFilter::AddRoute(
- int32_t route_id,
- base::WeakPtr<IPC::Listener> listener,
- scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
- DCHECK(listeners_.find(route_id) == listeners_.end());
- DCHECK(task_runner);
- ListenerInfo info;
- info.listener = listener;
- info.task_runner = task_runner;
- listeners_[route_id] = info;
-}
-
-void GpuChannelHost::MessageFilter::RemoveRoute(int32_t route_id) {
- listeners_.erase(route_id);
-}
-
-bool GpuChannelHost::MessageFilter::OnMessageReceived(
- const IPC::Message& message) {
- // Never handle sync message replies or we will deadlock here.
- if (message.is_reply())
- return false;
-
- auto it = listeners_.find(message.routing_id());
- if (it == listeners_.end())
- return false;
-
- const ListenerInfo& info = it->second;
- info.task_runner->PostTask(
- FROM_HERE,
- base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
- info.listener, message));
- return true;
-}
-
-void GpuChannelHost::MessageFilter::OnChannelError() {
- // Set the lost state before signalling the proxies. That way, if they
- // themselves post a task to recreate the context, they will not try to re-use
- // this channel host.
- {
- AutoLock lock(lock_);
- lost_ = true;
- }
-
- // Inform all the proxies that an error has occurred. This will be reported
- // via OpenGL as a lost context.
- for (const auto& kv : listeners_) {
- const ListenerInfo& info = kv.second;
- info.task_runner->PostTask(
- FROM_HERE, base::Bind(&IPC::Listener::OnChannelError, info.listener));
- }
-
- listeners_.clear();
-}
-
-bool GpuChannelHost::MessageFilter::IsLost() const {
- AutoLock lock(lock_);
- return lost_;
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/client/gpu_channel_host.h b/chromium/content/common/gpu/client/gpu_channel_host.h
deleted file mode 100644
index bf7b3894c21..00000000000
--- a/chromium/content/common/gpu/client/gpu_channel_host.h
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GPU_CHANNEL_HOST_H_
-#define CONTENT_COMMON_GPU_CLIENT_GPU_CHANNEL_HOST_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <string>
-#include <vector>
-
-#include "base/atomic_sequence_num.h"
-#include "base/containers/scoped_ptr_hash_map.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/process/process.h"
-#include "base/synchronization/lock.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/gpu_process_launch_causes.h"
-#include "content/common/gpu/gpu_result_codes.h"
-#include "content/common/gpu/gpu_stream_priority.h"
-#include "content/common/message_router.h"
-#include "gpu/config/gpu_info.h"
-#include "ipc/ipc_channel_handle.h"
-#include "ipc/ipc_sync_channel.h"
-#include "ipc/message_filter.h"
-#include "media/video/jpeg_decode_accelerator.h"
-#include "ui/events/latency_info.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gfx/gpu_memory_buffer.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gl/gpu_preference.h"
-
-class GURL;
-class TransportTextureService;
-struct GPUCreateCommandBufferConfig;
-
-namespace base {
-class MessageLoop;
-class WaitableEvent;
-}
-
-namespace IPC {
-class SyncMessageFilter;
-}
-
-namespace media {
-class JpegDecodeAccelerator;
-class VideoDecodeAccelerator;
-class VideoEncodeAccelerator;
-}
-
-namespace gpu {
-class GpuMemoryBufferManager;
-}
-
-namespace content {
-class CommandBufferProxyImpl;
-class GpuChannelHost;
-
-class CONTENT_EXPORT GpuChannelHostFactory {
- public:
- virtual ~GpuChannelHostFactory() {}
-
- virtual bool IsMainThread() = 0;
- virtual scoped_refptr<base::SingleThreadTaskRunner>
- GetIOThreadTaskRunner() = 0;
- virtual scoped_ptr<base::SharedMemory> AllocateSharedMemory(size_t size) = 0;
- virtual CreateCommandBufferResult CreateViewCommandBuffer(
- int32_t surface_id,
- const GPUCreateCommandBufferConfig& init_params,
- int32_t route_id) = 0;
-};
-
-// Encapsulates an IPC channel between the client and one GPU process.
-// On the GPU process side there's a corresponding GpuChannel.
-// Every method can be called on any thread that has a message loop, except
-// for the IO thread.
-class GpuChannelHost : public IPC::Sender,
- public base::RefCountedThreadSafe<GpuChannelHost> {
- public:
- // Must be called on the main thread (as defined by the factory).
- static scoped_refptr<GpuChannelHost> Create(
- GpuChannelHostFactory* factory,
- int channel_id,
- const gpu::GPUInfo& gpu_info,
- const IPC::ChannelHandle& channel_handle,
- base::WaitableEvent* shutdown_event,
- gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager);
-
- static const int32_t kDefaultStreamId = -1;
- static const GpuStreamPriority kDefaultStreamPriority =
- GpuStreamPriority::NORMAL;
-
- bool IsLost() const {
- DCHECK(channel_filter_.get());
- return channel_filter_->IsLost();
- }
-
- int channel_id() const { return channel_id_; }
-
- // The GPU stats reported by the GPU process.
- const gpu::GPUInfo& gpu_info() const { return gpu_info_; }
-
- // IPC::Sender implementation:
- bool Send(IPC::Message* msg) override;
-
- // Set an ordering barrier. AsyncFlushes any pending barriers on other
- // routes. Combines multiple OrderingBarriers into a single AsyncFlush.
-  // Returns the flush ID for the stream, or 0 if the put offset was not
-  // changed.
- uint32_t OrderingBarrier(int32_t route_id,
- int32_t stream_id,
- int32_t put_offset,
- uint32_t flush_count,
- const std::vector<ui::LatencyInfo>& latency_info,
- bool put_offset_changed,
- bool do_flush);
-
- void FlushPendingStream(int32_t stream_id);
-
- // Create and connect to a command buffer in the GPU process.
- scoped_ptr<CommandBufferProxyImpl> CreateViewCommandBuffer(
- int32_t surface_id,
- CommandBufferProxyImpl* share_group,
- int32_t stream_id,
- GpuStreamPriority stream_priority,
- const std::vector<int32_t>& attribs,
- const GURL& active_url,
- gfx::GpuPreference gpu_preference);
-
- // Create and connect to a command buffer in the GPU process.
- scoped_ptr<CommandBufferProxyImpl> CreateOffscreenCommandBuffer(
- const gfx::Size& size,
- CommandBufferProxyImpl* share_group,
- int32_t stream_id,
- GpuStreamPriority stream_priority,
- const std::vector<int32_t>& attribs,
- const GURL& active_url,
- gfx::GpuPreference gpu_preference);
-
- // Creates a JPEG decoder in the GPU process.
- scoped_ptr<media::JpegDecodeAccelerator> CreateJpegDecoder(
- media::JpegDecodeAccelerator::Client* client);
-
- // Destroy a command buffer created by this channel.
- void DestroyCommandBuffer(CommandBufferProxyImpl* command_buffer);
-
- // Destroy this channel. Must be called on the main thread, before
- // destruction.
- void DestroyChannel();
-
- // Add a route for the current message loop.
- void AddRoute(int route_id, base::WeakPtr<IPC::Listener> listener);
- void RemoveRoute(int route_id);
-
- GpuChannelHostFactory* factory() const { return factory_; }
-
- gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager() const {
- return gpu_memory_buffer_manager_;
- }
-
- // Returns a handle to the shared memory that can be sent via IPC to the
- // GPU process. The caller is responsible for ensuring it is closed. Returns
- // an invalid handle on failure.
- base::SharedMemoryHandle ShareToGpuProcess(
- base::SharedMemoryHandle source_handle);
-
- // Reserve one unused transfer buffer ID.
- int32_t ReserveTransferBufferId();
-
- // Returns a GPU memory buffer handle to the buffer that can be sent via
- // IPC to the GPU process. The caller is responsible for ensuring it is
- // closed. Returns an invalid handle on failure.
- gfx::GpuMemoryBufferHandle ShareGpuMemoryBufferToGpuProcess(
- const gfx::GpuMemoryBufferHandle& source_handle,
- bool* requires_sync_point);
-
- // Reserve one unused image ID.
- int32_t ReserveImageId();
-
- // Generate a route ID guaranteed to be unique for this channel.
- int32_t GenerateRouteID();
-
- // Generate a stream ID guaranteed to be unique for this channel.
- int32_t GenerateStreamID();
-
-  // Sends a synchronous nop to the server, which validates that all previous
-  // IPC messages have been received. Once the nop has been sent, all previous
-  // flushes are marked as validated, including flushes for other streams on
-  // the same channel, and the highest validated flush ID for the stream is
-  // returned. If the validation fails (which can only happen when the context
-  // is lost), the highest validated flush ID does not change. If no flush ID
-  // was ever validated, 0 is returned (note that the lowest valid flush ID
-  // is 1).
- uint32_t ValidateFlushIDReachedServer(int32_t stream_id, bool force_validate);
-
- // Returns the highest validated flush ID for a given stream.
- uint32_t GetHighestValidatedFlushID(int32_t stream_id);
-
- private:
- friend class base::RefCountedThreadSafe<GpuChannelHost>;
-
- // A filter used internally to route incoming messages from the IO thread
- // to the correct message loop. It also maintains some shared state between
- // all the contexts.
- class MessageFilter : public IPC::MessageFilter {
- public:
- MessageFilter();
-
- // Called on the IO thread.
- void AddRoute(int32_t route_id,
- base::WeakPtr<IPC::Listener> listener,
- scoped_refptr<base::SingleThreadTaskRunner> task_runner);
- // Called on the IO thread.
- void RemoveRoute(int32_t route_id);
-
- // IPC::MessageFilter implementation
- // (called on the IO thread):
- bool OnMessageReceived(const IPC::Message& msg) override;
- void OnChannelError() override;
-
- // The following methods can be called on any thread.
-
- // Whether the channel is lost.
- bool IsLost() const;
-
- private:
- struct ListenerInfo {
- ListenerInfo();
- ~ListenerInfo();
-
- base::WeakPtr<IPC::Listener> listener;
- scoped_refptr<base::SingleThreadTaskRunner> task_runner;
- };
-
- ~MessageFilter() override;
-
- // Threading notes: |listeners_| is only accessed on the IO thread. Every
- // other field is protected by |lock_|.
- base::hash_map<int32_t, ListenerInfo> listeners_;
-
- // Protects all fields below this one.
- mutable base::Lock lock_;
-
- // Whether the channel has been lost.
- bool lost_;
- };
-
- struct StreamFlushInfo {
- StreamFlushInfo();
- ~StreamFlushInfo();
-
- // These are global per stream.
- uint32_t next_stream_flush_id;
- uint32_t flushed_stream_flush_id;
- uint32_t verified_stream_flush_id;
-
- // These are local per context.
- bool flush_pending;
- int32_t route_id;
- int32_t put_offset;
- uint32_t flush_count;
- uint32_t flush_id;
- std::vector<ui::LatencyInfo> latency_info;
- };
-
- GpuChannelHost(GpuChannelHostFactory* factory,
- int channel_id,
- const gpu::GPUInfo& gpu_info,
- gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager);
- ~GpuChannelHost() override;
- void Connect(const IPC::ChannelHandle& channel_handle,
- base::WaitableEvent* shutdown_event);
- bool InternalSend(IPC::Message* msg);
- void InternalFlush(StreamFlushInfo* flush_info);
-
- // Threading notes: all fields are constant during the lifetime of |this|
- // except:
- // - |next_image_id_|, atomic type
- // - |next_route_id_|, atomic type
- // - |next_stream_id_|, atomic type
- // - |channel_| and |stream_flush_info_|, protected by |context_lock_|
- GpuChannelHostFactory* const factory_;
-
- const int channel_id_;
- const gpu::GPUInfo gpu_info_;
-
- scoped_refptr<MessageFilter> channel_filter_;
-
- gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager_;
-
-  // A filter for sending messages from threads other than the main thread.
- scoped_refptr<IPC::SyncMessageFilter> sync_filter_;
-
- // Image IDs are allocated in sequence.
- base::AtomicSequenceNumber next_image_id_;
-
- // Route IDs are allocated in sequence.
- base::AtomicSequenceNumber next_route_id_;
-
- // Stream IDs are allocated in sequence.
- base::AtomicSequenceNumber next_stream_id_;
-
- // Protects channel_ and stream_flush_info_.
- mutable base::Lock context_lock_;
- scoped_ptr<IPC::SyncChannel> channel_;
- base::hash_map<int32_t, StreamFlushInfo> stream_flush_info_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuChannelHost);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GPU_CHANNEL_HOST_H_
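For orientation, here is a minimal usage sketch of the command-buffer and
flush-validation API declared in the deleted header above. It is illustrative
only: it assumes a |host| obtained from GpuChannelHost::Create() on the main
thread, uses only the methods declared above, and does not build against trees
where this code has been removed.

// Illustrative sketch, not part of this change.
#include <stdint.h>
#include <vector>

#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "url/gurl.h"

void SketchOffscreenCommandBuffer(
    scoped_refptr<content::GpuChannelHost> host) {
  std::vector<int32_t> attribs;  // Context creation attributes, left empty.
  scoped_ptr<content::CommandBufferProxyImpl> command_buffer =
      host->CreateOffscreenCommandBuffer(
          gfx::Size(1, 1), nullptr /* share_group */,
          content::GpuChannelHost::kDefaultStreamId,
          content::GpuChannelHost::kDefaultStreamPriority, attribs,
          GURL("about:blank"), gfx::PreferDiscreteGpu);
  if (!command_buffer)
    return;  // Creation failed; the channel may have been lost.

  // ... issue work through |command_buffer| ...

  // Synchronously confirm that all flushes on the default stream reached the
  // GPU process; a return value of 0 indicates the validating nop failed,
  // which only happens on context loss.
  uint32_t validated_flush_id = host->ValidateFlushIDReachedServer(
      content::GpuChannelHost::kDefaultStreamId, true /* force_validate */);
  (void)validated_flush_id;

  host->DestroyCommandBuffer(command_buffer.get());
}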
diff --git a/chromium/content/common/gpu/client/gpu_context_tests.h b/chromium/content/common/gpu/client/gpu_context_tests.h
index 8810789d076..9687c4a057e 100644
--- a/chromium/content/common/gpu/client/gpu_context_tests.h
+++ b/chromium/content/common/gpu/client/gpu_context_tests.h
@@ -10,6 +10,7 @@
#include "base/run_loop.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/context_support.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/common/sync_token.h"
namespace {
@@ -30,32 +31,39 @@ class SignalTest : public ContextTestBase {
}
// These tests should time out if the callback doesn't get called.
- void TestSignalQuery(blink::WebGLId query) {
+ void TestSignalQuery(GLuint query) {
base::RunLoop run_loop;
context_support_->SignalQuery(
- query,
- base::Bind(
- &RunOnlyOnce, run_loop.QuitClosure(), base::Owned(new int(0))));
+ query, base::Bind(&RunOnlyOnce, run_loop.QuitClosure(),
+ base::Owned(new int(0))));
run_loop.Run();
}
};
CONTEXT_TEST_F(SignalTest, BasicSignalSyncTokenTest) {
- if (!context_)
+#if defined(OS_WIN)
+ // The IPC version of ContextTestBase::SetUpOnMainThread does not succeed on
+ // some platforms.
+ if (!gl_)
return;
+#endif
- const blink::WGC3Duint64 fence_sync = context_->insertFenceSyncCHROMIUM();
- context_->shallowFlushCHROMIUM();
+ const GLuint64 fence_sync = gl_->InsertFenceSyncCHROMIUM();
+ gl_->ShallowFlushCHROMIUM();
gpu::SyncToken sync_token;
- ASSERT_TRUE(context_->genSyncTokenCHROMIUM(fence_sync, sync_token.GetData()));
+ gl_->GenSyncTokenCHROMIUM(fence_sync, sync_token.GetData());
TestSignalSyncToken(sync_token);
};
CONTEXT_TEST_F(SignalTest, EmptySignalSyncTokenTest) {
- if (!context_)
+#if defined(OS_WIN)
+ // The IPC version of ContextTestBase::SetUpOnMainThread does not succeed on
+ // some platforms.
+ if (!gl_)
return;
+#endif
// Signalling something that doesn't exist should run the callback
// immediately.
@@ -64,42 +72,59 @@ CONTEXT_TEST_F(SignalTest, EmptySignalSyncTokenTest) {
};
CONTEXT_TEST_F(SignalTest, InvalidSignalSyncTokenTest) {
- if (!context_)
+#if defined(OS_WIN)
+ // The IPC version of ContextTestBase::SetUpOnMainThread does not succeed on
+ // some platforms.
+ if (!gl_)
return;
+#endif
// Signalling something that doesn't exist should run the callback
// immediately.
- gpu::SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO,
- 0,
- 1297824234,
+ gpu::SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO, 0,
+ gpu::CommandBufferId::FromUnsafeValue(1297824234),
9123743439);
TestSignalSyncToken(sync_token);
};
CONTEXT_TEST_F(SignalTest, BasicSignalQueryTest) {
- if (!context_)
+#if defined(OS_WIN)
+ // The IPC version of ContextTestBase::SetUpOnMainThread does not succeed on
+ // some platforms.
+ if (!gl_)
return;
+#endif
- unsigned query = context_->createQueryEXT();
- context_->beginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, query);
- context_->finish();
- context_->endQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
+ unsigned query;
+ gl_->GenQueriesEXT(1, &query);
+ gl_->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, query);
+ gl_->Finish();
+ gl_->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
TestSignalQuery(query);
- context_->deleteQueryEXT(query);
+ gl_->DeleteQueriesEXT(1, &query);
};
CONTEXT_TEST_F(SignalTest, SignalQueryUnboundTest) {
- if (!context_)
+#if defined(OS_WIN)
+ // The IPC version of ContextTestBase::SetUpOnMainThread does not succeed on
+ // some platforms.
+ if (!gl_)
return;
+#endif
- blink::WebGLId query = context_->createQueryEXT();
+ GLuint query;
+ gl_->GenQueriesEXT(1, &query);
TestSignalQuery(query);
- context_->deleteQueryEXT(query);
+ gl_->DeleteQueriesEXT(1, &query);
};
CONTEXT_TEST_F(SignalTest, InvalidSignalQueryUnboundTest) {
- if (!context_)
+#if defined(OS_WIN)
+ // The IPC version of ContextTestBase::SetUpOnMainThread does not succeed on
+ // some platforms.
+ if (!gl_)
return;
+#endif
// Signalling something that doesn't exist should run the callback
// immediately.
@@ -110,5 +135,4 @@ CONTEXT_TEST_F(SignalTest, InvalidSignalQueryUnboundTest) {
TestSignalQuery(928729082);
TestSignalQuery(928729081);
};
-
};
diff --git a/chromium/content/common/gpu/client/gpu_in_process_context_tests.cc b/chromium/content/common/gpu/client/gpu_in_process_context_tests.cc
index 6e740572211..c8f0d3f293f 100644
--- a/chromium/content/common/gpu/client/gpu_in_process_context_tests.cc
+++ b/chromium/content/common/gpu/client/gpu_in_process_context_tests.cc
@@ -7,32 +7,50 @@
#include <string>
#include <vector>
-#include "content/public/test/unittest_test_suite.h"
-#include "gpu/blink/webgraphicscontext3d_in_process_command_buffer_impl.h"
+#include "gpu/command_buffer/client/gl_in_process_context.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_surface.h"
namespace {
-using gpu_blink::WebGraphicsContext3DInProcessCommandBufferImpl;
-
class ContextTestBase : public testing::Test {
public:
void SetUp() override {
- blink::WebGraphicsContext3D::Attributes attributes;
- bool lose_context_when_out_of_memory = false;
- typedef WebGraphicsContext3DInProcessCommandBufferImpl WGC3DIPCBI;
- context_ = WGC3DIPCBI::CreateOffscreenContext(
- attributes, lose_context_when_out_of_memory);
- context_->InitializeOnCurrentThread();
- context_support_ = context_->GetContextSupport();
+ gpu::gles2::ContextCreationAttribHelper attributes;
+ attributes.alpha_size = 8;
+ attributes.depth_size = 24;
+ attributes.red_size = 8;
+ attributes.green_size = 8;
+ attributes.blue_size = 8;
+ attributes.stencil_size = 8;
+ attributes.samples = 4;
+ attributes.sample_buffers = 1;
+ attributes.bind_generates_resource = false;
+
+ context_.reset(gpu::GLInProcessContext::Create(
+ nullptr, /* service */
+ nullptr, /* surface */
+ true, /* offscreen */
+ gfx::kNullAcceleratedWidget, /* window */
+ gfx::Size(1, 1), /* size */
+ nullptr, /* share_context */
+ attributes, gfx::PreferDiscreteGpu,
+ ::gpu::GLInProcessContextSharedMemoryLimits(),
+ nullptr, /* gpu_memory_buffer_manager */
+ nullptr /* image_factory */));
+ gl_ = context_->GetImplementation();
+ context_support_ = context_->GetImplementation();
}
void TearDown() override { context_.reset(NULL); }
protected:
- scoped_ptr<WebGraphicsContext3DInProcessCommandBufferImpl> context_;
+ gpu::gles2::GLES2Interface* gl_;
gpu::ContextSupport* context_support_;
+
+ private:
+ scoped_ptr<gpu::GLInProcessContext> context_;
};
} // namespace
diff --git a/chromium/content/common/gpu/client/gpu_jpeg_decode_accelerator_host.cc b/chromium/content/common/gpu/client/gpu_jpeg_decode_accelerator_host.cc
deleted file mode 100644
index eac971b43b7..00000000000
--- a/chromium/content/common/gpu/client/gpu_jpeg_decode_accelerator_host.cc
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h"
-
-#include <stddef.h>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/shared_memory_handle.h"
-#include "base/memory/weak_ptr.h"
-#include "base/synchronization/waitable_event.h"
-#include "build/build_config.h"
-#include "content/common/gpu/client/gpu_channel_host.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "ipc/ipc_listener.h"
-#include "ipc/ipc_message_macros.h"
-#include "ipc/ipc_message_utils.h"
-
-namespace content {
-
-// Class to receive the AcceleratedJpegDecoderHostMsg_DecodeAck IPC message on
-// the IO thread. This does much the same job as a MessageFilter; it is not a
-// MessageFilter because GpuChannelHost doesn't support AddFilter.
-class GpuJpegDecodeAcceleratorHost::Receiver : public IPC::Listener,
- public base::NonThreadSafe {
- public:
- Receiver(Client* client,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
- : client_(client),
- io_task_runner_(io_task_runner),
- weak_factory_for_io_(this) {
- DCHECK(CalledOnValidThread());
- }
-
- ~Receiver() override { DCHECK(CalledOnValidThread()); }
-
- void InvalidateWeakPtr(base::WaitableEvent* event) {
- DCHECK(io_task_runner_->BelongsToCurrentThread());
- weak_factory_for_io_.InvalidateWeakPtrs();
- event->Signal();
- }
-
- // IPC::Listener implementation.
- void OnChannelError() override {
- DCHECK(io_task_runner_->BelongsToCurrentThread());
-
- OnDecodeAck(kInvalidBitstreamBufferId, PLATFORM_FAILURE);
- }
-
- bool OnMessageReceived(const IPC::Message& msg) override {
- DCHECK(io_task_runner_->BelongsToCurrentThread());
-
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(GpuJpegDecodeAcceleratorHost::Receiver, msg)
- IPC_MESSAGE_HANDLER(AcceleratedJpegDecoderHostMsg_DecodeAck, OnDecodeAck)
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
- DCHECK(handled);
- return handled;
- }
-
- base::WeakPtr<IPC::Listener> AsWeakPtrForIO() {
- return weak_factory_for_io_.GetWeakPtr();
- }
-
- private:
- void OnDecodeAck(int32_t bitstream_buffer_id, Error error) {
- DCHECK(io_task_runner_->BelongsToCurrentThread());
-
- if (!client_)
- return;
-
- if (error == media::JpegDecodeAccelerator::NO_ERRORS) {
- client_->VideoFrameReady(bitstream_buffer_id);
- } else {
- // Only NotifyError once.
- // Client::NotifyError() may trigger deletion of |this| (on another
- // thread), so calling it needs to be the last thing done on this stack!
- media::JpegDecodeAccelerator::Client* client = nullptr;
- std::swap(client, client_);
- client->NotifyError(bitstream_buffer_id, error);
- }
- }
-
- Client* client_;
-
- // GPU IO task runner.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
-
- // Weak pointers will be invalidated on IO thread.
- base::WeakPtrFactory<Receiver> weak_factory_for_io_;
-
- DISALLOW_COPY_AND_ASSIGN(Receiver);
-};
-
-GpuJpegDecodeAcceleratorHost::GpuJpegDecodeAcceleratorHost(
- GpuChannelHost* channel,
- int32_t route_id,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
- : channel_(channel),
- decoder_route_id_(route_id),
- io_task_runner_(io_task_runner) {
- DCHECK(channel_);
- DCHECK_NE(decoder_route_id_, MSG_ROUTING_NONE);
-}
-
-GpuJpegDecodeAcceleratorHost::~GpuJpegDecodeAcceleratorHost() {
- DCHECK(CalledOnValidThread());
- Send(new AcceleratedJpegDecoderMsg_Destroy(decoder_route_id_));
-
- if (receiver_) {
- channel_->RemoveRoute(decoder_route_id_);
-
- // Invalidate weak ptr of |receiver_|. After that, no more messages will be
- // routed to |receiver_| on IO thread.
- base::WaitableEvent event(false, false);
- io_task_runner_->PostTask(FROM_HERE,
- base::Bind(&Receiver::InvalidateWeakPtr,
- base::Unretained(receiver_.get()),
- base::Unretained(&event)));
- event.Wait();
- }
-}
-
-bool GpuJpegDecodeAcceleratorHost::Initialize(
- media::JpegDecodeAccelerator::Client* client) {
- DCHECK(CalledOnValidThread());
-
- bool succeeded = false;
-  // This cannot be sent on the IO thread because the message is synchronous.
- Send(new GpuMsg_CreateJpegDecoder(decoder_route_id_, &succeeded));
-
- if (!succeeded) {
- DLOG(ERROR) << "Send(GpuMsg_CreateJpegDecoder()) failed";
- return false;
- }
-
- receiver_.reset(new Receiver(client, io_task_runner_));
-
- return true;
-}
-
-void GpuJpegDecodeAcceleratorHost::Decode(
- const media::BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<media::VideoFrame>& video_frame) {
- DCHECK(CalledOnValidThread());
-
- DCHECK(
- base::SharedMemory::IsHandleValid(video_frame->shared_memory_handle()));
-
- base::SharedMemoryHandle input_handle =
- channel_->ShareToGpuProcess(bitstream_buffer.handle());
- if (!base::SharedMemory::IsHandleValid(input_handle)) {
- DLOG(ERROR) << "Failed to duplicate handle of BitstreamBuffer";
- return;
- }
- base::SharedMemoryHandle output_handle =
- channel_->ShareToGpuProcess(video_frame->shared_memory_handle());
- if (!base::SharedMemory::IsHandleValid(output_handle)) {
- DLOG(ERROR) << "Failed to duplicate handle of VideoFrame";
-#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
- if (input_handle.auto_close) {
- // Defer closing task to the ScopedFD.
- base::ScopedFD(input_handle.fd);
- }
-#else
-  // TODO(kcwu) fix the handle leak after crbug.com/493414 is resolved.
-#endif
- return;
- }
-
- size_t output_buffer_size = media::VideoFrame::AllocationSize(
- video_frame->format(), video_frame->coded_size());
-
- AcceleratedJpegDecoderMsg_Decode_Params decode_params;
- decode_params.coded_size = video_frame->coded_size();
- decode_params.input_buffer_id = bitstream_buffer.id();
- decode_params.input_buffer_handle = input_handle;
- decode_params.input_buffer_size = bitstream_buffer.size();
- decode_params.output_video_frame_handle = output_handle;
- decode_params.output_buffer_size = output_buffer_size;
- Send(new AcceleratedJpegDecoderMsg_Decode(decoder_route_id_, decode_params));
-}
-
-bool GpuJpegDecodeAcceleratorHost::IsSupported() {
- return channel_->gpu_info().jpeg_decode_accelerator_supported;
-}
-
-void GpuJpegDecodeAcceleratorHost::Send(IPC::Message* message) {
- DCHECK(CalledOnValidThread());
-
- if (!channel_->Send(message)) {
- DLOG(ERROR) << "Send(" << message->type() << ") failed";
- }
-}
-
-base::WeakPtr<IPC::Listener> GpuJpegDecodeAcceleratorHost::GetReceiver() {
- return receiver_->AsWeakPtrForIO();
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h b/chromium/content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h
deleted file mode 100644
index 53465f3aee7..00000000000
--- a/chromium/content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GPU_JPEG_DECODE_ACCELERATOR_HOST_H_
-#define CONTENT_COMMON_GPU_CLIENT_GPU_JPEG_DECODE_ACCELERATOR_HOST_H_
-
-#include <stdint.h>
-
-#include "base/macros.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "media/video/jpeg_decode_accelerator.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-}
-
-namespace IPC {
-class Listener;
-class Message;
-}
-
-namespace content {
-class GpuChannelHost;
-
-// This class is used to talk to JpegDecodeAccelerator in the GPU process
-// through IPC messages.
-class GpuJpegDecodeAcceleratorHost : public media::JpegDecodeAccelerator,
- public base::NonThreadSafe {
- public:
-  // VideoCaptureGpuJpegDecoder owns |this| and |channel|, and deletes |this|
-  // before |channel|, so |this| is guaranteed not to outlive |channel|.
- GpuJpegDecodeAcceleratorHost(
- GpuChannelHost* channel,
- int32_t route_id,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
- ~GpuJpegDecodeAcceleratorHost() override;
-
- // media::JpegDecodeAccelerator implementation.
- // |client| is called on the IO thread, but is never called into after the
- // GpuJpegDecodeAcceleratorHost is destroyed.
- bool Initialize(media::JpegDecodeAccelerator::Client* client) override;
- void Decode(const media::BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<media::VideoFrame>& video_frame) override;
- bool IsSupported() override;
-
- base::WeakPtr<IPC::Listener> GetReceiver();
-
- private:
- class Receiver;
-
- void Send(IPC::Message* message);
-
- // Unowned reference to the GpuChannelHost to send IPC messages to the GPU
- // process.
- GpuChannelHost* channel_;
-
- // Route ID for the associated decoder in the GPU process.
- int32_t decoder_route_id_;
-
- // GPU IO task runner.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
-
- scoped_ptr<Receiver> receiver_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuJpegDecodeAcceleratorHost);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GPU_JPEG_DECODE_ACCELERATOR_HOST_H_
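As a reading aid, here is a short sketch of the decode path through the
deleted host class, paired with GpuChannelHost::CreateJpegDecoder() from the
channel-host file above. It is illustrative only; the ownership of |channel|,
|client|, |bitstream_buffer| and |video_frame| by the video capture code is
assumed, as the class comment describes.

// Illustrative sketch, not part of this change.
#include "base/memory/scoped_ptr.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/video_frame.h"
#include "media/video/jpeg_decode_accelerator.h"

void SketchJpegDecode(content::GpuChannelHost* channel,
                      media::JpegDecodeAccelerator::Client* client,
                      const media::BitstreamBuffer& bitstream_buffer,
                      const scoped_refptr<media::VideoFrame>& video_frame) {
  // CreateJpegDecoder() sends the synchronous GpuMsg_CreateJpegDecoder and
  // calls Initialize(client), so the returned decoder is ready to use.
  scoped_ptr<media::JpegDecodeAccelerator> decoder =
      channel->CreateJpegDecoder(client);
  if (!decoder || !decoder->IsSupported())
    return;
  // The DecodeAck (or an error) is delivered to |client| on the GPU IO thread
  // via the Receiver listener.
  decoder->Decode(bitstream_buffer, video_frame);
}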
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl.cc b/chromium/content/common/gpu/client/gpu_memory_buffer_impl.cc
deleted file mode 100644
index 5c6571aaf66..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_memory_buffer_impl.h"
-
-#include "base/logging.h"
-#include "build/build_config.h"
-#include "content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.h"
-
-#if defined(OS_MACOSX)
-#include "content/common/gpu/client/gpu_memory_buffer_impl_io_surface.h"
-#endif
-
-#if defined(OS_ANDROID)
-#include "content/common/gpu/client/gpu_memory_buffer_impl_surface_texture.h"
-#endif
-
-#if defined(USE_OZONE)
-#include "content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap.h"
-#endif
-
-namespace content {
-
-GpuMemoryBufferImpl::GpuMemoryBufferImpl(gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback)
- : id_(id),
- size_(size),
- format_(format),
- callback_(callback),
- mapped_(false) {}
-
-GpuMemoryBufferImpl::~GpuMemoryBufferImpl() {
- DCHECK(!mapped_);
- callback_.Run(destruction_sync_token_);
-}
-
-// static
-scoped_ptr<GpuMemoryBufferImpl> GpuMemoryBufferImpl::CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- const DestructionCallback& callback) {
- switch (handle.type) {
- case gfx::SHARED_MEMORY_BUFFER:
- return GpuMemoryBufferImplSharedMemory::CreateFromHandle(
- handle, size, format, usage, callback);
-#if defined(OS_MACOSX)
- case gfx::IO_SURFACE_BUFFER:
- return GpuMemoryBufferImplIOSurface::CreateFromHandle(
- handle, size, format, usage, callback);
-#endif
-#if defined(OS_ANDROID)
- case gfx::SURFACE_TEXTURE_BUFFER:
- return GpuMemoryBufferImplSurfaceTexture::CreateFromHandle(
- handle, size, format, usage, callback);
-#endif
-#if defined(USE_OZONE)
- case gfx::OZONE_NATIVE_PIXMAP:
- return GpuMemoryBufferImplOzoneNativePixmap::CreateFromHandle(
- handle, size, format, usage, callback);
-#endif
- default:
- NOTREACHED();
- return nullptr;
- }
-}
-
-// static
-GpuMemoryBufferImpl* GpuMemoryBufferImpl::FromClientBuffer(
- ClientBuffer buffer) {
- return reinterpret_cast<GpuMemoryBufferImpl*>(buffer);
-}
-
-gfx::Size GpuMemoryBufferImpl::GetSize() const {
- return size_;
-}
-
-gfx::BufferFormat GpuMemoryBufferImpl::GetFormat() const {
- return format_;
-}
-
-gfx::GpuMemoryBufferId GpuMemoryBufferImpl::GetId() const {
- return id_;
-}
-
-ClientBuffer GpuMemoryBufferImpl::AsClientBuffer() {
- return reinterpret_cast<ClientBuffer>(this);
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl.h b/chromium/content/common/gpu/client/gpu_memory_buffer_impl.h
deleted file mode 100644
index 954a87535fd..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_H_
-#define CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_H_
-
-#include "base/callback.h"
-#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "content/common/content_export.h"
-#include "gpu/command_buffer/common/sync_token.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gfx/gpu_memory_buffer.h"
-
-namespace content {
-
-// Provides common implementation of a GPU memory buffer.
-class CONTENT_EXPORT GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
- public:
- typedef base::Callback<void(const gpu::SyncToken& sync)> DestructionCallback;
-
- ~GpuMemoryBufferImpl() override;
-
-  // Creates an instance from the given |handle|. |size| and |format| should
-  // match what was used to allocate the |handle|. |callback| is called when
-  // the instance is deleted, which is not necessarily on the same thread on
-  // which this function was called and the instance was created.
- static scoped_ptr<GpuMemoryBufferImpl> CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- const DestructionCallback& callback);
-
-  // Type-checking upcast routine. Returns NULL on failure.
- static GpuMemoryBufferImpl* FromClientBuffer(ClientBuffer buffer);
-
- // Overridden from gfx::GpuMemoryBuffer:
- gfx::Size GetSize() const override;
- gfx::BufferFormat GetFormat() const override;
- gfx::GpuMemoryBufferId GetId() const override;
- ClientBuffer AsClientBuffer() override;
-
- void set_destruction_sync_token(const gpu::SyncToken& sync_token) {
- destruction_sync_token_ = sync_token;
- }
-
- protected:
- GpuMemoryBufferImpl(gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback);
-
- const gfx::GpuMemoryBufferId id_;
- const gfx::Size size_;
- const gfx::BufferFormat format_;
- const DestructionCallback callback_;
- bool mapped_;
- gpu::SyncToken destruction_sync_token_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferImpl);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_H_
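The deleted base class above defines the client-side mapping contract shared
by the platform-specific buffer implementations that follow. Below is a brief
illustrative sketch of mapping a buffer received over IPC; the helper name and
the destruction callback are made up for illustration, and |handle|, |size|
and |format| are assumed to match the original allocation, as the
CreateFromHandle() comment requires.

// Illustrative sketch, not part of this change.
#include "base/bind.h"
#include "content/common/gpu/client/gpu_memory_buffer_impl.h"

namespace {

// Hypothetical destruction callback; a real caller would typically record the
// sync token before reusing the underlying buffer.
void OnBufferDestroyed(const gpu::SyncToken& sync_token) {}

}  // namespace

void SketchMapBuffer(const gfx::GpuMemoryBufferHandle& handle,
                     const gfx::Size& size,
                     gfx::BufferFormat format) {
  scoped_ptr<content::GpuMemoryBufferImpl> buffer =
      content::GpuMemoryBufferImpl::CreateFromHandle(
          handle, size, format, gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
          base::Bind(&OnBufferDestroyed));
  if (!buffer || !buffer->Map())
    return;
  // Plane 0 base address and row stride; multi-planar formats expose one
  // plane per index.
  void* data = buffer->memory(0);
  int stride = buffer->stride(0);
  (void)data;
  (void)stride;
  buffer->Unmap();
}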
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface.cc b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface.cc
deleted file mode 100644
index b6038cf72da..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface.cc
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_memory_buffer_impl_io_surface.h"
-
-#include "base/logging.h"
-#include "content/common/gpu/gpu_memory_buffer_factory_io_surface.h"
-#include "ui/gfx/buffer_format_util.h"
-#include "ui/gfx/mac/io_surface.h"
-
-namespace content {
-namespace {
-
-uint32_t LockFlags(gfx::BufferUsage usage) {
- switch (usage) {
- case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE:
- return kIOSurfaceLockAvoidSync;
- case gfx::BufferUsage::GPU_READ:
- case gfx::BufferUsage::SCANOUT:
- case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT:
- return 0;
- }
- NOTREACHED();
- return 0;
-}
-
-void NoOp() {
-}
-
-} // namespace
-
-GpuMemoryBufferImplIOSurface::GpuMemoryBufferImplIOSurface(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback,
- IOSurfaceRef io_surface,
- uint32_t lock_flags)
- : GpuMemoryBufferImpl(id, size, format, callback),
- io_surface_(io_surface),
- lock_flags_(lock_flags) {}
-
-GpuMemoryBufferImplIOSurface::~GpuMemoryBufferImplIOSurface() {
-}
-
-// static
-scoped_ptr<GpuMemoryBufferImplIOSurface>
-GpuMemoryBufferImplIOSurface::CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- const DestructionCallback& callback) {
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface(
- IOSurfaceLookupFromMachPort(handle.mach_port.get()));
- if (!io_surface)
- return nullptr;
-
- return make_scoped_ptr(
- new GpuMemoryBufferImplIOSurface(handle.id, size, format, callback,
- io_surface.release(), LockFlags(usage)));
-}
-
-// static
-bool GpuMemoryBufferImplIOSurface::IsConfigurationSupported(
- gfx::BufferFormat format,
- gfx::BufferUsage usage) {
- return GpuMemoryBufferFactoryIOSurface::
- IsGpuMemoryBufferConfigurationSupported(format, usage);
-}
-
-// static
-base::Closure GpuMemoryBufferImplIOSurface::AllocateForTesting(
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle) {
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface(
- gfx::CreateIOSurface(size, format));
- DCHECK(io_surface);
- gfx::GpuMemoryBufferId kBufferId(1);
- handle->type = gfx::IO_SURFACE_BUFFER;
- handle->id = kBufferId;
- handle->mach_port.reset(IOSurfaceCreateMachPort(io_surface));
- return base::Bind(&NoOp);
-}
-
-bool GpuMemoryBufferImplIOSurface::Map() {
- DCHECK(!mapped_);
- IOReturn status = IOSurfaceLock(io_surface_, lock_flags_, NULL);
- DCHECK_NE(status, kIOReturnCannotLock);
- mapped_ = true;
- return true;
-}
-
-void* GpuMemoryBufferImplIOSurface::memory(size_t plane) {
- DCHECK(mapped_);
- DCHECK_LT(plane, gfx::NumberOfPlanesForBufferFormat(format_));
- return IOSurfaceGetBaseAddressOfPlane(io_surface_, plane);
-}
-
-void GpuMemoryBufferImplIOSurface::Unmap() {
- DCHECK(mapped_);
- IOSurfaceUnlock(io_surface_, lock_flags_, NULL);
- mapped_ = false;
-}
-
-bool GpuMemoryBufferImplIOSurface::IsInUseByMacOSWindowServer() const {
- return IOSurfaceIsInUse(io_surface_);
-}
-
-int GpuMemoryBufferImplIOSurface::stride(size_t plane) const {
- DCHECK_LT(plane, gfx::NumberOfPlanesForBufferFormat(format_));
- return IOSurfaceGetBytesPerRowOfPlane(io_surface_, plane);
-}
-
-gfx::GpuMemoryBufferHandle GpuMemoryBufferImplIOSurface::GetHandle() const {
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::IO_SURFACE_BUFFER;
- handle.id = id_;
- return handle;
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface.h b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface.h
deleted file mode 100644
index 8883cc1a3e2..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_IO_SURFACE_H_
-#define CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_IO_SURFACE_H_
-
-#include <IOSurface/IOSurface.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include "base/mac/scoped_cftyperef.h"
-#include "base/macros.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/client/gpu_memory_buffer_impl.h"
-
-namespace content {
-
-// Implementation of GPU memory buffer based on IO surfaces.
-class CONTENT_EXPORT GpuMemoryBufferImplIOSurface : public GpuMemoryBufferImpl {
- public:
- ~GpuMemoryBufferImplIOSurface() override;
-
- static scoped_ptr<GpuMemoryBufferImplIOSurface> CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- const DestructionCallback& callback);
-
- static bool IsConfigurationSupported(gfx::BufferFormat format,
- gfx::BufferUsage usage);
-
- static base::Closure AllocateForTesting(const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle);
-
- // Overridden from gfx::GpuMemoryBuffer:
- bool Map() override;
- void* memory(size_t plane) override;
- void Unmap() override;
- bool IsInUseByMacOSWindowServer() const override;
- int stride(size_t plane) const override;
- gfx::GpuMemoryBufferHandle GetHandle() const override;
-
- private:
- GpuMemoryBufferImplIOSurface(gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback,
- IOSurfaceRef io_surface,
- uint32_t lock_flags);
-
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface_;
- uint32_t lock_flags_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferImplIOSurface);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_IO_SURFACE_H_
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface_unittest.cc b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface_unittest.cc
deleted file mode 100644
index 83edce6c3e7..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_io_surface_unittest.cc
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_memory_buffer_impl_io_surface.h"
-#include "content/test/gpu_memory_buffer_impl_test_template.h"
-
-namespace content {
-namespace {
-
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplIOSurface,
- GpuMemoryBufferImplTest,
- GpuMemoryBufferImplIOSurface);
-
-} // namespace
-} // namespace content
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc
deleted file mode 100644
index c0303555ad9..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap.h"
-
-#include <utility>
-
-#include "content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.h"
-#include "ui/gfx/buffer_format_util.h"
-#include "ui/ozone/public/client_native_pixmap_factory.h"
-#include "ui/ozone/public/native_pixmap.h"
-#include "ui/ozone/public/ozone_platform.h"
-#include "ui/ozone/public/surface_factory_ozone.h"
-
-namespace content {
-namespace {
-
-void FreeNativePixmapForTesting(scoped_refptr<ui::NativePixmap> native_pixmap) {
-  // Nothing to do here. |native_pixmap| will be freed when this function
-  // returns and the reference count drops to 0.
-}
-
-} // namespace
-
-GpuMemoryBufferImplOzoneNativePixmap::GpuMemoryBufferImplOzoneNativePixmap(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback,
- scoped_ptr<ui::ClientNativePixmap> pixmap)
- : GpuMemoryBufferImpl(id, size, format, callback),
- pixmap_(std::move(pixmap)) {}
-
-GpuMemoryBufferImplOzoneNativePixmap::~GpuMemoryBufferImplOzoneNativePixmap() {}
-
-// static
-scoped_ptr<GpuMemoryBufferImplOzoneNativePixmap>
-GpuMemoryBufferImplOzoneNativePixmap::CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- const DestructionCallback& callback) {
- scoped_ptr<ui::ClientNativePixmap> native_pixmap =
- ui::ClientNativePixmapFactory::GetInstance()->ImportFromHandle(
- handle.native_pixmap_handle, size, usage);
- DCHECK(native_pixmap);
- return make_scoped_ptr(new GpuMemoryBufferImplOzoneNativePixmap(
- handle.id, size, format, callback, std::move(native_pixmap)));
-}
-
-// static
-bool GpuMemoryBufferImplOzoneNativePixmap::IsConfigurationSupported(
- gfx::BufferFormat format,
- gfx::BufferUsage usage) {
- return GpuMemoryBufferFactoryOzoneNativePixmap::
- IsGpuMemoryBufferConfigurationSupported(format, usage);
-}
-
-// static
-base::Closure GpuMemoryBufferImplOzoneNativePixmap::AllocateForTesting(
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle) {
- DCHECK(IsConfigurationSupported(format, usage));
- scoped_refptr<ui::NativePixmap> pixmap =
- ui::OzonePlatform::GetInstance()
- ->GetSurfaceFactoryOzone()
- ->CreateNativePixmap(gfx::kNullPluginWindow, size, format, usage);
- handle->type = gfx::OZONE_NATIVE_PIXMAP;
- handle->native_pixmap_handle = pixmap->ExportHandle();
- return base::Bind(&FreeNativePixmapForTesting, pixmap);
-}
-
-bool GpuMemoryBufferImplOzoneNativePixmap::Map() {
- DCHECK(!mapped_);
- if (!pixmap_->Map())
- return false;
- mapped_ = true;
- return mapped_;
-}
-
-void* GpuMemoryBufferImplOzoneNativePixmap::memory(size_t plane) {
- DCHECK(mapped_);
- DCHECK_LT(plane, gfx::NumberOfPlanesForBufferFormat(format_));
- return pixmap_->Map();
-}
-
-void GpuMemoryBufferImplOzoneNativePixmap::Unmap() {
- DCHECK(mapped_);
- pixmap_->Unmap();
- mapped_ = false;
-}
-
-int GpuMemoryBufferImplOzoneNativePixmap::stride(size_t plane) const {
- DCHECK_LT(plane, gfx::NumberOfPlanesForBufferFormat(format_));
- int stride;
- pixmap_->GetStride(&stride);
- return stride;
-}
-
-gfx::GpuMemoryBufferHandle GpuMemoryBufferImplOzoneNativePixmap::GetHandle()
- const {
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::OZONE_NATIVE_PIXMAP;
- handle.id = id_;
- return handle;
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap.h b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap.h
deleted file mode 100644
index aa073e111e3..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_OZONE_NATIVE_PIXMAP_H_
-#define CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_OZONE_NATIVE_PIXMAP_H_
-
-#include <stddef.h>
-
-#include "base/macros.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/client/gpu_memory_buffer_impl.h"
-
-namespace ui {
-class ClientNativePixmap;
-}
-
-namespace content {
-
-// Implementation of GPU memory buffer based on Ozone native pixmap.
-class CONTENT_EXPORT GpuMemoryBufferImplOzoneNativePixmap
- : public GpuMemoryBufferImpl {
- public:
- ~GpuMemoryBufferImplOzoneNativePixmap() override;
-
- static scoped_ptr<GpuMemoryBufferImplOzoneNativePixmap> CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- const DestructionCallback& callback);
-
- static bool IsConfigurationSupported(gfx::BufferFormat format,
- gfx::BufferUsage usage);
-
- static base::Closure AllocateForTesting(const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle);
-
- // Overridden from gfx::GpuMemoryBuffer:
- bool Map() override;
- void* memory(size_t plane) override;
- void Unmap() override;
- int stride(size_t plane) const override;
- gfx::GpuMemoryBufferHandle GetHandle() const override;
-
- private:
- GpuMemoryBufferImplOzoneNativePixmap(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback,
- scoped_ptr<ui::ClientNativePixmap> native_pixmap);
-
- scoped_ptr<ui::ClientNativePixmap> pixmap_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferImplOzoneNativePixmap);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_OZONE_NATIVE_PIXMAP_H_
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap_unittest.cc b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap_unittest.cc
deleted file mode 100644
index 08295c5186e..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap_unittest.cc
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_pixmap.h"
-#include "content/test/gpu_memory_buffer_impl_test_template.h"
-
-namespace content {
-namespace {
-
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplOzoneNativePixmap,
- GpuMemoryBufferImplTest,
- GpuMemoryBufferImplOzoneNativePixmap);
-
-} // namespace
-} // namespace content
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.cc b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.cc
deleted file mode 100644
index 83781ae8553..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.cc
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.h"
-
-#include <stdint.h>
-#include <utility>
-
-#include "base/bind.h"
-#include "base/numerics/safe_math.h"
-#include "base/process/memory.h"
-#include "ui/gfx/buffer_format_util.h"
-#include "ui/gl/gl_bindings.h"
-
-namespace content {
-namespace {
-
-void Noop() {}
-
-} // namespace
-
-GpuMemoryBufferImplSharedMemory::GpuMemoryBufferImplSharedMemory(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback,
- scoped_ptr<base::SharedMemory> shared_memory,
- size_t offset,
- int stride)
- : GpuMemoryBufferImpl(id, size, format, callback),
- shared_memory_(std::move(shared_memory)),
- offset_(offset),
- stride_(stride) {
- DCHECK(IsSizeValidForFormat(size, format));
-}
-
-GpuMemoryBufferImplSharedMemory::~GpuMemoryBufferImplSharedMemory() {
-}
-
-// static
-scoped_ptr<GpuMemoryBufferImplSharedMemory>
-GpuMemoryBufferImplSharedMemory::Create(gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback) {
- size_t buffer_size = 0u;
- if (!gfx::BufferSizeForBufferFormatChecked(size, format, &buffer_size))
- return nullptr;
-
- scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
- if (!shared_memory->CreateAndMapAnonymous(buffer_size))
- return nullptr;
-
- return make_scoped_ptr(new GpuMemoryBufferImplSharedMemory(
- id, size, format, callback, std::move(shared_memory), 0,
- gfx::RowSizeForBufferFormat(size.width(), format, 0)));
-}
-
-// static
-gfx::GpuMemoryBufferHandle
-GpuMemoryBufferImplSharedMemory::AllocateForChildProcess(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- base::ProcessHandle child_process) {
- size_t buffer_size = 0u;
- if (!gfx::BufferSizeForBufferFormatChecked(size, format, &buffer_size))
- return gfx::GpuMemoryBufferHandle();
-
- base::SharedMemory shared_memory;
- if (!shared_memory.CreateAnonymous(buffer_size))
- return gfx::GpuMemoryBufferHandle();
-
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::SHARED_MEMORY_BUFFER;
- handle.id = id;
- handle.offset = 0;
- handle.stride = static_cast<int32_t>(
- gfx::RowSizeForBufferFormat(size.width(), format, 0));
- shared_memory.GiveToProcess(child_process, &handle.handle);
- return handle;
-}
-
-// static
-scoped_ptr<GpuMemoryBufferImplSharedMemory>
-GpuMemoryBufferImplSharedMemory::CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- const DestructionCallback& callback) {
- DCHECK(base::SharedMemory::IsHandleValid(handle.handle));
-
- return make_scoped_ptr(new GpuMemoryBufferImplSharedMemory(
- handle.id, size, format, callback,
- make_scoped_ptr(new base::SharedMemory(handle.handle, false)),
- handle.offset, handle.stride));
-}
-
-// static
-bool GpuMemoryBufferImplSharedMemory::IsUsageSupported(gfx::BufferUsage usage) {
- switch (usage) {
- case gfx::BufferUsage::GPU_READ:
- case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE:
- case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT:
- return true;
- case gfx::BufferUsage::SCANOUT:
- return false;
- }
- NOTREACHED();
- return false;
-}
-
-// static
-bool GpuMemoryBufferImplSharedMemory::IsConfigurationSupported(
- gfx::BufferFormat format,
- gfx::BufferUsage usage) {
- return IsUsageSupported(usage);
-}
-
-// static
-bool GpuMemoryBufferImplSharedMemory::IsSizeValidForFormat(
- const gfx::Size& size,
- gfx::BufferFormat format) {
- switch (format) {
- case gfx::BufferFormat::ATC:
- case gfx::BufferFormat::ATCIA:
- case gfx::BufferFormat::DXT1:
- case gfx::BufferFormat::DXT5:
- case gfx::BufferFormat::ETC1:
-      // Compressed images must have a width and height that are evenly
-      // divisible by the block size.
- return size.width() % 4 == 0 && size.height() % 4 == 0;
- case gfx::BufferFormat::R_8:
- case gfx::BufferFormat::RGBA_4444:
- case gfx::BufferFormat::RGBA_8888:
- case gfx::BufferFormat::RGBX_8888:
- case gfx::BufferFormat::BGRA_8888:
- case gfx::BufferFormat::BGRX_8888:
- return true;
- case gfx::BufferFormat::YUV_420:
- case gfx::BufferFormat::YUV_420_BIPLANAR: {
- size_t num_planes = gfx::NumberOfPlanesForBufferFormat(format);
- for (size_t i = 0; i < num_planes; ++i) {
- size_t factor = gfx::SubsamplingFactorForBufferFormat(format, i);
- if (size.width() % factor || size.height() % factor)
- return false;
- }
- return true;
- }
- case gfx::BufferFormat::UYVY_422:
- return size.width() % 2 == 0;
- }
-
- NOTREACHED();
- return false;
-}
-
-// static
-base::Closure GpuMemoryBufferImplSharedMemory::AllocateForTesting(
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle) {
- base::SharedMemory shared_memory;
- bool rv = shared_memory.CreateAnonymous(
- gfx::BufferSizeForBufferFormat(size, format));
- DCHECK(rv);
- handle->type = gfx::SHARED_MEMORY_BUFFER;
- handle->offset = 0;
- handle->stride = static_cast<int32_t>(
- gfx::RowSizeForBufferFormat(size.width(), format, 0));
- handle->handle = base::SharedMemory::DuplicateHandle(shared_memory.handle());
- return base::Bind(&Noop);
-}
-
-bool GpuMemoryBufferImplSharedMemory::Map() {
- DCHECK(!mapped_);
-
-  // Map the buffer the first time Map() is called, then keep it mapped for the
-  // lifetime of the buffer. This avoids mapping the buffer unless necessary.
- if (!shared_memory_->memory()) {
- DCHECK_EQ(static_cast<size_t>(stride_),
- gfx::RowSizeForBufferFormat(size_.width(), format_, 0));
- size_t buffer_size = gfx::BufferSizeForBufferFormat(size_, format_);
-    // Note: offset_ != 0 is not a common use-case. To keep it simple we
-    // map offset + buffer_size here, but this could be avoided using MapAt().
- size_t map_size = offset_ + buffer_size;
- if (!shared_memory_->Map(map_size))
- base::TerminateBecauseOutOfMemory(map_size);
- }
- mapped_ = true;
- return true;
-}
-
-void* GpuMemoryBufferImplSharedMemory::memory(size_t plane) {
- DCHECK(mapped_);
- DCHECK_LT(plane, gfx::NumberOfPlanesForBufferFormat(format_));
- return reinterpret_cast<uint8_t*>(shared_memory_->memory()) + offset_ +
- gfx::BufferOffsetForBufferFormat(size_, format_, plane);
-}
-
-void GpuMemoryBufferImplSharedMemory::Unmap() {
- DCHECK(mapped_);
- mapped_ = false;
-}
-
-int GpuMemoryBufferImplSharedMemory::stride(size_t plane) const {
- DCHECK_LT(plane, gfx::NumberOfPlanesForBufferFormat(format_));
- return gfx::RowSizeForBufferFormat(size_.width(), format_, plane);
-}
-
-gfx::GpuMemoryBufferHandle GpuMemoryBufferImplSharedMemory::GetHandle() const {
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::SHARED_MEMORY_BUFFER;
- handle.id = id_;
- handle.offset = offset_;
- handle.stride = stride_;
- handle.handle = shared_memory_->handle();
- return handle;
-}
-
-} // namespace content
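The shared-memory implementation removed above is the simplest GpuMemoryBufferImpl backend: Map() lazily maps the whole region, memory(plane) returns a pointer adjusted by the per-plane offset, and stride(plane) is that plane's row size in bytes. A minimal caller-side sketch of the Map()/memory()/Unmap() cycle, using only the interface shown in the deleted code (the function name and the single-plane assumption are illustrative, not part of the original):

    #include <stdint.h>
    #include <string.h>

    #include "content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.h"

    // Sketch: clear the first row of plane 0 of an already-created buffer.
    // Assumes a single-plane format such as RGBA_8888.
    void ClearFirstRow(content::GpuMemoryBufferImplSharedMemory* buffer) {
      if (!buffer->Map())  // Maps the shared memory on first use.
        return;
      uint8_t* row = static_cast<uint8_t*>(buffer->memory(0));
      memset(row, 0, buffer->stride(0));  // stride(0) is the row pitch in bytes.
      buffer->Unmap();
    }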
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.h b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.h
deleted file mode 100644
index 9416cbb31e1..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_SHARED_MEMORY_H_
-#define CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_SHARED_MEMORY_H_
-
-#include <stddef.h>
-
-#include "base/macros.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/client/gpu_memory_buffer_impl.h"
-
-namespace content {
-
-// Implementation of GPU memory buffer based on shared memory.
-class CONTENT_EXPORT GpuMemoryBufferImplSharedMemory
- : public GpuMemoryBufferImpl {
- public:
- ~GpuMemoryBufferImplSharedMemory() override;
-
- static scoped_ptr<GpuMemoryBufferImplSharedMemory> Create(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback);
-
- static gfx::GpuMemoryBufferHandle AllocateForChildProcess(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- base::ProcessHandle child_process);
-
- static scoped_ptr<GpuMemoryBufferImplSharedMemory> CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- const DestructionCallback& callback);
-
- static bool IsUsageSupported(gfx::BufferUsage usage);
- static bool IsConfigurationSupported(gfx::BufferFormat format,
- gfx::BufferUsage usage);
- static bool IsSizeValidForFormat(const gfx::Size& size,
- gfx::BufferFormat format);
-
- static base::Closure AllocateForTesting(const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle);
-
- // Overridden from gfx::GpuMemoryBuffer:
- bool Map() override;
- void* memory(size_t plane) override;
- void Unmap() override;
- int stride(size_t plane) const override;
- gfx::GpuMemoryBufferHandle GetHandle() const override;
-
- private:
- GpuMemoryBufferImplSharedMemory(gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback,
- scoped_ptr<base::SharedMemory> shared_memory,
- size_t offset,
- int stride);
-
- scoped_ptr<base::SharedMemory> shared_memory_;
- size_t offset_;
- int stride_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferImplSharedMemory);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_SHARED_MEMORY_H_
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory_unittest.cc b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory_unittest.cc
deleted file mode 100644
index 251e16c4a3a..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_shared_memory_unittest.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_memory_buffer_impl_shared_memory.h"
-#include "content/test/gpu_memory_buffer_impl_test_template.h"
-
-namespace content {
-namespace {
-
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplSharedMemory,
- GpuMemoryBufferImplTest,
- GpuMemoryBufferImplSharedMemory);
-
-void BufferDestroyed(bool* destroyed, const gpu::SyncToken& sync_token) {
- *destroyed = true;
-}
-
-TEST(GpuMemoryBufferImplSharedMemoryTest, Create) {
- const gfx::GpuMemoryBufferId kBufferId(1);
-
- gfx::Size buffer_size(8, 8);
-
- for (auto format : gfx::GetBufferFormatsForTesting()) {
- bool destroyed = false;
- scoped_ptr<GpuMemoryBufferImplSharedMemory> buffer(
- GpuMemoryBufferImplSharedMemory::Create(
- kBufferId, buffer_size, format,
- base::Bind(&BufferDestroyed, base::Unretained(&destroyed))));
- ASSERT_TRUE(buffer);
- EXPECT_EQ(buffer->GetFormat(), format);
-
- // Check if destruction callback is executed when deleting the buffer.
- buffer.reset();
- ASSERT_TRUE(destroyed);
- }
-}
-
-} // namespace
-} // namespace content
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture.cc b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture.cc
deleted file mode 100644
index 64de624c155..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture.cc
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_memory_buffer_impl_surface_texture.h"
-
-#include "base/logging.h"
-#include "base/trace_event/trace_event.h"
-#include "content/common/android/surface_texture_manager.h"
-#include "content/common/gpu/gpu_memory_buffer_factory_surface_texture.h"
-#include "ui/gfx/buffer_format_util.h"
-#include "ui/gl/android/surface_texture.h"
-#include "ui/gl/gl_bindings.h"
-
-namespace content {
-namespace {
-
-int WindowFormat(gfx::BufferFormat format) {
- switch (format) {
- case gfx::BufferFormat::RGBA_8888:
- return WINDOW_FORMAT_RGBA_8888;
- case gfx::BufferFormat::ATC:
- case gfx::BufferFormat::ATCIA:
- case gfx::BufferFormat::DXT1:
- case gfx::BufferFormat::DXT5:
- case gfx::BufferFormat::ETC1:
- case gfx::BufferFormat::R_8:
- case gfx::BufferFormat::RGBA_4444:
- case gfx::BufferFormat::RGBX_8888:
- case gfx::BufferFormat::BGRX_8888:
- case gfx::BufferFormat::BGRA_8888:
- case gfx::BufferFormat::YUV_420:
- case gfx::BufferFormat::YUV_420_BIPLANAR:
- case gfx::BufferFormat::UYVY_422:
- NOTREACHED();
- return 0;
- }
-
- NOTREACHED();
- return 0;
-}
-
-void FreeSurfaceTextureForTesting(
- scoped_refptr<gfx::SurfaceTexture> surface_texture,
- gfx::GpuMemoryBufferId id) {
- SurfaceTextureManager::GetInstance()->UnregisterSurfaceTexture(id.id, 0);
-}
-
-} // namespace
-
-GpuMemoryBufferImplSurfaceTexture::GpuMemoryBufferImplSurfaceTexture(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback,
- ANativeWindow* native_window)
- : GpuMemoryBufferImpl(id, size, format, callback),
- native_window_(native_window) {}
-
-GpuMemoryBufferImplSurfaceTexture::~GpuMemoryBufferImplSurfaceTexture() {
- ANativeWindow_release(native_window_);
-}
-
-// static
-scoped_ptr<GpuMemoryBufferImplSurfaceTexture>
-GpuMemoryBufferImplSurfaceTexture::CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- const DestructionCallback& callback) {
- ANativeWindow* native_window =
- SurfaceTextureManager::GetInstance()
- ->AcquireNativeWidgetForSurfaceTexture(handle.id.id);
- if (!native_window)
- return nullptr;
-
- ANativeWindow_setBuffersGeometry(
- native_window, size.width(), size.height(), WindowFormat(format));
-
- return make_scoped_ptr(new GpuMemoryBufferImplSurfaceTexture(
- handle.id, size, format, callback, native_window));
-}
-
-// static
-bool GpuMemoryBufferImplSurfaceTexture::IsConfigurationSupported(
- gfx::BufferFormat format,
- gfx::BufferUsage usage) {
- return GpuMemoryBufferFactorySurfaceTexture::
- IsGpuMemoryBufferConfigurationSupported(format, usage);
-}
-
-// static
-base::Closure GpuMemoryBufferImplSurfaceTexture::AllocateForTesting(
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle) {
- scoped_refptr<gfx::SurfaceTexture> surface_texture =
- gfx::SurfaceTexture::Create(0);
- DCHECK(surface_texture);
- const gfx::GpuMemoryBufferId kBufferId(1);
- SurfaceTextureManager::GetInstance()->RegisterSurfaceTexture(
- kBufferId.id, 0, surface_texture.get());
- handle->type = gfx::SURFACE_TEXTURE_BUFFER;
- handle->id = kBufferId;
- return base::Bind(&FreeSurfaceTextureForTesting, surface_texture, kBufferId);
-}
-
-bool GpuMemoryBufferImplSurfaceTexture::Map() {
- TRACE_EVENT0("gpu", "GpuMemoryBufferImplSurfaceTexture::Map");
- DCHECK(!mapped_);
- DCHECK(native_window_);
- if (ANativeWindow_lock(native_window_, &buffer_, NULL)) {
- DPLOG(ERROR) << "ANativeWindow_lock failed";
- return false;
- }
- DCHECK_LE(size_.width(), buffer_.stride);
- mapped_ = true;
- return true;
-}
-
-void* GpuMemoryBufferImplSurfaceTexture::memory(size_t plane) {
- TRACE_EVENT0("gpu", "GpuMemoryBufferImplSurfaceTexture::memory");
- DCHECK(mapped_);
- DCHECK_EQ(0u, plane);
- return buffer_.bits;
-}
-
-void GpuMemoryBufferImplSurfaceTexture::Unmap() {
- TRACE_EVENT0("gpu", "GpuMemoryBufferImplSurfaceTexture::Unmap");
- DCHECK(mapped_);
- ANativeWindow_unlockAndPost(native_window_);
- mapped_ = false;
-}
-
-int GpuMemoryBufferImplSurfaceTexture::stride(size_t plane) const {
- DCHECK(mapped_);
- DCHECK_EQ(0u, plane);
- return gfx::RowSizeForBufferFormat(buffer_.stride, format_, 0);
-}
-
-gfx::GpuMemoryBufferHandle
-GpuMemoryBufferImplSurfaceTexture::GetHandle() const {
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::SURFACE_TEXTURE_BUFFER;
- handle.id = id_;
- return handle;
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture.h b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture.h
deleted file mode 100644
index 1613c63134b..00000000000
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_SURFACE_TEXTURE_H_
-#define CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_SURFACE_TEXTURE_H_
-
-#include <android/native_window.h>
-#include <stddef.h>
-
-#include "base/macros.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/client/gpu_memory_buffer_impl.h"
-
-namespace content {
-
-// Implementation of GPU memory buffer based on SurfaceTextures.
-class CONTENT_EXPORT GpuMemoryBufferImplSurfaceTexture
- : public GpuMemoryBufferImpl {
- public:
- ~GpuMemoryBufferImplSurfaceTexture() override;
-
- static scoped_ptr<GpuMemoryBufferImplSurfaceTexture> CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- const DestructionCallback& callback);
-
- static bool IsConfigurationSupported(gfx::BufferFormat format,
- gfx::BufferUsage usage);
-
- static base::Closure AllocateForTesting(const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle);
-
- // Overridden from gfx::GpuMemoryBuffer:
- bool Map() override;
- void* memory(size_t plane) override;
- void Unmap() override;
- int stride(size_t plane) const override;
- gfx::GpuMemoryBufferHandle GetHandle() const override;
-
- private:
- GpuMemoryBufferImplSurfaceTexture(gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- const DestructionCallback& callback,
- ANativeWindow* native_window);
-
- ANativeWindow* native_window_;
- ANativeWindow_Buffer buffer_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferImplSurfaceTexture);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GPU_MEMORY_BUFFER_IMPL_SURFACE_TEXTURE_H_
diff --git a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture_unittest.cc b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture_unittest.cc
index 1cd513df3ac..a3edc70bd8b 100644
--- a/chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture_unittest.cc
+++ b/chromium/content/common/gpu/client/gpu_memory_buffer_impl_surface_texture_unittest.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "content/common/gpu/client/gpu_memory_buffer_impl_surface_texture.h"
-#include "content/test/gpu_memory_buffer_impl_test_template.h"
+#include "gpu/ipc/client/gpu_memory_buffer_impl_surface_texture.h"
+#include "gpu/ipc/client/gpu_memory_buffer_impl_test_template.h"
-namespace content {
+namespace gpu {
namespace {
INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplSurfaceTexture,
@@ -13,4 +13,4 @@ INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplSurfaceTexture,
GpuMemoryBufferImplSurfaceTexture);
} // namespace
-} // namespace content
+} // namespace gpu
diff --git a/chromium/content/common/gpu/client/gpu_video_decode_accelerator_host.cc b/chromium/content/common/gpu/client/gpu_video_decode_accelerator_host.cc
deleted file mode 100644
index 19a336b9ca3..00000000000
--- a/chromium/content/common/gpu/client/gpu_video_decode_accelerator_host.cc
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
-#include "build/build_config.h"
-#include "content/common/gpu/client/gpu_channel_host.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/view_messages.h"
-#include "ipc/ipc_message_macros.h"
-#include "ipc/ipc_message_utils.h"
-
-#if defined(OS_WIN)
-#include "content/public/common/sandbox_init.h"
-#endif // OS_WIN
-
-using media::VideoDecodeAccelerator;
-namespace content {
-
-GpuVideoDecodeAcceleratorHost::GpuVideoDecodeAcceleratorHost(
- GpuChannelHost* channel,
- CommandBufferProxyImpl* impl)
- : channel_(channel),
- decoder_route_id_(MSG_ROUTING_NONE),
- client_(NULL),
- impl_(impl),
- weak_this_factory_(this) {
- DCHECK(channel_);
- DCHECK(impl_);
- impl_->AddDeletionObserver(this);
-}
-
-GpuVideoDecodeAcceleratorHost::~GpuVideoDecodeAcceleratorHost() {
- DCHECK(CalledOnValidThread());
-
- if (channel_ && decoder_route_id_ != MSG_ROUTING_NONE)
- channel_->RemoveRoute(decoder_route_id_);
- if (impl_)
- impl_->RemoveDeletionObserver(this);
-}
-
-bool GpuVideoDecodeAcceleratorHost::OnMessageReceived(const IPC::Message& msg) {
- DCHECK(CalledOnValidThread());
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAcceleratorHost, msg)
- IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_CdmAttached,
- OnCdmAttached)
- IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed,
- OnBitstreamBufferProcessed)
- IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers,
- OnProvidePictureBuffer)
- IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_PictureReady,
- OnPictureReady)
- IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_FlushDone,
- OnFlushDone)
- IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_ResetDone,
- OnResetDone)
- IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_ErrorNotification,
- OnNotifyError)
- IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer,
- OnDismissPictureBuffer)
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
- DCHECK(handled);
- // See OnNotifyError for why |this| mustn't be used after OnNotifyError might
- // have been called above.
- return handled;
-}
-
-void GpuVideoDecodeAcceleratorHost::OnChannelError() {
- DCHECK(CalledOnValidThread());
- if (channel_) {
- if (decoder_route_id_ != MSG_ROUTING_NONE)
- channel_->RemoveRoute(decoder_route_id_);
- channel_ = NULL;
- }
- DLOG(ERROR) << "OnChannelError()";
- PostNotifyError(PLATFORM_FAILURE);
-}
-
-bool GpuVideoDecodeAcceleratorHost::Initialize(const Config& config,
- Client* client) {
- DCHECK(CalledOnValidThread());
- client_ = client;
-
- if (!impl_)
- return false;
-
- int32_t route_id = channel_->GenerateRouteID();
- channel_->AddRoute(route_id, weak_this_factory_.GetWeakPtr());
-
- bool succeeded = false;
- Send(new GpuCommandBufferMsg_CreateVideoDecoder(impl_->route_id(), config,
- route_id, &succeeded));
-
- if (!succeeded) {
- DLOG(ERROR) << "Send(GpuCommandBufferMsg_CreateVideoDecoder()) failed";
- PostNotifyError(PLATFORM_FAILURE);
- channel_->RemoveRoute(route_id);
- return false;
- }
- decoder_route_id_ = route_id;
- return true;
-}
-
-void GpuVideoDecodeAcceleratorHost::SetCdm(int cdm_id) {
- DCHECK(CalledOnValidThread());
- if (!channel_)
- return;
- Send(new AcceleratedVideoDecoderMsg_SetCdm(decoder_route_id_, cdm_id));
-}
-
-void GpuVideoDecodeAcceleratorHost::Decode(
- const media::BitstreamBuffer& bitstream_buffer) {
- DCHECK(CalledOnValidThread());
- if (!channel_)
- return;
-
- base::SharedMemoryHandle handle = channel_->ShareToGpuProcess(
- bitstream_buffer.handle());
- if (!base::SharedMemory::IsHandleValid(handle)) {
-    NOTREACHED() << "Failed to duplicate buffer handle";
- return;
- }
-
- AcceleratedVideoDecoderMsg_Decode_Params params;
- params.bitstream_buffer_id = bitstream_buffer.id();
- params.buffer_handle = handle;
- params.size = bitstream_buffer.size();
- params.presentation_timestamp = bitstream_buffer.presentation_timestamp();
- params.key_id = bitstream_buffer.key_id();
- params.iv = bitstream_buffer.iv();
- params.subsamples = bitstream_buffer.subsamples();
-
- Send(new AcceleratedVideoDecoderMsg_Decode(decoder_route_id_, params));
-}
-
-void GpuVideoDecodeAcceleratorHost::AssignPictureBuffers(
- const std::vector<media::PictureBuffer>& buffers) {
- DCHECK(CalledOnValidThread());
- if (!channel_)
- return;
- // Rearrange data for IPC command.
- std::vector<int32_t> buffer_ids;
- std::vector<uint32_t> texture_ids;
- for (uint32_t i = 0; i < buffers.size(); i++) {
- const media::PictureBuffer& buffer = buffers[i];
- if (buffer.size() != picture_buffer_dimensions_) {
- DLOG(ERROR) << "buffer.size() invalid: expected "
- << picture_buffer_dimensions_.ToString()
- << ", got " << buffer.size().ToString();
- PostNotifyError(INVALID_ARGUMENT);
- return;
- }
- texture_ids.push_back(buffer.texture_id());
- buffer_ids.push_back(buffer.id());
- }
- Send(new AcceleratedVideoDecoderMsg_AssignPictureBuffers(
- decoder_route_id_, buffer_ids, texture_ids));
-}
-
-void GpuVideoDecodeAcceleratorHost::ReusePictureBuffer(
- int32_t picture_buffer_id) {
- DCHECK(CalledOnValidThread());
- if (!channel_)
- return;
- Send(new AcceleratedVideoDecoderMsg_ReusePictureBuffer(
- decoder_route_id_, picture_buffer_id));
-}
-
-void GpuVideoDecodeAcceleratorHost::Flush() {
- DCHECK(CalledOnValidThread());
- if (!channel_)
- return;
- Send(new AcceleratedVideoDecoderMsg_Flush(decoder_route_id_));
-}
-
-void GpuVideoDecodeAcceleratorHost::Reset() {
- DCHECK(CalledOnValidThread());
- if (!channel_)
- return;
- Send(new AcceleratedVideoDecoderMsg_Reset(decoder_route_id_));
-}
-
-void GpuVideoDecodeAcceleratorHost::Destroy() {
- DCHECK(CalledOnValidThread());
- if (channel_)
- Send(new AcceleratedVideoDecoderMsg_Destroy(decoder_route_id_));
- client_ = NULL;
- delete this;
-}
-
-void GpuVideoDecodeAcceleratorHost::OnWillDeleteImpl() {
- DCHECK(CalledOnValidThread());
- impl_ = NULL;
-
- // The CommandBufferProxyImpl is going away; error out this VDA.
- OnChannelError();
-}
-
-void GpuVideoDecodeAcceleratorHost::PostNotifyError(Error error) {
- DCHECK(CalledOnValidThread());
- DVLOG(2) << "PostNotifyError(): error=" << error;
- base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::Bind(&GpuVideoDecodeAcceleratorHost::OnNotifyError,
- weak_this_factory_.GetWeakPtr(), error));
-}
-
-void GpuVideoDecodeAcceleratorHost::Send(IPC::Message* message) {
- DCHECK(CalledOnValidThread());
- uint32_t message_type = message->type();
- if (!channel_->Send(message)) {
- DLOG(ERROR) << "Send(" << message_type << ") failed";
- PostNotifyError(PLATFORM_FAILURE);
- }
-}
-
-void GpuVideoDecodeAcceleratorHost::OnCdmAttached(bool success) {
- DCHECK(CalledOnValidThread());
- if (client_)
- client_->NotifyCdmAttached(success);
-}
-
-void GpuVideoDecodeAcceleratorHost::OnBitstreamBufferProcessed(
- int32_t bitstream_buffer_id) {
- DCHECK(CalledOnValidThread());
- if (client_)
- client_->NotifyEndOfBitstreamBuffer(bitstream_buffer_id);
-}
-
-void GpuVideoDecodeAcceleratorHost::OnProvidePictureBuffer(
- uint32_t num_requested_buffers,
- const gfx::Size& dimensions,
- uint32_t texture_target) {
- DCHECK(CalledOnValidThread());
- picture_buffer_dimensions_ = dimensions;
- if (client_) {
- client_->ProvidePictureBuffers(
- num_requested_buffers, dimensions, texture_target);
- }
-}
-
-void GpuVideoDecodeAcceleratorHost::OnDismissPictureBuffer(
- int32_t picture_buffer_id) {
- DCHECK(CalledOnValidThread());
- if (client_)
- client_->DismissPictureBuffer(picture_buffer_id);
-}
-
-void GpuVideoDecodeAcceleratorHost::OnPictureReady(
- int32_t picture_buffer_id,
- int32_t bitstream_buffer_id,
- const gfx::Rect& visible_rect,
- bool allow_overlay) {
- DCHECK(CalledOnValidThread());
- if (!client_)
- return;
- media::Picture picture(picture_buffer_id, bitstream_buffer_id, visible_rect,
- allow_overlay);
- client_->PictureReady(picture);
-}
-
-void GpuVideoDecodeAcceleratorHost::OnFlushDone() {
- DCHECK(CalledOnValidThread());
- if (client_)
- client_->NotifyFlushDone();
-}
-
-void GpuVideoDecodeAcceleratorHost::OnResetDone() {
- DCHECK(CalledOnValidThread());
- if (client_)
- client_->NotifyResetDone();
-}
-
-void GpuVideoDecodeAcceleratorHost::OnNotifyError(uint32_t error) {
- DCHECK(CalledOnValidThread());
- if (!client_)
- return;
- weak_this_factory_.InvalidateWeakPtrs();
-
- // Client::NotifyError() may Destroy() |this|, so calling it needs to be the
- // last thing done on this stack!
- media::VideoDecodeAccelerator::Client* client = NULL;
- std::swap(client, client_);
- client->NotifyError(static_cast<media::VideoDecodeAccelerator::Error>(error));
-}
-
-} // namespace content
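A pattern worth noting in the deleted decoder host above: OnNotifyError() swaps the client pointer out of the member before invoking it, and makes the callback the last statement on the stack, because Client::NotifyError() may call Destroy(), which deletes |this|. A generic, self-contained illustration of that re-entrancy guard (the types here are stand-ins, not Chromium classes):

    #include <utility>

    struct Client {
      virtual ~Client() = default;
      virtual void NotifyError(int error) = 0;  // May delete the host.
    };

    struct Host {
      Client* client_ = nullptr;

      void OnNotifyError(int error) {
        if (!client_)
          return;
        Client* client = nullptr;
        std::swap(client, client_);  // The host drops its reference first.
        client->NotifyError(error);  // Last statement: |this| may be gone now.
      }
    };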
diff --git a/chromium/content/common/gpu/client/gpu_video_decode_accelerator_host.h b/chromium/content/common/gpu/client/gpu_video_decode_accelerator_host.h
deleted file mode 100644
index 5e4ba9df1f6..00000000000
--- a/chromium/content/common/gpu/client/gpu_video_decode_accelerator_host.h
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GPU_VIDEO_DECODE_ACCELERATOR_HOST_H_
-#define CONTENT_COMMON_GPU_CLIENT_GPU_VIDEO_DECODE_ACCELERATOR_HOST_H_
-
-#include <stdint.h>
-
-#include <vector>
-
-#include "base/macros.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "content/common/gpu/client/command_buffer_proxy_impl.h"
-#include "ipc/ipc_listener.h"
-#include "media/video/video_decode_accelerator.h"
-#include "ui/gfx/geometry/size.h"
-
-namespace content {
-class GpuChannelHost;
-
-// This class is used to talk to VideoDecodeAccelerator in the Gpu process
-// through IPC messages.
-class GpuVideoDecodeAcceleratorHost
- : public IPC::Listener,
- public media::VideoDecodeAccelerator,
- public CommandBufferProxyImpl::DeletionObserver,
- public base::NonThreadSafe {
- public:
- // |this| is guaranteed not to outlive |channel| and |impl|. (See comments
- // for |channel_| and |impl_|.)
- GpuVideoDecodeAcceleratorHost(GpuChannelHost* channel,
- CommandBufferProxyImpl* impl);
-
- // IPC::Listener implementation.
- void OnChannelError() override;
- bool OnMessageReceived(const IPC::Message& message) override;
-
- // media::VideoDecodeAccelerator implementation.
- bool Initialize(const Config& config, Client* client) override;
- void SetCdm(int cdm_id) override;
- void Decode(const media::BitstreamBuffer& bitstream_buffer) override;
- void AssignPictureBuffers(
- const std::vector<media::PictureBuffer>& buffers) override;
- void ReusePictureBuffer(int32_t picture_buffer_id) override;
- void Flush() override;
- void Reset() override;
- void Destroy() override;
-
-  // CommandBufferProxyImpl::DeletionObserver implementation.
- void OnWillDeleteImpl() override;
-
- private:
- // Only Destroy() should be deleting |this|.
- ~GpuVideoDecodeAcceleratorHost() override;
-
- // Notify |client_| of an error. Posts a task to avoid re-entrancy.
- void PostNotifyError(Error);
-
- void Send(IPC::Message* message);
-
- // IPC handlers, proxying media::VideoDecodeAccelerator::Client for the GPU
- // process. Should not be called directly.
- void OnCdmAttached(bool success);
- void OnBitstreamBufferProcessed(int32_t bitstream_buffer_id);
- void OnProvidePictureBuffer(uint32_t num_requested_buffers,
- const gfx::Size& dimensions,
- uint32_t texture_target);
- void OnDismissPictureBuffer(int32_t picture_buffer_id);
- void OnPictureReady(int32_t picture_buffer_id,
- int32_t bitstream_buffer_id,
- const gfx::Rect& visible_rect,
- bool allow_overlay);
- void OnFlushDone();
- void OnResetDone();
- void OnNotifyError(uint32_t error);
-
- // Unowned reference to the GpuChannelHost to send IPC messages to the GPU
- // process. |channel_| outlives |impl_|, so the reference is always valid as
- // long as it is not NULL.
- GpuChannelHost* channel_;
-
- // Route ID for the associated decoder in the GPU process.
- int32_t decoder_route_id_;
-
- // The client that will receive callbacks from the decoder.
- Client* client_;
-
- // Unowned reference to the CommandBufferProxyImpl that created us. |this|
-  // registers as a DeletionObserver of |impl_|, so the reference is always
- // valid as long as it is not NULL.
- CommandBufferProxyImpl* impl_;
-
- // Requested dimensions of the buffer, from ProvidePictureBuffers().
- gfx::Size picture_buffer_dimensions_;
-
- // WeakPtr factory for posting tasks back to itself.
- base::WeakPtrFactory<GpuVideoDecodeAcceleratorHost> weak_this_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuVideoDecodeAcceleratorHost);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GPU_VIDEO_DECODE_ACCELERATOR_HOST_H_
diff --git a/chromium/content/common/gpu/client/gpu_video_encode_accelerator_host.cc b/chromium/content/common/gpu/client/gpu_video_encode_accelerator_host.cc
deleted file mode 100644
index 9002490e1dd..00000000000
--- a/chromium/content/common/gpu/client/gpu_video_encode_accelerator_host.cc
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
-
-#include "base/location.h"
-#include "base/logging.h"
-#include "content/common/gpu/client/gpu_channel_host.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/gpu/media/gpu_video_accelerator_util.h"
-#include "media/base/video_frame.h"
-#include "media/video/video_encode_accelerator.h"
-#include "ui/gfx/gpu_memory_buffer.h"
-
-namespace content {
-
-GpuVideoEncodeAcceleratorHost::GpuVideoEncodeAcceleratorHost(
- GpuChannelHost* channel,
- CommandBufferProxyImpl* impl)
- : channel_(channel),
- encoder_route_id_(MSG_ROUTING_NONE),
- client_(NULL),
- impl_(impl),
- next_frame_id_(0),
- weak_this_factory_(this) {
- DCHECK(channel_);
- DCHECK(impl_);
- impl_->AddDeletionObserver(this);
-}
-
-GpuVideoEncodeAcceleratorHost::~GpuVideoEncodeAcceleratorHost() {
- DCHECK(CalledOnValidThread());
- if (channel_ && encoder_route_id_ != MSG_ROUTING_NONE)
- channel_->RemoveRoute(encoder_route_id_);
- if (impl_)
- impl_->RemoveDeletionObserver(this);
-}
-
-bool GpuVideoEncodeAcceleratorHost::OnMessageReceived(
- const IPC::Message& message) {
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(GpuVideoEncodeAcceleratorHost, message)
- IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderHostMsg_RequireBitstreamBuffers,
- OnRequireBitstreamBuffers)
- IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderHostMsg_NotifyInputDone,
- OnNotifyInputDone)
- IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderHostMsg_BitstreamBufferReady,
- OnBitstreamBufferReady)
- IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderHostMsg_NotifyError,
- OnNotifyError)
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
- DCHECK(handled);
- // See OnNotifyError for why |this| mustn't be used after OnNotifyError might
- // have been called above.
- return handled;
-}
-
-void GpuVideoEncodeAcceleratorHost::OnChannelError() {
- DCHECK(CalledOnValidThread());
- if (channel_) {
- if (encoder_route_id_ != MSG_ROUTING_NONE)
- channel_->RemoveRoute(encoder_route_id_);
- channel_ = NULL;
- }
- PostNotifyError(FROM_HERE, kPlatformFailureError, "OnChannelError()");
-}
-
-media::VideoEncodeAccelerator::SupportedProfiles
-GpuVideoEncodeAcceleratorHost::GetSupportedProfiles() {
- DCHECK(CalledOnValidThread());
- if (!channel_)
- return media::VideoEncodeAccelerator::SupportedProfiles();
- return GpuVideoAcceleratorUtil::ConvertGpuToMediaEncodeProfiles(
- channel_->gpu_info().video_encode_accelerator_supported_profiles);
-}
-
-bool GpuVideoEncodeAcceleratorHost::Initialize(
- media::VideoPixelFormat input_format,
- const gfx::Size& input_visible_size,
- media::VideoCodecProfile output_profile,
- uint32_t initial_bitrate,
- Client* client) {
- DCHECK(CalledOnValidThread());
- client_ = client;
- if (!impl_) {
- DLOG(ERROR) << "impl_ destroyed";
- return false;
- }
-
- int32_t route_id = channel_->GenerateRouteID();
- channel_->AddRoute(route_id, weak_this_factory_.GetWeakPtr());
-
- bool succeeded = false;
- Send(new GpuCommandBufferMsg_CreateVideoEncoder(
- impl_->route_id(), input_format, input_visible_size, output_profile,
- initial_bitrate, route_id, &succeeded));
- if (!succeeded) {
- DLOG(ERROR) << "Send(GpuCommandBufferMsg_CreateVideoEncoder()) failed";
- channel_->RemoveRoute(route_id);
- return false;
- }
- encoder_route_id_ = route_id;
- return true;
-}
-
-void GpuVideoEncodeAcceleratorHost::Encode(
- const scoped_refptr<media::VideoFrame>& frame,
- bool force_keyframe) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(media::PIXEL_FORMAT_I420, frame->format());
- if (!channel_)
- return;
-
- switch (frame->storage_type()) {
- case media::VideoFrame::STORAGE_SHMEM:
- EncodeSharedMemoryFrame(frame, force_keyframe);
- break;
- case media::VideoFrame::STORAGE_GPU_MEMORY_BUFFERS:
- EncodeGpuMemoryBufferFrame(frame, force_keyframe);
- break;
- default:
- PostNotifyError(FROM_HERE, kPlatformFailureError,
- "Encode(): cannot encode frame with invalid handles");
- return;
- }
-
- frame_map_[next_frame_id_] = frame;
-
- // Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
- next_frame_id_ = (next_frame_id_ + 1) & 0x3FFFFFFF;
-}
-
-void GpuVideoEncodeAcceleratorHost::UseOutputBitstreamBuffer(
- const media::BitstreamBuffer& buffer) {
- DCHECK(CalledOnValidThread());
- if (!channel_)
- return;
-
- base::SharedMemoryHandle handle =
- channel_->ShareToGpuProcess(buffer.handle());
- if (!base::SharedMemory::IsHandleValid(handle)) {
- PostNotifyError(
- FROM_HERE, kPlatformFailureError,
- base::StringPrintf("UseOutputBitstreamBuffer(): failed to duplicate "
- "buffer handle for GPU process: buffer.id()=%d",
- buffer.id()));
- return;
- }
- Send(new AcceleratedVideoEncoderMsg_UseOutputBitstreamBuffer(
- encoder_route_id_, buffer.id(), handle, buffer.size()));
-}
-
-void GpuVideoEncodeAcceleratorHost::RequestEncodingParametersChange(
- uint32_t bitrate,
- uint32_t framerate) {
- DCHECK(CalledOnValidThread());
- if (!channel_)
- return;
-
- Send(new AcceleratedVideoEncoderMsg_RequestEncodingParametersChange(
- encoder_route_id_, bitrate, framerate));
-}
-
-void GpuVideoEncodeAcceleratorHost::Destroy() {
- DCHECK(CalledOnValidThread());
- if (channel_)
- Send(new AcceleratedVideoEncoderMsg_Destroy(encoder_route_id_));
- client_ = NULL;
- delete this;
-}
-
-void GpuVideoEncodeAcceleratorHost::OnWillDeleteImpl() {
- DCHECK(CalledOnValidThread());
- impl_ = NULL;
-
- // The CommandBufferProxyImpl is going away; error out this VEA.
- OnChannelError();
-}
-
-void GpuVideoEncodeAcceleratorHost::EncodeGpuMemoryBufferFrame(
- const scoped_refptr<media::VideoFrame>& frame,
- bool force_keyframe){
- DCHECK_EQ(media::VideoFrame::NumPlanes(media::PIXEL_FORMAT_I420),
- frame->gpu_memory_buffer_handles().size());
- AcceleratedVideoEncoderMsg_Encode_Params2 params;
- params.frame_id = next_frame_id_;
- params.timestamp = frame->timestamp();
- bool requires_sync_point = false;
- for (const auto& handle : frame->gpu_memory_buffer_handles()) {
- gfx::GpuMemoryBufferHandle new_handle =
- channel_->ShareGpuMemoryBufferToGpuProcess(handle,
- &requires_sync_point);
- if (new_handle.is_null()) {
- PostNotifyError(FROM_HERE, kPlatformFailureError,
- "EncodeGpuMemoryBufferFrame(): failed to share gpu "
- "memory buffer handle for gpu process");
- return;
- }
- params.gpu_memory_buffer_handles.push_back(new_handle);
- }
- params.size = frame->coded_size();
- params.force_keyframe = force_keyframe;
-
- Send(new AcceleratedVideoEncoderMsg_Encode2(encoder_route_id_, params));
-}
-
-void GpuVideoEncodeAcceleratorHost::EncodeSharedMemoryFrame(
- const scoped_refptr<media::VideoFrame>& frame,
- bool force_keyframe){
- if (!base::SharedMemory::IsHandleValid(frame->shared_memory_handle())) {
- PostNotifyError(FROM_HERE, kPlatformFailureError,
- "EncodeSharedMemory(): cannot encode frame with invalid "
- "shared memory handle");
- return;
- }
-
- AcceleratedVideoEncoderMsg_Encode_Params params;
- params.frame_id = next_frame_id_;
- params.timestamp = frame->timestamp();
- params.buffer_handle =
- channel_->ShareToGpuProcess(frame->shared_memory_handle());
- if (!base::SharedMemory::IsHandleValid(params.buffer_handle)) {
- PostNotifyError(FROM_HERE, kPlatformFailureError,
- "Encode(): failed to duplicate shared memory buffer handle "
- "for GPU process");
- return;
- }
- params.buffer_offset =
- base::checked_cast<uint32_t>(frame->shared_memory_offset());
- params.buffer_size =
- media::VideoFrame::AllocationSize(frame->format(), frame->coded_size());
- params.force_keyframe = force_keyframe;
-
- Send(new AcceleratedVideoEncoderMsg_Encode(encoder_route_id_, params));
-}
-
-void GpuVideoEncodeAcceleratorHost::PostNotifyError(
- const tracked_objects::Location& location, Error error,
- const std::string& message) {
- DCHECK(CalledOnValidThread());
- DLOG(ERROR) << "Error from " << location.function_name()
- << "(" << location.file_name() << ":"
- << location.line_number() << ") "
- << message << " (error = " << error << ")";
- // Post the error notification back to this thread, to avoid re-entrancy.
- base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::Bind(&GpuVideoEncodeAcceleratorHost::OnNotifyError,
- weak_this_factory_.GetWeakPtr(), error));
-}
-
-void GpuVideoEncodeAcceleratorHost::Send(IPC::Message* message) {
- DCHECK(CalledOnValidThread());
- uint32_t message_type = message->type();
- if (!channel_->Send(message)) {
- PostNotifyError(FROM_HERE, kPlatformFailureError,
- base::StringPrintf("Send(%d) failed", message_type));
- }
-}
-
-void GpuVideoEncodeAcceleratorHost::OnRequireBitstreamBuffers(
- uint32_t input_count,
- const gfx::Size& input_coded_size,
- uint32_t output_buffer_size) {
- DCHECK(CalledOnValidThread());
- DVLOG(2) << "OnRequireBitstreamBuffers(): input_count=" << input_count
- << ", input_coded_size=" << input_coded_size.ToString()
- << ", output_buffer_size=" << output_buffer_size;
- if (client_) {
- client_->RequireBitstreamBuffers(
- input_count, input_coded_size, output_buffer_size);
- }
-}
-
-void GpuVideoEncodeAcceleratorHost::OnNotifyInputDone(int32_t frame_id) {
- DCHECK(CalledOnValidThread());
- DVLOG(3) << "OnNotifyInputDone(): frame_id=" << frame_id;
- // Fun-fact: std::hash_map is not spec'd to be re-entrant; since freeing a
- // frame can trigger a further encode to be kicked off and thus an .insert()
- // back into the map, we separate the frame's dtor running from the .erase()
- // running by holding on to the frame temporarily. This isn't "just
- // theoretical" - Android's std::hash_map crashes if we don't do this.
- scoped_refptr<media::VideoFrame> frame = frame_map_[frame_id];
- if (!frame_map_.erase(frame_id)) {
- DLOG(ERROR) << "OnNotifyInputDone(): "
- "invalid frame_id=" << frame_id;
- // See OnNotifyError for why this needs to be the last thing in this
- // function.
- OnNotifyError(kPlatformFailureError);
- return;
- }
- frame = NULL; // Not necessary but nice to be explicit; see fun-fact above.
-}
-
-void GpuVideoEncodeAcceleratorHost::OnBitstreamBufferReady(
- int32_t bitstream_buffer_id,
- uint32_t payload_size,
- bool key_frame) {
- DCHECK(CalledOnValidThread());
- DVLOG(3) << "OnBitstreamBufferReady(): "
- "bitstream_buffer_id=" << bitstream_buffer_id
- << ", payload_size=" << payload_size
- << ", key_frame=" << key_frame;
- if (client_)
- client_->BitstreamBufferReady(bitstream_buffer_id, payload_size, key_frame);
-}
-
-void GpuVideoEncodeAcceleratorHost::OnNotifyError(Error error) {
- DCHECK(CalledOnValidThread());
- DVLOG(2) << "OnNotifyError(): error=" << error;
- if (!client_)
- return;
- weak_this_factory_.InvalidateWeakPtrs();
-
- // Client::NotifyError() may Destroy() |this|, so calling it needs to be the
- // last thing done on this stack!
- media::VideoEncodeAccelerator::Client* client = NULL;
- std::swap(client_, client);
- client->NotifyError(error);
-}
-
-} // namespace content
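The frame-id bookkeeping in Encode() above masks the counter to 30 bits before reuse, since overflowing a signed int32_t is undefined behavior in C++; the increment therefore wraps explicitly. A tiny standalone version of that arithmetic (the function name is illustrative):

    #include <stdint.h>

    // Next frame id, wrapping at 2^30 so the signed counter never overflows.
    int32_t NextFrameId(int32_t current_id) {
      return (current_id + 1) & 0x3FFFFFFF;
    }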
diff --git a/chromium/content/common/gpu/client/gpu_video_encode_accelerator_host.h b/chromium/content/common/gpu/client/gpu_video_encode_accelerator_host.h
deleted file mode 100644
index 38995938259..00000000000
--- a/chromium/content/common/gpu/client/gpu_video_encode_accelerator_host.h
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GPU_VIDEO_ENCODE_ACCELERATOR_HOST_H_
-#define CONTENT_COMMON_GPU_CLIENT_GPU_VIDEO_ENCODE_ACCELERATOR_HOST_H_
-
-#include <stdint.h>
-
-#include <vector>
-
-#include "base/containers/hash_tables.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "content/common/gpu/client/command_buffer_proxy_impl.h"
-#include "gpu/config/gpu_info.h"
-#include "ipc/ipc_listener.h"
-#include "media/video/video_encode_accelerator.h"
-
-namespace gfx {
-struct GpuMemoryBufferHandle;
-class Size;
-} // namespace gfx
-
-namespace media {
-class VideoFrame;
-} // namespace media
-
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
-namespace content {
-class GpuChannelHost;
-
-// This class is the renderer-side host for the VideoEncodeAccelerator in the
-// GPU process, coordinated over IPC.
-class GpuVideoEncodeAcceleratorHost
- : public IPC::Listener,
- public media::VideoEncodeAccelerator,
- public CommandBufferProxyImpl::DeletionObserver,
- public base::NonThreadSafe {
- public:
- // |this| is guaranteed not to outlive |channel| and |impl|. (See comments
- // for |channel_| and |impl_|.)
- GpuVideoEncodeAcceleratorHost(GpuChannelHost* channel,
- CommandBufferProxyImpl* impl);
-
- // IPC::Listener implementation.
- bool OnMessageReceived(const IPC::Message& message) override;
- void OnChannelError() override;
-
- // media::VideoEncodeAccelerator implementation.
- SupportedProfiles GetSupportedProfiles() override;
- bool Initialize(media::VideoPixelFormat input_format,
- const gfx::Size& input_visible_size,
- media::VideoCodecProfile output_profile,
- uint32_t initial_bitrate,
- Client* client) override;
- void Encode(const scoped_refptr<media::VideoFrame>& frame,
- bool force_keyframe) override;
- void UseOutputBitstreamBuffer(const media::BitstreamBuffer& buffer) override;
- void RequestEncodingParametersChange(uint32_t bitrate,
- uint32_t framerate_num) override;
- void Destroy() override;
-
- // CommandBufferProxyImpl::DeletionObserver implementation.
- void OnWillDeleteImpl() override;
-
- private:
- // Only Destroy() should be deleting |this|.
- ~GpuVideoEncodeAcceleratorHost() override;
-
- // Encode specific video frame types.
- void EncodeGpuMemoryBufferFrame(const scoped_refptr<media::VideoFrame>& frame,
- bool force_keyframe);
- void EncodeSharedMemoryFrame(const scoped_refptr<media::VideoFrame>& frame,
- bool force_keyframe);
-
- // Notify |client_| of an error. Posts a task to avoid re-entrancy.
- void PostNotifyError(const tracked_objects::Location& location,
- Error error, const std::string& message);
-
- void Send(IPC::Message* message);
-
- // IPC handlers, proxying media::VideoEncodeAccelerator::Client for the GPU
- // process. Should not be called directly.
- void OnRequireBitstreamBuffers(uint32_t input_count,
- const gfx::Size& input_coded_size,
- uint32_t output_buffer_size);
- void OnNotifyInputDone(int32_t frame_id);
- void OnBitstreamBufferReady(int32_t bitstream_buffer_id,
- uint32_t payload_size,
- bool key_frame);
- void OnNotifyError(Error error);
-
- // Unowned reference to the GpuChannelHost to send IPC messages to the GPU
- // process. |channel_| outlives |impl_|, so the reference is always valid as
- // long as it is not NULL.
- GpuChannelHost* channel_;
-
- // Route ID for the associated encoder in the GPU process.
- int32_t encoder_route_id_;
-
- // The client that will receive callbacks from the encoder.
- Client* client_;
-
- // Unowned reference to the CommandBufferProxyImpl that created us. |this|
- // registers as a DeletionObserver of |impl_|, so the reference is always
- // valid as long as it is not NULL.
- CommandBufferProxyImpl* impl_;
-
- // media::VideoFrames sent to the encoder.
- // base::IDMap not used here, since that takes pointers, not scoped_refptr.
- typedef base::hash_map<int32_t, scoped_refptr<media::VideoFrame>> FrameMap;
- FrameMap frame_map_;
-
- // ID serial number for the next frame to send to the GPU process.
- int32_t next_frame_id_;
-
- // WeakPtr factory for posting tasks back to itself.
- base::WeakPtrFactory<GpuVideoEncodeAcceleratorHost> weak_this_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuVideoEncodeAcceleratorHost);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GPU_VIDEO_ENCODE_ACCELERATOR_HOST_H_
diff --git a/chromium/content/common/gpu/client/grcontext_for_gles2_interface.cc b/chromium/content/common/gpu/client/grcontext_for_gles2_interface.cc
new file mode 100644
index 00000000000..cebdfd7745c
--- /dev/null
+++ b/chromium/content/common/gpu/client/grcontext_for_gles2_interface.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/client/grcontext_for_gles2_interface.h"
+
+#include <stddef.h>
+#include <string.h>
+#include <utility>
+
+#include "base/lazy_instance.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu/skia_bindings/gl_bindings_skia_cmd_buffer.h"
+#include "third_party/skia/include/gpu/GrContext.h"
+#include "third_party/skia/include/gpu/gl/GrGLInterface.h"
+
+namespace content {
+
+GrContextForGLES2Interface::GrContextForGLES2Interface(
+ gpu::gles2::GLES2Interface* gl) {
+ sk_sp<GrGLInterface> interface(
+ skia_bindings::CreateGLES2InterfaceBindings(gl));
+ gr_context_ =
+ sk_sp<GrContext>(GrContext::Create(kOpenGL_GrBackend,
+ // GrContext takes ownership of |interface|.
+ reinterpret_cast<GrBackendContext>(interface.get())));
+ if (gr_context_) {
+ // The limit of the number of GPU resources we hold in the GrContext's
+ // GPU cache.
+ static const int kMaxGaneshResourceCacheCount = 2048;
+ // The limit of the bytes allocated toward GPU resources in the GrContext's
+ // GPU cache.
+ static const size_t kMaxGaneshResourceCacheBytes = 96 * 1024 * 1024;
+
+ gr_context_->setResourceCacheLimits(kMaxGaneshResourceCacheCount,
+ kMaxGaneshResourceCacheBytes);
+ }
+}
+
+GrContextForGLES2Interface::~GrContextForGLES2Interface() {
+ // At this point the GLES2Interface is going to be destroyed, so have
+ // the GrContext clean up and not try to use it anymore.
+ if (gr_context_)
+ gr_context_->releaseResourcesAndAbandonContext();
+}
+
+void GrContextForGLES2Interface::OnLostContext() {
+ if (gr_context_)
+ gr_context_->abandonContext();
+}
+
+void GrContextForGLES2Interface::FreeGpuResources() {
+ if (gr_context_) {
+ TRACE_EVENT_INSTANT0("gpu", "GrContext::freeGpuResources",
+ TRACE_EVENT_SCOPE_THREAD);
+ gr_context_->freeGpuResources();
+ }
+}
+
+} // namespace content
diff --git a/chromium/content/common/gpu/client/grcontext_for_gles2_interface.h b/chromium/content/common/gpu/client/grcontext_for_gles2_interface.h
new file mode 100644
index 00000000000..354092a9592
--- /dev/null
+++ b/chromium/content/common/gpu/client/grcontext_for_gles2_interface.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_CLIENT_GRCONTEXT_FOR_GLES2_INTERFACE_H_
+#define CONTENT_COMMON_GPU_CLIENT_GRCONTEXT_FOR_GLES2_INTERFACE_H_
+
+#include "base/macros.h"
+#include "third_party/skia/include/core/SkRefCnt.h"
+
+class GrContext;
+
+namespace gpu {
+namespace gles2 {
+class GLES2Interface;
+}
+}
+
+namespace content {
+
+// This class binds an offscreen GrContext to an offscreen context3d. The
+// context3d is used by the GrContext so must be valid as long as this class
+// is alive.
+class GrContextForGLES2Interface {
+ public:
+ explicit GrContextForGLES2Interface(gpu::gles2::GLES2Interface* gl);
+ virtual ~GrContextForGLES2Interface();
+
+ GrContext* get() { return gr_context_.get(); }
+
+ void OnLostContext();
+ void FreeGpuResources();
+
+ private:
+ sk_sp<class GrContext> gr_context_;
+
+ DISALLOW_COPY_AND_ASSIGN(GrContextForGLES2Interface);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_CLIENT_GRCONTEXT_FOR_GLES2_INTERFACE_H_
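The GrContextForGLES2Interface added above replaces the WebGraphicsContext3D-based wrapper deleted below: the Skia GrContext is now created directly from a gpu::gles2::GLES2Interface, and the owner is expected to keep that interface alive and call OnLostContext() or FreeGpuResources() at the appropriate times. A brief usage sketch against the interface in the new header (variable and function names are illustrative; |gl| stands for any live GLES2Interface):

    #include "content/common/gpu/client/grcontext_for_gles2_interface.h"
    #include "gpu/command_buffer/client/gles2_interface.h"

    void PaintWithSkia(gpu::gles2::GLES2Interface* gl) {
      content::GrContextForGLES2Interface gr_wrapper(gl);
      GrContext* gr = gr_wrapper.get();  // May be null if creation failed.
      if (!gr)
        return;
      // ... issue Skia GPU work through |gr| while |gl| remains current ...
      gr_wrapper.FreeGpuResources();  // Drop cached GPU resources when idle.
    }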
diff --git a/chromium/content/common/gpu/client/grcontext_for_webgraphicscontext3d.cc b/chromium/content/common/gpu/client/grcontext_for_webgraphicscontext3d.cc
deleted file mode 100644
index 5cd4944db68..00000000000
--- a/chromium/content/common/gpu/client/grcontext_for_webgraphicscontext3d.cc
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/client/grcontext_for_webgraphicscontext3d.h"
-
-#include <stddef.h>
-#include <string.h>
-#include <utility>
-
-#include "base/lazy_instance.h"
-#include "base/macros.h"
-#include "base/trace_event/trace_event.h"
-#include "gpu/blink/webgraphicscontext3d_impl.h"
-#include "gpu/command_buffer/client/gles2_lib.h"
-#include "gpu/skia_bindings/gl_bindings_skia_cmd_buffer.h"
-#include "third_party/skia/include/gpu/GrContext.h"
-
-using gpu_blink::WebGraphicsContext3DImpl;
-
-namespace content {
-
-namespace {
-
-// Singleton used to initialize and terminate the gles2 library.
-class GLES2Initializer {
- public:
- GLES2Initializer() { gles2::Initialize(); }
-
- ~GLES2Initializer() { gles2::Terminate(); }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(GLES2Initializer);
-};
-
-base::LazyInstance<GLES2Initializer> g_gles2_initializer =
- LAZY_INSTANCE_INITIALIZER;
-
-void BindWebGraphicsContext3DGLContextCallback(const GrGLInterface* interface) {
- gles2::SetGLContext(static_cast<const GrGLInterfaceForWebGraphicsContext3D*>(
- interface)->WebContext3D()->GetGLInterface());
-}
-
-} // namespace anonymous
-
-GrContextForWebGraphicsContext3D::GrContextForWebGraphicsContext3D(
- skia::RefPtr<GrGLInterfaceForWebGraphicsContext3D> gr_interface) {
- if (!gr_interface || !gr_interface->WebContext3D())
- return;
-
- // Ensure the gles2 library is initialized first in a thread safe way.
- g_gles2_initializer.Get();
- gles2::SetGLContext(gr_interface->WebContext3D()->GetGLInterface());
-
- skia_bindings::InitCommandBufferSkiaGLBinding(gr_interface.get());
-
- gr_interface->fCallback = BindWebGraphicsContext3DGLContextCallback;
-
- gr_context_ = skia::AdoptRef(GrContext::Create(
- kOpenGL_GrBackend,
- reinterpret_cast<GrBackendContext>(gr_interface.get())));
- if (gr_context_) {
- // The limit of the number of GPU resources we hold in the GrContext's
- // GPU cache.
- static const int kMaxGaneshResourceCacheCount = 2048;
- // The limit of the bytes allocated toward GPU resources in the GrContext's
- // GPU cache.
- static const size_t kMaxGaneshResourceCacheBytes = 96 * 1024 * 1024;
-
- gr_context_->setResourceCacheLimits(kMaxGaneshResourceCacheCount,
- kMaxGaneshResourceCacheBytes);
- }
-}
-
-GrContextForWebGraphicsContext3D::~GrContextForWebGraphicsContext3D() {
-}
-
-void GrContextForWebGraphicsContext3D::OnLostContext() {
- if (gr_context_)
- gr_context_->abandonContext();
-}
-
-void GrContextForWebGraphicsContext3D::FreeGpuResources() {
- if (gr_context_) {
- TRACE_EVENT_INSTANT0("gpu", "GrContext::freeGpuResources", \
- TRACE_EVENT_SCOPE_THREAD);
- gr_context_->freeGpuResources();
- }
-}
-
-GrGLInterfaceForWebGraphicsContext3D::GrGLInterfaceForWebGraphicsContext3D(
- scoped_ptr<gpu_blink::WebGraphicsContext3DImpl> context3d)
- : context3d_(std::move(context3d)) {}
-
-void GrGLInterfaceForWebGraphicsContext3D::BindToCurrentThread() {
- context_thread_checker_.DetachFromThread();
-}
-
-GrGLInterfaceForWebGraphicsContext3D::~GrGLInterfaceForWebGraphicsContext3D() {
- DCHECK(context_thread_checker_.CalledOnValidThread());
-#if !defined(NDEBUG)
- // Set all the function pointers to zero, in order to crash if function
- // pointers are used after free.
- memset(&fFunctions, 0, sizeof(GrGLInterface::Functions));
-#endif
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/client/grcontext_for_webgraphicscontext3d.h b/chromium/content/common/gpu/client/grcontext_for_webgraphicscontext3d.h
deleted file mode 100644
index 7597f906a58..00000000000
--- a/chromium/content/common/gpu/client/grcontext_for_webgraphicscontext3d.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_CLIENT_GRCONTEXT_FOR_WEBGRAPHICSCONTEXT3D_H_
-#define CONTENT_COMMON_GPU_CLIENT_GRCONTEXT_FOR_WEBGRAPHICSCONTEXT3D_H_
-
-#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/thread_checker.h"
-#include "skia/ext/refptr.h"
-#include "third_party/skia/include/gpu/gl/GrGLInterface.h"
-
-class GrContext;
-
-namespace gpu_blink {
-class WebGraphicsContext3DImpl;
-}
-
-namespace content {
-
-// Wrap WebGraphicsContext3DImpl into a GrGLInterface object, which allows
-// the WebGraphicsContext3DImpl to be jointly refcounted (indirectly)
-// by the GrContext and the context provider. This makes it legal for the
-// GrContext to be invoked when it outlives the context provider that created
-// it. By doing this we no longer have to worry about use after free errors
-// caused by a lack of consideration for object destruction order.
-class GrGLInterfaceForWebGraphicsContext3D final : public GrGLInterface {
- public:
- GrGLInterfaceForWebGraphicsContext3D(
- scoped_ptr<gpu_blink::WebGraphicsContext3DImpl> context3d);
- ~GrGLInterfaceForWebGraphicsContext3D() final;
-
- void BindToCurrentThread();
-
- gpu_blink::WebGraphicsContext3DImpl* WebContext3D() const {
- return context3d_.get();
- }
- private:
- base::ThreadChecker context_thread_checker_;
- scoped_ptr<gpu_blink::WebGraphicsContext3DImpl> context3d_;
-};
-
-// This class binds an offscreen GrContext to an offscreen context3d. The
-// context3d is used by the GrContext so must be valid as long as this class
-// is alive.
-class GrContextForWebGraphicsContext3D {
- public:
- explicit GrContextForWebGraphicsContext3D(
- skia::RefPtr<GrGLInterfaceForWebGraphicsContext3D> context3d);
- virtual ~GrContextForWebGraphicsContext3D();
-
- GrContext* get() { return gr_context_.get(); }
-
- void OnLostContext();
- void FreeGpuResources();
-
- private:
- skia::RefPtr<class GrContext> gr_context_;
-
- DISALLOW_COPY_AND_ASSIGN(GrContextForWebGraphicsContext3D);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_CLIENT_GRCONTEXT_FOR_WEBGRAPHICSCONTEXT3D_H_
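
The header comment above describes joint ownership: the GL interface is kept alive (indirectly) by both the Skia context and the context provider, so destruction order stops mattering. A hedged sketch of the same idea using plain std::shared_ptr; GLInterfaceLike, SkiaContextLike, and ProviderLike are illustrative names, not the real Chromium types.

// Sketch only: whichever owner dies last tears down the shared GL interface.
#include <memory>
#include <utility>

struct GLInterfaceLike {};

class SkiaContextLike {
 public:
  explicit SkiaContextLike(std::shared_ptr<GLInterfaceLike> gl)
      : gl_(std::move(gl)) {}

 private:
  std::shared_ptr<GLInterfaceLike> gl_;  // shared with the provider
};

class ProviderLike {
 public:
  ProviderLike()
      : gl_(std::make_shared<GLInterfaceLike>()),
        skia_context_(std::make_shared<SkiaContextLike>(gl_)) {}

  // A caller holding this pointer keeps both the Skia context and the GL
  // interface alive even after the provider itself is destroyed.
  std::shared_ptr<SkiaContextLike> TakeSkiaContext() { return skia_context_; }

 private:
  std::shared_ptr<GLInterfaceLike> gl_;
  std::shared_ptr<SkiaContextLike> skia_context_;
};
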
diff --git a/chromium/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.cc b/chromium/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.cc
index f7783397057..708a3da8ee0 100644
--- a/chromium/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.cc
+++ b/chromium/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.cc
@@ -23,22 +23,19 @@
#include "base/metrics/histogram.h"
#include "base/profiler/scoped_tracker.h"
#include "base/trace_event/trace_event.h"
-#include "content/common/gpu/client/gpu_channel_host.h"
-#include "content/public/common/content_constants.h"
-#include "content/public/common/content_switches.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "gpu/command_buffer/client/gles2_trace_implementation.h"
+#include "gpu/command_buffer/client/gpu_switches.h"
#include "gpu/command_buffer/client/transfer_buffer.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/ipc/client/gpu_channel_host.h"
#include "gpu/skia_bindings/gl_bindings_skia_cmd_buffer.h"
#include "third_party/skia/include/core/SkTypes.h"
-using blink::WGC3Denum;
-
namespace content {
namespace {
@@ -46,14 +43,15 @@ namespace {
static base::LazyInstance<base::Lock>::Leaky
g_default_share_groups_lock = LAZY_INSTANCE_INITIALIZER;
-typedef std::map<GpuChannelHost*,
- scoped_refptr<WebGraphicsContext3DCommandBufferImpl::ShareGroup> >
+typedef std::map<
+ gpu::GpuChannelHost*,
+ scoped_refptr<WebGraphicsContext3DCommandBufferImpl::ShareGroup>>
ShareGroupMap;
static base::LazyInstance<ShareGroupMap> g_default_share_groups =
LAZY_INSTANCE_INITIALIZER;
scoped_refptr<WebGraphicsContext3DCommandBufferImpl::ShareGroup>
- GetDefaultShareGroupForHost(GpuChannelHost* host) {
+GetDefaultShareGroupForHost(gpu::GpuChannelHost* host) {
base::AutoLock lock(g_default_share_groups_lock.Get());
ShareGroupMap& share_groups = g_default_share_groups.Get();
@@ -84,34 +82,38 @@ WebGraphicsContext3DCommandBufferImpl::ShareGroup::~ShareGroup() {
}
WebGraphicsContext3DCommandBufferImpl::WebGraphicsContext3DCommandBufferImpl(
- int surface_id,
+ gpu::SurfaceHandle surface_handle,
const GURL& active_url,
- GpuChannelHost* host,
- const Attributes& attributes,
- bool lose_context_when_out_of_memory,
+ gpu::GpuChannelHost* host,
+ const gpu::gles2::ContextCreationAttribHelper& attributes,
+ gfx::GpuPreference gpu_preference,
+ bool share_resources,
+ bool automatic_flushes,
const SharedMemoryLimits& limits,
WebGraphicsContext3DCommandBufferImpl* share_context)
- : lose_context_when_out_of_memory_(lose_context_when_out_of_memory),
+ : automatic_flushes_(automatic_flushes),
attributes_(attributes),
- visible_(false),
host_(host),
- surface_id_(surface_id),
+ surface_handle_(surface_handle),
active_url_(active_url),
- context_type_(CONTEXT_TYPE_UNKNOWN),
- gpu_preference_(attributes.preferDiscreteGPU ? gfx::PreferDiscreteGpu
- : gfx::PreferIntegratedGpu),
+ gpu_preference_(gpu_preference),
mem_limits_(limits),
weak_ptr_factory_(this) {
- if (attributes_.webGL)
- context_type_ = OFFSCREEN_CONTEXT_FOR_WEBGL;
+ switch (attributes.context_type) {
+ case gpu::gles2::CONTEXT_TYPE_OPENGLES2:
+ case gpu::gles2::CONTEXT_TYPE_OPENGLES3:
+      context_type_ = CONTEXT_TYPE_UNKNOWN;
+      break;
+    case gpu::gles2::CONTEXT_TYPE_WEBGL1:
+    case gpu::gles2::CONTEXT_TYPE_WEBGL2:
+      context_type_ = OFFSCREEN_CONTEXT_FOR_WEBGL;
+      break;
+  }
if (share_context) {
- DCHECK(!attributes_.shareResources);
+ DCHECK(!share_resources);
share_group_ = share_context->share_group_;
+ } else if (share_resources) {
+ share_group_ = GetDefaultShareGroupForHost(host);
} else {
- share_group_ = attributes_.shareResources
- ? GetDefaultShareGroupForHost(host)
- : scoped_refptr<WebGraphicsContext3DCommandBufferImpl::ShareGroup>(
- new ShareGroup());
+ share_group_ = make_scoped_refptr(new ShareGroup);
}
}
@@ -138,7 +140,7 @@ bool WebGraphicsContext3DCommandBufferImpl::MaybeInitializeGL() {
FROM_HERE_WITH_EXPLICIT_FUNCTION(
"125248 WebGraphicsContext3DCommandBufferImpl::MaybeInitializeGL"));
- if (!CreateContext(surface_id_ != 0)) {
+ if (!CreateContext()) {
Destroy();
initialize_failed_ = true;
@@ -157,44 +159,31 @@ bool WebGraphicsContext3DCommandBufferImpl::MaybeInitializeGL() {
real_gl_->TraceBeginCHROMIUM("WebGraphicsContext3D",
"CommandBufferContext");
- visible_ = true;
initialized_ = true;
return true;
}
bool WebGraphicsContext3DCommandBufferImpl::InitializeCommandBuffer(
- bool onscreen, WebGraphicsContext3DCommandBufferImpl* share_context) {
+ WebGraphicsContext3DCommandBufferImpl* share_context) {
if (!host_.get())
return false;
- CommandBufferProxyImpl* share_group_command_buffer = NULL;
+ gpu::CommandBufferProxyImpl* share_group_command_buffer = NULL;
if (share_context) {
share_group_command_buffer = share_context->GetCommandBufferProxy();
}
- ::gpu::gles2::ContextCreationAttribHelper attribs_for_gles2;
- ConvertAttributes(attributes_, &attribs_for_gles2);
- attribs_for_gles2.lose_context_when_out_of_memory =
- lose_context_when_out_of_memory_;
- DCHECK(attribs_for_gles2.buffer_preserved);
- std::vector<int32_t> attribs;
- attribs_for_gles2.Serialize(&attribs);
+ DCHECK(attributes_.buffer_preserved);
+ std::vector<int32_t> serialized_attributes;
+ attributes_.Serialize(&serialized_attributes);
// Create a proxy to a command buffer in the GPU process.
- if (onscreen) {
- command_buffer_ =
- host_->CreateViewCommandBuffer(surface_id_, share_group_command_buffer,
- GpuChannelHost::kDefaultStreamId,
- GpuChannelHost::kDefaultStreamPriority,
- attribs, active_url_, gpu_preference_);
- } else {
- command_buffer_ = host_->CreateOffscreenCommandBuffer(
- gfx::Size(1, 1), share_group_command_buffer,
- GpuChannelHost::kDefaultStreamId,
- GpuChannelHost::kDefaultStreamPriority, attribs, active_url_,
- gpu_preference_);
- }
+ command_buffer_ = host_->CreateCommandBuffer(
+ surface_handle_, gfx::Size(), share_group_command_buffer,
+ gpu::GpuChannelHost::kDefaultStreamId,
+ gpu::GpuChannelHost::kDefaultStreamPriority, serialized_attributes,
+ active_url_, gpu_preference_);
if (!command_buffer_) {
DLOG(ERROR) << "GpuChannelHost failed to create command buffer.";
@@ -213,7 +202,7 @@ bool WebGraphicsContext3DCommandBufferImpl::InitializeCommandBuffer(
return result;
}
-bool WebGraphicsContext3DCommandBufferImpl::CreateContext(bool onscreen) {
+bool WebGraphicsContext3DCommandBufferImpl::CreateContext() {
TRACE_EVENT0("gpu", "WebGfxCtx3DCmdBfrImpl::CreateContext");
scoped_refptr<gpu::gles2::ShareGroup> gles2_share_group;
@@ -225,7 +214,7 @@ bool WebGraphicsContext3DCommandBufferImpl::CreateContext(bool onscreen) {
share_group_lock.reset(new base::AutoLock(share_group_->lock()));
share_context = share_group_->GetAnyContextLocked();
- if (!InitializeCommandBuffer(onscreen, share_context)) {
+ if (!InitializeCommandBuffer(share_context)) {
LOG(ERROR) << "Failed to initialize command buffer.";
return false;
}
@@ -243,7 +232,7 @@ bool WebGraphicsContext3DCommandBufferImpl::CreateContext(bool onscreen) {
return false;
}
- if (attributes_.noAutomaticFlushes)
+ if (!automatic_flushes_)
gles2_helper_->SetAutomaticFlushes(false);
// Create a transfer buffer used to copy resources between the renderer
// process and the GPU process.
@@ -251,15 +240,17 @@ bool WebGraphicsContext3DCommandBufferImpl::CreateContext(bool onscreen) {
DCHECK(host_.get());
- // Create the object exposing the OpenGL API.
- const bool bind_generates_resources = false;
+ const bool bind_generates_resource = attributes_.bind_generates_resource;
+ const bool lose_context_when_out_of_memory =
+ attributes_.lose_context_when_out_of_memory;
const bool support_client_side_arrays = false;
+ // Create the object exposing the OpenGL API.
real_gl_.reset(new gpu::gles2::GLES2Implementation(
gles2_helper_.get(), gles2_share_group.get(), transfer_buffer_.get(),
- bind_generates_resources, lose_context_when_out_of_memory_,
+ bind_generates_resource, lose_context_when_out_of_memory,
support_client_side_arrays, command_buffer_.get()));
- setGLInterface(real_gl_.get());
+ SetGLInterface(real_gl_.get());
if (!real_gl_->Initialize(
mem_limits_.start_transfer_buffer_size,
@@ -276,7 +267,7 @@ bool WebGraphicsContext3DCommandBufferImpl::CreateContext(bool onscreen) {
if (base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableGpuClientTracing)) {
trace_gl_.reset(new gpu::gles2::GLES2TraceImplementation(GetGLInterface()));
- setGLInterface(trace_gl_.get());
+ SetGLInterface(trace_gl_.get());
}
return true;
}
@@ -306,7 +297,7 @@ void WebGraphicsContext3DCommandBufferImpl::Destroy() {
// issued on this context might not be visible to other contexts in the
// share group.
gl->Flush();
- setGLInterface(NULL);
+ SetGLInterface(nullptr);
}
trace_gl_.reset();
@@ -316,7 +307,7 @@ void WebGraphicsContext3DCommandBufferImpl::Destroy() {
real_gl_.reset();
command_buffer_.reset();
- host_ = NULL;
+ host_ = nullptr;
}
gpu::ContextSupport*
@@ -336,9 +327,11 @@ bool WebGraphicsContext3DCommandBufferImpl::IsCommandBufferContextLost() {
// static
WebGraphicsContext3DCommandBufferImpl*
WebGraphicsContext3DCommandBufferImpl::CreateOffscreenContext(
- GpuChannelHost* host,
- const WebGraphicsContext3D::Attributes& attributes,
- bool lose_context_when_out_of_memory,
+ gpu::GpuChannelHost* host,
+ const gpu::gles2::ContextCreationAttribHelper& attributes,
+ gfx::GpuPreference gpu_preference,
+ bool share_resources,
+ bool automatic_flushes,
const GURL& active_url,
const SharedMemoryLimits& limits,
WebGraphicsContext3DCommandBufferImpl* share_context) {
@@ -349,13 +342,8 @@ WebGraphicsContext3DCommandBufferImpl::CreateOffscreenContext(
return NULL;
return new WebGraphicsContext3DCommandBufferImpl(
- 0,
- active_url,
- host,
- attributes,
- lose_context_when_out_of_memory,
- limits,
- share_context);
+ gpu::kNullSurfaceHandle, active_url, host, attributes, gpu_preference,
+ share_resources, automatic_flushes, limits, share_context);
}
void WebGraphicsContext3DCommandBufferImpl::OnContextLost() {
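
The hunks above route contexts created with share_resources through GetDefaultShareGroupForHost(), which lazily creates one default share group per gpu::GpuChannelHost under a lock. An illustrative sketch of that lookup with plain std types; HostLike and ShareGroupLike are hypothetical stand-ins for the real classes, and base::LazyInstance is replaced by function-local statics.

// Sketch only: one lazily created default share group per channel host.
#include <map>
#include <memory>
#include <mutex>

struct HostLike {};
struct ShareGroupLike {};

std::shared_ptr<ShareGroupLike> DefaultShareGroupForHost(HostLike* host) {
  static std::mutex mutex;
  static std::map<HostLike*, std::shared_ptr<ShareGroupLike>> groups;
  std::lock_guard<std::mutex> lock(mutex);
  auto it = groups.find(host);
  if (it != groups.end())
    return it->second;  // every share_resources context on this host reuses it
  auto group = std::make_shared<ShareGroupLike>();
  groups.emplace(host, group);
  return group;
}
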
diff --git a/chromium/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h b/chromium/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h
index b556c1bcdf5..1bbd7202eb6 100644
--- a/chromium/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h
+++ b/chromium/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h
@@ -18,17 +18,19 @@
#include "base/synchronization/lock.h"
#include "content/common/content_export.h"
#include "content/common/gpu/client/command_buffer_metrics.h"
-#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "gpu/blink/webgraphicscontext3d_impl.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/ipc/client/command_buffer_proxy_impl.h"
+#include "gpu/ipc/common/surface_handle.h"
#include "third_party/WebKit/public/platform/WebGraphicsContext3D.h"
#include "third_party/WebKit/public/platform/WebString.h"
-#include "ui/gfx/native_widget_types.h"
#include "ui/gl/gpu_preference.h"
#include "url/gurl.h"
namespace gpu {
class ContextSupport;
+class GpuChannelHost;
class TransferBuffer;
namespace gles2 {
@@ -39,7 +41,6 @@ class GLES2Interface;
}
namespace content {
-class GpuChannelHost;
const size_t kDefaultCommandBufferSize = 1024 * 1024;
const size_t kDefaultStartTransferBufferSize = 1 * 1024 * 1024;
@@ -108,17 +109,19 @@ class WebGraphicsContext3DCommandBufferImpl
};
WebGraphicsContext3DCommandBufferImpl(
- int surface_id,
+ gpu::SurfaceHandle surface_handle,
const GURL& active_url,
- GpuChannelHost* host,
- const Attributes& attributes,
- bool lose_context_when_out_of_memory,
+ gpu::GpuChannelHost* host,
+ const gpu::gles2::ContextCreationAttribHelper& attributes,
+ gfx::GpuPreference gpu_preference,
+ bool share_resources,
+ bool automatic_flushes,
const SharedMemoryLimits& limits,
WebGraphicsContext3DCommandBufferImpl* share_context);
~WebGraphicsContext3DCommandBufferImpl() override;
- CommandBufferProxyImpl* GetCommandBufferProxy() {
+ gpu::CommandBufferProxyImpl* GetCommandBufferProxy() {
return command_buffer_.get();
}
@@ -135,13 +138,15 @@ class WebGraphicsContext3DCommandBufferImpl
// Create & initialize a WebGraphicsContext3DCommandBufferImpl. Return NULL
// on any failure.
static CONTENT_EXPORT WebGraphicsContext3DCommandBufferImpl*
- CreateOffscreenContext(
- GpuChannelHost* host,
- const WebGraphicsContext3D::Attributes& attributes,
- bool lose_context_when_out_of_memory,
- const GURL& active_url,
- const SharedMemoryLimits& limits,
- WebGraphicsContext3DCommandBufferImpl* share_context);
+ CreateOffscreenContext(
+ gpu::GpuChannelHost* host,
+ const gpu::gles2::ContextCreationAttribHelper& attributes,
+ gfx::GpuPreference gpu_preference,
+ bool share_resources,
+ bool automatic_flushes,
+ const GURL& active_url,
+ const SharedMemoryLimits& limits,
+ WebGraphicsContext3DCommandBufferImpl* share_context);
size_t GetMappedMemoryLimit() {
return mem_limits_.mapped_memory_reclaim_limit;
@@ -166,7 +171,7 @@ class WebGraphicsContext3DCommandBufferImpl
// thread).
bool MaybeInitializeGL();
- bool InitializeCommandBuffer(bool onscreen,
+ bool InitializeCommandBuffer(
WebGraphicsContext3DCommandBufferImpl* share_context);
void Destroy();
@@ -178,30 +183,28 @@ class WebGraphicsContext3DCommandBufferImpl
//
// NOTE: on Mac OS X, this entry point is only used to set up the
// accelerated compositor's output. On this platform, we actually pass
- // a gfx::PluginWindowHandle in place of the gfx::NativeViewId,
+ // a gpu::SurfaceHandle in place of the gfx::NativeViewId,
// because the facility to allocate a fake PluginWindowHandle is
// already in place. We could add more entry points and messages to
// allocate both fake PluginWindowHandles and NativeViewIds and map
// from fake NativeViewIds to PluginWindowHandles, but this seems like
// unnecessary complexity at the moment.
- bool CreateContext(bool onscreen);
+ bool CreateContext();
virtual void OnContextLost();
- bool lose_context_when_out_of_memory_;
- blink::WebGraphicsContext3D::Attributes attributes_;
-
- bool visible_;
+ bool automatic_flushes_;
+ gpu::gles2::ContextCreationAttribHelper attributes_;
// State needed by MaybeInitializeGL.
- scoped_refptr<GpuChannelHost> host_;
- int32_t surface_id_;
+ scoped_refptr<gpu::GpuChannelHost> host_;
+ gpu::SurfaceHandle surface_handle_;
GURL active_url_;
CommandBufferContextType context_type_;
gfx::GpuPreference gpu_preference_;
- scoped_ptr<CommandBufferProxyImpl> command_buffer_;
+ scoped_ptr<gpu::CommandBufferProxyImpl> command_buffer_;
scoped_ptr<gpu::gles2::GLES2CmdHelper> gles2_helper_;
scoped_ptr<gpu::TransferBuffer> transfer_buffer_;
scoped_ptr<gpu::gles2::GLES2Implementation> real_gl_;
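
The member list above implies the initialization layering used by this class: a command-buffer proxy first, then the GLES2 command helper and transfer buffer on top of it, and the GLES2 implementation last (optionally wrapped by a tracing layer). A rough sketch of that ordering with hypothetical stand-in types, not the real gpu:: classes.

// Sketch only: build bottom-up, destroy top-down via declaration order.
#include <memory>

struct CommandBufferLike {};
struct CmdHelperLike {
  explicit CmdHelperLike(CommandBufferLike*) {}
};
struct TransferBufferLike {
  explicit TransferBufferLike(CmdHelperLike*) {}
};
struct GLES2Like {
  GLES2Like(CmdHelperLike*, TransferBufferLike*) {}
};

class ContextLayers {
 public:
  void Initialize() {
    command_buffer_ = std::make_unique<CommandBufferLike>();
    helper_ = std::make_unique<CmdHelperLike>(command_buffer_.get());
    transfer_buffer_ = std::make_unique<TransferBufferLike>(helper_.get());
    gl_ = std::make_unique<GLES2Like>(helper_.get(), transfer_buffer_.get());
  }

 private:
  // Members are destroyed in reverse declaration order, so the GLES2 layer
  // goes away before the helper and command buffer it points at.
  std::unique_ptr<CommandBufferLike> command_buffer_;
  std::unique_ptr<CmdHelperLike> helper_;
  std::unique_ptr<TransferBufferLike> transfer_buffer_;
  std::unique_ptr<GLES2Like> gl_;
};
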
diff --git a/chromium/content/common/gpu/gpu_channel.cc b/chromium/content/common/gpu/gpu_channel.cc
deleted file mode 100644
index e3f920d96b0..00000000000
--- a/chromium/content/common/gpu/gpu_channel.cc
+++ /dev/null
@@ -1,1086 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_channel.h"
-
-#include <utility>
-
-#if defined(OS_WIN)
-#include <windows.h>
-#endif
-
-#include <algorithm>
-#include <deque>
-#include <set>
-#include <vector>
-
-#include "base/atomicops.h"
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/location.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/single_thread_task_runner.h"
-#include "base/stl_util.h"
-#include "base/strings/string_util.h"
-#include "base/synchronization/lock.h"
-#include "base/thread_task_runner_handle.h"
-#include "base/timer/timer.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "base/trace_event/process_memory_dump.h"
-#include "base/trace_event/trace_event.h"
-#include "build/build_config.h"
-#include "content/common/gpu/gpu_channel_manager.h"
-#include "content/common/gpu/gpu_memory_buffer_factory.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
-#include "content/public/common/content_switches.h"
-#include "gpu/command_buffer/common/mailbox.h"
-#include "gpu/command_buffer/common/value_state.h"
-#include "gpu/command_buffer/service/gpu_scheduler.h"
-#include "gpu/command_buffer/service/image_factory.h"
-#include "gpu/command_buffer/service/mailbox_manager.h"
-#include "gpu/command_buffer/service/sync_point_manager.h"
-#include "gpu/command_buffer/service/valuebuffer_manager.h"
-#include "ipc/ipc_channel.h"
-#include "ipc/message_filter.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_image_shared_memory.h"
-#include "ui/gl/gl_surface.h"
-
-#if defined(OS_POSIX)
-#include "ipc/ipc_channel_posix.h"
-#endif
-
-namespace content {
-namespace {
-
-// Number of milliseconds between successive vsyncs. Many GL commands block
-// on vsync, so thresholds for preemption should be multiples of this.
-const int64_t kVsyncIntervalMs = 17;
-
-// Amount of time that we will wait for an IPC to be processed before
-// preempting. After a preemption, we must wait this long before triggering
-// another preemption.
-const int64_t kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;
-
-// Once we trigger a preemption, the maximum duration that we will wait
-// before clearing the preemption.
-const int64_t kMaxPreemptTimeMs = kVsyncIntervalMs;
-
-// Stop the preemption once the time for the longest pending IPC drops
-// below this threshold.
-const int64_t kStopPreemptThresholdMs = kVsyncIntervalMs;
-
-} // anonymous namespace
-
-scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create(
- const base::WeakPtr<GpuChannel>& gpu_channel,
- base::SingleThreadTaskRunner* task_runner,
- gpu::SyncPointManager* sync_point_manager) {
- return new GpuChannelMessageQueue(gpu_channel, task_runner,
- sync_point_manager);
-}
-
-scoped_refptr<gpu::SyncPointOrderData>
-GpuChannelMessageQueue::GetSyncPointOrderData() {
- return sync_point_order_data_;
-}
-
-GpuChannelMessageQueue::GpuChannelMessageQueue(
- const base::WeakPtr<GpuChannel>& gpu_channel,
- base::SingleThreadTaskRunner* task_runner,
- gpu::SyncPointManager* sync_point_manager)
- : enabled_(true),
- sync_point_order_data_(gpu::SyncPointOrderData::Create()),
- gpu_channel_(gpu_channel),
- task_runner_(task_runner),
- sync_point_manager_(sync_point_manager) {}
-
-GpuChannelMessageQueue::~GpuChannelMessageQueue() {
- DCHECK(channel_messages_.empty());
-}
-
-uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const {
- return sync_point_order_data_->unprocessed_order_num();
-}
-
-uint32_t GpuChannelMessageQueue::GetProcessedOrderNum() const {
- return sync_point_order_data_->processed_order_num();
-}
-
-void GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) {
- base::AutoLock auto_lock(channel_messages_lock_);
- if (enabled_)
- PushMessageHelper(make_scoped_ptr(new GpuChannelMessage(message)));
-}
-
-bool GpuChannelMessageQueue::GenerateSyncPointMessage(
- const IPC::Message& message,
- bool retire_sync_point,
- uint32_t* sync_point) {
- DCHECK_EQ((uint32_t)GpuCommandBufferMsg_InsertSyncPoint::ID, message.type());
- DCHECK(sync_point);
- base::AutoLock auto_lock(channel_messages_lock_);
- if (enabled_) {
- *sync_point = sync_point_manager_->GenerateSyncPoint();
-
- scoped_ptr<GpuChannelMessage> msg(new GpuChannelMessage(message));
- msg->retire_sync_point = retire_sync_point;
- msg->sync_point = *sync_point;
-
- PushMessageHelper(std::move(msg));
- return true;
- }
- return false;
-}
-
-bool GpuChannelMessageQueue::HasQueuedMessages() const {
- base::AutoLock auto_lock(channel_messages_lock_);
- return !channel_messages_.empty();
-}
-
-base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const {
- base::AutoLock auto_lock(channel_messages_lock_);
- if (!channel_messages_.empty())
- return channel_messages_.front()->time_received;
- return base::TimeTicks();
-}
-
-GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const {
- base::AutoLock auto_lock(channel_messages_lock_);
- if (!channel_messages_.empty()) {
- DCHECK_GT(channel_messages_.front()->order_number,
- sync_point_order_data_->processed_order_num());
- DCHECK_LE(channel_messages_.front()->order_number,
- sync_point_order_data_->unprocessed_order_num());
-
- return channel_messages_.front();
- }
- return nullptr;
-}
-
-void GpuChannelMessageQueue::BeginMessageProcessing(
- const GpuChannelMessage* msg) {
- sync_point_order_data_->BeginProcessingOrderNumber(msg->order_number);
-}
-
-void GpuChannelMessageQueue::PauseMessageProcessing(
- const GpuChannelMessage* msg) {
- sync_point_order_data_->PauseProcessingOrderNumber(msg->order_number);
-}
-
-bool GpuChannelMessageQueue::MessageProcessed() {
- base::AutoLock auto_lock(channel_messages_lock_);
- DCHECK(!channel_messages_.empty());
- scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
- channel_messages_.pop_front();
- sync_point_order_data_->FinishProcessingOrderNumber(msg->order_number);
- return !channel_messages_.empty();
-}
-
-void GpuChannelMessageQueue::DeleteAndDisableMessages() {
- {
- base::AutoLock auto_lock(channel_messages_lock_);
- DCHECK(enabled_);
- enabled_ = false;
- }
-
- // We guarantee that the queues will no longer be modified after enabled_
-  // is set to false, so it is safe to modify the queue without the lock.
- // All public facing modifying functions check enabled_ while all
- // private modifying functions DCHECK(enabled_) to enforce this.
- while (!channel_messages_.empty()) {
- scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
- channel_messages_.pop_front();
- // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and
-    // GpuCommandBufferMsg_RetireSyncPoint messages; it is safer to check
- // if we have a sync point number here.
- if (msg->sync_point)
- sync_point_manager_->RetireSyncPoint(msg->sync_point);
- }
-
- if (sync_point_order_data_) {
- sync_point_order_data_->Destroy();
- sync_point_order_data_ = nullptr;
- }
-}
-
-void GpuChannelMessageQueue::ScheduleHandleMessage() {
- task_runner_->PostTask(FROM_HERE,
- base::Bind(&GpuChannel::HandleMessage, gpu_channel_));
-}
-
-void GpuChannelMessageQueue::PushMessageHelper(
- scoped_ptr<GpuChannelMessage> msg) {
- channel_messages_lock_.AssertAcquired();
- DCHECK(enabled_);
-
- msg->order_number = sync_point_order_data_->GenerateUnprocessedOrderNumber(
- sync_point_manager_);
- msg->time_received = base::TimeTicks::Now();
-
- bool had_messages = !channel_messages_.empty();
- channel_messages_.push_back(msg.release());
- if (!had_messages)
- ScheduleHandleMessage();
-}
-
-GpuChannelMessageFilter::GpuChannelMessageFilter(
- const base::WeakPtr<GpuChannel>& gpu_channel,
- GpuChannelMessageQueue* message_queue,
- base::SingleThreadTaskRunner* task_runner,
- gpu::PreemptionFlag* preempting_flag,
- bool future_sync_points)
- : preemption_state_(IDLE),
- gpu_channel_(gpu_channel),
- message_queue_(message_queue),
- sender_(nullptr),
- peer_pid_(base::kNullProcessId),
- task_runner_(task_runner),
- preempting_flag_(preempting_flag),
- a_stub_is_descheduled_(false),
- future_sync_points_(future_sync_points) {}
-
-GpuChannelMessageFilter::~GpuChannelMessageFilter() {}
-
-void GpuChannelMessageFilter::OnFilterAdded(IPC::Sender* sender) {
- DCHECK(!sender_);
- sender_ = sender;
- timer_ = make_scoped_ptr(new base::OneShotTimer);
- for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
- filter->OnFilterAdded(sender_);
- }
-}
-
-void GpuChannelMessageFilter::OnFilterRemoved() {
- DCHECK(sender_);
- for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
- filter->OnFilterRemoved();
- }
- sender_ = nullptr;
- peer_pid_ = base::kNullProcessId;
- timer_ = nullptr;
-}
-
-void GpuChannelMessageFilter::OnChannelConnected(int32_t peer_pid) {
- DCHECK(peer_pid_ == base::kNullProcessId);
- peer_pid_ = peer_pid;
- for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
- filter->OnChannelConnected(peer_pid);
- }
-}
-
-void GpuChannelMessageFilter::OnChannelError() {
- for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
- filter->OnChannelError();
- }
-}
-
-void GpuChannelMessageFilter::OnChannelClosing() {
- for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
- filter->OnChannelClosing();
- }
-}
-
-void GpuChannelMessageFilter::AddChannelFilter(
- scoped_refptr<IPC::MessageFilter> filter) {
- channel_filters_.push_back(filter);
- if (sender_)
- filter->OnFilterAdded(sender_);
- if (peer_pid_ != base::kNullProcessId)
- filter->OnChannelConnected(peer_pid_);
-}
-
-void GpuChannelMessageFilter::RemoveChannelFilter(
- scoped_refptr<IPC::MessageFilter> filter) {
- if (sender_)
- filter->OnFilterRemoved();
- channel_filters_.erase(
- std::find(channel_filters_.begin(), channel_filters_.end(), filter));
-}
-
-bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
- DCHECK(sender_);
-
- if (message.should_unblock() || message.is_reply()) {
- DLOG(ERROR) << "Unexpected message type";
- return true;
- }
-
- if (message.type() == GpuChannelMsg_Nop::ID) {
- IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
- Send(reply);
- return true;
- }
-
- for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
- if (filter->OnMessageReceived(message)) {
- return true;
- }
- }
-
- bool handled = false;
- if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
- !future_sync_points_) {
- DLOG(ERROR) << "Untrusted client should not send "
- "GpuCommandBufferMsg_RetireSyncPoint message";
- return true;
- }
-
- if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
- base::Tuple<bool> params;
- IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
- if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
- &params)) {
- reply->set_reply_error();
- Send(reply);
- return true;
- }
- bool retire_sync_point = base::get<0>(params);
- if (!future_sync_points_ && !retire_sync_point) {
- DLOG(ERROR) << "Untrusted contexts can't create future sync points";
- reply->set_reply_error();
- Send(reply);
- return true;
- }
-
- // Message queue must handle the entire sync point generation because the
- // message queue could be disabled from the main thread during generation.
- uint32_t sync_point = 0u;
- if (!message_queue_->GenerateSyncPointMessage(message, retire_sync_point,
- &sync_point)) {
- DLOG(ERROR) << "GpuChannel has been destroyed.";
- reply->set_reply_error();
- Send(reply);
- return true;
- }
-
- DCHECK_NE(sync_point, 0u);
- GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
- Send(reply);
- handled = true;
- }
-
- // Forward all other messages to the GPU Channel.
- if (!handled) {
- if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
- message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
- task_runner_->PostTask(FROM_HERE,
- base::Bind(&GpuChannel::HandleOutOfOrderMessage,
- gpu_channel_, message));
- } else {
- message_queue_->PushBackMessage(message);
- }
- handled = true;
- }
-
- UpdatePreemptionState();
- return handled;
-}
-
-void GpuChannelMessageFilter::OnMessageProcessed() {
- UpdatePreemptionState();
-}
-
-void GpuChannelMessageFilter::UpdateStubSchedulingState(
- bool a_stub_is_descheduled) {
- a_stub_is_descheduled_ = a_stub_is_descheduled;
- UpdatePreemptionState();
-}
-
-bool GpuChannelMessageFilter::Send(IPC::Message* message) {
- return sender_->Send(message);
-}
-
-void GpuChannelMessageFilter::UpdatePreemptionState() {
- switch (preemption_state_) {
- case IDLE:
- if (preempting_flag_.get() && message_queue_->HasQueuedMessages())
- TransitionToWaiting();
- break;
- case WAITING:
- // A timer will transition us to CHECKING.
- DCHECK(timer_->IsRunning());
- break;
- case CHECKING: {
- base::TimeTicks time_tick = message_queue_->GetNextMessageTimeTick();
- if (!time_tick.is_null()) {
- base::TimeDelta time_elapsed = base::TimeTicks::Now() - time_tick;
- if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
- // Schedule another check for when the IPC may go long.
- timer_->Start(FROM_HERE,
- base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
- time_elapsed,
- this, &GpuChannelMessageFilter::UpdatePreemptionState);
- } else {
- if (a_stub_is_descheduled_)
- TransitionToWouldPreemptDescheduled();
- else
- TransitionToPreempting();
- }
- }
- } break;
- case PREEMPTING:
- // A TransitionToIdle() timer should always be running in this state.
- DCHECK(timer_->IsRunning());
- if (a_stub_is_descheduled_)
- TransitionToWouldPreemptDescheduled();
- else
- TransitionToIdleIfCaughtUp();
- break;
- case WOULD_PREEMPT_DESCHEDULED:
- // A TransitionToIdle() timer should never be running in this state.
- DCHECK(!timer_->IsRunning());
- if (!a_stub_is_descheduled_)
- TransitionToPreempting();
- else
- TransitionToIdleIfCaughtUp();
- break;
- default:
- NOTREACHED();
- }
-}
-
-void GpuChannelMessageFilter::TransitionToIdleIfCaughtUp() {
- DCHECK(preemption_state_ == PREEMPTING ||
- preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
- base::TimeTicks next_tick = message_queue_->GetNextMessageTimeTick();
- if (next_tick.is_null()) {
- TransitionToIdle();
- } else {
- base::TimeDelta time_elapsed = base::TimeTicks::Now() - next_tick;
- if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
- TransitionToIdle();
- }
-}
-
-void GpuChannelMessageFilter::TransitionToIdle() {
- DCHECK(preemption_state_ == PREEMPTING ||
- preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
- // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
- timer_->Stop();
-
- preemption_state_ = IDLE;
- preempting_flag_->Reset();
- TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
-
- UpdatePreemptionState();
-}
-
-void GpuChannelMessageFilter::TransitionToWaiting() {
- DCHECK_EQ(preemption_state_, IDLE);
- DCHECK(!timer_->IsRunning());
-
- preemption_state_ = WAITING;
- timer_->Start(FROM_HERE,
- base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs), this,
- &GpuChannelMessageFilter::TransitionToChecking);
-}
-
-void GpuChannelMessageFilter::TransitionToChecking() {
- DCHECK_EQ(preemption_state_, WAITING);
- DCHECK(!timer_->IsRunning());
-
- preemption_state_ = CHECKING;
- max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
- UpdatePreemptionState();
-}
-
-void GpuChannelMessageFilter::TransitionToPreempting() {
- DCHECK(preemption_state_ == CHECKING ||
- preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
- DCHECK(!a_stub_is_descheduled_);
-
- // Stop any pending state update checks that we may have queued
- // while CHECKING.
- if (preemption_state_ == CHECKING)
- timer_->Stop();
-
- preemption_state_ = PREEMPTING;
- preempting_flag_->Set();
- TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);
-
- timer_->Start(FROM_HERE, max_preemption_time_, this,
- &GpuChannelMessageFilter::TransitionToIdle);
-
- UpdatePreemptionState();
-}
-
-void GpuChannelMessageFilter::TransitionToWouldPreemptDescheduled() {
- DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING);
- DCHECK(a_stub_is_descheduled_);
-
- if (preemption_state_ == CHECKING) {
- // Stop any pending state update checks that we may have queued
- // while CHECKING.
- timer_->Stop();
- } else {
- // Stop any TransitionToIdle() timers that we may have queued
- // while PREEMPTING.
- timer_->Stop();
- max_preemption_time_ = timer_->desired_run_time() - base::TimeTicks::Now();
- if (max_preemption_time_ < base::TimeDelta()) {
- TransitionToIdle();
- return;
- }
- }
-
- preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
- preempting_flag_->Reset();
- TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
-
- UpdatePreemptionState();
-}
-
-GpuChannel::StreamState::StreamState(int32_t id, GpuStreamPriority priority)
- : id_(id), priority_(priority) {}
-
-GpuChannel::StreamState::~StreamState() {}
-
-void GpuChannel::StreamState::AddRoute(int32_t route_id) {
- routes_.insert(route_id);
-}
-
-void GpuChannel::StreamState::RemoveRoute(int32_t route_id) {
- routes_.erase(route_id);
-}
-
-bool GpuChannel::StreamState::HasRoute(int32_t route_id) const {
- return routes_.find(route_id) != routes_.end();
-}
-
-bool GpuChannel::StreamState::HasRoutes() const {
- return !routes_.empty();
-}
-
-GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
- gpu::SyncPointManager* sync_point_manager,
- GpuWatchdog* watchdog,
- gfx::GLShareGroup* share_group,
- gpu::gles2::MailboxManager* mailbox,
- gpu::PreemptionFlag* preempting_flag,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- int client_id,
- uint64_t client_tracing_id,
- bool allow_future_sync_points,
- bool allow_real_time_streams)
- : gpu_channel_manager_(gpu_channel_manager),
- sync_point_manager_(sync_point_manager),
- channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")),
- preempting_flag_(preempting_flag),
- client_id_(client_id),
- client_tracing_id_(client_tracing_id),
- task_runner_(task_runner),
- io_task_runner_(io_task_runner),
- share_group_(share_group),
- mailbox_manager_(mailbox),
- subscription_ref_set_(new gpu::gles2::SubscriptionRefSet),
- pending_valuebuffer_state_(new gpu::ValueStateMap),
- watchdog_(watchdog),
- num_stubs_descheduled_(0),
- allow_future_sync_points_(allow_future_sync_points),
- allow_real_time_streams_(allow_real_time_streams),
- weak_factory_(this) {
- DCHECK(gpu_channel_manager);
- DCHECK(client_id);
-
- message_queue_ = GpuChannelMessageQueue::Create(
- weak_factory_.GetWeakPtr(), task_runner, sync_point_manager);
-
- filter_ = new GpuChannelMessageFilter(
- weak_factory_.GetWeakPtr(), message_queue_.get(), task_runner,
- preempting_flag, allow_future_sync_points);
-
- subscription_ref_set_->AddObserver(this);
-}
-
-GpuChannel::~GpuChannel() {
- // Clear stubs first because of dependencies.
- stubs_.clear();
-
- message_queue_->DeleteAndDisableMessages();
-
- subscription_ref_set_->RemoveObserver(this);
- if (preempting_flag_.get())
- preempting_flag_->Reset();
-}
-
-IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event) {
- DCHECK(shutdown_event);
- DCHECK(!channel_);
-
- IPC::ChannelHandle channel_handle(channel_id_);
-
- channel_ =
- IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_SERVER, this,
- io_task_runner_, false, shutdown_event);
-
-#if defined(OS_POSIX)
- // On POSIX, pass the renderer-side FD. Also mark it as auto-close so
- // that it gets closed after it has been sent.
- base::ScopedFD renderer_fd = channel_->TakeClientFileDescriptor();
- DCHECK(renderer_fd.is_valid());
- channel_handle.socket = base::FileDescriptor(std::move(renderer_fd));
-#endif
-
- channel_->AddFilter(filter_.get());
-
- return channel_handle;
-}
-
-base::ProcessId GpuChannel::GetClientPID() const {
- return channel_->GetPeerPID();
-}
-
-uint32_t GpuChannel::GetProcessedOrderNum() const {
- return message_queue_->GetProcessedOrderNum();
-}
-
-uint32_t GpuChannel::GetUnprocessedOrderNum() const {
- return message_queue_->GetUnprocessedOrderNum();
-}
-
-bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
- // All messages should be pushed to channel_messages_ and handled separately.
- NOTREACHED();
- return false;
-}
-
-void GpuChannel::OnChannelError() {
- gpu_channel_manager_->RemoveChannel(client_id_);
-}
-
-bool GpuChannel::Send(IPC::Message* message) {
- // The GPU process must never send a synchronous IPC message to the renderer
- // process. This could result in deadlock.
- DCHECK(!message->is_sync());
-
- DVLOG(1) << "sending message @" << message << " on channel @" << this
- << " with type " << message->type();
-
- if (!channel_) {
- delete message;
- return false;
- }
-
- return channel_->Send(message);
-}
-
-void GpuChannel::OnAddSubscription(unsigned int target) {
- gpu_channel_manager()->Send(
- new GpuHostMsg_AddSubscription(client_id_, target));
-}
-
-void GpuChannel::OnRemoveSubscription(unsigned int target) {
- gpu_channel_manager()->Send(
- new GpuHostMsg_RemoveSubscription(client_id_, target));
-}
-
-void GpuChannel::OnStubSchedulingChanged(GpuCommandBufferStub* stub,
- bool scheduled) {
- bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
- if (scheduled) {
- num_stubs_descheduled_--;
- ScheduleHandleMessage();
- } else {
- num_stubs_descheduled_++;
- }
- DCHECK_LE(num_stubs_descheduled_, stubs_.size());
- bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;
-
- if (a_stub_is_descheduled != a_stub_was_descheduled) {
- if (preempting_flag_.get()) {
- io_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
- filter_, a_stub_is_descheduled));
- }
- }
-}
-
-CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
- const gfx::GLSurfaceHandle& window,
- const GPUCreateCommandBufferConfig& init_params,
- int32_t route_id) {
- TRACE_EVENT1("gpu", "GpuChannel::CreateViewCommandBuffer", "route_id",
- route_id);
-
- int32_t share_group_id = init_params.share_group_id;
- GpuCommandBufferStub* share_group = stubs_.get(share_group_id);
-
- if (!share_group && share_group_id != MSG_ROUTING_NONE)
- return CREATE_COMMAND_BUFFER_FAILED;
-
- int32_t stream_id = init_params.stream_id;
- GpuStreamPriority stream_priority = init_params.stream_priority;
-
- if (share_group && stream_id != share_group->stream_id())
- return CREATE_COMMAND_BUFFER_FAILED;
-
- if (!allow_real_time_streams_ &&
- stream_priority == GpuStreamPriority::REAL_TIME)
- return CREATE_COMMAND_BUFFER_FAILED;
-
- auto stream_it = streams_.find(stream_id);
- if (stream_it != streams_.end() &&
- stream_priority != GpuStreamPriority::INHERIT &&
- stream_priority != stream_it->second.priority()) {
- return CREATE_COMMAND_BUFFER_FAILED;
- }
-
- bool offscreen = false;
- scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
- this, sync_point_manager_, task_runner_.get(), share_group, window,
- mailbox_manager_.get(), preempted_flag_.get(),
- subscription_ref_set_.get(), pending_valuebuffer_state_.get(),
- gfx::Size(), disallowed_features_, init_params.attribs,
- init_params.gpu_preference, stream_id, route_id, offscreen, watchdog_,
- init_params.active_url));
-
- if (!router_.AddRoute(route_id, stub.get())) {
- DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
- "failed to add route";
- return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
- }
-
- if (stream_it != streams_.end()) {
- stream_it->second.AddRoute(route_id);
- } else {
- StreamState stream(stream_id, stream_priority);
- stream.AddRoute(route_id);
- streams_.insert(std::make_pair(stream_id, stream));
- }
-
- stubs_.set(route_id, std::move(stub));
- return CREATE_COMMAND_BUFFER_SUCCEEDED;
-}
-
-GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
- return stubs_.get(route_id);
-}
-
-void GpuChannel::LoseAllContexts() {
- gpu_channel_manager_->LoseAllContexts();
-}
-
-void GpuChannel::MarkAllContextsLost() {
- for (auto& kv : stubs_)
- kv.second->MarkContextLost();
-}
-
-bool GpuChannel::AddRoute(int32_t route_id, IPC::Listener* listener) {
- return router_.AddRoute(route_id, listener);
-}
-
-void GpuChannel::RemoveRoute(int32_t route_id) {
- router_.RemoveRoute(route_id);
-}
-
-void GpuChannel::SetPreemptByFlag(
- scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
- DCHECK(stubs_.empty());
- preempted_flag_ = preempted_flag;
-}
-
-void GpuChannel::OnDestroy() {
- TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
- gpu_channel_manager_->RemoveChannel(client_id_);
-}
-
-bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
- IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
- OnCreateOffscreenCommandBuffer)
- IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
- OnDestroyCommandBuffer)
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder,
- OnCreateJpegDecoder)
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
- DCHECK(handled) << msg.type();
- return handled;
-}
-
-scoped_refptr<gpu::SyncPointOrderData> GpuChannel::GetSyncPointOrderData() {
- return message_queue_->GetSyncPointOrderData();
-}
-
-void GpuChannel::HandleMessage() {
- // If we have been preempted by another channel, just post a task to wake up.
- if (preempted_flag_ && preempted_flag_->IsSet()) {
- ScheduleHandleMessage();
- return;
- }
-
- GpuChannelMessage* m = message_queue_->GetNextMessage();
-
- // TODO(sunnyps): This could be a DCHECK maybe?
- if (!m)
- return;
-
- const IPC::Message& message = m->message;
- message_queue_->BeginMessageProcessing(m);
- int32_t routing_id = message.routing_id();
- GpuCommandBufferStub* stub = stubs_.get(routing_id);
-
- DCHECK(!stub || stub->IsScheduled());
-
- DVLOG(1) << "received message @" << &message << " on channel @" << this
- << " with type " << message.type();
-
- bool handled = false;
-
- if (routing_id == MSG_ROUTING_CONTROL) {
- handled = OnControlMessageReceived(message);
- } else if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
- // TODO(dyen): Temporary handling of old sync points.
- // This must ensure that the sync point will be retired. Normally we'll
- // find the stub based on the routing ID, and associate the sync point
- // with it, but if that fails for any reason (channel or stub already
- // deleted, invalid routing id), we need to retire the sync point
- // immediately.
- if (stub) {
- stub->InsertSyncPoint(m->sync_point, m->retire_sync_point);
- } else {
- sync_point_manager_->RetireSyncPoint(m->sync_point);
- }
- handled = true;
- } else {
- handled = router_.RouteMessage(message);
- }
-
- // Respond to sync messages even if router failed to route.
- if (!handled && message.is_sync()) {
- IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
- reply->set_reply_error();
- Send(reply);
- handled = true;
- }
-
- // A command buffer may be descheduled or preempted but only in the middle of
- // a flush. In this case we should not pop the message from the queue.
- if (stub && stub->HasUnprocessedCommands()) {
- DCHECK_EQ((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID, message.type());
- message_queue_->PauseMessageProcessing(m);
- // If the stub is still scheduled then we were preempted and need to
- // schedule a wakeup otherwise some other event will wake us up e.g. sync
- // point completion. No DCHECK for preemption flag because that can change
- // any time.
- if (stub->IsScheduled())
- ScheduleHandleMessage();
- return;
- }
-
- if (message_queue_->MessageProcessed())
- ScheduleHandleMessage();
-
- if (preempting_flag_) {
- io_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_));
- }
-}
-
-void GpuChannel::ScheduleHandleMessage() {
- task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage,
- weak_factory_.GetWeakPtr()));
-}
-
-void GpuChannel::HandleOutOfOrderMessage(const IPC::Message& msg) {
- switch (msg.type()) {
- case GpuCommandBufferMsg_WaitForGetOffsetInRange::ID:
- case GpuCommandBufferMsg_WaitForTokenInRange::ID:
- router_.RouteMessage(msg);
- break;
- default:
- NOTREACHED();
- }
-}
-
-#if defined(OS_ANDROID)
-const GpuCommandBufferStub* GpuChannel::GetOneStub() const {
- for (const auto& kv : stubs_) {
- const GpuCommandBufferStub* stub = kv.second;
- if (stub->decoder() && !stub->decoder()->WasContextLost())
- return stub;
- }
- return nullptr;
-}
-#endif
-
-void GpuChannel::OnCreateOffscreenCommandBuffer(
- const gfx::Size& size,
- const GPUCreateCommandBufferConfig& init_params,
- int32_t route_id,
- bool* succeeded) {
- TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id",
- route_id);
-
- int32_t share_group_id = init_params.share_group_id;
- GpuCommandBufferStub* share_group = stubs_.get(share_group_id);
-
- if (!share_group && share_group_id != MSG_ROUTING_NONE) {
- *succeeded = false;
- return;
- }
-
- int32_t stream_id = init_params.stream_id;
- GpuStreamPriority stream_priority = init_params.stream_priority;
-
- if (share_group && stream_id != share_group->stream_id()) {
- *succeeded = false;
- return;
- }
-
- if (!allow_real_time_streams_ &&
- stream_priority == GpuStreamPriority::REAL_TIME) {
- *succeeded = false;
- return;
- }
-
- auto stream_it = streams_.find(stream_id);
- if (stream_it != streams_.end() &&
- stream_priority != GpuStreamPriority::INHERIT &&
- stream_priority != stream_it->second.priority()) {
- *succeeded = false;
- return;
- }
-
- bool offscreen = true;
- scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
- this, sync_point_manager_, task_runner_.get(), share_group,
- gfx::GLSurfaceHandle(), mailbox_manager_.get(), preempted_flag_.get(),
- subscription_ref_set_.get(), pending_valuebuffer_state_.get(), size,
- disallowed_features_, init_params.attribs, init_params.gpu_preference,
- init_params.stream_id, route_id, offscreen, watchdog_,
- init_params.active_url));
-
- if (!router_.AddRoute(route_id, stub.get())) {
- DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
- "failed to add route";
- *succeeded = false;
- return;
- }
-
- if (stream_it != streams_.end()) {
- stream_it->second.AddRoute(route_id);
- } else {
- StreamState stream(stream_id, stream_priority);
- stream.AddRoute(route_id);
- streams_.insert(std::make_pair(stream_id, stream));
- }
-
- stubs_.set(route_id, std::move(stub));
- *succeeded = true;
-}
-
-void GpuChannel::OnDestroyCommandBuffer(int32_t route_id) {
- TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
- "route_id", route_id);
-
- scoped_ptr<GpuCommandBufferStub> stub = stubs_.take_and_erase(route_id);
-
- if (!stub)
- return;
-
- router_.RemoveRoute(route_id);
-
- int32_t stream_id = stub->stream_id();
- auto stream_it = streams_.find(stream_id);
- DCHECK(stream_it != streams_.end());
- stream_it->second.RemoveRoute(route_id);
- if (!stream_it->second.HasRoutes())
- streams_.erase(stream_it);
-
- // In case the renderer is currently blocked waiting for a sync reply from the
- // stub, we need to make sure to reschedule the GpuChannel here.
- if (!stub->IsScheduled()) {
- // This stub won't get a chance to reschedule, so update the count now.
- OnStubSchedulingChanged(stub.get(), true);
- }
-}
-
-void GpuChannel::OnCreateJpegDecoder(int32_t route_id,
- IPC::Message* reply_msg) {
- if (!jpeg_decoder_) {
- jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_));
- }
- jpeg_decoder_->AddClient(route_id, reply_msg);
-}
-
-void GpuChannel::CacheShader(const std::string& key,
- const std::string& shader) {
- gpu_channel_manager_->Send(
- new GpuHostMsg_CacheShader(client_id_, key, shader));
-}
-
-void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
- io_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter,
- filter_, make_scoped_refptr(filter)));
-}
-
-void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
- io_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter,
- filter_, make_scoped_refptr(filter)));
-}
-
-uint64_t GpuChannel::GetMemoryUsage() {
- // Collect the unique memory trackers in use by the |stubs_|.
- std::set<gpu::gles2::MemoryTracker*> unique_memory_trackers;
- for (auto& kv : stubs_)
- unique_memory_trackers.insert(kv.second->GetMemoryTracker());
-
- // Sum the memory usage for all unique memory trackers.
- uint64_t size = 0;
- for (auto* tracker : unique_memory_trackers) {
- size += gpu_channel_manager()->gpu_memory_manager()->GetTrackerMemoryUsage(
- tracker);
- }
-
- return size;
-}
-
-scoped_refptr<gl::GLImage> GpuChannel::CreateImageForGpuMemoryBuffer(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- uint32_t internalformat) {
- switch (handle.type) {
- case gfx::SHARED_MEMORY_BUFFER: {
- if (!base::IsValueInRangeForNumericType<size_t>(handle.stride))
- return nullptr;
- scoped_refptr<gl::GLImageSharedMemory> image(
- new gl::GLImageSharedMemory(size, internalformat));
- if (!image->Initialize(handle.handle, handle.id, format, handle.offset,
- handle.stride)) {
- return nullptr;
- }
-
- return image;
- }
- default: {
- GpuChannelManager* manager = gpu_channel_manager();
- if (!manager->gpu_memory_buffer_factory())
- return nullptr;
-
- return manager->gpu_memory_buffer_factory()
- ->AsImageFactory()
- ->CreateImageForGpuMemoryBuffer(handle,
- size,
- format,
- internalformat,
- client_id_);
- }
- }
-}
-
-void GpuChannel::HandleUpdateValueState(
- unsigned int target, const gpu::ValueState& state) {
- pending_valuebuffer_state_->UpdateState(target, state);
-}
-
-} // namespace content
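
The deleted GpuChannelMessageFilter above drives preemption off vsync-based thresholds: preemption starts once the oldest queued IPC has waited roughly two vsync intervals, and stops once the queue catches up or a one-shot timer caps the preemption. The sketch below is a simplified, illustrative decision function only; it omits the WOULD_PREEMPT_DESCHEDULED state and the timers, and the names are assumptions rather than the real state machine.

// Sketch only: threshold-based preemption decisions per message queue state.
#include <chrono>

enum class PreemptionState { kIdle, kWaiting, kChecking, kPreempting };

constexpr auto kVsyncInterval = std::chrono::milliseconds(17);
constexpr auto kPreemptWaitTime = 2 * kVsyncInterval;   // start threshold
constexpr auto kStopPreemptThreshold = kVsyncInterval;  // catch-up threshold

PreemptionState Decide(PreemptionState state,
                       std::chrono::milliseconds oldest_message_age,
                       bool queue_empty) {
  switch (state) {
    case PreemptionState::kIdle:
      return queue_empty ? PreemptionState::kIdle : PreemptionState::kWaiting;
    case PreemptionState::kWaiting:
      // In the real code a timer drives this edge after kPreemptWaitTime.
      return PreemptionState::kChecking;
    case PreemptionState::kChecking:
      return oldest_message_age >= kPreemptWaitTime
                 ? PreemptionState::kPreempting
                 : PreemptionState::kChecking;
    case PreemptionState::kPreempting:
      // Stop once the queue is empty or the oldest IPC is recent again.
      return (queue_empty || oldest_message_age < kStopPreemptThreshold)
                 ? PreemptionState::kIdle
                 : PreemptionState::kPreempting;
  }
  return state;
}
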
diff --git a/chromium/content/common/gpu/gpu_channel.h b/chromium/content/common/gpu/gpu_channel.h
deleted file mode 100644
index 319f81d40ed..00000000000
--- a/chromium/content/common/gpu/gpu_channel.h
+++ /dev/null
@@ -1,485 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_CHANNEL_H_
-#define CONTENT_COMMON_GPU_GPU_CHANNEL_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <string>
-
-#include "base/containers/hash_tables.h"
-#include "base/containers/scoped_ptr_hash_map.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/process/process.h"
-#include "base/trace_event/memory_dump_provider.h"
-#include "build/build_config.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/gpu_command_buffer_stub.h"
-#include "content/common/gpu/gpu_memory_manager.h"
-#include "content/common/gpu/gpu_result_codes.h"
-#include "content/common/gpu/gpu_stream_priority.h"
-#include "content/common/message_router.h"
-#include "gpu/command_buffer/service/valuebuffer_manager.h"
-#include "ipc/ipc_sync_channel.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gl/gl_share_group.h"
-#include "ui/gl/gpu_preference.h"
-
-struct GPUCreateCommandBufferConfig;
-
-namespace base {
-class WaitableEvent;
-}
-
-namespace gpu {
-class PreemptionFlag;
-class SyncPointOrderData;
-class SyncPointManager;
-union ValueState;
-class ValueStateMap;
-namespace gles2 {
-class SubscriptionRefSet;
-}
-}
-
-namespace IPC {
-class MessageFilter;
-}
-
-namespace content {
-class GpuChannelManager;
-class GpuChannelMessageFilter;
-class GpuChannelMessageQueue;
-class GpuJpegDecodeAccelerator;
-class GpuWatchdog;
-
-// Encapsulates an IPC channel between the GPU process and one renderer
-// process. On the renderer side there's a corresponding GpuChannelHost.
-class CONTENT_EXPORT GpuChannel
- : public IPC::Listener,
- public IPC::Sender,
- public gpu::gles2::SubscriptionRefSet::Observer {
- public:
- // Takes ownership of the renderer process handle.
- GpuChannel(GpuChannelManager* gpu_channel_manager,
- gpu::SyncPointManager* sync_point_manager,
- GpuWatchdog* watchdog,
- gfx::GLShareGroup* share_group,
- gpu::gles2::MailboxManager* mailbox_manager,
- gpu::PreemptionFlag* preempting_flag,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- int client_id,
- uint64_t client_tracing_id,
- bool allow_future_sync_points,
- bool allow_real_time_streams);
- ~GpuChannel() override;
-
- // Initializes the IPC channel. Caller takes ownership of the client FD in
- // the returned handle and is responsible for closing it.
- virtual IPC::ChannelHandle Init(base::WaitableEvent* shutdown_event);
-
- // Get the GpuChannelManager that owns this channel.
- GpuChannelManager* gpu_channel_manager() const {
- return gpu_channel_manager_;
- }
-
- const std::string& channel_id() const { return channel_id_; }
-
- virtual base::ProcessId GetClientPID() const;
-
- int client_id() const { return client_id_; }
-
- uint64_t client_tracing_id() const { return client_tracing_id_; }
-
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner() const {
- return io_task_runner_;
- }
-
- // IPC::Listener implementation:
- bool OnMessageReceived(const IPC::Message& msg) override;
- void OnChannelError() override;
-
- // IPC::Sender implementation:
- bool Send(IPC::Message* msg) override;
-
- // SubscriptionRefSet::Observer implementation
- void OnAddSubscription(unsigned int target) override;
- void OnRemoveSubscription(unsigned int target) override;
-
- // This is called when a command buffer transitions between scheduled and
- // descheduled states. When any stub is descheduled, we stop preempting
- // other channels.
- void OnStubSchedulingChanged(GpuCommandBufferStub* stub, bool scheduled);
-
- CreateCommandBufferResult CreateViewCommandBuffer(
- const gfx::GLSurfaceHandle& window,
- const GPUCreateCommandBufferConfig& init_params,
- int32_t route_id);
-
- gfx::GLShareGroup* share_group() const { return share_group_.get(); }
-
- GpuCommandBufferStub* LookupCommandBuffer(int32_t route_id);
-
- void LoseAllContexts();
- void MarkAllContextsLost();
-
- // Called to add a listener for a particular message routing ID.
- // Returns true if succeeded.
- bool AddRoute(int32_t route_id, IPC::Listener* listener);
-
- // Called to remove a listener for a particular message routing ID.
- void RemoveRoute(int32_t route_id);
-
- void SetPreemptingFlag(gpu::PreemptionFlag* flag);
-
- // If |preemption_flag->IsSet()|, any stub on this channel
- // should stop issuing GL commands. Setting this to NULL stops deferral.
- void SetPreemptByFlag(
- scoped_refptr<gpu::PreemptionFlag> preemption_flag);
-
- void CacheShader(const std::string& key, const std::string& shader);
-
- void AddFilter(IPC::MessageFilter* filter);
- void RemoveFilter(IPC::MessageFilter* filter);
-
- uint64_t GetMemoryUsage();
-
- scoped_refptr<gl::GLImage> CreateImageForGpuMemoryBuffer(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- uint32_t internalformat);
-
- bool allow_future_sync_points() const { return allow_future_sync_points_; }
-
- void HandleUpdateValueState(unsigned int target,
- const gpu::ValueState& state);
-
- // Visible for testing.
- const gpu::ValueStateMap* pending_valuebuffer_state() const {
- return pending_valuebuffer_state_.get();
- }
-
- // Visible for testing.
- GpuChannelMessageFilter* filter() const { return filter_.get(); }
-
- // Returns the global order number for the last processed IPC message.
- uint32_t GetProcessedOrderNum() const;
-
- // Returns the global order number for the last unprocessed IPC message.
- uint32_t GetUnprocessedOrderNum() const;
-
- // Returns the shared sync point global order data.
- scoped_refptr<gpu::SyncPointOrderData> GetSyncPointOrderData();
-
- void HandleMessage();
-
- // Some messages such as WaitForGetOffsetInRange and WaitForTokenInRange are
- // processed as soon as possible because the client is blocked until they
- // are completed.
- void HandleOutOfOrderMessage(const IPC::Message& msg);
-
-#if defined(OS_ANDROID)
- const GpuCommandBufferStub* GetOneStub() const;
-#endif
-
- protected:
- // The message filter on the io thread.
- scoped_refptr<GpuChannelMessageFilter> filter_;
-
- // Map of routing id to command buffer stub.
- base::ScopedPtrHashMap<int32_t, scoped_ptr<GpuCommandBufferStub>> stubs_;
-
- private:
- class StreamState {
- public:
- StreamState(int32_t id, GpuStreamPriority priority);
- ~StreamState();
-
- int32_t id() const { return id_; }
- GpuStreamPriority priority() const { return priority_; }
-
- void AddRoute(int32_t route_id);
- void RemoveRoute(int32_t route_id);
- bool HasRoute(int32_t route_id) const;
- bool HasRoutes() const;
-
- private:
- int32_t id_;
- GpuStreamPriority priority_;
- base::hash_set<int32_t> routes_;
- };
-
- void OnDestroy();
-
- bool OnControlMessageReceived(const IPC::Message& msg);
-
- void ScheduleHandleMessage();
-
- // Message handlers.
- void OnCreateOffscreenCommandBuffer(
- const gfx::Size& size,
- const GPUCreateCommandBufferConfig& init_params,
- int32_t route_id,
- bool* succeeded);
- void OnDestroyCommandBuffer(int32_t route_id);
- void OnCreateJpegDecoder(int32_t route_id, IPC::Message* reply_msg);
-
-  // The lifetime of objects of this class is managed by a GpuChannelManager.
-  // The GpuChannelManager destroys all of the GpuChannels that it owns when
-  // it is destroyed, so a raw pointer is safe.
- GpuChannelManager* gpu_channel_manager_;
-
- // Sync point manager. Outlives the channel and is guaranteed to outlive the
- // message loop.
- gpu::SyncPointManager* sync_point_manager_;
-
- scoped_ptr<IPC::SyncChannel> channel_;
-
- // Uniquely identifies the channel within this GPU process.
- std::string channel_id_;
-
- // Used to implement message routing functionality to CommandBuffer objects
- MessageRouter router_;
-
- // Whether the processing of IPCs on this channel is stalled and we should
- // preempt other GpuChannels.
- scoped_refptr<gpu::PreemptionFlag> preempting_flag_;
-
- // If non-NULL, all stubs on this channel should stop processing GL
- // commands (via their GpuScheduler) when preempted_flag_->IsSet()
- scoped_refptr<gpu::PreemptionFlag> preempted_flag_;
-
- scoped_refptr<GpuChannelMessageQueue> message_queue_;
-
- // The id of the client who is on the other side of the channel.
- int client_id_;
-
- // The tracing ID used for memory allocations associated with this client.
- uint64_t client_tracing_id_;
-
- // The task runners for the main thread and the io thread.
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
-
- // The share group that all contexts associated with a particular renderer
- // process use.
- scoped_refptr<gfx::GLShareGroup> share_group_;
-
- scoped_refptr<gpu::gles2::MailboxManager> mailbox_manager_;
-
- scoped_refptr<gpu::gles2::SubscriptionRefSet> subscription_ref_set_;
-
- scoped_refptr<gpu::ValueStateMap> pending_valuebuffer_state_;
-
- scoped_ptr<GpuJpegDecodeAccelerator> jpeg_decoder_;
-
- gpu::gles2::DisallowedFeatures disallowed_features_;
- GpuWatchdog* watchdog_;
-
- size_t num_stubs_descheduled_;
-
- // Map of stream id to stream state.
- base::hash_map<int32_t, StreamState> streams_;
-
- bool allow_future_sync_points_;
- bool allow_real_time_streams_;
-
-  // Member variables should appear before the WeakPtrFactory, to ensure that
-  // any WeakPtrs to this GpuChannel are invalidated before its member
-  // variables' destructors are executed.
- base::WeakPtrFactory<GpuChannel> weak_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuChannel);
-};
-
-// This filter does three things:
-// - it counts and timestamps each message forwarded to the channel
-// so that we can preempt other channels if a message takes too long to
-// process. To guarantee fairness, we must wait a minimum amount of time
-//   before preempting, and we limit the amount of time that we can preempt in
-// one shot (see constants above).
-// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
-// thread, generating the sync point ID and responding immediately, and then
-// posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
-// into the channel's queue.
-// - it generates mailbox names for clients of the GPU process on the IO thread.
-class GpuChannelMessageFilter : public IPC::MessageFilter {
- public:
- GpuChannelMessageFilter(const base::WeakPtr<GpuChannel>& gpu_channel,
- GpuChannelMessageQueue* message_queue,
- base::SingleThreadTaskRunner* task_runner,
- gpu::PreemptionFlag* preempting_flag,
- bool future_sync_points);
-
- // IPC::MessageFilter implementation.
- void OnFilterAdded(IPC::Sender* sender) override;
- void OnFilterRemoved() override;
- void OnChannelConnected(int32_t peer_pid) override;
- void OnChannelError() override;
- void OnChannelClosing() override;
- bool OnMessageReceived(const IPC::Message& message) override;
-
- void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
- void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
-
- void OnMessageProcessed();
-
- void UpdateStubSchedulingState(bool a_stub_is_descheduled);
-
- bool Send(IPC::Message* message);
-
- protected:
- ~GpuChannelMessageFilter() override;
-
- private:
- enum PreemptionState {
- // Either there's no other channel to preempt, there are no messages
- // pending processing, or we just finished preempting and have to wait
- // before preempting again.
- IDLE,
- // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
- WAITING,
- // We can preempt whenever any IPC processing takes more than
- // kPreemptWaitTimeMs.
- CHECKING,
- // We are currently preempting (i.e. no stub is descheduled).
- PREEMPTING,
- // We would like to preempt, but some stub is descheduled.
- WOULD_PREEMPT_DESCHEDULED,
- };
-
- void UpdatePreemptionState();
-
- void TransitionToIdleIfCaughtUp();
- void TransitionToIdle();
- void TransitionToWaiting();
- void TransitionToChecking();
- void TransitionToPreempting();
- void TransitionToWouldPreemptDescheduled();
-
- PreemptionState preemption_state_;
-
- // Maximum amount of time that we can spend in PREEMPTING.
- // It is reset when we transition to IDLE.
- base::TimeDelta max_preemption_time_;
-
- base::WeakPtr<GpuChannel> gpu_channel_;
- // The message_queue_ is used to handle messages on the main thread.
- scoped_refptr<GpuChannelMessageQueue> message_queue_;
- IPC::Sender* sender_;
- base::ProcessId peer_pid_;
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- scoped_refptr<gpu::PreemptionFlag> preempting_flag_;
- std::vector<scoped_refptr<IPC::MessageFilter>> channel_filters_;
-
- // This timer is created and destroyed on the IO thread.
- scoped_ptr<base::OneShotTimer> timer_;
-
- bool a_stub_is_descheduled_;
-
- // True if this channel can create future sync points.
- bool future_sync_points_;
-};
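The PreemptionState enum above encodes a small state machine: the filter waits a minimum amount of time before raising the preemption flag, caps how long the flag may stay raised, and parks in WOULD_PREEMPT_DESCHEDULED while any stub is descheduled. Below is a minimal, self-contained sketch of those transitions; the constants, inputs and names are illustrative assumptions and do not reproduce the real GpuChannelMessageFilter interface.

#include <chrono>

// Sketch only: kPreemptWait/kMaxPreempt stand in for kPreemptWaitTimeMs and
// the maximum preemption time; the age/duration inputs are assumed to be
// tracked elsewhere.
enum class PreemptionState { kIdle, kWaiting, kChecking, kPreempting,
                             kWouldPreemptDescheduled };

constexpr std::chrono::milliseconds kPreemptWait{32};
constexpr std::chrono::milliseconds kMaxPreempt{16};

PreemptionState Update(PreemptionState state,
                       bool has_queued_messages,
                       std::chrono::milliseconds oldest_message_age,
                       std::chrono::milliseconds time_preempting,
                       bool a_stub_is_descheduled) {
  switch (state) {
    case PreemptionState::kIdle:
      // A message is pending: start the fairness delay.
      return has_queued_messages ? PreemptionState::kWaiting : state;
    case PreemptionState::kWaiting:
      // After the minimum wait, start checking how long messages sit queued.
      return oldest_message_age >= kPreemptWait ? PreemptionState::kChecking
                                                : state;
    case PreemptionState::kChecking:
      if (!has_queued_messages)
        return PreemptionState::kIdle;
      if (oldest_message_age >= kPreemptWait)
        return a_stub_is_descheduled
                   ? PreemptionState::kWouldPreemptDescheduled
                   : PreemptionState::kPreempting;
      return state;
    case PreemptionState::kPreempting:
      // Preemption is bounded so other channels cannot be starved forever.
      if (!has_queued_messages || time_preempting >= kMaxPreempt)
        return PreemptionState::kIdle;
      return a_stub_is_descheduled ? PreemptionState::kWouldPreemptDescheduled
                                   : state;
    case PreemptionState::kWouldPreemptDescheduled:
      if (!has_queued_messages)
        return PreemptionState::kIdle;
      return a_stub_is_descheduled ? state : PreemptionState::kPreempting;
  }
  return state;
}

In the deleted file these transitions hang off OnMessageReceived(), OnMessageProcessed() and the one-shot timer declared above, rather than the single polled helper used here for brevity.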
-
-struct GpuChannelMessage {
- uint32_t order_number;
- base::TimeTicks time_received;
- IPC::Message message;
-
- // TODO(dyen): Temporary sync point data, remove once new sync point lands.
- bool retire_sync_point;
- uint32_t sync_point;
-
- GpuChannelMessage(const IPC::Message& msg)
- : order_number(0),
- time_received(base::TimeTicks()),
- message(msg),
- retire_sync_point(false),
- sync_point(0) {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(GpuChannelMessage);
-};
-
-class GpuChannelMessageQueue
- : public base::RefCountedThreadSafe<GpuChannelMessageQueue> {
- public:
- static scoped_refptr<GpuChannelMessageQueue> Create(
- const base::WeakPtr<GpuChannel>& gpu_channel,
- base::SingleThreadTaskRunner* task_runner,
- gpu::SyncPointManager* sync_point_manager);
-
- scoped_refptr<gpu::SyncPointOrderData> GetSyncPointOrderData();
-
- // Returns the global order number for the last unprocessed IPC message.
- uint32_t GetUnprocessedOrderNum() const;
-
-  // Returns the global order number for the last processed IPC message.
- uint32_t GetProcessedOrderNum() const;
-
- bool HasQueuedMessages() const;
-
- base::TimeTicks GetNextMessageTimeTick() const;
-
- GpuChannelMessage* GetNextMessage() const;
-
- // Should be called before a message begins to be processed.
- void BeginMessageProcessing(const GpuChannelMessage* msg);
-
- // Should be called if a message began processing but did not finish.
- void PauseMessageProcessing(const GpuChannelMessage* msg);
-
- // Should be called after a message returned by GetNextMessage is processed.
- // Returns true if there are more messages on the queue.
- bool MessageProcessed();
-
- void PushBackMessage(const IPC::Message& message);
-
- bool GenerateSyncPointMessage(const IPC::Message& message,
- bool retire_sync_point,
- uint32_t* sync_point_number);
-
- void DeleteAndDisableMessages();
-
- private:
- friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>;
-
- GpuChannelMessageQueue(const base::WeakPtr<GpuChannel>& gpu_channel,
- base::SingleThreadTaskRunner* task_runner,
- gpu::SyncPointManager* sync_point_manager);
- ~GpuChannelMessageQueue();
-
- void ScheduleHandleMessage();
-
- void PushMessageHelper(scoped_ptr<GpuChannelMessage> msg);
-
- bool enabled_;
-
-  // The deque owns the messages.
- std::deque<GpuChannelMessage*> channel_messages_;
-
- // This lock protects enabled_ and channel_messages_.
- mutable base::Lock channel_messages_lock_;
-
- // Keeps track of sync point related state such as message order numbers.
- scoped_refptr<gpu::SyncPointOrderData> sync_point_order_data_;
-
- base::WeakPtr<GpuChannel> gpu_channel_;
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- gpu::SyncPointManager* sync_point_manager_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue);
-};
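GpuChannelMessageQueue is filled from the IO thread (by the message filter) and drained on the main thread: one lock guards enabled_ and the deque, and pushing into an empty queue schedules a drain task. A rough sketch of that producer/consumer pattern follows, using std::mutex and a plain callback in place of the Chromium primitives; every name in it is illustrative rather than the real API.

#include <deque>
#include <functional>
#include <memory>
#include <mutex>
#include <utility>

struct Message {};  // payload elided

class MessageQueueSketch {
 public:
  explicit MessageQueueSketch(std::function<void()> schedule_drain)
      : schedule_drain_(std::move(schedule_drain)) {}

  // IO thread: enqueue and, if this is the first pending message, ask the
  // main thread to start draining.
  void Push(std::unique_ptr<Message> msg) {
    bool was_empty = false;
    {
      std::lock_guard<std::mutex> lock(lock_);
      if (!enabled_)
        return;
      was_empty = messages_.empty();
      messages_.push_back(std::move(msg));
    }
    if (was_empty)
      schedule_drain_();
  }

  // Main thread: take the next message, or null when the queue is empty.
  std::unique_ptr<Message> Pop() {
    std::lock_guard<std::mutex> lock(lock_);
    if (messages_.empty())
      return nullptr;
    std::unique_ptr<Message> msg = std::move(messages_.front());
    messages_.pop_front();
    return msg;
  }

  // Channel teardown: drop queued messages and refuse new ones.
  void DeleteAndDisable() {
    std::lock_guard<std::mutex> lock(lock_);
    enabled_ = false;
    messages_.clear();
  }

 private:
  std::function<void()> schedule_drain_;
  std::mutex lock_;
  bool enabled_ = true;
  std::deque<std::unique_ptr<Message>> messages_;
};

The real queue also stamps each message with a global order number through SyncPointOrderData; that bookkeeping is omitted here.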
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_CHANNEL_H_
diff --git a/chromium/content/common/gpu/gpu_channel_manager.cc b/chromium/content/common/gpu/gpu_channel_manager.cc
deleted file mode 100644
index 8f59e90345d..00000000000
--- a/chromium/content/common/gpu/gpu_channel_manager.cc
+++ /dev/null
@@ -1,380 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_channel_manager.h"
-
-#include <algorithm>
-#include <utility>
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/location.h"
-#include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
-#include "build/build_config.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_memory_buffer_factory.h"
-#include "content/common/gpu/gpu_memory_manager.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/message_router.h"
-#include "content/public/common/content_switches.h"
-#include "gpu/command_buffer/common/value_state.h"
-#include "gpu/command_buffer/service/feature_info.h"
-#include "gpu/command_buffer/service/gpu_switches.h"
-#include "gpu/command_buffer/service/mailbox_manager.h"
-#include "gpu/command_buffer/service/memory_program_cache.h"
-#include "gpu/command_buffer/service/shader_translator_cache.h"
-#include "gpu/command_buffer/service/sync_point_manager.h"
-#include "ipc/message_filter.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_share_group.h"
-
-#if defined(OS_CHROMEOS)
-#include "content/common/gpu/media/gpu_arc_video_service.h"
-#endif
-
-namespace content {
-
-namespace {
-#if defined(OS_ANDROID)
-// Amount of time we expect the GPU to stay powered up without being used.
-const int kMaxGpuIdleTimeMs = 40;
-// Maximum amount of time we keep pinging the GPU waiting for the client to
-// draw.
-const int kMaxKeepAliveTimeMs = 200;
-#endif
-
-}  // namespace
-
-GpuChannelManager::GpuChannelManager(
- IPC::SyncChannel* channel,
- GpuWatchdog* watchdog,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- base::WaitableEvent* shutdown_event,
- gpu::SyncPointManager* sync_point_manager,
- GpuMemoryBufferFactory* gpu_memory_buffer_factory)
- : task_runner_(task_runner),
- io_task_runner_(io_task_runner),
- channel_(channel),
- watchdog_(watchdog),
- shutdown_event_(shutdown_event),
- share_group_(new gfx::GLShareGroup),
- mailbox_manager_(gpu::gles2::MailboxManager::Create()),
- gpu_memory_manager_(this),
- sync_point_manager_(sync_point_manager),
- sync_point_client_waiter_(
- sync_point_manager->CreateSyncPointClientWaiter()),
- gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
- weak_factory_(this) {
- DCHECK(task_runner);
- DCHECK(io_task_runner);
- const base::CommandLine* command_line =
- base::CommandLine::ForCurrentProcess();
- if (command_line->HasSwitch(switches::kUIPrioritizeInGpuProcess))
- preemption_flag_ = new gpu::PreemptionFlag;
-}
-
-GpuChannelManager::~GpuChannelManager() {
- // Destroy channels before anything else because of dependencies.
- gpu_channels_.clear();
- if (default_offscreen_surface_.get()) {
- default_offscreen_surface_->Destroy();
- default_offscreen_surface_ = NULL;
- }
-}
-
-gpu::gles2::ProgramCache* GpuChannelManager::program_cache() {
- if (!program_cache_.get() &&
- (gfx::g_driver_gl.ext.b_GL_ARB_get_program_binary ||
- gfx::g_driver_gl.ext.b_GL_OES_get_program_binary) &&
- !base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableGpuProgramCache)) {
- program_cache_.reset(new gpu::gles2::MemoryProgramCache());
- }
- return program_cache_.get();
-}
-
-gpu::gles2::ShaderTranslatorCache*
-GpuChannelManager::shader_translator_cache() {
- if (!shader_translator_cache_.get())
- shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
- return shader_translator_cache_.get();
-}
-
-gpu::gles2::FramebufferCompletenessCache*
-GpuChannelManager::framebuffer_completeness_cache() {
- if (!framebuffer_completeness_cache_.get())
- framebuffer_completeness_cache_ =
- new gpu::gles2::FramebufferCompletenessCache;
- return framebuffer_completeness_cache_.get();
-}
-
-void GpuChannelManager::RemoveChannel(int client_id) {
- Send(new GpuHostMsg_DestroyChannel(client_id));
- gpu_channels_.erase(client_id);
-}
-
-int GpuChannelManager::GenerateRouteID() {
- static int last_id = 0;
- return ++last_id;
-}
-
-void GpuChannelManager::AddRoute(int32_t routing_id, IPC::Listener* listener) {
- router_.AddRoute(routing_id, listener);
-}
-
-void GpuChannelManager::RemoveRoute(int32_t routing_id) {
- router_.RemoveRoute(routing_id);
-}
-
-GpuChannel* GpuChannelManager::LookupChannel(int32_t client_id) const {
- const auto& it = gpu_channels_.find(client_id);
- return it != gpu_channels_.end() ? it->second : nullptr;
-}
-
-bool GpuChannelManager::OnControlMessageReceived(const IPC::Message& msg) {
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(GpuChannelManager, msg)
- IPC_MESSAGE_HANDLER(GpuMsg_EstablishChannel, OnEstablishChannel)
- IPC_MESSAGE_HANDLER(GpuMsg_CloseChannel, OnCloseChannel)
- IPC_MESSAGE_HANDLER(GpuMsg_CreateViewCommandBuffer,
- OnCreateViewCommandBuffer)
- IPC_MESSAGE_HANDLER(GpuMsg_DestroyGpuMemoryBuffer, OnDestroyGpuMemoryBuffer)
-#if defined(OS_CHROMEOS)
- IPC_MESSAGE_HANDLER(GpuMsg_CreateArcVideoAcceleratorChannel,
- OnCreateArcVideoAcceleratorChannel)
-#endif
- IPC_MESSAGE_HANDLER(GpuMsg_LoadedShader, OnLoadedShader)
- IPC_MESSAGE_HANDLER(GpuMsg_UpdateValueState, OnUpdateValueState)
-#if defined(OS_ANDROID)
- IPC_MESSAGE_HANDLER(GpuMsg_WakeUpGpu, OnWakeUpGpu);
-#endif
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
- return handled;
-}
-
-bool GpuChannelManager::OnMessageReceived(const IPC::Message& msg) {
- if (msg.routing_id() == MSG_ROUTING_CONTROL)
- return OnControlMessageReceived(msg);
-
- return router_.RouteMessage(msg);
-}
-
-bool GpuChannelManager::Send(IPC::Message* msg) {
- return channel_->Send(msg);
-}
-
-scoped_ptr<GpuChannel> GpuChannelManager::CreateGpuChannel(
- int client_id,
- uint64_t client_tracing_id,
- bool preempts,
- bool allow_future_sync_points,
- bool allow_real_time_streams) {
- return make_scoped_ptr(new GpuChannel(
- this, sync_point_manager(), watchdog_, share_group(), mailbox_manager(),
- preempts ? preemption_flag() : nullptr, task_runner_.get(),
- io_task_runner_.get(), client_id, client_tracing_id,
- allow_future_sync_points, allow_real_time_streams));
-}
-
-void GpuChannelManager::OnEstablishChannel(
- const GpuMsg_EstablishChannel_Params& params) {
- DCHECK(!params.preempts || !params.preempted);
- scoped_ptr<GpuChannel> channel(CreateGpuChannel(
- params.client_id, params.client_tracing_id, params.preempts,
- params.allow_future_sync_points, params.allow_real_time_streams));
- if (params.preempted)
- channel->SetPreemptByFlag(preemption_flag_.get());
- IPC::ChannelHandle channel_handle = channel->Init(shutdown_event_);
-
- gpu_channels_.set(params.client_id, std::move(channel));
-
- Send(new GpuHostMsg_ChannelEstablished(channel_handle));
-}
-
-void GpuChannelManager::OnCloseChannel(
- const IPC::ChannelHandle& channel_handle) {
- for (auto it = gpu_channels_.begin(); it != gpu_channels_.end(); ++it) {
- if (it->second->channel_id() == channel_handle.name) {
- gpu_channels_.erase(it);
- return;
- }
- }
-}
-
-void GpuChannelManager::OnCreateViewCommandBuffer(
- const gfx::GLSurfaceHandle& window,
- int32_t client_id,
- const GPUCreateCommandBufferConfig& init_params,
- int32_t route_id) {
- CreateCommandBufferResult result = CREATE_COMMAND_BUFFER_FAILED;
-
- auto it = gpu_channels_.find(client_id);
- if (it != gpu_channels_.end()) {
- result = it->second->CreateViewCommandBuffer(window, init_params, route_id);
- }
-
- Send(new GpuHostMsg_CommandBufferCreated(result));
-}
-
-void GpuChannelManager::DestroyGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- int client_id) {
- io_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuChannelManager::DestroyGpuMemoryBufferOnIO,
- base::Unretained(this), id, client_id));
-}
-
-void GpuChannelManager::DestroyGpuMemoryBufferOnIO(
- gfx::GpuMemoryBufferId id,
- int client_id) {
- gpu_memory_buffer_factory_->DestroyGpuMemoryBuffer(id, client_id);
-}
-
-void GpuChannelManager::OnDestroyGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- int client_id,
- const gpu::SyncToken& sync_token) {
- if (sync_token.HasData()) {
- scoped_refptr<gpu::SyncPointClientState> release_state =
- sync_point_manager()->GetSyncPointClientState(
- sync_token.namespace_id(), sync_token.command_buffer_id());
- if (release_state) {
- sync_point_client_waiter_->WaitOutOfOrder(
- release_state.get(), sync_token.release_count(),
- base::Bind(&GpuChannelManager::DestroyGpuMemoryBuffer,
- base::Unretained(this), id, client_id));
- return;
- }
- }
-
- // No sync token or invalid sync token, destroy immediately.
- DestroyGpuMemoryBuffer(id, client_id);
-}
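OnDestroyGpuMemoryBuffer above defers the actual destruction until the buffer's sync token has been released, so the buffer is not torn down while commands that reference it are still pending; with no token, or an unknown release state, it destroys immediately. The same guard pattern in miniature, with a simple callback registry standing in for SyncPointManager; all names below are illustrative assumptions.

#include <cstdint>
#include <functional>
#include <map>
#include <utility>

// Hypothetical stand-in for the sync point bookkeeping: runs callbacks once
// a given release count has been reached.
class ReleaseWaiter {
 public:
  void WaitForRelease(uint64_t release_count, std::function<void()> done) {
    if (release_count <= released_)
      done();  // already released: run immediately
    else
      pending_.emplace(release_count, std::move(done));
  }

  void Release(uint64_t new_count) {
    released_ = new_count;
    auto end = pending_.upper_bound(new_count);
    for (auto it = pending_.begin(); it != end; ++it)
      it->second();
    pending_.erase(pending_.begin(), end);
  }

 private:
  uint64_t released_ = 0;
  std::multimap<uint64_t, std::function<void()>> pending_;
};

struct SyncTokenSketch {
  bool has_data = false;
  uint64_t release_count = 0;
};

void DestroyBufferWhenSafe(ReleaseWaiter* waiter,
                           const SyncTokenSketch& token,
                           std::function<void()> destroy) {
  if (token.has_data) {
    // Defer destruction until the producer has released the token.
    waiter->WaitForRelease(token.release_count, std::move(destroy));
    return;
  }
  // No sync token, destroy immediately.
  destroy();
}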
-
-#if defined(OS_CHROMEOS)
-void GpuChannelManager::OnCreateArcVideoAcceleratorChannel() {
- if (!gpu_arc_video_service_) {
- gpu_arc_video_service_.reset(
- new GpuArcVideoService(shutdown_event_, io_task_runner_));
- }
-
- gpu_arc_video_service_->CreateChannel(
- base::Bind(&GpuChannelManager::ArcVideoAcceleratorChannelCreated,
- weak_factory_.GetWeakPtr()));
-}
-
-void GpuChannelManager::ArcVideoAcceleratorChannelCreated(
- const IPC::ChannelHandle& handle) {
- Send(new GpuHostMsg_ArcVideoAcceleratorChannelCreated(handle));
-}
-
-void GpuChannelManager::OnShutdownArcVideoService() {
- gpu_arc_video_service_.reset();
-}
-#endif
-
-void GpuChannelManager::OnUpdateValueState(
- int client_id, unsigned int target, const gpu::ValueState& state) {
- // Only pass updated state to the channel corresponding to the
- // render_widget_host where the event originated.
- auto it = gpu_channels_.find(client_id);
- if (it != gpu_channels_.end())
- it->second->HandleUpdateValueState(target, state);
-}
-
-void GpuChannelManager::OnLoadedShader(const std::string& program_proto) {
- if (program_cache())
- program_cache()->LoadProgram(program_proto);
-}
-
-uint32_t GpuChannelManager::GetUnprocessedOrderNum() const {
- uint32_t unprocessed_order_num = 0;
- for (auto& kv : gpu_channels_) {
- unprocessed_order_num =
- std::max(unprocessed_order_num, kv.second->GetUnprocessedOrderNum());
- }
- return unprocessed_order_num;
-}
-
-uint32_t GpuChannelManager::GetProcessedOrderNum() const {
- uint32_t processed_order_num = 0;
- for (auto& kv : gpu_channels_) {
- processed_order_num =
- std::max(processed_order_num, kv.second->GetProcessedOrderNum());
- }
- return processed_order_num;
-}
-
-void GpuChannelManager::LoseAllContexts() {
- for (auto& kv : gpu_channels_) {
- kv.second->MarkAllContextsLost();
- }
- task_runner_->PostTask(FROM_HERE,
- base::Bind(&GpuChannelManager::OnLoseAllContexts,
- weak_factory_.GetWeakPtr()));
-}
-
-void GpuChannelManager::OnLoseAllContexts() {
- gpu_channels_.clear();
-}
-
-gfx::GLSurface* GpuChannelManager::GetDefaultOffscreenSurface() {
- if (!default_offscreen_surface_.get()) {
- default_offscreen_surface_ =
- gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size());
- }
- return default_offscreen_surface_.get();
-}
-
-#if defined(OS_ANDROID)
-void GpuChannelManager::DidAccessGpu() {
- last_gpu_access_time_ = base::TimeTicks::Now();
-}
-
-void GpuChannelManager::OnWakeUpGpu() {
- begin_wake_up_time_ = base::TimeTicks::Now();
- ScheduleWakeUpGpu();
-}
-
-void GpuChannelManager::ScheduleWakeUpGpu() {
- base::TimeTicks now = base::TimeTicks::Now();
- TRACE_EVENT2("gpu", "GpuChannelManager::ScheduleWakeUp",
- "idle_time", (now - last_gpu_access_time_).InMilliseconds(),
- "keep_awake_time", (now - begin_wake_up_time_).InMilliseconds());
- if (now - last_gpu_access_time_ <
- base::TimeDelta::FromMilliseconds(kMaxGpuIdleTimeMs))
- return;
- if (now - begin_wake_up_time_ >
- base::TimeDelta::FromMilliseconds(kMaxKeepAliveTimeMs))
- return;
-
- DoWakeUpGpu();
-
- base::MessageLoop::current()->PostDelayedTask(
- FROM_HERE, base::Bind(&GpuChannelManager::ScheduleWakeUpGpu,
- weak_factory_.GetWeakPtr()),
- base::TimeDelta::FromMilliseconds(kMaxGpuIdleTimeMs));
-}
-
-void GpuChannelManager::DoWakeUpGpu() {
- const GpuCommandBufferStub* stub = nullptr;
- for (const auto& kv : gpu_channels_) {
- const GpuChannel* channel = kv.second;
- stub = channel->GetOneStub();
- if (stub) {
- DCHECK(stub->decoder());
- break;
- }
- }
- if (!stub || !stub->decoder()->MakeCurrent())
- return;
- glFinish();
- DidAccessGpu();
-}
-#endif
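The Android-only block above keeps the GPU powered between a wake-up request and the client's first draw: every kMaxGpuIdleTimeMs it checks whether the GPU has actually gone idle and, if so, issues a trivial GL command, and it stops pinging once kMaxKeepAliveTimeMs have elapsed since the request. The decision alone, restated as a pure function; the enum and function name are illustrative, while the constants mirror the ones defined above.

#include <chrono>

enum class WakeUpAction { kStop, kPingGpuAndReschedule };

// Sketch of the ScheduleWakeUpGpu() decision.
WakeUpAction DecideWakeUp(std::chrono::steady_clock::time_point now,
                          std::chrono::steady_clock::time_point last_gpu_access,
                          std::chrono::steady_clock::time_point begin_wake_up) {
  constexpr std::chrono::milliseconds kMaxGpuIdleTime{40};
  constexpr std::chrono::milliseconds kMaxKeepAliveTime{200};

  if (now - last_gpu_access < kMaxGpuIdleTime)
    return WakeUpAction::kStop;  // the GPU was used recently; nothing to do
  if (now - begin_wake_up > kMaxKeepAliveTime)
    return WakeUpAction::kStop;  // the client still has not drawn; give up
  // Issue a trivial GL command (glFinish) and check again in kMaxGpuIdleTime.
  return WakeUpAction::kPingGpuAndReschedule;
}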
-
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_channel_manager.h b/chromium/content/common/gpu/gpu_channel_manager.h
deleted file mode 100644
index f195a55c27e..00000000000
--- a/chromium/content/common/gpu/gpu_channel_manager.h
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_CHANNEL_MANAGER_H_
-#define CONTENT_COMMON_GPU_GPU_CHANNEL_MANAGER_H_
-
-#include <stdint.h>
-
-#include <deque>
-#include <string>
-#include <vector>
-
-#include "base/containers/scoped_ptr_hash_map.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "build/build_config.h"
-#include "content/common/content_export.h"
-#include "content/common/content_param_traits.h"
-#include "content/common/gpu/gpu_memory_manager.h"
-#include "content/common/message_router.h"
-#include "ipc/ipc_listener.h"
-#include "ipc/ipc_sender.h"
-#include "ui/gfx/gpu_memory_buffer.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gl/gl_surface.h"
-
-namespace base {
-class WaitableEvent;
-}
-
-namespace gfx {
-class GLShareGroup;
-}
-
-namespace gpu {
-class PreemptionFlag;
-class SyncPointClient;
-class SyncPointManager;
-struct SyncToken;
-union ValueState;
-namespace gles2 {
-class FramebufferCompletenessCache;
-class MailboxManager;
-class ProgramCache;
-class ShaderTranslatorCache;
-}
-}
-
-namespace IPC {
-struct ChannelHandle;
-class SyncChannel;
-}
-
-struct GPUCreateCommandBufferConfig;
-struct GpuMsg_EstablishChannel_Params;
-
-namespace content {
-#if defined(OS_CHROMEOS)
-class GpuArcVideoService;
-#endif
-class GpuChannel;
-class GpuMemoryBufferFactory;
-class GpuWatchdog;
-
-// A GpuChannelManager is a thread responsible for issuing rendering commands,
-// managing the lifetimes of GPU channels, and forwarding IPC requests from
-// the browser process to them based on the corresponding renderer ID.
-class CONTENT_EXPORT GpuChannelManager : public IPC::Listener,
- public IPC::Sender {
- public:
- GpuChannelManager(IPC::SyncChannel* channel,
- GpuWatchdog* watchdog,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- base::WaitableEvent* shutdown_event,
- gpu::SyncPointManager* sync_point_manager,
- GpuMemoryBufferFactory* gpu_memory_buffer_factory);
- ~GpuChannelManager() override;
-
- // Remove the channel for a particular renderer.
- void RemoveChannel(int client_id);
-
- // Listener overrides.
- bool OnMessageReceived(const IPC::Message& msg) override;
-
- // Sender overrides.
- bool Send(IPC::Message* msg) override;
-
- void LoseAllContexts();
-
- int GenerateRouteID();
- void AddRoute(int32_t routing_id, IPC::Listener* listener);
- void RemoveRoute(int32_t routing_id);
-
- gpu::gles2::ProgramCache* program_cache();
- gpu::gles2::ShaderTranslatorCache* shader_translator_cache();
- gpu::gles2::FramebufferCompletenessCache* framebuffer_completeness_cache();
-
- GpuMemoryManager* gpu_memory_manager() { return &gpu_memory_manager_; }
-
- GpuChannel* LookupChannel(int32_t client_id) const;
-
- gfx::GLSurface* GetDefaultOffscreenSurface();
-
- GpuMemoryBufferFactory* gpu_memory_buffer_factory() {
- return gpu_memory_buffer_factory_;
- }
-
- // Returns the maximum order number for unprocessed IPC messages across all
- // channels.
- uint32_t GetUnprocessedOrderNum() const;
-
- // Returns the maximum order number for processed IPC messages across all
- // channels.
- uint32_t GetProcessedOrderNum() const;
-
-#if defined(OS_ANDROID)
- void DidAccessGpu();
-#endif
-
- protected:
- virtual scoped_ptr<GpuChannel> CreateGpuChannel(int client_id,
- uint64_t client_tracing_id,
- bool preempts,
- bool allow_future_sync_points,
- bool allow_real_time_streams);
-
- gpu::SyncPointManager* sync_point_manager() const {
- return sync_point_manager_;
- }
-
- gfx::GLShareGroup* share_group() const { return share_group_.get(); }
- gpu::gles2::MailboxManager* mailbox_manager() const {
- return mailbox_manager_.get();
- }
- gpu::PreemptionFlag* preemption_flag() const {
- return preemption_flag_.get();
- }
-
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
-
-  // These objects manage channels to individual renderer processes. There is
-  // one channel for each renderer process that has connected to this GPU
-  // process.
- base::ScopedPtrHashMap<int32_t, scoped_ptr<GpuChannel>> gpu_channels_;
-
- private:
- // Message handlers.
- bool OnControlMessageReceived(const IPC::Message& msg);
- void OnEstablishChannel(const GpuMsg_EstablishChannel_Params& params);
- void OnCloseChannel(const IPC::ChannelHandle& channel_handle);
- void OnVisibilityChanged(int32_t render_view_id,
- int32_t client_id,
- bool visible);
- void OnCreateViewCommandBuffer(
- const gfx::GLSurfaceHandle& window,
- int32_t client_id,
- const GPUCreateCommandBufferConfig& init_params,
- int32_t route_id);
- void OnLoadedShader(const std::string& shader);
- void DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id, int client_id);
- void DestroyGpuMemoryBufferOnIO(gfx::GpuMemoryBufferId id, int client_id);
- void OnDestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
- int client_id,
- const gpu::SyncToken& sync_token);
-#if defined(OS_CHROMEOS)
- void OnCreateArcVideoAcceleratorChannel();
- void ArcVideoAcceleratorChannelCreated(const IPC::ChannelHandle& handle);
- void OnShutdownArcVideoService();
-#endif
-
- void OnUpdateValueState(int client_id,
- unsigned int target,
- const gpu::ValueState& state);
-#if defined(OS_ANDROID)
- void OnWakeUpGpu();
- void ScheduleWakeUpGpu();
- void DoWakeUpGpu();
-#endif
- void OnLoseAllContexts();
-
- // Used to send and receive IPC messages from the browser process.
- IPC::SyncChannel* const channel_;
- MessageRouter router_;
-
- GpuWatchdog* watchdog_;
-
- base::WaitableEvent* shutdown_event_;
-
- scoped_refptr<gfx::GLShareGroup> share_group_;
- scoped_refptr<gpu::gles2::MailboxManager> mailbox_manager_;
- scoped_refptr<gpu::PreemptionFlag> preemption_flag_;
- GpuMemoryManager gpu_memory_manager_;
- // SyncPointManager guaranteed to outlive running MessageLoop.
- gpu::SyncPointManager* sync_point_manager_;
- scoped_ptr<gpu::SyncPointClient> sync_point_client_waiter_;
- scoped_ptr<gpu::gles2::ProgramCache> program_cache_;
- scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
- scoped_refptr<gpu::gles2::FramebufferCompletenessCache>
- framebuffer_completeness_cache_;
- scoped_refptr<gfx::GLSurface> default_offscreen_surface_;
-#if defined(OS_CHROMEOS)
- scoped_ptr<GpuArcVideoService> gpu_arc_video_service_;
-#endif
- GpuMemoryBufferFactory* const gpu_memory_buffer_factory_;
-#if defined(OS_ANDROID)
- // Last time we know the GPU was powered on. Global for tracking across all
- // transport surfaces.
- base::TimeTicks last_gpu_access_time_;
- base::TimeTicks begin_wake_up_time_;
-#endif
-
-  // Member variables should appear before the WeakPtrFactory, to ensure that
-  // any WeakPtrs to this GpuChannelManager are invalidated before its member
-  // variables' destructors are executed.
- base::WeakPtrFactory<GpuChannelManager> weak_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuChannelManager);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_CHANNEL_MANAGER_H_
diff --git a/chromium/content/common/gpu/gpu_channel_manager_unittest.cc b/chromium/content/common/gpu/gpu_channel_manager_unittest.cc
deleted file mode 100644
index bef694808e8..00000000000
--- a/chromium/content/common/gpu/gpu_channel_manager_unittest.cc
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright (c) 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_channel_manager.h"
-#include "content/common/gpu/gpu_channel_test_common.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "gpu/command_buffer/common/value_state.h"
-#include "gpu/command_buffer/service/gl_utils.h"
-#include "gpu/command_buffer/service/valuebuffer_manager.h"
-#include "ipc/ipc_test_sink.h"
-
-using gpu::gles2::ValuebufferManager;
-using gpu::ValueState;
-
-namespace content {
-
-class GpuChannelManagerTest : public GpuChannelTestCommon {
- public:
- GpuChannelManagerTest() : GpuChannelTestCommon() {}
- ~GpuChannelManagerTest() override {}
-};
-
-TEST_F(GpuChannelManagerTest, EstablishChannel) {
- int32_t kClientId = 1;
- uint64_t kClientTracingId = 1;
-
- ASSERT_TRUE(channel_manager());
-
- GpuMsg_EstablishChannel_Params params;
- params.client_id = kClientId;
- params.client_tracing_id = kClientTracingId;
- params.preempts = false;
- params.preempted = false;
- params.allow_future_sync_points = false;
- params.allow_real_time_streams = false;
- EXPECT_TRUE(
- channel_manager()->OnMessageReceived(GpuMsg_EstablishChannel(params)));
- EXPECT_EQ((size_t)1, sink()->message_count());
- const IPC::Message* msg =
- sink()->GetUniqueMessageMatching(GpuHostMsg_ChannelEstablished::ID);
- ASSERT_TRUE(msg);
- base::Tuple<IPC::ChannelHandle> handle;
- ASSERT_TRUE(GpuHostMsg_ChannelEstablished::Read(msg, &handle));
- EXPECT_NE("", base::get<0>(handle).name);
- sink()->ClearMessages();
-
- GpuChannel* channel = channel_manager()->LookupChannel(kClientId);
- ASSERT_TRUE(channel);
- EXPECT_EQ(base::get<0>(handle).name, channel->channel_id());
-}
-
-TEST_F(GpuChannelManagerTest, SecureValueStateForwarding) {
- int32_t kClientId1 = 111;
- uint64_t kClientTracingId1 = 11111;
- int32_t kClientId2 = 222;
- uint64_t kClientTracingId2 = 22222;
- ValueState value_state1;
- value_state1.int_value[0] = 1111;
- value_state1.int_value[1] = 0;
- value_state1.int_value[2] = 0;
- value_state1.int_value[3] = 0;
- ValueState value_state2;
- value_state2.int_value[0] = 3333;
- value_state2.int_value[1] = 0;
- value_state2.int_value[2] = 0;
- value_state2.int_value[3] = 0;
-
- ASSERT_TRUE(channel_manager());
-
- // Initialize gpu channels
- GpuMsg_EstablishChannel_Params params;
- params.client_id = kClientId1;
- params.client_tracing_id = kClientTracingId1;
- params.preempts = false;
- params.preempted = false;
- params.allow_future_sync_points = false;
- params.allow_real_time_streams = false;
- EXPECT_TRUE(
- channel_manager()->OnMessageReceived(GpuMsg_EstablishChannel(params)));
- GpuChannel* channel1 = channel_manager()->LookupChannel(kClientId1);
- ASSERT_TRUE(channel1);
-
- params.client_id = kClientId2;
- params.client_tracing_id = kClientTracingId2;
- EXPECT_TRUE(
- channel_manager()->OnMessageReceived(GpuMsg_EstablishChannel(params)));
- GpuChannel* channel2 = channel_manager()->LookupChannel(kClientId2);
- ASSERT_TRUE(channel2);
-
- EXPECT_NE(channel1, channel2);
-
- // Make sure value states are only accessible by proper channels
- channel_manager()->OnMessageReceived(GpuMsg_UpdateValueState(
- kClientId1, GL_MOUSE_POSITION_CHROMIUM, value_state1));
- channel_manager()->OnMessageReceived(GpuMsg_UpdateValueState(
- kClientId2, GL_MOUSE_POSITION_CHROMIUM, value_state2));
-
- const gpu::ValueStateMap* pending_value_buffer_state1 =
- channel1->pending_valuebuffer_state();
- const gpu::ValueStateMap* pending_value_buffer_state2 =
- channel2->pending_valuebuffer_state();
- EXPECT_NE(pending_value_buffer_state1, pending_value_buffer_state2);
-
- const ValueState* state1 =
- pending_value_buffer_state1->GetState(GL_MOUSE_POSITION_CHROMIUM);
- const ValueState* state2 =
- pending_value_buffer_state2->GetState(GL_MOUSE_POSITION_CHROMIUM);
- EXPECT_NE(state1, state2);
-
- EXPECT_EQ(state1->int_value[0], value_state1.int_value[0]);
- EXPECT_EQ(state2->int_value[0], value_state2.int_value[0]);
- EXPECT_NE(state1->int_value[0], state2->int_value[0]);
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_channel_test_common.cc b/chromium/content/common/gpu/gpu_channel_test_common.cc
deleted file mode 100644
index 7fe03759fc9..00000000000
--- a/chromium/content/common/gpu/gpu_channel_test_common.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_channel_test_common.h"
-
-#include "base/test/test_simple_task_runner.h"
-#include "base/thread_task_runner_handle.h"
-#include "gpu/command_buffer/service/sync_point_manager.h"
-#include "ipc/ipc_test_sink.h"
-
-namespace content {
-
-TestGpuChannelManager::TestGpuChannelManager(
- IPC::TestSink* sink,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- gpu::SyncPointManager* sync_point_manager,
- GpuMemoryBufferFactory* gpu_memory_buffer_factory)
- : GpuChannelManager(nullptr,
- nullptr,
- task_runner,
- io_task_runner,
- nullptr,
- sync_point_manager,
- gpu_memory_buffer_factory),
- sink_(sink) {}
-
-TestGpuChannelManager::~TestGpuChannelManager() {
- // Clear gpu channels here so that any IPC messages sent are handled using the
- // overridden Send method.
- gpu_channels_.clear();
-}
-
-bool TestGpuChannelManager::Send(IPC::Message* msg) {
- return sink_->Send(msg);
-}
-
-scoped_ptr<GpuChannel> TestGpuChannelManager::CreateGpuChannel(
- int client_id,
- uint64_t client_tracing_id,
- bool preempts,
- bool allow_future_sync_points,
- bool allow_real_time_streams) {
- return make_scoped_ptr(new TestGpuChannel(
- sink_, this, sync_point_manager(), share_group(), mailbox_manager(),
- preempts ? preemption_flag() : nullptr, task_runner_.get(),
- io_task_runner_.get(), client_id, client_tracing_id,
- allow_future_sync_points, allow_real_time_streams));
-}
-
-TestGpuChannel::TestGpuChannel(IPC::TestSink* sink,
- GpuChannelManager* gpu_channel_manager,
- gpu::SyncPointManager* sync_point_manager,
- gfx::GLShareGroup* share_group,
- gpu::gles2::MailboxManager* mailbox_manager,
- gpu::PreemptionFlag* preempting_flag,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- int client_id,
- uint64_t client_tracing_id,
- bool allow_future_sync_points,
- bool allow_real_time_streams)
- : GpuChannel(gpu_channel_manager,
- sync_point_manager,
- nullptr,
- share_group,
- mailbox_manager,
- preempting_flag,
- task_runner,
- io_task_runner,
- client_id,
- client_tracing_id,
- allow_future_sync_points,
- allow_real_time_streams),
- sink_(sink) {}
-
-TestGpuChannel::~TestGpuChannel() {
-  // Clear stubs here so that any IPC messages sent are handled using the
- // overridden Send method.
- stubs_.clear();
-}
-
-base::ProcessId TestGpuChannel::GetClientPID() const {
- return base::kNullProcessId;
-}
-
-IPC::ChannelHandle TestGpuChannel::Init(base::WaitableEvent* shutdown_event) {
- filter_->OnFilterAdded(sink_);
- return IPC::ChannelHandle(channel_id());
-}
-
-bool TestGpuChannel::Send(IPC::Message* msg) {
- DCHECK(!msg->is_sync());
- return sink_->Send(msg);
-}
-
-// TODO(sunnyps): Use a mock memory buffer factory when necessary.
-GpuChannelTestCommon::GpuChannelTestCommon()
- : sink_(new IPC::TestSink),
- task_runner_(new base::TestSimpleTaskRunner),
- io_task_runner_(new base::TestSimpleTaskRunner),
- sync_point_manager_(new gpu::SyncPointManager(false)),
- channel_manager_(new TestGpuChannelManager(sink_.get(),
- task_runner_.get(),
- io_task_runner_.get(),
- sync_point_manager_.get(),
- nullptr)) {}
-
-GpuChannelTestCommon::~GpuChannelTestCommon() {}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_channel_test_common.h b/chromium/content/common/gpu/gpu_channel_test_common.h
deleted file mode 100644
index ed2243531dd..00000000000
--- a/chromium/content/common/gpu/gpu_channel_test_common.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdint.h>
-
-#include "base/memory/scoped_ptr.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_channel_manager.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-class TestSimpleTaskRunner;
-} // namespace base
-
-namespace IPC {
-class TestSink;
-} // namespace IPC
-
-namespace content {
-
-class SyncPointManager;
-
-class TestGpuChannelManager : public GpuChannelManager {
- public:
- TestGpuChannelManager(IPC::TestSink* sink,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- gpu::SyncPointManager* sync_point_manager,
- GpuMemoryBufferFactory* gpu_memory_buffer_factory);
- ~TestGpuChannelManager() override;
-
- // IPC::Sender implementation.
- bool Send(IPC::Message* msg) override;
-
- protected:
- scoped_ptr<GpuChannel> CreateGpuChannel(
- int client_id,
- uint64_t client_tracing_id,
- bool preempts,
- bool allow_future_sync_points,
- bool allow_real_time_streams) override;
-
- private:
- IPC::TestSink* const sink_;
-};
-
-class TestGpuChannel : public GpuChannel {
- public:
- TestGpuChannel(IPC::TestSink* sink,
- GpuChannelManager* gpu_channel_manager,
- gpu::SyncPointManager* sync_point_manager,
- gfx::GLShareGroup* share_group,
- gpu::gles2::MailboxManager* mailbox_manager,
- gpu::PreemptionFlag* preempting_flag,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
- int client_id,
- uint64_t client_tracing_id,
- bool allow_future_sync_points,
- bool allow_real_time_streams);
- ~TestGpuChannel() override;
-
- base::ProcessId GetClientPID() const override;
-
- IPC::ChannelHandle Init(base::WaitableEvent* shutdown_event) override;
-
- // IPC::Sender implementation.
- bool Send(IPC::Message* msg) override;
-
- private:
- IPC::TestSink* const sink_;
-};
-
-class GpuChannelTestCommon : public testing::Test {
- public:
- GpuChannelTestCommon();
- ~GpuChannelTestCommon() override;
-
- protected:
- IPC::TestSink* sink() { return sink_.get(); }
- GpuChannelManager* channel_manager() { return channel_manager_.get(); }
- base::TestSimpleTaskRunner* task_runner() { return task_runner_.get(); }
-
- private:
- scoped_ptr<IPC::TestSink> sink_;
- scoped_refptr<base::TestSimpleTaskRunner> task_runner_;
- scoped_refptr<base::TestSimpleTaskRunner> io_task_runner_;
- scoped_ptr<gpu::SyncPointManager> sync_point_manager_;
- scoped_ptr<GpuChannelManager> channel_manager_;
-};
-
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_channel_unittest.cc b/chromium/content/common/gpu/gpu_channel_unittest.cc
deleted file mode 100644
index d7a376aef76..00000000000
--- a/chromium/content/common/gpu/gpu_channel_unittest.cc
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdint.h>
-
-#include "base/test/test_simple_task_runner.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_channel_manager.h"
-#include "content/common/gpu/gpu_channel_test_common.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "ipc/ipc_test_sink.h"
-
-namespace content {
-
-class GpuChannelTest : public GpuChannelTestCommon {
- public:
- GpuChannelTest() : GpuChannelTestCommon() {}
- ~GpuChannelTest() override {}
-
- GpuChannel* CreateChannel(int32_t client_id, bool allow_real_time_streams) {
- DCHECK(channel_manager());
- uint64_t kClientTracingId = 1;
- GpuMsg_EstablishChannel_Params params;
- params.client_id = client_id;
- params.client_tracing_id = kClientTracingId;
- params.preempts = false;
- params.preempted = false;
- params.allow_future_sync_points = false;
- params.allow_real_time_streams = allow_real_time_streams;
- EXPECT_TRUE(
- channel_manager()->OnMessageReceived(GpuMsg_EstablishChannel(params)));
- return channel_manager()->LookupChannel(client_id);
- }
-};
-
-TEST_F(GpuChannelTest, CreateViewCommandBuffer) {
- int32_t kClientId = 1;
- GpuChannel* channel = CreateChannel(kClientId, false);
- ASSERT_TRUE(channel);
-
- gfx::GLSurfaceHandle surface_handle;
- int32_t kRouteId = 1;
- GPUCreateCommandBufferConfig init_params;
- init_params.share_group_id = MSG_ROUTING_NONE;
- init_params.stream_id = 0;
- init_params.stream_priority = GpuStreamPriority::NORMAL;
- init_params.attribs = std::vector<int>();
- init_params.active_url = GURL();
- init_params.gpu_preference = gfx::PreferIntegratedGpu;
- channel_manager()->OnMessageReceived(GpuMsg_CreateViewCommandBuffer(
- surface_handle, kClientId, init_params, kRouteId));
-
- const IPC::Message* msg =
- sink()->GetUniqueMessageMatching(GpuHostMsg_CommandBufferCreated::ID);
- ASSERT_TRUE(msg);
-
- base::Tuple<CreateCommandBufferResult> result;
- ASSERT_TRUE(GpuHostMsg_CommandBufferCreated::Read(msg, &result));
-
- EXPECT_EQ(CREATE_COMMAND_BUFFER_SUCCEEDED, base::get<0>(result));
-
- sink()->ClearMessages();
-
- GpuCommandBufferStub* stub = channel->LookupCommandBuffer(kRouteId);
- ASSERT_TRUE(stub);
-}
-
-TEST_F(GpuChannelTest, IncompatibleStreamIds) {
- int32_t kClientId = 1;
- GpuChannel* channel = CreateChannel(kClientId, false);
- ASSERT_TRUE(channel);
-
- // Create first context.
- int32_t kRouteId1 = 1;
- int32_t kStreamId1 = 1;
- GPUCreateCommandBufferConfig init_params;
- init_params.share_group_id = MSG_ROUTING_NONE;
- init_params.stream_id = kStreamId1;
- init_params.stream_priority = GpuStreamPriority::NORMAL;
- init_params.attribs = std::vector<int>();
- init_params.active_url = GURL();
- init_params.gpu_preference = gfx::PreferIntegratedGpu;
- channel_manager()->OnMessageReceived(GpuMsg_CreateViewCommandBuffer(
- gfx::GLSurfaceHandle(), kClientId, init_params, kRouteId1));
-
- const IPC::Message* msg =
- sink()->GetUniqueMessageMatching(GpuHostMsg_CommandBufferCreated::ID);
- ASSERT_TRUE(msg);
-
- base::Tuple<CreateCommandBufferResult> result;
- ASSERT_TRUE(GpuHostMsg_CommandBufferCreated::Read(msg, &result));
-
- EXPECT_EQ(CREATE_COMMAND_BUFFER_SUCCEEDED, base::get<0>(result));
-
- sink()->ClearMessages();
-
- GpuCommandBufferStub* stub = channel->LookupCommandBuffer(kRouteId1);
- ASSERT_TRUE(stub);
-
- // Create second context in same share group but different stream.
- int32_t kRouteId2 = 2;
- int32_t kStreamId2 = 2;
-
- init_params.share_group_id = kRouteId1;
- init_params.stream_id = kStreamId2;
- init_params.stream_priority = GpuStreamPriority::NORMAL;
- init_params.attribs = std::vector<int>();
- init_params.active_url = GURL();
- init_params.gpu_preference = gfx::PreferIntegratedGpu;
- channel_manager()->OnMessageReceived(GpuMsg_CreateViewCommandBuffer(
- gfx::GLSurfaceHandle(), kClientId, init_params, kRouteId2));
-
- msg = sink()->GetUniqueMessageMatching(GpuHostMsg_CommandBufferCreated::ID);
- ASSERT_TRUE(msg);
-
- ASSERT_TRUE(GpuHostMsg_CommandBufferCreated::Read(msg, &result));
-
- EXPECT_EQ(CREATE_COMMAND_BUFFER_FAILED, base::get<0>(result));
-
- sink()->ClearMessages();
-
- stub = channel->LookupCommandBuffer(kRouteId2);
- ASSERT_FALSE(stub);
-}
-
-TEST_F(GpuChannelTest, IncompatibleStreamPriorities) {
- int32_t kClientId = 1;
- GpuChannel* channel = CreateChannel(kClientId, false);
- ASSERT_TRUE(channel);
-
- // Create first context.
- int32_t kRouteId1 = 1;
- int32_t kStreamId1 = 1;
- GpuStreamPriority kStreamPriority1 = GpuStreamPriority::NORMAL;
- GPUCreateCommandBufferConfig init_params;
- init_params.share_group_id = MSG_ROUTING_NONE;
- init_params.stream_id = kStreamId1;
- init_params.stream_priority = kStreamPriority1;
- init_params.attribs = std::vector<int>();
- init_params.active_url = GURL();
- init_params.gpu_preference = gfx::PreferIntegratedGpu;
- channel_manager()->OnMessageReceived(GpuMsg_CreateViewCommandBuffer(
- gfx::GLSurfaceHandle(), kClientId, init_params, kRouteId1));
-
- const IPC::Message* msg =
- sink()->GetUniqueMessageMatching(GpuHostMsg_CommandBufferCreated::ID);
- ASSERT_TRUE(msg);
-
- base::Tuple<CreateCommandBufferResult> result;
- ASSERT_TRUE(GpuHostMsg_CommandBufferCreated::Read(msg, &result));
-
- EXPECT_EQ(CREATE_COMMAND_BUFFER_SUCCEEDED, base::get<0>(result));
-
- sink()->ClearMessages();
-
- GpuCommandBufferStub* stub = channel->LookupCommandBuffer(kRouteId1);
- ASSERT_TRUE(stub);
-
-  // Create a second context in the same stream but with a different priority.
- int32_t kRouteId2 = 2;
- int32_t kStreamId2 = kStreamId1;
- GpuStreamPriority kStreamPriority2 = GpuStreamPriority::LOW;
-
- init_params.share_group_id = MSG_ROUTING_NONE;
- init_params.stream_id = kStreamId2;
- init_params.stream_priority = kStreamPriority2;
- init_params.attribs = std::vector<int>();
- init_params.active_url = GURL();
- init_params.gpu_preference = gfx::PreferIntegratedGpu;
- channel_manager()->OnMessageReceived(GpuMsg_CreateViewCommandBuffer(
- gfx::GLSurfaceHandle(), kClientId, init_params, kRouteId2));
-
- msg = sink()->GetUniqueMessageMatching(GpuHostMsg_CommandBufferCreated::ID);
- ASSERT_TRUE(msg);
-
- ASSERT_TRUE(GpuHostMsg_CommandBufferCreated::Read(msg, &result));
-
- EXPECT_EQ(CREATE_COMMAND_BUFFER_FAILED, base::get<0>(result));
-
- sink()->ClearMessages();
-
- stub = channel->LookupCommandBuffer(kRouteId2);
- ASSERT_FALSE(stub);
-}
-
-TEST_F(GpuChannelTest, StreamLifetime) {
- int32_t kClientId = 1;
- GpuChannel* channel = CreateChannel(kClientId, false);
- ASSERT_TRUE(channel);
-
- // Create first context.
- int32_t kRouteId1 = 1;
- int32_t kStreamId1 = 1;
- GpuStreamPriority kStreamPriority1 = GpuStreamPriority::NORMAL;
- GPUCreateCommandBufferConfig init_params;
- init_params.share_group_id = MSG_ROUTING_NONE;
- init_params.stream_id = kStreamId1;
- init_params.stream_priority = kStreamPriority1;
- init_params.attribs = std::vector<int>();
- init_params.active_url = GURL();
- init_params.gpu_preference = gfx::PreferIntegratedGpu;
- channel_manager()->OnMessageReceived(GpuMsg_CreateViewCommandBuffer(
- gfx::GLSurfaceHandle(), kClientId, init_params, kRouteId1));
-
- const IPC::Message* msg =
- sink()->GetUniqueMessageMatching(GpuHostMsg_CommandBufferCreated::ID);
- ASSERT_TRUE(msg);
-
- base::Tuple<CreateCommandBufferResult> result;
- ASSERT_TRUE(GpuHostMsg_CommandBufferCreated::Read(msg, &result));
-
- EXPECT_EQ(CREATE_COMMAND_BUFFER_SUCCEEDED, base::get<0>(result));
-
- sink()->ClearMessages();
-
- GpuCommandBufferStub* stub = channel->LookupCommandBuffer(kRouteId1);
- ASSERT_TRUE(stub);
-
- {
- // GpuChannelHost always calls set_unblock(false) on messages sent to the
- // GPU process.
- IPC::Message m = GpuChannelMsg_DestroyCommandBuffer(kRouteId1);
- m.set_unblock(false);
- EXPECT_TRUE(channel->filter()->OnMessageReceived(m));
- task_runner()->RunPendingTasks();
- }
-
- stub = channel->LookupCommandBuffer(kRouteId1);
- ASSERT_FALSE(stub);
-
-  // Create a second context in a different stream.
- int32_t kRouteId2 = 2;
- int32_t kStreamId2 = 2;
- GpuStreamPriority kStreamPriority2 = GpuStreamPriority::LOW;
-
- init_params.share_group_id = MSG_ROUTING_NONE;
- init_params.stream_id = kStreamId2;
- init_params.stream_priority = kStreamPriority2;
- init_params.attribs = std::vector<int>();
- init_params.active_url = GURL();
- init_params.gpu_preference = gfx::PreferIntegratedGpu;
- channel_manager()->OnMessageReceived(GpuMsg_CreateViewCommandBuffer(
- gfx::GLSurfaceHandle(), kClientId, init_params, kRouteId2));
-
- msg = sink()->GetUniqueMessageMatching(GpuHostMsg_CommandBufferCreated::ID);
- ASSERT_TRUE(msg);
-
- ASSERT_TRUE(GpuHostMsg_CommandBufferCreated::Read(msg, &result));
-
- EXPECT_EQ(CREATE_COMMAND_BUFFER_SUCCEEDED, base::get<0>(result));
-
- sink()->ClearMessages();
-
- stub = channel->LookupCommandBuffer(kRouteId2);
- ASSERT_TRUE(stub);
-}
-
-TEST_F(GpuChannelTest, RealTimeStreamsDisallowed) {
- int32_t kClientId = 1;
- bool allow_real_time_streams = false;
- GpuChannel* channel = CreateChannel(kClientId, allow_real_time_streams);
- ASSERT_TRUE(channel);
-
- // Create first context.
- int32_t kRouteId = 1;
- int32_t kStreamId = 1;
- GpuStreamPriority kStreamPriority = GpuStreamPriority::REAL_TIME;
- GPUCreateCommandBufferConfig init_params;
- init_params.share_group_id = MSG_ROUTING_NONE;
- init_params.stream_id = kStreamId;
- init_params.stream_priority = kStreamPriority;
- init_params.attribs = std::vector<int>();
- init_params.active_url = GURL();
- init_params.gpu_preference = gfx::PreferIntegratedGpu;
- channel_manager()->OnMessageReceived(GpuMsg_CreateViewCommandBuffer(
- gfx::GLSurfaceHandle(), kClientId, init_params, kRouteId));
-
- const IPC::Message* msg =
- sink()->GetUniqueMessageMatching(GpuHostMsg_CommandBufferCreated::ID);
- ASSERT_TRUE(msg);
-
- base::Tuple<CreateCommandBufferResult> result;
- ASSERT_TRUE(GpuHostMsg_CommandBufferCreated::Read(msg, &result));
-
- EXPECT_EQ(CREATE_COMMAND_BUFFER_FAILED, base::get<0>(result));
-
- sink()->ClearMessages();
-
- GpuCommandBufferStub* stub = channel->LookupCommandBuffer(kRouteId);
- ASSERT_FALSE(stub);
-}
-
-TEST_F(GpuChannelTest, RealTimeStreamsAllowed) {
- int32_t kClientId = 1;
- bool allow_real_time_streams = true;
- GpuChannel* channel = CreateChannel(kClientId, allow_real_time_streams);
- ASSERT_TRUE(channel);
-
- // Create first context.
- int32_t kRouteId = 1;
- int32_t kStreamId = 1;
- GpuStreamPriority kStreamPriority = GpuStreamPriority::REAL_TIME;
- GPUCreateCommandBufferConfig init_params;
- init_params.share_group_id = MSG_ROUTING_NONE;
- init_params.stream_id = kStreamId;
- init_params.stream_priority = kStreamPriority;
- init_params.attribs = std::vector<int>();
- init_params.active_url = GURL();
- init_params.gpu_preference = gfx::PreferIntegratedGpu;
- channel_manager()->OnMessageReceived(GpuMsg_CreateViewCommandBuffer(
- gfx::GLSurfaceHandle(), kClientId, init_params, kRouteId));
-
- const IPC::Message* msg =
- sink()->GetUniqueMessageMatching(GpuHostMsg_CommandBufferCreated::ID);
- ASSERT_TRUE(msg);
-
- base::Tuple<CreateCommandBufferResult> result;
- ASSERT_TRUE(GpuHostMsg_CommandBufferCreated::Read(msg, &result));
-
- EXPECT_EQ(CREATE_COMMAND_BUFFER_SUCCEEDED, base::get<0>(result));
-
- sink()->ClearMessages();
-
- GpuCommandBufferStub* stub = channel->LookupCommandBuffer(kRouteId);
- ASSERT_TRUE(stub);
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_command_buffer_stub.cc b/chromium/content/common/gpu/gpu_command_buffer_stub.cc
deleted file mode 100644
index 58bfa6a4fb9..00000000000
--- a/chromium/content/common/gpu/gpu_command_buffer_stub.cc
+++ /dev/null
@@ -1,1269 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_command_buffer_stub.h"
-
-#include <utility>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/command_line.h"
-#include "base/hash.h"
-#include "base/json/json_writer.h"
-#include "base/macros.h"
-#include "base/memory/shared_memory.h"
-#include "base/time/time.h"
-#include "base/trace_event/trace_event.h"
-#include "build/build_config.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_channel_manager.h"
-#include "content/common/gpu/gpu_memory_manager.h"
-#include "content/common/gpu/gpu_memory_tracking.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/gpu/gpu_watchdog.h"
-#include "content/common/gpu/image_transport_surface.h"
-#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
-#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
-#include "content/public/common/content_client.h"
-#include "content/public/common/content_switches.h"
-#include "gpu/command_buffer/common/constants.h"
-#include "gpu/command_buffer/common/mailbox.h"
-#include "gpu/command_buffer/common/sync_token.h"
-#include "gpu/command_buffer/service/gl_context_virtual.h"
-#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
-#include "gpu/command_buffer/service/image_factory.h"
-#include "gpu/command_buffer/service/image_manager.h"
-#include "gpu/command_buffer/service/logger.h"
-#include "gpu/command_buffer/service/mailbox_manager.h"
-#include "gpu/command_buffer/service/memory_tracking.h"
-#include "gpu/command_buffer/service/query_manager.h"
-#include "gpu/command_buffer/service/sync_point_manager.h"
-#include "gpu/command_buffer/service/transfer_buffer_manager.h"
-#include "gpu/command_buffer/service/valuebuffer_manager.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_switches.h"
-
-#if defined(OS_WIN)
-#include "base/win/win_util.h"
-#include "content/public/common/sandbox_init.h"
-#endif
-
-#if defined(OS_ANDROID)
-#include "content/common/gpu/stream_texture_android.h"
-#endif
-
-namespace content {
-struct WaitForCommandState {
- WaitForCommandState(int32_t start, int32_t end, IPC::Message* reply)
- : start(start), end(end), reply(reply) {}
-
- int32_t start;
- int32_t end;
- scoped_ptr<IPC::Message> reply;
-};
-
-namespace {
-
-// The GpuCommandBufferMemoryTracker class provides a bridge between the
-// ContextGroup's memory type managers and the GpuMemoryManager class.
-class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
- public:
- explicit GpuCommandBufferMemoryTracker(GpuChannel* channel,
- uint64_t share_group_tracing_guid)
- : tracking_group_(
- channel->gpu_channel_manager()
- ->gpu_memory_manager()
- ->CreateTrackingGroup(channel->GetClientPID(), this)),
- client_tracing_id_(channel->client_tracing_id()),
- client_id_(channel->client_id()),
- share_group_tracing_guid_(share_group_tracing_guid) {}
-
- void TrackMemoryAllocatedChange(
- size_t old_size, size_t new_size) override {
- tracking_group_->TrackMemoryAllocatedChange(
- old_size, new_size);
- }
-
- bool EnsureGPUMemoryAvailable(size_t size_needed) override {
- return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
-  }
-
- uint64_t ClientTracingId() const override { return client_tracing_id_; }
- int ClientId() const override { return client_id_; }
- uint64_t ShareGroupTracingGUID() const override {
- return share_group_tracing_guid_;
- }
-
- private:
- ~GpuCommandBufferMemoryTracker() override {}
- scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;
- const uint64_t client_tracing_id_;
- const int client_id_;
- const uint64_t share_group_tracing_guid_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
-};
-
-// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
-// url_hash matches.
-void FastSetActiveURL(const GURL& url, size_t url_hash) {
- // Leave the previously set URL in the empty case -- empty URLs are given by
- // BlinkPlatformImpl::createOffscreenGraphicsContext3D. Hopefully the
- // onscreen context URL was set previously and will show up even when a crash
- // occurs during offscreen command processing.
- if (url.is_empty())
- return;
- static size_t g_last_url_hash = 0;
- if (url_hash != g_last_url_hash) {
- g_last_url_hash = url_hash;
- GetContentClient()->SetActiveURL(url);
- }
-}
-
-// The first time a fence is polled, delay some extra time to allow other
-// stubs to process some work, or else the timing of the fences could
-// allow a pattern of alternating fast and slow frames to occur.
-const int64_t kHandleMoreWorkPeriodMs = 2;
-const int64_t kHandleMoreWorkPeriodBusyMs = 1;
-
-// Prevents idle work from being starved.
-const int64_t kMaxTimeSinceIdleMs = 10;
-
-class DevToolsChannelData : public base::trace_event::ConvertableToTraceFormat {
- public:
- static scoped_refptr<base::trace_event::ConvertableToTraceFormat>
- CreateForChannel(GpuChannel* channel);
-
- void AppendAsTraceFormat(std::string* out) const override {
- std::string tmp;
- base::JSONWriter::Write(*value_, &tmp);
- *out += tmp;
- }
-
- private:
- explicit DevToolsChannelData(base::Value* value) : value_(value) {}
- ~DevToolsChannelData() override {}
- scoped_ptr<base::Value> value_;
- DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
-};
-
-scoped_refptr<base::trace_event::ConvertableToTraceFormat>
-DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
- scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
- res->SetInteger("renderer_pid", channel->GetClientPID());
- res->SetDouble("used_bytes", channel->GetMemoryUsage());
- return new DevToolsChannelData(res.release());
-}
-
-void RunOnThread(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
- const base::Closure& callback) {
- if (task_runner->BelongsToCurrentThread()) {
- callback.Run();
- } else {
- task_runner->PostTask(FROM_HERE, callback);
- }
-}
-
-uint64_t GetCommandBufferID(int channel_id, int32_t route_id) {
- return (static_cast<uint64_t>(channel_id) << 32) | route_id;
-}
-
-} // namespace
-
-GpuCommandBufferStub::GpuCommandBufferStub(
- GpuChannel* channel,
- gpu::SyncPointManager* sync_point_manager,
- base::SingleThreadTaskRunner* task_runner,
- GpuCommandBufferStub* share_group,
- const gfx::GLSurfaceHandle& handle,
- gpu::gles2::MailboxManager* mailbox_manager,
- gpu::PreemptionFlag* preempt_by_flag,
- gpu::gles2::SubscriptionRefSet* subscription_ref_set,
- gpu::ValueStateMap* pending_valuebuffer_state,
- const gfx::Size& size,
- const gpu::gles2::DisallowedFeatures& disallowed_features,
- const std::vector<int32_t>& attribs,
- gfx::GpuPreference gpu_preference,
- int32_t stream_id,
- int32_t route_id,
- bool offscreen,
- GpuWatchdog* watchdog,
- const GURL& active_url)
- : channel_(channel),
- sync_point_manager_(sync_point_manager),
- task_runner_(task_runner),
- initialized_(false),
- handle_(handle),
- initial_size_(size),
- disallowed_features_(disallowed_features),
- requested_attribs_(attribs),
- gpu_preference_(gpu_preference),
- use_virtualized_gl_context_(false),
- command_buffer_id_(GetCommandBufferID(channel->client_id(), route_id)),
- stream_id_(stream_id),
- route_id_(route_id),
- offscreen_(offscreen),
- last_flush_count_(0),
- watchdog_(watchdog),
- waiting_for_sync_point_(false),
- previous_processed_num_(0),
- preemption_flag_(preempt_by_flag),
- active_url_(active_url) {
- active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
- FastSetActiveURL(active_url_, active_url_hash_);
-
- gpu::gles2::ContextCreationAttribHelper attrib_parser;
- attrib_parser.Parse(requested_attribs_);
-
- if (share_group) {
- context_group_ = share_group->context_group_;
- DCHECK(context_group_->bind_generates_resource() ==
- attrib_parser.bind_generates_resource);
- } else {
- context_group_ = new gpu::gles2::ContextGroup(
- mailbox_manager,
- new GpuCommandBufferMemoryTracker(channel, command_buffer_id_),
- channel_->gpu_channel_manager()->shader_translator_cache(),
- channel_->gpu_channel_manager()->framebuffer_completeness_cache(), NULL,
- subscription_ref_set, pending_valuebuffer_state,
- attrib_parser.bind_generates_resource);
- }
-
-// Virtualize PreferIntegratedGpu contexts by default on OS X to prevent
-// performance regressions when enabling FCM.
-// http://crbug.com/180463
-#if defined(OS_MACOSX)
- if (gpu_preference_ == gfx::PreferIntegratedGpu)
- use_virtualized_gl_context_ = true;
-#endif
-
- use_virtualized_gl_context_ |=
- context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
-
- // MailboxManagerSync synchronization correctness currently depends on having
- // only a single context. See crbug.com/510243 for details.
- use_virtualized_gl_context_ |= mailbox_manager->UsesSync();
-
- if (offscreen && initial_size_.IsEmpty()) {
- // If we're an offscreen surface with zero width and/or height, set to a
- // non-zero size so that we have a complete framebuffer for operations like
- // glClear.
- initial_size_ = gfx::Size(1, 1);
- }
-}
-
-GpuCommandBufferStub::~GpuCommandBufferStub() {
- Destroy();
-}
-
-GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
- return channel()->gpu_channel_manager()->gpu_memory_manager();
-}
-
-bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
- "GPUTask",
- "data",
- DevToolsChannelData::CreateForChannel(channel()));
- FastSetActiveURL(active_url_, active_url_hash_);
-
- bool have_context = false;
- // Ensure the appropriate GL context is current before handling any IPC
- // messages directed at the command buffer. This ensures that the message
- // handler can assume that the context is current (not necessary for
- // RetireSyncPoint or WaitSyncPoint).
- if (decoder_.get() &&
- message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
- message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
- message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
- message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
- message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
- message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
- message.type() != GpuCommandBufferMsg_SignalSyncPoint::ID) {
- if (!MakeCurrent())
- return false;
- have_context = true;
- }
-
- // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
- // here. This is so the reply can be delayed if the scheduler is unscheduled.
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
- OnInitialize);
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
- OnSetGetBuffer);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
- OnProduceFrontBuffer);
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
- OnWaitForTokenInRange);
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
- OnWaitForGetOffsetInRange);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
- OnRegisterTransferBuffer);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
- OnDestroyTransferBuffer);
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
- OnCreateVideoDecoder)
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
- OnCreateVideoEncoder)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
- OnRetireSyncPoint)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
- OnSignalSyncPoint)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncToken,
- OnSignalSyncToken)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
- OnSignalQuery)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
- OnCreateStreamTexture)
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
-
- CheckCompleteWaits();
-
- // Ensure that any delayed work that was created will be handled.
- if (have_context) {
- if (scheduler_)
- scheduler_->ProcessPendingQueries();
- ScheduleDelayedWork(
- base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodMs));
- }
-
- DCHECK(handled);
- return handled;
-}
-
-bool GpuCommandBufferStub::Send(IPC::Message* message) {
- return channel_->Send(message);
-}
-
-bool GpuCommandBufferStub::IsScheduled() {
- return (!scheduler_.get() || scheduler_->scheduled());
-}
-
-void GpuCommandBufferStub::PollWork() {
- // Post another delayed task if we have not yet reached the time at which
- // we should process delayed work.
- base::TimeTicks current_time = base::TimeTicks::Now();
- DCHECK(!process_delayed_work_time_.is_null());
- if (process_delayed_work_time_ > current_time) {
- task_runner_->PostDelayedTask(
- FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
- process_delayed_work_time_ - current_time);
- return;
- }
- process_delayed_work_time_ = base::TimeTicks();
-
- PerformWork();
-}
-
-void GpuCommandBufferStub::PerformWork() {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::PerformWork");
-
- FastSetActiveURL(active_url_, active_url_hash_);
- if (decoder_.get() && !MakeCurrent())
- return;
-
- if (scheduler_) {
- uint32_t current_unprocessed_num =
- channel()->gpu_channel_manager()->GetUnprocessedOrderNum();
- // We're idle when no messages were processed or scheduled.
- bool is_idle = (previous_processed_num_ == current_unprocessed_num);
- if (!is_idle && !last_idle_time_.is_null()) {
- base::TimeDelta time_since_idle =
- base::TimeTicks::Now() - last_idle_time_;
- base::TimeDelta max_time_since_idle =
- base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);
-
- // Force idle when it's been too long since last time we were idle.
- if (time_since_idle > max_time_since_idle)
- is_idle = true;
- }
-
- if (is_idle) {
- last_idle_time_ = base::TimeTicks::Now();
- scheduler_->PerformIdleWork();
- }
-
- scheduler_->ProcessPendingQueries();
- }
-
- ScheduleDelayedWork(
- base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodBusyMs));
-}
-
-bool GpuCommandBufferStub::HasUnprocessedCommands() {
- if (command_buffer_) {
- gpu::CommandBuffer::State state = command_buffer_->GetLastState();
- return command_buffer_->GetPutOffset() != state.get_offset &&
- !gpu::error::IsError(state.error);
- }
- return false;
-}
-
-void GpuCommandBufferStub::ScheduleDelayedWork(base::TimeDelta delay) {
- bool has_more_work = scheduler_.get() && (scheduler_->HasPendingQueries() ||
- scheduler_->HasMoreIdleWork());
- if (!has_more_work) {
- last_idle_time_ = base::TimeTicks();
- return;
- }
-
- base::TimeTicks current_time = base::TimeTicks::Now();
- // |process_delayed_work_time_| is set if processing of delayed work is
- // already scheduled. Just update the time if already scheduled.
- if (!process_delayed_work_time_.is_null()) {
- process_delayed_work_time_ = current_time + delay;
- return;
- }
-
- // Idle when no messages are processed between now and when
- // PollWork is called.
- previous_processed_num_ =
- channel()->gpu_channel_manager()->GetProcessedOrderNum();
- if (last_idle_time_.is_null())
- last_idle_time_ = current_time;
-
- // IsScheduled() returns true after passing all unschedule fences
- // and this is when we can start performing idle work. Idle work
- // is done synchronously so we can set delay to 0 and instead poll
- // for more work at the rate idle work is performed. This also ensures
- // that idle work is done as efficiently as possible without any
- // unnecessary delays.
- if (scheduler_.get() && scheduler_->scheduled() &&
- scheduler_->HasMoreIdleWork()) {
- delay = base::TimeDelta();
- }
-
- process_delayed_work_time_ = current_time + delay;
- task_runner_->PostDelayedTask(
- FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
- delay);
-}
-
-bool GpuCommandBufferStub::MakeCurrent() {
- if (decoder_->MakeCurrent())
- return true;
- DLOG(ERROR) << "Context lost because MakeCurrent failed.";
- command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
- command_buffer_->SetParseError(gpu::error::kLostContext);
- CheckContextLost();
- return false;
-}
-
-void GpuCommandBufferStub::Destroy() {
- if (wait_for_token_) {
- Send(wait_for_token_->reply.release());
- wait_for_token_.reset();
- }
- if (wait_for_get_offset_) {
- Send(wait_for_get_offset_->reply.release());
- wait_for_get_offset_.reset();
- }
-
- if (initialized_) {
- GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
- if (handle_.is_null() && !active_url_.is_empty()) {
- gpu_channel_manager->Send(
- new GpuHostMsg_DidDestroyOffscreenContext(active_url_));
- }
- }
-
- while (!sync_points_.empty())
- OnRetireSyncPoint(sync_points_.front());
-
- if (decoder_)
- decoder_->set_engine(NULL);
-
- // The scheduler has raw references to the decoder and the command buffer so
- // destroy it before those.
- scheduler_.reset();
-
- sync_point_client_.reset();
-
- bool have_context = false;
- if (decoder_ && decoder_->GetGLContext()) {
- // Try to make the context current regardless of whether it was lost, so we
- // don't leak resources.
- have_context = decoder_->GetGLContext()->MakeCurrent(surface_.get());
- }
- FOR_EACH_OBSERVER(DestructionObserver,
- destruction_observers_,
- OnWillDestroyStub());
-
- if (decoder_) {
- decoder_->Destroy(have_context);
- decoder_.reset();
- }
-
- command_buffer_.reset();
-
- // Remove this after crbug.com/248395 is sorted out.
- surface_ = NULL;
-}
-
-void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
- Destroy();
- GpuCommandBufferMsg_Initialize::WriteReplyParams(
- reply_message, false, gpu::Capabilities());
- Send(reply_message);
-}
-
-void GpuCommandBufferStub::OnInitialize(
- base::SharedMemoryHandle shared_state_handle,
- IPC::Message* reply_message) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
- DCHECK(!command_buffer_.get());
-
- scoped_ptr<base::SharedMemory> shared_state_shm(
- new base::SharedMemory(shared_state_handle, false));
-
- command_buffer_.reset(new gpu::CommandBufferService(
- context_group_->transfer_buffer_manager()));
-
- bool result = command_buffer_->Initialize();
- DCHECK(result);
-
- GpuChannelManager* manager = channel_->gpu_channel_manager();
- DCHECK(manager);
-
- decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
- scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
- decoder_.get(),
- decoder_.get()));
- sync_point_client_ = sync_point_manager_->CreateSyncPointClient(
- channel_->GetSyncPointOrderData(), gpu::CommandBufferNamespace::GPU_IO,
- command_buffer_id_);
-
- if (preemption_flag_.get())
- scheduler_->SetPreemptByFlag(preemption_flag_);
-
- decoder_->set_engine(scheduler_.get());
-
- if (!handle_.is_null()) {
- surface_ = ImageTransportSurface::CreateSurface(
- channel_->gpu_channel_manager(),
- this,
- handle_);
- } else {
- surface_ = manager->GetDefaultOffscreenSurface();
- }
-
- if (!surface_.get()) {
- DLOG(ERROR) << "Failed to create surface.";
- OnInitializeFailed(reply_message);
- return;
- }
-
- scoped_refptr<gfx::GLContext> context;
- if (use_virtualized_gl_context_ && channel_->share_group()) {
- context = channel_->share_group()->GetSharedContext();
- if (!context.get()) {
- context = gfx::GLContext::CreateGLContext(
- channel_->share_group(),
- channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
- gpu_preference_);
- if (!context.get()) {
- DLOG(ERROR) << "Failed to create shared context for virtualization.";
- OnInitializeFailed(reply_message);
- return;
- }
- channel_->share_group()->SetSharedContext(context.get());
- }
- // This should be a non-virtual GL context.
- DCHECK(context->GetHandle());
- context = new gpu::GLContextVirtual(
- channel_->share_group(), context.get(), decoder_->AsWeakPtr());
- if (!context->Initialize(surface_.get(), gpu_preference_)) {
- // TODO(sievers): The real context created above for the default
- // offscreen surface might not be compatible with this surface.
- // Need to adjust at least GLX to be able to create the initial context
- // with a config that is compatible with onscreen and offscreen surfaces.
- context = NULL;
-
- DLOG(ERROR) << "Failed to initialize virtual GL context.";
- OnInitializeFailed(reply_message);
- return;
- }
- }
- if (!context.get()) {
- context = gfx::GLContext::CreateGLContext(
- channel_->share_group(), surface_.get(), gpu_preference_);
- }
- if (!context.get()) {
- DLOG(ERROR) << "Failed to create context.";
- OnInitializeFailed(reply_message);
- return;
- }
-
- if (!context->MakeCurrent(surface_.get())) {
- LOG(ERROR) << "Failed to make context current.";
- OnInitializeFailed(reply_message);
- return;
- }
-
- if (!context->GetGLStateRestorer()) {
- context->SetGLStateRestorer(
- new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
- }
-
- if (!context_group_->has_program_cache() &&
- !context_group_->feature_info()->workarounds().disable_program_cache) {
- context_group_->set_program_cache(
- channel_->gpu_channel_manager()->program_cache());
- }
-
- // Initialize the decoder with either the view or pbuffer GLContext.
- if (!decoder_->Initialize(surface_, context, offscreen_, initial_size_,
- disallowed_features_, requested_attribs_)) {
- DLOG(ERROR) << "Failed to initialize decoder.";
- OnInitializeFailed(reply_message);
- return;
- }
-
- if (base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableGPUServiceLogging)) {
- decoder_->set_log_commands(true);
- }
-
- decoder_->GetLogger()->SetMsgCallback(
- base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
- base::Unretained(this)));
- decoder_->SetShaderCacheCallback(
- base::Bind(&GpuCommandBufferStub::SendCachedShader,
- base::Unretained(this)));
- decoder_->SetWaitSyncPointCallback(
- base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
- base::Unretained(this)));
- decoder_->SetFenceSyncReleaseCallback(base::Bind(
- &GpuCommandBufferStub::OnFenceSyncRelease, base::Unretained(this)));
- decoder_->SetWaitFenceSyncCallback(base::Bind(
- &GpuCommandBufferStub::OnWaitFenceSync, base::Unretained(this)));
-
- command_buffer_->SetPutOffsetChangeCallback(
- base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
- command_buffer_->SetGetBufferChangeCallback(
- base::Bind(&gpu::GpuScheduler::SetGetBuffer,
- base::Unretained(scheduler_.get())));
- command_buffer_->SetParseErrorCallback(
- base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
- scheduler_->SetSchedulingChangedCallback(base::Bind(
- &GpuCommandBufferStub::OnSchedulingChanged, base::Unretained(this)));
-
- if (watchdog_) {
- scheduler_->SetCommandProcessedCallback(
- base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
- base::Unretained(this)));
- }
-
- const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
- if (!shared_state_shm->Map(kSharedStateSize)) {
- DLOG(ERROR) << "Failed to map shared state buffer.";
- OnInitializeFailed(reply_message);
- return;
- }
- command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
- std::move(shared_state_shm), kSharedStateSize));
-
- gpu::Capabilities capabilities = decoder_->GetCapabilities();
- capabilities.future_sync_points = channel_->allow_future_sync_points();
-
- GpuCommandBufferMsg_Initialize::WriteReplyParams(
- reply_message, true, capabilities);
- Send(reply_message);
-
- if (handle_.is_null() && !active_url_.is_empty()) {
- manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
- active_url_));
- }
-
- initialized_ = true;
-}
-
-void GpuCommandBufferStub::OnCreateStreamTexture(uint32_t texture_id,
- int32_t stream_id,
- bool* succeeded) {
-#if defined(OS_ANDROID)
- *succeeded = StreamTexture::Create(this, texture_id, stream_id);
-#else
- *succeeded = false;
-#endif
-}
-
-void GpuCommandBufferStub::SetLatencyInfoCallback(
- const LatencyInfoCallback& callback) {
- latency_info_callback_ = callback;
-}
-
-int32_t GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
- // The requested attributes are stored as pairs of (enum, value);
- // search for the requested attribute and return its value.
- for (std::vector<int32_t>::const_iterator it = requested_attribs_.begin();
- it != requested_attribs_.end(); ++it) {
- if (*it++ == attr) {
- return *it;
- }
- }
- return -1;
-}
-
-void GpuCommandBufferStub::OnSetGetBuffer(int32_t shm_id,
- IPC::Message* reply_message) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
- if (command_buffer_)
- command_buffer_->SetGetBuffer(shm_id);
- Send(reply_message);
-}
-
-void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
- if (!decoder_) {
- LOG(ERROR) << "Can't produce front buffer before initialization.";
- return;
- }
-
- decoder_->ProduceFrontBuffer(mailbox);
-}
-
-void GpuCommandBufferStub::OnParseError() {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
- DCHECK(command_buffer_.get());
- gpu::CommandBuffer::State state = command_buffer_->GetLastState();
- IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
- route_id_, state.context_lost_reason, state.error);
- msg->set_unblock(true);
- Send(msg);
-
- // Tell the browser about this context loss as well, so it can
- // determine whether client APIs like WebGL need to be immediately
- // blocked from automatically running.
- GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
- gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
- handle_.is_null(), state.context_lost_reason, active_url_));
-
- CheckContextLost();
-}
-
-void GpuCommandBufferStub::OnSchedulingChanged(bool scheduled) {
- TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnSchedulingChanged", "scheduled",
- scheduled);
- channel_->OnStubSchedulingChanged(this, scheduled);
-}
-
-void GpuCommandBufferStub::OnWaitForTokenInRange(int32_t start,
- int32_t end,
- IPC::Message* reply_message) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
- DCHECK(command_buffer_.get());
- CheckContextLost();
- if (wait_for_token_)
- LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
- wait_for_token_ =
- make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
- CheckCompleteWaits();
-}
-
-void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
- int32_t start,
- int32_t end,
- IPC::Message* reply_message) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
- DCHECK(command_buffer_.get());
- CheckContextLost();
- if (wait_for_get_offset_) {
- LOG(ERROR)
- << "Got WaitForGetOffset command while currently waiting for offset.";
- }
- wait_for_get_offset_ =
- make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
- CheckCompleteWaits();
-}
-
-void GpuCommandBufferStub::CheckCompleteWaits() {
- if (wait_for_token_ || wait_for_get_offset_) {
- gpu::CommandBuffer::State state = command_buffer_->GetLastState();
- if (wait_for_token_ &&
- (gpu::CommandBuffer::InRange(
- wait_for_token_->start, wait_for_token_->end, state.token) ||
- state.error != gpu::error::kNoError)) {
- ReportState();
- GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
- wait_for_token_->reply.get(), state);
- Send(wait_for_token_->reply.release());
- wait_for_token_.reset();
- }
- if (wait_for_get_offset_ &&
- (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
- wait_for_get_offset_->end,
- state.get_offset) ||
- state.error != gpu::error::kNoError)) {
- ReportState();
- GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
- wait_for_get_offset_->reply.get(), state);
- Send(wait_for_get_offset_->reply.release());
- wait_for_get_offset_.reset();
- }
- }
-}
-
-void GpuCommandBufferStub::OnAsyncFlush(
- int32_t put_offset,
- uint32_t flush_count,
- const std::vector<ui::LatencyInfo>& latency_info) {
- TRACE_EVENT1(
- "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
- DCHECK(command_buffer_);
-
- // We received this message out-of-order. This should not happen but is here
- // to catch regressions. Ignore the message.
- DVLOG_IF(0, flush_count - last_flush_count_ >= 0x8000000U)
- << "Received a Flush message out-of-order";
-
- if (flush_count > last_flush_count_ &&
- ui::LatencyInfo::Verify(latency_info,
- "GpuCommandBufferStub::OnAsyncFlush") &&
- !latency_info_callback_.is_null()) {
- latency_info_callback_.Run(latency_info);
- }
-
- last_flush_count_ = flush_count;
- gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
- command_buffer_->Flush(put_offset);
- gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();
-
- if (pre_state.get_offset != post_state.get_offset)
- ReportState();
-
-#if defined(OS_ANDROID)
- GpuChannelManager* manager = channel_->gpu_channel_manager();
- manager->DidAccessGpu();
-#endif
-}
-
-void GpuCommandBufferStub::OnRegisterTransferBuffer(
- int32_t id,
- base::SharedMemoryHandle transfer_buffer,
- uint32_t size) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");
-
- // Take ownership of the memory and map it into this process.
- // This validates the size.
- scoped_ptr<base::SharedMemory> shared_memory(
- new base::SharedMemory(transfer_buffer, false));
- if (!shared_memory->Map(size)) {
- DVLOG(0) << "Failed to map shared memory.";
- return;
- }
-
- if (command_buffer_) {
- command_buffer_->RegisterTransferBuffer(
- id, gpu::MakeBackingFromSharedMemory(std::move(shared_memory), size));
- }
-}
-
-void GpuCommandBufferStub::OnDestroyTransferBuffer(int32_t id) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");
-
- if (command_buffer_)
- command_buffer_->DestroyTransferBuffer(id);
-}
-
-void GpuCommandBufferStub::OnCommandProcessed() {
- if (watchdog_)
- watchdog_->CheckArmed();
-}
-
-void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }
-
-void GpuCommandBufferStub::PutChanged() {
- FastSetActiveURL(active_url_, active_url_hash_);
- scheduler_->PutChanged();
-}
-
-void GpuCommandBufferStub::OnCreateVideoDecoder(
- const media::VideoDecodeAccelerator::Config& config,
- int32_t decoder_route_id,
- IPC::Message* reply_message) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
- GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
- decoder_route_id, this, channel_->io_task_runner());
- decoder->Initialize(config, reply_message);
- // decoder is registered as a DestructionObserver of this stub and will
- // self-delete during destruction of this stub.
-}
-
-void GpuCommandBufferStub::OnCreateVideoEncoder(
- media::VideoPixelFormat input_format,
- const gfx::Size& input_visible_size,
- media::VideoCodecProfile output_profile,
- uint32_t initial_bitrate,
- int32_t encoder_route_id,
- IPC::Message* reply_message) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
- GpuVideoEncodeAccelerator* encoder =
- new GpuVideoEncodeAccelerator(encoder_route_id, this);
- encoder->Initialize(input_format,
- input_visible_size,
- output_profile,
- initial_bitrate,
- reply_message);
- // encoder is registered as a DestructionObserver of this stub and will
- // self-delete during destruction of this stub.
-}
-
-void GpuCommandBufferStub::InsertSyncPoint(uint32_t sync_point, bool retire) {
- sync_points_.push_back(sync_point);
- if (retire) {
- OnMessageReceived(
- GpuCommandBufferMsg_RetireSyncPoint(route_id_, sync_point));
- }
-}
-
-void GpuCommandBufferStub::OnRetireSyncPoint(uint32_t sync_point) {
- DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
- sync_points_.pop_front();
-
- gpu::gles2::MailboxManager* mailbox_manager =
- context_group_->mailbox_manager();
- if (mailbox_manager->UsesSync() && MakeCurrent()) {
- // Old sync points are global and do not have a command buffer ID, so we
- // can simply use the global sync point number as the release count, with
- // 0 for the command buffer ID (under normal circumstances 0 is invalid
- // and will not be used), until the old sync points are replaced.
- gpu::SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO, 0, 0,
- sync_point);
- mailbox_manager->PushTextureUpdates(sync_token);
- }
-
- sync_point_manager_->RetireSyncPoint(sync_point);
-}
-
-bool GpuCommandBufferStub::OnWaitSyncPoint(uint32_t sync_point) {
- DCHECK(!waiting_for_sync_point_);
- DCHECK(scheduler_->scheduled());
- if (!sync_point)
- return true;
- if (sync_point_manager_->IsSyncPointRetired(sync_point)) {
- // Old sync points are global and do not have a command buffer ID, so we
- // can simply use the global sync point number as the release count, with
- // 0 for the command buffer ID (under normal circumstances 0 is invalid
- // and will not be used), until the old sync points are replaced.
- PullTextureUpdates(gpu::CommandBufferNamespace::GPU_IO, 0, sync_point);
- return true;
- }
-
- TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this, "GpuCommandBufferStub",
- this);
-
- waiting_for_sync_point_ = true;
- sync_point_manager_->AddSyncPointCallback(
- sync_point,
- base::Bind(&RunOnThread, task_runner_,
- base::Bind(&GpuCommandBufferStub::OnWaitSyncPointCompleted,
- this->AsWeakPtr(), sync_point)));
-
- if (!waiting_for_sync_point_)
- return true;
-
- scheduler_->SetScheduled(false);
- return false;
-}
-
-void GpuCommandBufferStub::OnWaitSyncPointCompleted(uint32_t sync_point) {
- DCHECK(waiting_for_sync_point_);
- TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this, "GpuCommandBufferStub",
- this);
- // Old sync points are global and do not have a command buffer ID, so we
- // can simply use the global sync point number as the release count, with
- // 0 for the command buffer ID (under normal circumstances 0 is invalid
- // and will not be used), until the old sync points are replaced.
- PullTextureUpdates(gpu::CommandBufferNamespace::GPU_IO, 0, sync_point);
- waiting_for_sync_point_ = false;
- scheduler_->SetScheduled(true);
-}
-
-void GpuCommandBufferStub::PullTextureUpdates(
- gpu::CommandBufferNamespace namespace_id,
- uint64_t command_buffer_id,
- uint32_t release) {
- gpu::gles2::MailboxManager* mailbox_manager =
- context_group_->mailbox_manager();
- if (mailbox_manager->UsesSync() && MakeCurrent()) {
- gpu::SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
- mailbox_manager->PullTextureUpdates(sync_token);
- }
-}
-
-void GpuCommandBufferStub::OnSignalSyncPoint(uint32_t sync_point, uint32_t id) {
- sync_point_manager_->AddSyncPointCallback(
- sync_point,
- base::Bind(&GpuCommandBufferStub::OnSignalAck, this->AsWeakPtr(), id));
-}
-
-void GpuCommandBufferStub::OnSignalSyncToken(const gpu::SyncToken& sync_token,
- uint32_t id) {
- scoped_refptr<gpu::SyncPointClientState> release_state =
- sync_point_manager_->GetSyncPointClientState(
- sync_token.namespace_id(), sync_token.command_buffer_id());
-
- if (release_state) {
- sync_point_client_->Wait(release_state.get(), sync_token.release_count(),
- base::Bind(&GpuCommandBufferStub::OnSignalAck,
- this->AsWeakPtr(), id));
- } else {
- OnSignalAck(id);
- }
-}
-
-void GpuCommandBufferStub::OnSignalAck(uint32_t id) {
- Send(new GpuCommandBufferMsg_SignalAck(route_id_, id));
-}
-
-void GpuCommandBufferStub::OnSignalQuery(uint32_t query_id, uint32_t id) {
- if (decoder_) {
- gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
- if (query_manager) {
- gpu::gles2::QueryManager::Query* query =
- query_manager->GetQuery(query_id);
- if (query) {
- query->AddCallback(
- base::Bind(&GpuCommandBufferStub::OnSignalAck,
- this->AsWeakPtr(),
- id));
- return;
- }
- }
- }
- // Something went wrong, run callback immediately.
- OnSignalAck(id);
-}
-
-void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) {
- if (sync_point_client_->client_state()->IsFenceSyncReleased(release)) {
- DLOG(ERROR) << "Fence Sync has already been released.";
- return;
- }
-
- gpu::gles2::MailboxManager* mailbox_manager =
- context_group_->mailbox_manager();
- if (mailbox_manager->UsesSync() && MakeCurrent()) {
- gpu::SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO, 0,
- command_buffer_id_, release);
- mailbox_manager->PushTextureUpdates(sync_token);
- }
-
- sync_point_client_->ReleaseFenceSync(release);
-}
-
-bool GpuCommandBufferStub::OnWaitFenceSync(
- gpu::CommandBufferNamespace namespace_id,
- uint64_t command_buffer_id,
- uint64_t release) {
- DCHECK(!waiting_for_sync_point_);
- DCHECK(scheduler_->scheduled());
-
- scoped_refptr<gpu::SyncPointClientState> release_state =
- sync_point_manager_->GetSyncPointClientState(namespace_id,
- command_buffer_id);
-
- if (!release_state)
- return true;
-
- if (release_state->IsFenceSyncReleased(release)) {
- PullTextureUpdates(namespace_id, command_buffer_id, release);
- return true;
- }
-
- TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub",
- this);
- waiting_for_sync_point_ = true;
- sync_point_client_->WaitNonThreadSafe(
- release_state.get(), release, task_runner_,
- base::Bind(&GpuCommandBufferStub::OnWaitFenceSyncCompleted,
- this->AsWeakPtr(), namespace_id, command_buffer_id, release));
-
- if (!waiting_for_sync_point_)
- return true;
-
- scheduler_->SetScheduled(false);
- return false;
-}
-
-void GpuCommandBufferStub::OnWaitFenceSyncCompleted(
- gpu::CommandBufferNamespace namespace_id,
- uint64_t command_buffer_id,
- uint64_t release) {
- DCHECK(waiting_for_sync_point_);
- TRACE_EVENT_ASYNC_END1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub",
- this);
- PullTextureUpdates(namespace_id, command_buffer_id, release);
- waiting_for_sync_point_ = false;
- scheduler_->SetScheduled(true);
-}
-
-void GpuCommandBufferStub::OnCreateImage(
- const GpuCommandBufferMsg_CreateImage_Params& params) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");
- const int32_t id = params.id;
- const gfx::GpuMemoryBufferHandle& handle = params.gpu_memory_buffer;
- const gfx::Size& size = params.size;
- const gfx::BufferFormat& format = params.format;
- const uint32_t internalformat = params.internal_format;
- const uint64_t image_release_count = params.image_release_count;
-
- if (!decoder_)
- return;
-
- gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
- DCHECK(image_manager);
- if (image_manager->LookupImage(id)) {
- LOG(ERROR) << "Image already exists with same ID.";
- return;
- }
-
- if (!gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
- format, decoder_->GetCapabilities())) {
- LOG(ERROR) << "Format is not supported.";
- return;
- }
-
- if (!gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(size,
- format)) {
- LOG(ERROR) << "Invalid image size for format.";
- return;
- }
-
- if (!gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
- internalformat, format)) {
- LOG(ERROR) << "Incompatible image format.";
- return;
- }
-
- scoped_refptr<gl::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
- handle, size, format, internalformat);
- if (!image.get())
- return;
-
- image_manager->AddImage(image.get(), id);
- if (image_release_count) {
- sync_point_client_->ReleaseFenceSync(image_release_count);
- }
-}
-
-void GpuCommandBufferStub::OnDestroyImage(int32_t id) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");
-
- if (!decoder_)
- return;
-
- gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
- DCHECK(image_manager);
- if (!image_manager->LookupImage(id)) {
- LOG(ERROR) << "Image with ID doesn't exist.";
- return;
- }
-
- image_manager->RemoveImage(id);
-}
-
-void GpuCommandBufferStub::SendConsoleMessage(int32_t id,
- const std::string& message) {
- GPUCommandBufferConsoleMessage console_message;
- console_message.id = id;
- console_message.message = message;
- IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
- route_id_, console_message);
- msg->set_unblock(true);
- Send(msg);
-}
-
-void GpuCommandBufferStub::SendCachedShader(
- const std::string& key, const std::string& shader) {
- channel_->CacheShader(key, shader);
-}
-
-void GpuCommandBufferStub::AddDestructionObserver(
- DestructionObserver* observer) {
- destruction_observers_.AddObserver(observer);
-}
-
-void GpuCommandBufferStub::RemoveDestructionObserver(
- DestructionObserver* observer) {
- destruction_observers_.RemoveObserver(observer);
-}
-
-const gpu::gles2::FeatureInfo* GpuCommandBufferStub::GetFeatureInfo() const {
- return context_group_->feature_info();
-}
-
-gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
- return context_group_->memory_tracker();
-}
-
-bool GpuCommandBufferStub::CheckContextLost() {
- DCHECK(command_buffer_);
- gpu::CommandBuffer::State state = command_buffer_->GetLastState();
- bool was_lost = state.error == gpu::error::kLostContext;
-
- if (was_lost) {
- bool was_lost_by_robustness =
- decoder_ && decoder_->WasContextLostByRobustnessExtension();
-
- // Work around issues with recovery by allowing a new GPU process to launch.
- if ((was_lost_by_robustness ||
- context_group_->feature_info()->workarounds().exit_on_context_lost) &&
- !base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kSingleProcess) &&
- !base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kInProcessGPU)) {
- LOG(ERROR) << "Exiting GPU process because some drivers cannot recover"
- << " from problems.";
-#if defined(OS_WIN)
- base::win::SetShouldCrashOnProcessDetach(false);
-#endif
- exit(0);
- }
-
- // Lose all other contexts if the reset was triggered by the robustness
- // extension instead of being synthetic.
- if (was_lost_by_robustness &&
- (gfx::GLContext::LosesAllContextsOnContextLost() ||
- use_virtualized_gl_context_)) {
- channel_->LoseAllContexts();
- }
- }
-
- CheckCompleteWaits();
- return was_lost;
-}
-
-void GpuCommandBufferStub::MarkContextLost() {
- if (!command_buffer_ ||
- command_buffer_->GetLastState().error == gpu::error::kLostContext)
- return;
-
- command_buffer_->SetContextLostReason(gpu::error::kUnknown);
- if (decoder_)
- decoder_->MarkContextLost(gpu::error::kUnknown);
- command_buffer_->SetParseError(gpu::error::kLostContext);
-}
-
-void GpuCommandBufferStub::SendSwapBuffersCompleted(
- const std::vector<ui::LatencyInfo>& latency_info,
- gfx::SwapResult result) {
- Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info,
- result));
-}
-
-void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
- base::TimeDelta interval) {
- Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
- interval));
-}
-
-} // namespace content
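
The 64-bit command buffer ID used throughout the stub above is produced by
GetCommandBufferID(), which packs the channel's client ID into the high 32
bits and the route ID into the low 32 bits. A minimal standalone sketch of
that packing; the helper names below are illustrative and not part of the
deleted file:

  #include <cstdint>

  // Packs a (client_id, route_id) pair the same way GetCommandBufferID()
  // does: client ID in the high word, route ID in the low word. Assumes a
  // non-negative route ID, as the stub does.
  uint64_t PackCommandBufferId(int client_id, int32_t route_id) {
    return (static_cast<uint64_t>(client_id) << 32) |
           static_cast<uint32_t>(route_id);
  }

  // Recovers the two halves, e.g. when correlating trace output keyed by
  // the packed ID.
  int UnpackClientId(uint64_t id) { return static_cast<int>(id >> 32); }
  int32_t UnpackRouteId(uint64_t id) {
    return static_cast<int32_t>(id & 0xffffffffu);
  }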
diff --git a/chromium/content/common/gpu/gpu_command_buffer_stub.h b/chromium/content/common/gpu/gpu_command_buffer_stub.h
deleted file mode 100644
index a9374d7b0ec..00000000000
--- a/chromium/content/common/gpu/gpu_command_buffer_stub.h
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_COMMAND_BUFFER_STUB_H_
-#define CONTENT_COMMON_GPU_GPU_COMMAND_BUFFER_STUB_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <deque>
-#include <string>
-#include <vector>
-
-#include "base/macros.h"
-#include "base/memory/weak_ptr.h"
-#include "base/observer_list.h"
-#include "base/time/time.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/gpu_memory_manager.h"
-#include "gpu/command_buffer/common/constants.h"
-#include "gpu/command_buffer/common/gpu_memory_allocation.h"
-#include "gpu/command_buffer/service/command_buffer_service.h"
-#include "gpu/command_buffer/service/context_group.h"
-#include "gpu/command_buffer/service/gpu_scheduler.h"
-#include "ipc/ipc_listener.h"
-#include "ipc/ipc_sender.h"
-#include "media/video/video_decode_accelerator.h"
-#include "ui/events/latency_info.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gfx/gpu_memory_buffer.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gfx/swap_result.h"
-#include "ui/gl/gl_surface.h"
-#include "ui/gl/gpu_preference.h"
-#include "url/gurl.h"
-
-namespace gpu {
-struct Mailbox;
-struct SyncToken;
-class SyncPointClient;
-class SyncPointManager;
-class ValueStateMap;
-namespace gles2 {
-class MailboxManager;
-class SubscriptionRefSet;
-}
-}
-
-struct GpuCommandBufferMsg_CreateImage_Params;
-
-namespace content {
-
-class GpuChannel;
-class GpuVideoDecodeAccelerator;
-class GpuVideoEncodeAccelerator;
-class GpuWatchdog;
-struct WaitForCommandState;
-
-class GpuCommandBufferStub
- : public IPC::Listener,
- public IPC::Sender,
- public base::SupportsWeakPtr<GpuCommandBufferStub> {
- public:
- class DestructionObserver {
- public:
- // Called in Destroy(), before the context/surface are released.
- virtual void OnWillDestroyStub() = 0;
-
- protected:
- virtual ~DestructionObserver() {}
- };
-
- typedef base::Callback<void(const std::vector<ui::LatencyInfo>&)>
- LatencyInfoCallback;
-
- GpuCommandBufferStub(
- GpuChannel* channel,
- gpu::SyncPointManager* sync_point_manager,
- base::SingleThreadTaskRunner* task_runner,
- GpuCommandBufferStub* share_group,
- const gfx::GLSurfaceHandle& handle,
- gpu::gles2::MailboxManager* mailbox_manager,
- gpu::PreemptionFlag* preempt_by_flag,
- gpu::gles2::SubscriptionRefSet* subscription_ref_set,
- gpu::ValueStateMap* pending_valuebuffer_state,
- const gfx::Size& size,
- const gpu::gles2::DisallowedFeatures& disallowed_features,
- const std::vector<int32_t>& attribs,
- gfx::GpuPreference gpu_preference,
- int32_t stream_id,
- int32_t route_id,
- bool offscreen,
- GpuWatchdog* watchdog,
- const GURL& active_url);
-
- ~GpuCommandBufferStub() override;
-
- // IPC::Listener implementation:
- bool OnMessageReceived(const IPC::Message& message) override;
-
- // IPC::Sender implementation:
- bool Send(IPC::Message* msg) override;
-
- gpu::gles2::MemoryTracker* GetMemoryTracker() const;
-
- // Whether this command buffer can currently handle IPC messages.
- bool IsScheduled();
-
- // Whether there are commands in the buffer that haven't been processed.
- bool HasUnprocessedCommands();
-
- gpu::gles2::GLES2Decoder* decoder() const { return decoder_.get(); }
- gpu::GpuScheduler* scheduler() const { return scheduler_.get(); }
- GpuChannel* channel() const { return channel_; }
-
- // Unique command buffer ID for this command buffer stub.
- uint64_t command_buffer_id() const { return command_buffer_id_; }
-
- // Identifies the various GpuCommandBufferStubs in the GPU process belonging
- // to the same renderer process.
- int32_t route_id() const { return route_id_; }
-
- // Identifies the stream for this command buffer.
- int32_t stream_id() const { return stream_id_; }
-
- gfx::GpuPreference gpu_preference() { return gpu_preference_; }
-
- int32_t GetRequestedAttribute(int attr) const;
-
- // Sends a message to the console.
- void SendConsoleMessage(int32_t id, const std::string& message);
-
- void SendCachedShader(const std::string& key, const std::string& shader);
-
- gfx::GLSurface* surface() const { return surface_.get(); }
-
- void AddDestructionObserver(DestructionObserver* observer);
- void RemoveDestructionObserver(DestructionObserver* observer);
-
- // Associates a sync point to this stub. When the stub is destroyed, it will
- // retire all sync points that haven't been previously retired.
- void InsertSyncPoint(uint32_t sync_point, bool retire);
-
- void SetLatencyInfoCallback(const LatencyInfoCallback& callback);
-
- void MarkContextLost();
-
- const gpu::gles2::FeatureInfo* GetFeatureInfo() const;
-
- void SendSwapBuffersCompleted(
- const std::vector<ui::LatencyInfo>& latency_info,
- gfx::SwapResult result);
- void SendUpdateVSyncParameters(base::TimeTicks timebase,
- base::TimeDelta interval);
-
- private:
- GpuMemoryManager* GetMemoryManager() const;
-
- void Destroy();
-
- bool MakeCurrent();
-
- // Cleans up and sends reply if OnInitialize failed.
- void OnInitializeFailed(IPC::Message* reply_message);
-
- // Message handlers:
- void OnInitialize(base::SharedMemoryHandle shared_state_shm,
- IPC::Message* reply_message);
- void OnSetGetBuffer(int32_t shm_id, IPC::Message* reply_message);
- void OnProduceFrontBuffer(const gpu::Mailbox& mailbox);
- void OnGetState(IPC::Message* reply_message);
- void OnWaitForTokenInRange(int32_t start,
- int32_t end,
- IPC::Message* reply_message);
- void OnWaitForGetOffsetInRange(int32_t start,
- int32_t end,
- IPC::Message* reply_message);
- void OnAsyncFlush(int32_t put_offset,
- uint32_t flush_count,
- const std::vector<ui::LatencyInfo>& latency_info);
- void OnRegisterTransferBuffer(int32_t id,
- base::SharedMemoryHandle transfer_buffer,
- uint32_t size);
- void OnDestroyTransferBuffer(int32_t id);
- void OnGetTransferBuffer(int32_t id, IPC::Message* reply_message);
-
- void OnCreateVideoDecoder(const media::VideoDecodeAccelerator::Config& config,
- int32_t route_id,
- IPC::Message* reply_message);
- void OnCreateVideoEncoder(media::VideoPixelFormat input_format,
- const gfx::Size& input_visible_size,
- media::VideoCodecProfile output_profile,
- uint32_t initial_bitrate,
- int32_t route_id,
- IPC::Message* reply_message);
-
- void OnEnsureBackbuffer();
-
- void OnRetireSyncPoint(uint32_t sync_point);
- bool OnWaitSyncPoint(uint32_t sync_point);
- void OnWaitSyncPointCompleted(uint32_t sync_point);
- void OnSignalSyncPoint(uint32_t sync_point, uint32_t id);
- void OnSignalSyncToken(const gpu::SyncToken& sync_token, uint32_t id);
- void OnSignalAck(uint32_t id);
- void OnSignalQuery(uint32_t query, uint32_t id);
-
- void OnFenceSyncRelease(uint64_t release);
- bool OnWaitFenceSync(gpu::CommandBufferNamespace namespace_id,
- uint64_t command_buffer_id,
- uint64_t release);
- void OnWaitFenceSyncCompleted(gpu::CommandBufferNamespace namespace_id,
- uint64_t command_buffer_id,
- uint64_t release);
-
- void OnCreateImage(const GpuCommandBufferMsg_CreateImage_Params& params);
- void OnDestroyImage(int32_t id);
- void OnCreateStreamTexture(uint32_t texture_id,
- int32_t stream_id,
- bool* succeeded);
-
- void OnCommandProcessed();
- void OnParseError();
- void OnSchedulingChanged(bool scheduled);
-
- void ReportState();
-
- // Wrapper for GpuScheduler::PutChanged that sets the crash report URL.
- void PutChanged();
-
- // Poll the command buffer to execute work.
- void PollWork();
- void PerformWork();
-
- // Schedule processing of delayed work. This updates the time at which
- // delayed work should be processed. |process_delayed_work_time_| is
- // updated to current time + delay. Call this after processing some amount
- // of delayed work.
- void ScheduleDelayedWork(base::TimeDelta delay);
-
- bool CheckContextLost();
- void CheckCompleteWaits();
- void PullTextureUpdates(gpu::CommandBufferNamespace namespace_id,
- uint64_t command_buffer_id,
- uint32_t release);
-
- // The lifetime of objects of this class is managed by a GpuChannel. The
- // GpuChannels destroy all the GpuCommandBufferStubs that they own when they
- // are destroyed. So a raw pointer is safe.
- GpuChannel* channel_;
-
- // Outlives the stub.
- gpu::SyncPointManager* sync_point_manager_;
-
- // Task runner for main thread.
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
-
- // The group of contexts that share namespaces with this context.
- scoped_refptr<gpu::gles2::ContextGroup> context_group_;
-
- bool initialized_;
- gfx::GLSurfaceHandle handle_;
- gfx::Size initial_size_;
- gpu::gles2::DisallowedFeatures disallowed_features_;
- std::vector<int32_t> requested_attribs_;
- gfx::GpuPreference gpu_preference_;
- bool use_virtualized_gl_context_;
- const uint64_t command_buffer_id_;
- const int32_t stream_id_;
- const int32_t route_id_;
- const bool offscreen_;
- uint32_t last_flush_count_;
-
- scoped_ptr<gpu::CommandBufferService> command_buffer_;
- scoped_ptr<gpu::gles2::GLES2Decoder> decoder_;
- scoped_ptr<gpu::GpuScheduler> scheduler_;
- scoped_ptr<gpu::SyncPointClient> sync_point_client_;
- scoped_refptr<gfx::GLSurface> surface_;
-
- GpuWatchdog* watchdog_;
-
- base::ObserverList<DestructionObserver> destruction_observers_;
-
- // A queue of sync points associated with this stub.
- std::deque<uint32_t> sync_points_;
- bool waiting_for_sync_point_;
-
- base::TimeTicks process_delayed_work_time_;
- uint32_t previous_processed_num_;
- base::TimeTicks last_idle_time_;
-
- scoped_refptr<gpu::PreemptionFlag> preemption_flag_;
-
- LatencyInfoCallback latency_info_callback_;
-
- GURL active_url_;
- size_t active_url_hash_;
-
- scoped_ptr<WaitForCommandState> wait_for_token_;
- scoped_ptr<WaitForCommandState> wait_for_get_offset_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferStub);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_COMMAND_BUFFER_STUB_H_
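
The DestructionObserver interface declared above is how helpers tied to a
stub (for example the video decode/encode accelerators) learn that the stub
is going away before its context and surface are released. A minimal sketch
of an observer, assuming a hypothetical client class:

  // Hypothetical client that must drop GL-dependent state before the stub
  // destroys its context. OnWillDestroyStub() is invoked from
  // GpuCommandBufferStub::Destroy(), before the context/surface are
  // released.
  class ExampleStubClient : public GpuCommandBufferStub::DestructionObserver {
   public:
    explicit ExampleStubClient(GpuCommandBufferStub* stub) : stub_(stub) {
      stub_->AddDestructionObserver(this);
    }
    ~ExampleStubClient() override {
      if (stub_)
        stub_->RemoveDestructionObserver(this);
    }
    void OnWillDestroyStub() override {
      // Release anything that requires the stub's GL context here.
      stub_ = nullptr;
    }

   private:
    GpuCommandBufferStub* stub_;
  };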
diff --git a/chromium/content/common/gpu/gpu_config.h b/chromium/content/common/gpu/gpu_config.h
deleted file mode 100644
index 74d5ee05b86..00000000000
--- a/chromium/content/common/gpu/gpu_config.h
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_CONFIG_H_
-#define CONTENT_COMMON_GPU_GPU_CONFIG_H_
-
-// This file declares common preprocessor configuration for the GPU process.
-
-#include "build/build_config.h"
-
-#endif // CONTENT_COMMON_GPU_GPU_CONFIG_H_
diff --git a/chromium/content/common/gpu/gpu_memory_buffer_factory.cc b/chromium/content/common/gpu/gpu_memory_buffer_factory.cc
deleted file mode 100644
index 8469e162178..00000000000
--- a/chromium/content/common/gpu/gpu_memory_buffer_factory.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_memory_buffer_factory.h"
-
-#include "base/logging.h"
-#include "build/build_config.h"
-
-#if defined(OS_MACOSX)
-#include "content/common/gpu/gpu_memory_buffer_factory_io_surface.h"
-#endif
-
-#if defined(OS_ANDROID)
-#include "content/common/gpu/gpu_memory_buffer_factory_surface_texture.h"
-#endif
-
-#if defined(USE_OZONE)
-#include "content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.h"
-#endif
-
-namespace content {
-
-// static
-gfx::GpuMemoryBufferType GpuMemoryBufferFactory::GetNativeType() {
-#if defined(OS_MACOSX)
- return gfx::IO_SURFACE_BUFFER;
-#endif
-#if defined(OS_ANDROID)
- return gfx::SURFACE_TEXTURE_BUFFER;
-#endif
-#if defined(USE_OZONE)
- return gfx::OZONE_NATIVE_PIXMAP;
-#endif
- return gfx::EMPTY_BUFFER;
-}
-
-// static
-scoped_ptr<GpuMemoryBufferFactory> GpuMemoryBufferFactory::CreateNativeType() {
-#if defined(OS_MACOSX)
- return make_scoped_ptr(new GpuMemoryBufferFactoryIOSurface);
-#endif
-#if defined(OS_ANDROID)
- return make_scoped_ptr(new GpuMemoryBufferFactorySurfaceTexture);
-#endif
-#if defined(USE_OZONE)
- return make_scoped_ptr(new GpuMemoryBufferFactoryOzoneNativePixmap);
-#endif
- NOTREACHED();
- return nullptr;
-}
-
-} // namespace content
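
A short usage sketch for the factory selection implemented above, assuming a
caller that only wants native buffers when the platform supports them; the
buffer ID, size, and client ID below are placeholders:

  // GetNativeType() reports gfx::EMPTY_BUFFER on platforms without a native
  // buffer implementation, in which case CreateNativeType() must not be
  // called (it would hit NOTREACHED()).
  if (content::GpuMemoryBufferFactory::GetNativeType() != gfx::EMPTY_BUFFER) {
    scoped_ptr<content::GpuMemoryBufferFactory> factory =
        content::GpuMemoryBufferFactory::CreateNativeType();
    gfx::GpuMemoryBufferHandle handle = factory->CreateGpuMemoryBuffer(
        gfx::GpuMemoryBufferId(1), gfx::Size(256, 256),
        gfx::BufferFormat::BGRA_8888, gfx::BufferUsage::GPU_READ,
        /*client_id=*/42, gfx::kNullPluginWindow);
    if (handle.type != gfx::EMPTY_BUFFER)
      factory->DestroyGpuMemoryBuffer(handle.id, /*client_id=*/42);
  }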
diff --git a/chromium/content/common/gpu/gpu_memory_buffer_factory.h b/chromium/content/common/gpu/gpu_memory_buffer_factory.h
deleted file mode 100644
index 77a50f2775c..00000000000
--- a/chromium/content/common/gpu/gpu_memory_buffer_factory.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_H_
-#define CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_H_
-
-#include <vector>
-
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "content/common/content_export.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gfx/gpu_memory_buffer.h"
-#include "ui/gfx/native_widget_types.h"
-
-namespace gpu {
-class ImageFactory;
-}
-
-namespace content {
-
-class CONTENT_EXPORT GpuMemoryBufferFactory {
- public:
- virtual ~GpuMemoryBufferFactory() {}
-
- // Returns the native GPU memory buffer factory type. Returns EMPTY_BUFFER
- // type if native buffers are not supported.
- static gfx::GpuMemoryBufferType GetNativeType();
-
- // Creates a new factory instance for native GPU memory buffers.
- static scoped_ptr<GpuMemoryBufferFactory> CreateNativeType();
-
- // Creates a new GPU memory buffer instance. A valid handle is returned on
- // success. It can be called on any thread.
- virtual gfx::GpuMemoryBufferHandle CreateGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- int client_id,
- gfx::PluginWindowHandle surface_handle) = 0;
-
- // Creates a new GPU memory buffer instance from an existing handle. A valid
- // handle is returned on success. It can be called on any thread.
- virtual gfx::GpuMemoryBufferHandle CreateGpuMemoryBufferFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- int client_id) = 0;
-
- // Destroys GPU memory buffer identified by |id|.
- // It can be called on any thread.
- virtual void DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
- int client_id) = 0;
-
- // Type-checking downcast routine.
- virtual gpu::ImageFactory* AsImageFactory() = 0;
-
- protected:
- GpuMemoryBufferFactory() {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferFactory);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_H_
diff --git a/chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface.cc b/chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface.cc
deleted file mode 100644
index 49fab37b596..00000000000
--- a/chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_memory_buffer_factory_io_surface.h"
-
-#include <vector>
-
-#include "base/logging.h"
-#include "content/common/gpu/client/gpu_memory_buffer_impl.h"
-#include "ui/gfx/buffer_format_util.h"
-#include "ui/gfx/mac/io_surface.h"
-#include "ui/gl/gl_image_io_surface.h"
-
-namespace content {
-
-GpuMemoryBufferFactoryIOSurface::GpuMemoryBufferFactoryIOSurface() {
-}
-
-GpuMemoryBufferFactoryIOSurface::~GpuMemoryBufferFactoryIOSurface() {
-}
-
-// static
-bool GpuMemoryBufferFactoryIOSurface::IsGpuMemoryBufferConfigurationSupported(
- gfx::BufferFormat format,
- gfx::BufferUsage usage) {
- switch (usage) {
- case gfx::BufferUsage::GPU_READ:
- case gfx::BufferUsage::SCANOUT:
- return format == gfx::BufferFormat::BGRA_8888 ||
- format == gfx::BufferFormat::RGBA_8888;
- case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE:
- case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT:
- return format == gfx::BufferFormat::R_8 ||
- format == gfx::BufferFormat::BGRA_8888 ||
- format == gfx::BufferFormat::UYVY_422 ||
- format == gfx::BufferFormat::YUV_420_BIPLANAR;
- }
- NOTREACHED();
- return false;
-}
-
-gfx::GpuMemoryBufferHandle
-GpuMemoryBufferFactoryIOSurface::CreateGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- int client_id,
- gfx::PluginWindowHandle surface_handle) {
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface(
- gfx::CreateIOSurface(size, format));
- if (!io_surface)
- return gfx::GpuMemoryBufferHandle();
-
- {
- base::AutoLock lock(io_surfaces_lock_);
-
- IOSurfaceMapKey key(id, client_id);
- DCHECK(io_surfaces_.find(key) == io_surfaces_.end());
- io_surfaces_[key] = io_surface;
- }
-
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::IO_SURFACE_BUFFER;
- handle.id = id;
- handle.mach_port.reset(IOSurfaceCreateMachPort(io_surface));
- return handle;
-}
-
-gfx::GpuMemoryBufferHandle
-GpuMemoryBufferFactoryIOSurface::CreateGpuMemoryBufferFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- int client_id) {
- NOTIMPLEMENTED();
- return gfx::GpuMemoryBufferHandle();
-}
-
-void GpuMemoryBufferFactoryIOSurface::DestroyGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- int client_id) {
- {
- base::AutoLock lock(io_surfaces_lock_);
-
- IOSurfaceMapKey key(id, client_id);
- DCHECK(io_surfaces_.find(key) != io_surfaces_.end());
- io_surfaces_.erase(key);
- }
-}
-
-gpu::ImageFactory* GpuMemoryBufferFactoryIOSurface::AsImageFactory() {
- return this;
-}
-
-scoped_refptr<gl::GLImage>
-GpuMemoryBufferFactoryIOSurface::CreateImageForGpuMemoryBuffer(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- unsigned internalformat,
- int client_id) {
- base::AutoLock lock(io_surfaces_lock_);
-
- DCHECK_EQ(handle.type, gfx::IO_SURFACE_BUFFER);
- IOSurfaceMapKey key(handle.id, client_id);
- IOSurfaceMap::iterator it = io_surfaces_.find(key);
- if (it == io_surfaces_.end())
- return scoped_refptr<gl::GLImage>();
-
- scoped_refptr<gl::GLImageIOSurface> image(
- new gl::GLImageIOSurface(size, internalformat));
- if (!image->Initialize(it->second.get(), handle.id, format))
- return scoped_refptr<gl::GLImage>();
-
- return image;
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface.h b/chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface.h
deleted file mode 100644
index d2fd00dbe3d..00000000000
--- a/chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_IO_SURFACE_H_
-#define CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_IO_SURFACE_H_
-
-#include <utility>
-
-#include <IOSurface/IOSurface.h>
-
-#include "base/containers/hash_tables.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/synchronization/lock.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/gpu_memory_buffer_factory.h"
-#include "gpu/command_buffer/service/image_factory.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gfx/gpu_memory_buffer.h"
-#include "ui/gfx/mac/io_surface.h"
-
-namespace gl {
-class GLImage;
-}
-
-namespace content {
-
-class CONTENT_EXPORT GpuMemoryBufferFactoryIOSurface
- : public GpuMemoryBufferFactory,
- public gpu::ImageFactory {
- public:
- GpuMemoryBufferFactoryIOSurface();
- ~GpuMemoryBufferFactoryIOSurface() override;
-
- static bool IsGpuMemoryBufferConfigurationSupported(gfx::BufferFormat format,
- gfx::BufferUsage usage);
-
- // Overridden from GpuMemoryBufferFactory:
- gfx::GpuMemoryBufferHandle CreateGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- int client_id,
- gfx::PluginWindowHandle surface_handle) override;
- gfx::GpuMemoryBufferHandle CreateGpuMemoryBufferFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- int client_id) override;
- void DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
- int client_id) override;
- gpu::ImageFactory* AsImageFactory() override;
-
- // Overridden from gpu::ImageFactory:
- scoped_refptr<gl::GLImage> CreateImageForGpuMemoryBuffer(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- unsigned internalformat,
- int client_id) override;
-
- private:
- typedef std::pair<gfx::IOSurfaceId, int> IOSurfaceMapKey;
- typedef base::hash_map<IOSurfaceMapKey, base::ScopedCFTypeRef<IOSurfaceRef>>
- IOSurfaceMap;
-  // TODO(reveman): Remove |io_surfaces_| and allow IOSurface backed GMBs to be
- // used with any GPU process by passing a mach_port to CreateImageCHROMIUM.
- IOSurfaceMap io_surfaces_;
- base::Lock io_surfaces_lock_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferFactoryIOSurface);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_IO_SURFACE_H_
diff --git a/chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface_unittest.cc b/chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface_unittest.cc
deleted file mode 100644
index 7f4a13257f4..00000000000
--- a/chromium/content/common/gpu/gpu_memory_buffer_factory_io_surface_unittest.cc
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_memory_buffer_factory_io_surface.h"
-#include "content/test/gpu_memory_buffer_factory_test_template.h"
-
-namespace content {
-namespace {
-
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferFactoryIOSurface,
- GpuMemoryBufferFactoryTest,
- GpuMemoryBufferFactoryIOSurface);
-
-} // namespace
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.cc b/chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.cc
deleted file mode 100644
index 69fdd3085cb..00000000000
--- a/chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.cc
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.h"
-
-#include "ui/gl/gl_image_ozone_native_pixmap.h"
-#include "ui/ozone/public/client_native_pixmap.h"
-#include "ui/ozone/public/client_native_pixmap_factory.h"
-#include "ui/ozone/public/ozone_platform.h"
-#include "ui/ozone/public/surface_factory_ozone.h"
-
-namespace content {
-
-GpuMemoryBufferFactoryOzoneNativePixmap::
- GpuMemoryBufferFactoryOzoneNativePixmap() {}
-
-GpuMemoryBufferFactoryOzoneNativePixmap::
- ~GpuMemoryBufferFactoryOzoneNativePixmap() {}
-
-// static
-bool GpuMemoryBufferFactoryOzoneNativePixmap::
- IsGpuMemoryBufferConfigurationSupported(gfx::BufferFormat format,
- gfx::BufferUsage usage) {
- if (!ui::ClientNativePixmapFactory::GetInstance()) {
- // unittests don't have to set ClientNativePixmapFactory.
- return false;
- }
- return ui::ClientNativePixmapFactory::GetInstance()->IsConfigurationSupported(
- format, usage);
-}
-
-gfx::GpuMemoryBufferHandle
-GpuMemoryBufferFactoryOzoneNativePixmap::CreateGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- int client_id,
- gfx::PluginWindowHandle surface_handle) {
- scoped_refptr<ui::NativePixmap> pixmap =
- ui::OzonePlatform::GetInstance()
- ->GetSurfaceFactoryOzone()
- ->CreateNativePixmap(surface_handle, size, format, usage);
- if (!pixmap.get()) {
- DLOG(ERROR) << "Failed to create pixmap " << size.width() << "x"
- << size.height() << " format " << static_cast<int>(format)
- << ", usage " << static_cast<int>(usage);
- return gfx::GpuMemoryBufferHandle();
- }
-
- gfx::GpuMemoryBufferHandle new_handle;
- new_handle.type = gfx::OZONE_NATIVE_PIXMAP;
- new_handle.id = id;
- new_handle.native_pixmap_handle = pixmap->ExportHandle();
-
- {
- base::AutoLock lock(native_pixmaps_lock_);
- NativePixmapMapKey key(id.id, client_id);
- DCHECK(native_pixmaps_.find(key) == native_pixmaps_.end());
- native_pixmaps_[key] = pixmap;
- }
-
- return new_handle;
-}
-
-gfx::GpuMemoryBufferHandle
-GpuMemoryBufferFactoryOzoneNativePixmap::CreateGpuMemoryBufferFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- int client_id) {
- scoped_refptr<ui::NativePixmap> pixmap =
- ui::OzonePlatform::GetInstance()
- ->GetSurfaceFactoryOzone()
- ->CreateNativePixmapFromHandle(handle.native_pixmap_handle);
- if (!pixmap.get()) {
- DLOG(ERROR) << "Failed to create pixmap from handle";
- return gfx::GpuMemoryBufferHandle();
- }
-
- gfx::GpuMemoryBufferHandle new_handle;
- new_handle.type = gfx::OZONE_NATIVE_PIXMAP;
- new_handle.id = id;
- new_handle.native_pixmap_handle = pixmap->ExportHandle();
-
- {
- base::AutoLock lock(native_pixmaps_lock_);
- NativePixmapMapKey key(id.id, client_id);
- DCHECK(native_pixmaps_.find(key) == native_pixmaps_.end());
- native_pixmaps_[key] = pixmap;
- }
-
- return new_handle;
-}
-
-void GpuMemoryBufferFactoryOzoneNativePixmap::DestroyGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- int client_id) {
- base::AutoLock lock(native_pixmaps_lock_);
- auto it = native_pixmaps_.find(NativePixmapMapKey(id.id, client_id));
- DCHECK(it != native_pixmaps_.end());
- native_pixmaps_.erase(it);
-}
-
-gpu::ImageFactory* GpuMemoryBufferFactoryOzoneNativePixmap::AsImageFactory() {
- return this;
-}
-
-scoped_refptr<gl::GLImage>
-GpuMemoryBufferFactoryOzoneNativePixmap::CreateImageForGpuMemoryBuffer(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- unsigned internalformat,
- int client_id) {
- DCHECK_EQ(handle.type, gfx::OZONE_NATIVE_PIXMAP);
- scoped_refptr<ui::NativePixmap> pixmap;
- {
- base::AutoLock lock(native_pixmaps_lock_);
- NativePixmapMap::iterator it =
- native_pixmaps_.find(NativePixmapMapKey(handle.id.id, client_id));
- if (it == native_pixmaps_.end()) {
- return nullptr;
- }
- pixmap = it->second;
- }
-
- scoped_refptr<gfx::GLImageOzoneNativePixmap> image(
- new gfx::GLImageOzoneNativePixmap(size, internalformat));
- if (!image->Initialize(pixmap.get(), format)) {
- LOG(ERROR) << "Failed to create GLImage";
- return nullptr;
- }
- return image;
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.h b/chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.h
deleted file mode 100644
index f9ea0d25b80..00000000000
--- a/chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_OZONE_NATIVE_PIXMAP_H_
-#define CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_OZONE_NATIVE_PIXMAP_H_
-
-#include "base/containers/hash_tables.h"
-#include "base/macros.h"
-#include "base/synchronization/lock.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/gpu_memory_buffer_factory.h"
-#include "gpu/command_buffer/service/image_factory.h"
-#include "ui/ozone/public/native_pixmap.h"
-
-namespace gl {
-class GLImage;
-}
-
-namespace content {
-
-class CONTENT_EXPORT GpuMemoryBufferFactoryOzoneNativePixmap
- : public GpuMemoryBufferFactory,
- public gpu::ImageFactory {
- public:
- GpuMemoryBufferFactoryOzoneNativePixmap();
- ~GpuMemoryBufferFactoryOzoneNativePixmap() override;
-
- static bool IsGpuMemoryBufferConfigurationSupported(gfx::BufferFormat format,
- gfx::BufferUsage usage);
-
- // Overridden from GpuMemoryBufferFactory:
- gfx::GpuMemoryBufferHandle CreateGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- int client_id,
- gfx::PluginWindowHandle surface_handle) override;
- gfx::GpuMemoryBufferHandle CreateGpuMemoryBufferFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- int client_id) override;
- void DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
- int client_id) override;
- gpu::ImageFactory* AsImageFactory() override;
-
- // Overridden from gpu::ImageFactory:
- scoped_refptr<gl::GLImage> CreateImageForGpuMemoryBuffer(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- unsigned internalformat,
- int client_id) override;
-
- private:
- using NativePixmapMapKey = std::pair<int, int>;
- using NativePixmapMap =
- base::hash_map<NativePixmapMapKey, scoped_refptr<ui::NativePixmap>>;
- NativePixmapMap native_pixmaps_;
- base::Lock native_pixmaps_lock_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferFactoryOzoneNativePixmap);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_OZONE_NATIVE_PIXMAP_H_
diff --git a/chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap_unittest.cc b/chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap_unittest.cc
deleted file mode 100644
index bdb974702d9..00000000000
--- a/chromium/content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap_unittest.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.h"
-#include "content/test/gpu_memory_buffer_factory_test_template.h"
-
-namespace content {
-namespace {
-
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferFactoryOzoneNativePixmap,
- GpuMemoryBufferFactoryTest,
- GpuMemoryBufferFactoryOzoneNativePixmap);
-
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferFactoryOzoneNativePixmap,
- GpuMemoryBufferFactoryImportTest,
- GpuMemoryBufferFactoryOzoneNativePixmap);
-
-} // namespace
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture.cc b/chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture.cc
deleted file mode 100644
index a4cf15ed30e..00000000000
--- a/chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_memory_buffer_factory_surface_texture.h"
-
-#include "content/common/android/surface_texture_manager.h"
-#include "ui/gl/android/surface_texture.h"
-#include "ui/gl/gl_image_surface_texture.h"
-
-namespace content {
-
-GpuMemoryBufferFactorySurfaceTexture::GpuMemoryBufferFactorySurfaceTexture() {
-}
-
-GpuMemoryBufferFactorySurfaceTexture::~GpuMemoryBufferFactorySurfaceTexture() {
-}
-
-// static
-bool GpuMemoryBufferFactorySurfaceTexture::
- IsGpuMemoryBufferConfigurationSupported(gfx::BufferFormat format,
- gfx::BufferUsage usage) {
- switch (usage) {
- case gfx::BufferUsage::GPU_READ:
- case gfx::BufferUsage::SCANOUT:
- case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT:
- return false;
- case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE:
- return format == gfx::BufferFormat::RGBA_8888;
- }
- NOTREACHED();
- return false;
-}
-
-gfx::GpuMemoryBufferHandle
-GpuMemoryBufferFactorySurfaceTexture::CreateGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- int client_id,
- gfx::PluginWindowHandle surface_handle) {
-  // Note: this needs to be 0 as the surface texture implementation will take
- // ownership of the texture and call glDeleteTextures when the GPU service
- // attaches the surface texture to a real texture id. glDeleteTextures
- // silently ignores 0.
- const int kDummyTextureId = 0;
- scoped_refptr<gfx::SurfaceTexture> surface_texture =
- gfx::SurfaceTexture::Create(kDummyTextureId);
- if (!surface_texture.get())
- return gfx::GpuMemoryBufferHandle();
-
- SurfaceTextureManager::GetInstance()->RegisterSurfaceTexture(
- id.id, client_id, surface_texture.get());
-
- {
- base::AutoLock lock(surface_textures_lock_);
-
- SurfaceTextureMapKey key(id.id, client_id);
- DCHECK(surface_textures_.find(key) == surface_textures_.end());
- surface_textures_[key] = surface_texture;
- }
-
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::SURFACE_TEXTURE_BUFFER;
- handle.id = id;
- return handle;
-}
-
-gfx::GpuMemoryBufferHandle
-GpuMemoryBufferFactorySurfaceTexture::CreateGpuMemoryBufferFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- int client_id) {
- NOTIMPLEMENTED();
- return gfx::GpuMemoryBufferHandle();
-}
-
-void GpuMemoryBufferFactorySurfaceTexture::DestroyGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- int client_id) {
- {
- base::AutoLock lock(surface_textures_lock_);
-
- SurfaceTextureMapKey key(id.id, client_id);
- DCHECK(surface_textures_.find(key) != surface_textures_.end());
- surface_textures_.erase(key);
- }
-
- SurfaceTextureManager::GetInstance()->UnregisterSurfaceTexture(id.id,
- client_id);
-}
-
-gpu::ImageFactory* GpuMemoryBufferFactorySurfaceTexture::AsImageFactory() {
- return this;
-}
-
-scoped_refptr<gl::GLImage>
-GpuMemoryBufferFactorySurfaceTexture::CreateImageForGpuMemoryBuffer(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- unsigned internalformat,
- int client_id) {
- base::AutoLock lock(surface_textures_lock_);
-
- DCHECK_EQ(handle.type, gfx::SURFACE_TEXTURE_BUFFER);
-
- SurfaceTextureMapKey key(handle.id.id, client_id);
- SurfaceTextureMap::iterator it = surface_textures_.find(key);
- if (it == surface_textures_.end())
- return scoped_refptr<gl::GLImage>();
-
- scoped_refptr<gl::GLImageSurfaceTexture> image(
- new gl::GLImageSurfaceTexture(size));
- if (!image->Initialize(it->second.get()))
- return scoped_refptr<gl::GLImage>();
-
- return image;
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture.h b/chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture.h
deleted file mode 100644
index b9eda55dcbd..00000000000
--- a/chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_SURFACE_TEXTURE_H_
-#define CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_SURFACE_TEXTURE_H_
-
-#include <utility>
-
-#include "base/containers/hash_tables.h"
-#include "base/memory/ref_counted.h"
-#include "base/synchronization/lock.h"
-#include "content/common/content_export.h"
-#include "content/common/gpu/gpu_memory_buffer_factory.h"
-#include "gpu/command_buffer/service/image_factory.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gfx/gpu_memory_buffer.h"
-
-namespace gfx {
-class SurfaceTexture;
-}
-
-namespace gl {
-class GLImage;
-}
-
-namespace content {
-
-class CONTENT_EXPORT GpuMemoryBufferFactorySurfaceTexture
- : public GpuMemoryBufferFactory,
- public gpu::ImageFactory {
- public:
- GpuMemoryBufferFactorySurfaceTexture();
- ~GpuMemoryBufferFactorySurfaceTexture() override;
-
- static bool IsGpuMemoryBufferConfigurationSupported(gfx::BufferFormat format,
- gfx::BufferUsage usage);
-
- // Overridden from GpuMemoryBufferFactory:
- gfx::GpuMemoryBufferHandle CreateGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- int client_id,
- gfx::PluginWindowHandle surface_handle) override;
- gfx::GpuMemoryBufferHandle CreateGpuMemoryBufferFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- int client_id) override;
- void DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
- int client_id) override;
- gpu::ImageFactory* AsImageFactory() override;
-
- // Overridden from gpu::ImageFactory:
- scoped_refptr<gl::GLImage> CreateImageForGpuMemoryBuffer(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- unsigned internalformat,
- int client_id) override;
-
- private:
- typedef std::pair<int, int> SurfaceTextureMapKey;
- typedef base::hash_map<SurfaceTextureMapKey,
- scoped_refptr<gfx::SurfaceTexture>> SurfaceTextureMap;
- SurfaceTextureMap surface_textures_;
- base::Lock surface_textures_lock_;
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_MEMORY_BUFFER_FACTORY_SURFACE_TEXTURE_H_
diff --git a/chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture_unittest.cc b/chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture_unittest.cc
deleted file mode 100644
index 683860fee17..00000000000
--- a/chromium/content/common/gpu/gpu_memory_buffer_factory_surface_texture_unittest.cc
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_memory_buffer_factory_surface_texture.h"
-#include "content/test/gpu_memory_buffer_factory_test_template.h"
-
-namespace content {
-namespace {
-
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferFactorySurfaceTexture,
- GpuMemoryBufferFactoryTest,
- GpuMemoryBufferFactorySurfaceTexture);
-
-} // namespace
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_memory_manager.cc b/chromium/content/common/gpu/gpu_memory_manager.cc
deleted file mode 100644
index 4b9eb7c2888..00000000000
--- a/chromium/content/common/gpu/gpu_memory_manager.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_memory_manager.h"
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/message_loop/message_loop.h"
-#include "base/process/process_handle.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/trace_event/trace_event.h"
-#include "content/common/gpu/gpu_channel_manager.h"
-#include "content/common/gpu/gpu_memory_tracking.h"
-#include "content/common/gpu/gpu_memory_uma_stats.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "gpu/command_buffer/common/gpu_memory_allocation.h"
-#include "gpu/command_buffer/service/gpu_switches.h"
-
-using gpu::MemoryAllocation;
-
-namespace content {
-namespace {
-
-const uint64_t kBytesAllocatedStep = 16 * 1024 * 1024;
-
-void TrackValueChanged(uint64_t old_size,
- uint64_t new_size,
- uint64_t* total_size) {
- DCHECK(new_size > old_size || *total_size >= (old_size - new_size));
- *total_size += (new_size - old_size);
-}
-
-}
-
-GpuMemoryManager::GpuMemoryManager(GpuChannelManager* channel_manager)
- : channel_manager_(channel_manager),
- bytes_allocated_current_(0),
- bytes_allocated_historical_max_(0) {}
-
-GpuMemoryManager::~GpuMemoryManager() {
- DCHECK(tracking_groups_.empty());
- DCHECK(!bytes_allocated_current_);
-}
-
-void GpuMemoryManager::TrackMemoryAllocatedChange(
- GpuMemoryTrackingGroup* tracking_group,
- uint64_t old_size,
- uint64_t new_size) {
- TrackValueChanged(old_size, new_size, &tracking_group->size_);
- TrackValueChanged(old_size, new_size, &bytes_allocated_current_);
-
- if (GetCurrentUsage() > bytes_allocated_historical_max_ +
- kBytesAllocatedStep) {
- bytes_allocated_historical_max_ = GetCurrentUsage();
- // If we're blowing into new memory usage territory, spam the browser
- // process with the most up-to-date information about our memory usage.
- SendUmaStatsToBrowser();
- }
-}
-
-bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64_t /* size_needed */) {
- // TODO: Check if there is enough space. Lose contexts until there is.
- return true;
-}
-
-uint64_t GpuMemoryManager::GetTrackerMemoryUsage(
- gpu::gles2::MemoryTracker* tracker) const {
- TrackingGroupMap::const_iterator tracking_group_it =
- tracking_groups_.find(tracker);
- DCHECK(tracking_group_it != tracking_groups_.end());
- return tracking_group_it->second->GetSize();
-}
-
-GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup(
- base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) {
- GpuMemoryTrackingGroup* tracking_group = new GpuMemoryTrackingGroup(
- pid, memory_tracker, this);
- DCHECK(!tracking_groups_.count(tracking_group->GetMemoryTracker()));
- tracking_groups_.insert(std::make_pair(tracking_group->GetMemoryTracker(),
- tracking_group));
- return tracking_group;
-}
-
-void GpuMemoryManager::OnDestroyTrackingGroup(
- GpuMemoryTrackingGroup* tracking_group) {
- DCHECK(tracking_groups_.count(tracking_group->GetMemoryTracker()));
- tracking_groups_.erase(tracking_group->GetMemoryTracker());
-}
-
-void GpuMemoryManager::GetVideoMemoryUsageStats(
- GPUVideoMemoryUsageStats* video_memory_usage_stats) const {
- // For each context group, assign its memory usage to its PID
- video_memory_usage_stats->process_map.clear();
- for (TrackingGroupMap::const_iterator i =
- tracking_groups_.begin(); i != tracking_groups_.end(); ++i) {
- const GpuMemoryTrackingGroup* tracking_group = i->second;
- video_memory_usage_stats->process_map[
- tracking_group->GetPid()].video_memory += tracking_group->GetSize();
- }
-
- // Assign the total across all processes in the GPU process
- video_memory_usage_stats->process_map[
- base::GetCurrentProcId()].video_memory = GetCurrentUsage();
- video_memory_usage_stats->process_map[
- base::GetCurrentProcId()].has_duplicates = true;
-
- video_memory_usage_stats->bytes_allocated = GetCurrentUsage();
- video_memory_usage_stats->bytes_allocated_historical_max =
- bytes_allocated_historical_max_;
-}
-
-void GpuMemoryManager::SendUmaStatsToBrowser() {
- if (!channel_manager_)
- return;
- GPUMemoryUmaStats params;
- params.bytes_allocated_current = GetCurrentUsage();
- params.bytes_allocated_max = bytes_allocated_historical_max_;
- params.context_group_count = tracking_groups_.size();
- channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params));
-}
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_memory_manager.h b/chromium/content/common/gpu/gpu_memory_manager.h
deleted file mode 100644
index 7de22e5aa78..00000000000
--- a/chromium/content/common/gpu/gpu_memory_manager.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_
-#define CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_
-
-#include <stdint.h>
-
-#include <list>
-#include <map>
-
-#include "base/cancelable_callback.h"
-#include "base/containers/hash_tables.h"
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/memory/weak_ptr.h"
-#include "content/common/content_export.h"
-#include "content/public/common/gpu_memory_stats.h"
-#include "gpu/command_buffer/common/gpu_memory_allocation.h"
-#include "gpu/command_buffer/service/memory_tracking.h"
-
-namespace content {
-
-class GpuChannelManager;
-class GpuMemoryTrackingGroup;
-
-class CONTENT_EXPORT GpuMemoryManager :
- public base::SupportsWeakPtr<GpuMemoryManager> {
- public:
- explicit GpuMemoryManager(GpuChannelManager* channel_manager);
- ~GpuMemoryManager();
-
- // Retrieve GPU Resource consumption statistics for the task manager
- void GetVideoMemoryUsageStats(
- content::GPUVideoMemoryUsageStats* video_memory_usage_stats) const;
-
- GpuMemoryTrackingGroup* CreateTrackingGroup(
- base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker);
-
- uint64_t GetTrackerMemoryUsage(gpu::gles2::MemoryTracker* tracker) const;
-
- private:
- friend class GpuMemoryManagerTest;
- friend class GpuMemoryTrackingGroup;
- friend class GpuMemoryManagerClientState;
-
- typedef std::map<gpu::gles2::MemoryTracker*, GpuMemoryTrackingGroup*>
- TrackingGroupMap;
-
- // Send memory usage stats to the browser process.
- void SendUmaStatsToBrowser();
-
- // Get the current number of bytes allocated.
- uint64_t GetCurrentUsage() const { return bytes_allocated_current_; }
-
- // GpuMemoryTrackingGroup interface
- void TrackMemoryAllocatedChange(GpuMemoryTrackingGroup* tracking_group,
- uint64_t old_size,
- uint64_t new_size);
- void OnDestroyTrackingGroup(GpuMemoryTrackingGroup* tracking_group);
- bool EnsureGPUMemoryAvailable(uint64_t size_needed);
-
- GpuChannelManager* channel_manager_;
-
- // All context groups' tracking structures
- TrackingGroupMap tracking_groups_;
-
- // The current total memory usage, and historical maximum memory usage
- uint64_t bytes_allocated_current_;
- uint64_t bytes_allocated_historical_max_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuMemoryManager);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_
diff --git a/chromium/content/common/gpu/gpu_memory_tracking.cc b/chromium/content/common/gpu/gpu_memory_tracking.cc
deleted file mode 100644
index 6fa447b5b87..00000000000
--- a/chromium/content/common/gpu/gpu_memory_tracking.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_memory_tracking.h"
-
-#include "content/common/gpu/gpu_memory_manager.h"
-
-namespace content {
-
-GpuMemoryTrackingGroup::GpuMemoryTrackingGroup(
- base::ProcessId pid,
- gpu::gles2::MemoryTracker* memory_tracker,
- GpuMemoryManager* memory_manager)
- : pid_(pid),
- size_(0),
- hibernated_(false),
- memory_tracker_(memory_tracker),
- memory_manager_(memory_manager) {
-}
-
-GpuMemoryTrackingGroup::~GpuMemoryTrackingGroup() {
- memory_manager_->OnDestroyTrackingGroup(this);
-}
-
-void GpuMemoryTrackingGroup::TrackMemoryAllocatedChange(uint64_t old_size,
- uint64_t new_size) {
- memory_manager_->TrackMemoryAllocatedChange(
- this, old_size, new_size);
-}
-
-bool GpuMemoryTrackingGroup::EnsureGPUMemoryAvailable(uint64_t size_needed) {
- return memory_manager_->EnsureGPUMemoryAvailable(size_needed);
-}
-
-
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_memory_tracking.h b/chromium/content/common/gpu/gpu_memory_tracking.h
deleted file mode 100644
index 28892832f96..00000000000
--- a/chromium/content/common/gpu/gpu_memory_tracking.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_TRACKING_H_
-#define CONTENT_COMMON_GPU_GPU_MEMORY_TRACKING_H_
-
-#include <stdint.h>
-
-#include "base/process/process.h"
-#include "content/common/content_export.h"
-#include "gpu/command_buffer/service/memory_tracking.h"
-
-namespace content {
-
-class GpuMemoryManager;
-
-// All decoders in a context group point to a single GpuMemoryTrackingGroup,
-// which tracks GPU resource consumption for the entire context group.
-class CONTENT_EXPORT GpuMemoryTrackingGroup {
- public:
- ~GpuMemoryTrackingGroup();
- void TrackMemoryAllocatedChange(uint64_t old_size, uint64_t new_size);
- bool EnsureGPUMemoryAvailable(uint64_t size_needed);
- base::ProcessId GetPid() const {
- return pid_;
- }
- uint64_t GetSize() const { return size_; }
- gpu::gles2::MemoryTracker* GetMemoryTracker() const {
- return memory_tracker_;
- }
-
- private:
- friend class GpuMemoryManager;
-
- GpuMemoryTrackingGroup(base::ProcessId pid,
- gpu::gles2::MemoryTracker* memory_tracker,
- GpuMemoryManager* memory_manager);
-
- base::ProcessId pid_;
- uint64_t size_;
-
- // Set and used only during the Manage function, to determine which
- // non-surface clients should be hibernated.
- bool hibernated_;
-
- gpu::gles2::MemoryTracker* memory_tracker_;
- GpuMemoryManager* memory_manager_;
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_MEMORY_TRACKING_H_
diff --git a/chromium/content/common/gpu/gpu_memory_uma_stats.h b/chromium/content/common/gpu/gpu_memory_uma_stats.h
deleted file mode 100644
index 15d874f195c..00000000000
--- a/chromium/content/common/gpu/gpu_memory_uma_stats.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_UMA_STATS_H_
-#define CONTENT_COMMON_GPU_GPU_MEMORY_UMA_STATS_H_
-
-#include <stddef.h>
-
-namespace content {
-
-// Memory usage statistics sent periodically to the browser process to report
-// in UMA histograms if the GPU process crashes.
-struct GPUMemoryUmaStats {
- GPUMemoryUmaStats()
- : bytes_allocated_current(0),
- bytes_allocated_max(0),
- context_group_count(0) {
- }
-
- // The number of bytes currently allocated.
- size_t bytes_allocated_current;
-
- // The maximum number of bytes ever allocated at once.
- size_t bytes_allocated_max;
-
- // The number of context groups.
- size_t context_group_count;
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_MEMORY_UMA_STATS_H_
diff --git a/chromium/content/common/gpu/gpu_messages.h b/chromium/content/common/gpu/gpu_messages.h
deleted file mode 100644
index cfe01a347a7..00000000000
--- a/chromium/content/common/gpu/gpu_messages.h
+++ /dev/null
@@ -1,878 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Multiply-included message file, hence no include guard here, but see below
-// for a much smaller-than-usual include guard section.
-
-#include <stdint.h>
-
-#include <string>
-#include <vector>
-
-#include "base/memory/shared_memory.h"
-#include "build/build_config.h"
-#include "content/common/content_export.h"
-#include "content/common/content_param_traits.h"
-#include "content/common/gpu/gpu_memory_uma_stats.h"
-#include "content/common/gpu/gpu_process_launch_causes.h"
-#include "content/common/gpu/gpu_result_codes.h"
-#include "content/common/gpu/gpu_stream_priority.h"
-#include "content/public/common/common_param_traits.h"
-#include "content/public/common/gpu_memory_stats.h"
-#include "gpu/command_buffer/common/capabilities.h"
-#include "gpu/command_buffer/common/command_buffer.h"
-#include "gpu/command_buffer/common/constants.h"
-#include "gpu/command_buffer/common/gpu_memory_allocation.h"
-#include "gpu/command_buffer/common/mailbox.h"
-#include "gpu/command_buffer/common/sync_token.h"
-#include "gpu/command_buffer/common/value_state.h"
-#include "gpu/config/gpu_info.h"
-#include "gpu/ipc/gpu_command_buffer_traits.h"
-#include "ipc/ipc_channel_handle.h"
-#include "ipc/ipc_message_macros.h"
-#include "media/base/decrypt_config.h"
-#include "media/base/video_types.h"
-#include "media/video/jpeg_decode_accelerator.h"
-#include "media/video/video_decode_accelerator.h"
-#include "media/video/video_encode_accelerator.h"
-#include "ui/events/latency_info.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gfx/gpu_memory_buffer.h"
-#include "ui/gfx/ipc/gfx_param_traits.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gfx/swap_result.h"
-#include "ui/gl/gpu_preference.h"
-
-#if defined(OS_ANDROID)
-#include "content/common/android/surface_texture_peer.h"
-#elif defined(OS_MACOSX)
-#include "ui/base/cocoa/remote_layer_api.h"
-#include "ui/gfx/mac/io_surface.h"
-#endif
-
-#undef IPC_MESSAGE_EXPORT
-#define IPC_MESSAGE_EXPORT CONTENT_EXPORT
-
-#define IPC_MESSAGE_START GpuMsgStart
-
-IPC_ENUM_TRAITS_MAX_VALUE(content::CauseForGpuLaunch,
- content::CAUSE_FOR_GPU_LAUNCH_MAX_ENUM - 1)
-IPC_ENUM_TRAITS_MAX_VALUE(content::CreateCommandBufferResult,
- content::CREATE_COMMAND_BUFFER_RESULT_LAST)
-IPC_ENUM_TRAITS_MAX_VALUE(gfx::GpuPreference,
- gfx::GpuPreferenceLast)
-IPC_ENUM_TRAITS_MAX_VALUE(content::GpuStreamPriority,
- content::GpuStreamPriority::LAST)
-IPC_ENUM_TRAITS_MAX_VALUE(gfx::SurfaceType,
- gfx::SURFACE_TYPE_LAST)
-IPC_ENUM_TRAITS_MAX_VALUE(gfx::SwapResult, gfx::SwapResult::SWAP_RESULT_LAST)
-IPC_ENUM_TRAITS_MAX_VALUE(gpu::MemoryAllocation::PriorityCutoff,
- gpu::MemoryAllocation::CUTOFF_LAST)
-IPC_ENUM_TRAITS_MAX_VALUE(gpu::error::ContextLostReason,
- gpu::error::kContextLostReasonLast)
-IPC_ENUM_TRAITS_MAX_VALUE(media::JpegDecodeAccelerator::Error,
- media::JpegDecodeAccelerator::LARGEST_ERROR_ENUM)
-IPC_ENUM_TRAITS_MAX_VALUE(media::VideoEncodeAccelerator::Error,
- media::VideoEncodeAccelerator::kErrorMax)
-IPC_ENUM_TRAITS_MIN_MAX_VALUE(media::VideoCodecProfile,
- media::VIDEO_CODEC_PROFILE_MIN,
- media::VIDEO_CODEC_PROFILE_MAX)
-IPC_ENUM_TRAITS_MIN_MAX_VALUE(gpu::CollectInfoResult,
- gpu::kCollectInfoNone,
- gpu::kCollectInfoFatalFailure)
-IPC_ENUM_TRAITS_MIN_MAX_VALUE(gpu::VideoCodecProfile,
- gpu::VIDEO_CODEC_PROFILE_MIN,
- gpu::VIDEO_CODEC_PROFILE_MAX)
-
-IPC_STRUCT_BEGIN(GPUCreateCommandBufferConfig)
- IPC_STRUCT_MEMBER(int32_t, share_group_id)
- IPC_STRUCT_MEMBER(int32_t, stream_id)
- IPC_STRUCT_MEMBER(content::GpuStreamPriority, stream_priority)
- IPC_STRUCT_MEMBER(std::vector<int>, attribs)
- IPC_STRUCT_MEMBER(GURL, active_url)
- IPC_STRUCT_MEMBER(gfx::GpuPreference, gpu_preference)
-IPC_STRUCT_END()
-
-IPC_STRUCT_BEGIN(GpuMsg_EstablishChannel_Params)
- IPC_STRUCT_MEMBER(int, client_id)
- IPC_STRUCT_MEMBER(uint64_t, client_tracing_id)
- IPC_STRUCT_MEMBER(bool, preempts)
- IPC_STRUCT_MEMBER(bool, preempted)
- IPC_STRUCT_MEMBER(bool, allow_future_sync_points)
- IPC_STRUCT_MEMBER(bool, allow_real_time_streams)
-IPC_STRUCT_END()
-
-IPC_STRUCT_BEGIN(GpuMsg_CreateGpuMemoryBuffer_Params)
- IPC_STRUCT_MEMBER(gfx::GpuMemoryBufferId, id)
- IPC_STRUCT_MEMBER(gfx::Size, size)
- IPC_STRUCT_MEMBER(gfx::BufferFormat, format)
- IPC_STRUCT_MEMBER(gfx::BufferUsage, usage)
- IPC_STRUCT_MEMBER(int32_t, client_id)
- IPC_STRUCT_MEMBER(gfx::PluginWindowHandle, surface_handle)
-IPC_STRUCT_END()
-
-IPC_STRUCT_BEGIN(GpuMsg_CreateGpuMemoryBufferFromHandle_Params)
- IPC_STRUCT_MEMBER(gfx::GpuMemoryBufferHandle, handle)
- IPC_STRUCT_MEMBER(gfx::GpuMemoryBufferId, id)
- IPC_STRUCT_MEMBER(gfx::Size, size)
- IPC_STRUCT_MEMBER(gfx::BufferFormat, format)
- IPC_STRUCT_MEMBER(int32_t, client_id)
-IPC_STRUCT_END()
-
-#if defined(OS_MACOSX)
-IPC_STRUCT_BEGIN(GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params)
- IPC_STRUCT_MEMBER(int32_t, surface_id)
- // Only one of ca_context_id or io_surface may be non-0.
- IPC_STRUCT_MEMBER(CAContextID, ca_context_id)
- IPC_STRUCT_MEMBER(gfx::ScopedRefCountedIOSurfaceMachPort, io_surface)
- IPC_STRUCT_MEMBER(int32_t, route_id)
- IPC_STRUCT_MEMBER(gfx::Size, size)
- IPC_STRUCT_MEMBER(float, scale_factor)
- IPC_STRUCT_MEMBER(std::vector<ui::LatencyInfo>, latency_info)
-IPC_STRUCT_END()
-
-IPC_STRUCT_BEGIN(AcceleratedSurfaceMsg_BufferPresented_Params)
- // The vsync parameters, to synchronize presentation with the display.
- IPC_STRUCT_MEMBER(base::TimeTicks, vsync_timebase)
- IPC_STRUCT_MEMBER(base::TimeDelta, vsync_interval)
-IPC_STRUCT_END()
-#endif
-
-IPC_STRUCT_BEGIN(AcceleratedJpegDecoderMsg_Decode_Params)
- IPC_STRUCT_MEMBER(int32_t, input_buffer_id)
- IPC_STRUCT_MEMBER(gfx::Size, coded_size)
- IPC_STRUCT_MEMBER(base::SharedMemoryHandle, input_buffer_handle)
- IPC_STRUCT_MEMBER(uint32_t, input_buffer_size)
- IPC_STRUCT_MEMBER(base::SharedMemoryHandle, output_video_frame_handle)
- IPC_STRUCT_MEMBER(uint32_t, output_buffer_size)
-IPC_STRUCT_END()
-
-IPC_STRUCT_BEGIN(AcceleratedVideoDecoderMsg_Decode_Params)
- IPC_STRUCT_MEMBER(int32_t, bitstream_buffer_id)
- IPC_STRUCT_MEMBER(base::SharedMemoryHandle, buffer_handle)
- IPC_STRUCT_MEMBER(uint32_t, size)
- IPC_STRUCT_MEMBER(base::TimeDelta, presentation_timestamp)
- IPC_STRUCT_MEMBER(std::string, key_id)
- IPC_STRUCT_MEMBER(std::string, iv)
- IPC_STRUCT_MEMBER(std::vector<media::SubsampleEntry>, subsamples)
-IPC_STRUCT_END()
-
-IPC_STRUCT_BEGIN(AcceleratedVideoEncoderMsg_Encode_Params)
- IPC_STRUCT_MEMBER(int32_t, frame_id)
- IPC_STRUCT_MEMBER(base::TimeDelta, timestamp)
- IPC_STRUCT_MEMBER(base::SharedMemoryHandle, buffer_handle)
- IPC_STRUCT_MEMBER(uint32_t, buffer_offset)
- IPC_STRUCT_MEMBER(uint32_t, buffer_size)
- IPC_STRUCT_MEMBER(bool, force_keyframe)
-IPC_STRUCT_END()
-
-IPC_STRUCT_BEGIN(AcceleratedVideoEncoderMsg_Encode_Params2)
- IPC_STRUCT_MEMBER(int32_t, frame_id)
- IPC_STRUCT_MEMBER(base::TimeDelta, timestamp)
- IPC_STRUCT_MEMBER(std::vector<gfx::GpuMemoryBufferHandle>,
- gpu_memory_buffer_handles)
- IPC_STRUCT_MEMBER(gfx::Size, size)
- IPC_STRUCT_MEMBER(bool, force_keyframe)
-IPC_STRUCT_END()
-
-IPC_STRUCT_BEGIN(GPUCommandBufferConsoleMessage)
- IPC_STRUCT_MEMBER(int32_t, id)
- IPC_STRUCT_MEMBER(std::string, message)
-IPC_STRUCT_END()
-
-#if defined(OS_ANDROID)
-IPC_STRUCT_BEGIN(GpuStreamTextureMsg_MatrixChanged_Params)
- IPC_STRUCT_MEMBER(float, m00)
- IPC_STRUCT_MEMBER(float, m01)
- IPC_STRUCT_MEMBER(float, m02)
- IPC_STRUCT_MEMBER(float, m03)
- IPC_STRUCT_MEMBER(float, m10)
- IPC_STRUCT_MEMBER(float, m11)
- IPC_STRUCT_MEMBER(float, m12)
- IPC_STRUCT_MEMBER(float, m13)
- IPC_STRUCT_MEMBER(float, m20)
- IPC_STRUCT_MEMBER(float, m21)
- IPC_STRUCT_MEMBER(float, m22)
- IPC_STRUCT_MEMBER(float, m23)
- IPC_STRUCT_MEMBER(float, m30)
- IPC_STRUCT_MEMBER(float, m31)
- IPC_STRUCT_MEMBER(float, m32)
- IPC_STRUCT_MEMBER(float, m33)
-IPC_STRUCT_END()
-#endif
-
-IPC_STRUCT_BEGIN(GpuCommandBufferMsg_CreateImage_Params)
- IPC_STRUCT_MEMBER(int32_t, id)
- IPC_STRUCT_MEMBER(gfx::GpuMemoryBufferHandle, gpu_memory_buffer)
- IPC_STRUCT_MEMBER(gfx::Size, size)
- IPC_STRUCT_MEMBER(gfx::BufferFormat, format)
- IPC_STRUCT_MEMBER(uint32_t, internal_format)
- IPC_STRUCT_MEMBER(uint64_t, image_release_count)
-IPC_STRUCT_END()
-
-IPC_STRUCT_TRAITS_BEGIN(gpu::DxDiagNode)
- IPC_STRUCT_TRAITS_MEMBER(values)
- IPC_STRUCT_TRAITS_MEMBER(children)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(gpu::GPUInfo::GPUDevice)
- IPC_STRUCT_TRAITS_MEMBER(vendor_id)
- IPC_STRUCT_TRAITS_MEMBER(device_id)
- IPC_STRUCT_TRAITS_MEMBER(active)
- IPC_STRUCT_TRAITS_MEMBER(vendor_string)
- IPC_STRUCT_TRAITS_MEMBER(device_string)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(media::VideoDecodeAccelerator::Config)
- IPC_STRUCT_TRAITS_MEMBER(profile)
- IPC_STRUCT_TRAITS_MEMBER(is_encrypted)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(gpu::VideoDecodeAcceleratorSupportedProfile)
- IPC_STRUCT_TRAITS_MEMBER(profile)
- IPC_STRUCT_TRAITS_MEMBER(max_resolution)
- IPC_STRUCT_TRAITS_MEMBER(min_resolution)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(gpu::VideoDecodeAcceleratorCapabilities)
- IPC_STRUCT_TRAITS_MEMBER(supported_profiles)
- IPC_STRUCT_TRAITS_MEMBER(flags)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(gpu::VideoEncodeAcceleratorSupportedProfile)
- IPC_STRUCT_TRAITS_MEMBER(profile)
- IPC_STRUCT_TRAITS_MEMBER(max_resolution)
- IPC_STRUCT_TRAITS_MEMBER(max_framerate_numerator)
- IPC_STRUCT_TRAITS_MEMBER(max_framerate_denominator)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(gpu::GPUInfo)
- IPC_STRUCT_TRAITS_MEMBER(initialization_time)
- IPC_STRUCT_TRAITS_MEMBER(optimus)
- IPC_STRUCT_TRAITS_MEMBER(amd_switchable)
- IPC_STRUCT_TRAITS_MEMBER(lenovo_dcute)
- IPC_STRUCT_TRAITS_MEMBER(gpu)
- IPC_STRUCT_TRAITS_MEMBER(secondary_gpus)
- IPC_STRUCT_TRAITS_MEMBER(adapter_luid)
- IPC_STRUCT_TRAITS_MEMBER(driver_vendor)
- IPC_STRUCT_TRAITS_MEMBER(driver_version)
- IPC_STRUCT_TRAITS_MEMBER(driver_date)
- IPC_STRUCT_TRAITS_MEMBER(pixel_shader_version)
- IPC_STRUCT_TRAITS_MEMBER(vertex_shader_version)
- IPC_STRUCT_TRAITS_MEMBER(max_msaa_samples)
- IPC_STRUCT_TRAITS_MEMBER(machine_model_name)
- IPC_STRUCT_TRAITS_MEMBER(machine_model_version)
- IPC_STRUCT_TRAITS_MEMBER(gl_version)
- IPC_STRUCT_TRAITS_MEMBER(gl_vendor)
- IPC_STRUCT_TRAITS_MEMBER(gl_renderer)
- IPC_STRUCT_TRAITS_MEMBER(gl_extensions)
- IPC_STRUCT_TRAITS_MEMBER(gl_ws_vendor)
- IPC_STRUCT_TRAITS_MEMBER(gl_ws_version)
- IPC_STRUCT_TRAITS_MEMBER(gl_ws_extensions)
- IPC_STRUCT_TRAITS_MEMBER(gl_reset_notification_strategy)
- IPC_STRUCT_TRAITS_MEMBER(can_lose_context)
- IPC_STRUCT_TRAITS_MEMBER(software_rendering)
- IPC_STRUCT_TRAITS_MEMBER(direct_rendering)
- IPC_STRUCT_TRAITS_MEMBER(sandboxed)
- IPC_STRUCT_TRAITS_MEMBER(process_crash_count)
- IPC_STRUCT_TRAITS_MEMBER(in_process_gpu)
- IPC_STRUCT_TRAITS_MEMBER(basic_info_state)
- IPC_STRUCT_TRAITS_MEMBER(context_info_state)
-#if defined(OS_WIN)
- IPC_STRUCT_TRAITS_MEMBER(dx_diagnostics_info_state)
- IPC_STRUCT_TRAITS_MEMBER(dx_diagnostics)
-#endif
- IPC_STRUCT_TRAITS_MEMBER(video_decode_accelerator_capabilities)
- IPC_STRUCT_TRAITS_MEMBER(video_encode_accelerator_supported_profiles)
- IPC_STRUCT_TRAITS_MEMBER(jpeg_decode_accelerator_supported)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(content::GPUVideoMemoryUsageStats::ProcessStats)
- IPC_STRUCT_TRAITS_MEMBER(video_memory)
- IPC_STRUCT_TRAITS_MEMBER(has_duplicates)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(content::GPUVideoMemoryUsageStats)
- IPC_STRUCT_TRAITS_MEMBER(process_map)
- IPC_STRUCT_TRAITS_MEMBER(bytes_allocated)
- IPC_STRUCT_TRAITS_MEMBER(bytes_allocated_historical_max)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(content::GPUMemoryUmaStats)
- IPC_STRUCT_TRAITS_MEMBER(bytes_allocated_current)
- IPC_STRUCT_TRAITS_MEMBER(bytes_allocated_max)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(gpu::MemoryAllocation)
- IPC_STRUCT_TRAITS_MEMBER(bytes_limit_when_visible)
- IPC_STRUCT_TRAITS_MEMBER(priority_cutoff_when_visible)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(gfx::GLSurfaceHandle)
- IPC_STRUCT_TRAITS_MEMBER(handle)
- IPC_STRUCT_TRAITS_MEMBER(transport_type)
-IPC_STRUCT_TRAITS_END()
-
-IPC_STRUCT_TRAITS_BEGIN(media::SubsampleEntry)
- IPC_STRUCT_TRAITS_MEMBER(clear_bytes)
- IPC_STRUCT_TRAITS_MEMBER(cypher_bytes)
-IPC_STRUCT_TRAITS_END()
-
-//------------------------------------------------------------------------------
-// GPU Messages
-// These are messages from the browser to the GPU process.
-
-// Tells the GPU process to initialize itself. The browser explicitly
-// requests this be done so that we are guaranteed that the channel is set
-// up between the browser and GPU process before doing any work that might
-// potentially crash the GPU process. Detection of the child process
-// exiting abruptly is predicated on having the IPC channel set up.
-IPC_MESSAGE_CONTROL0(GpuMsg_Initialize)
-
-// Tells the GPU process to shut itself down.
-IPC_MESSAGE_CONTROL0(GpuMsg_Finalize)
-
-// Tells the GPU process to create a new channel for communication with a
-// given client. The channel name is returned in a
-// GpuHostMsg_ChannelEstablished message. The client ID is passed so that
-// the GPU process reuses an existing channel to that process if it exists.
-// This ID is a unique opaque identifier generated by the browser process.
-// The client_tracing_id is a unique ID used for the purposes of tracing.
-IPC_MESSAGE_CONTROL1(GpuMsg_EstablishChannel,
- GpuMsg_EstablishChannel_Params /* params */)
-
-// Tells the GPU process to close the channel identified by IPC channel
-// handle. If no channel can be identified, do nothing.
-IPC_MESSAGE_CONTROL1(GpuMsg_CloseChannel,
- IPC::ChannelHandle /* channel_handle */)
-
-// Tells the GPU process to create a new command buffer that renders directly
-// to a native view. A corresponding GpuCommandBufferStub is created.
-IPC_MESSAGE_CONTROL4(GpuMsg_CreateViewCommandBuffer,
- gfx::GLSurfaceHandle, /* compositing_surface */
- int32_t, /* client_id */
- GPUCreateCommandBufferConfig, /* init_params */
- int32_t /* route_id */)
-
-// Tells the GPU process to create a new gpu memory buffer.
-IPC_MESSAGE_CONTROL1(GpuMsg_CreateGpuMemoryBuffer,
- GpuMsg_CreateGpuMemoryBuffer_Params)
-
-// Tells the GPU process to create a new gpu memory buffer from an existing
-// handle.
-IPC_MESSAGE_CONTROL1(GpuMsg_CreateGpuMemoryBufferFromHandle,
- GpuMsg_CreateGpuMemoryBufferFromHandle_Params)
-
-// Tells the GPU process to destroy the given buffer.
-IPC_MESSAGE_CONTROL3(GpuMsg_DestroyGpuMemoryBuffer,
- gfx::GpuMemoryBufferId, /* id */
- int32_t, /* client_id */
- gpu::SyncToken /* sync_token */)
-
-// Create and initialize a hardware jpeg decoder using the specified route_id.
-// Created decoders should be freed with AcceleratedJpegDecoderMsg_Destroy when
-// no longer needed.
-IPC_SYNC_MESSAGE_CONTROL1_1(GpuMsg_CreateJpegDecoder,
- int32_t /* route_id */,
- bool /* succeeded */)
-
-// Tells the GPU process to create a context for collecting graphics card
-// information.
-IPC_MESSAGE_CONTROL0(GpuMsg_CollectGraphicsInfo)
-
-// Tells the GPU process to report video_memory information for the task manager
-IPC_MESSAGE_CONTROL0(GpuMsg_GetVideoMemoryUsageStats)
-
-#if defined(OS_MACOSX)
-// Tells the GPU process that the browser process has handled the swap
-// buffers or post sub-buffer request.
-IPC_MESSAGE_ROUTED1(AcceleratedSurfaceMsg_BufferPresented,
- AcceleratedSurfaceMsg_BufferPresented_Params)
-#endif
-
-#if defined(OS_ANDROID)
-// Tells the GPU process to wake up the GPU because we're about to draw.
-IPC_MESSAGE_CONTROL0(GpuMsg_WakeUpGpu)
-#endif
-
-// Tells the GPU process to remove all contexts.
-IPC_MESSAGE_CONTROL0(GpuMsg_Clean)
-
-// Tells the GPU process to crash.
-IPC_MESSAGE_CONTROL0(GpuMsg_Crash)
-
-// Tells the GPU process to hang.
-IPC_MESSAGE_CONTROL0(GpuMsg_Hang)
-
-// Tells the GPU process to disable the watchdog thread.
-IPC_MESSAGE_CONTROL0(GpuMsg_DisableWatchdog)
-
-// Tells the GPU process that the browser has seen a GPU switch.
-IPC_MESSAGE_CONTROL0(GpuMsg_GpuSwitched)
-
-// Sends an input event to the gpu service.
-IPC_MESSAGE_CONTROL3(GpuMsg_UpdateValueState,
- int, /* client_id */
- unsigned int, /* target */
- gpu::ValueState /* valuestate */)
-
-//------------------------------------------------------------------------------
-// GPU Host Messages
-// These are messages to the browser.
-
-// A renderer sends this when it wants to create a connection to the GPU
-// process. The browser will create the GPU process if necessary, and will
-// return a handle to the channel via a GpuChannelEstablished message.
-IPC_SYNC_MESSAGE_CONTROL1_3(GpuHostMsg_EstablishGpuChannel,
- content::CauseForGpuLaunch,
- int /* client id */,
- IPC::ChannelHandle /* handle to channel */,
- gpu::GPUInfo /* stats about GPU process*/)
-
-// Response from GPU to a GpuMsg_Initialize message.
-IPC_MESSAGE_CONTROL2(GpuHostMsg_Initialized,
- bool /* result */,
- ::gpu::GPUInfo /* gpu_info */)
-
-// Response from GPU to a GpuMsg_EstablishChannel message.
-IPC_MESSAGE_CONTROL1(GpuHostMsg_ChannelEstablished,
- IPC::ChannelHandle /* channel_handle */)
-
-// Message from GPU to notify to destroy the channel.
-IPC_MESSAGE_CONTROL1(GpuHostMsg_DestroyChannel, int32_t /* client_id */)
-
-// Message to cache the given shader information.
-IPC_MESSAGE_CONTROL3(GpuHostMsg_CacheShader,
- int32_t /* client_id */,
- std::string /* key */,
- std::string /* shader */)
-
-// Message to the GPU that a shader was loaded from disk.
-IPC_MESSAGE_CONTROL1(GpuMsg_LoadedShader,
- std::string /* encoded shader */)
-
-// Response from GPU to a GpuMsg_CreateViewCommandBuffer message.
-IPC_MESSAGE_CONTROL1(GpuHostMsg_CommandBufferCreated,
- content::CreateCommandBufferResult /* result */)
-
-// Response from GPU to a GpuMsg_CreateGpuMemoryBuffer message.
-IPC_MESSAGE_CONTROL1(GpuHostMsg_GpuMemoryBufferCreated,
- gfx::GpuMemoryBufferHandle /* handle */)
-
-// Response from GPU to a GpuMsg_CollectGraphicsInfo.
-IPC_MESSAGE_CONTROL1(GpuHostMsg_GraphicsInfoCollected,
- gpu::GPUInfo /* GPU logging stats */)
-
-// Response from GPU to a GpuMsg_GetVideoMemoryUsageStats message.
-IPC_MESSAGE_CONTROL1(GpuHostMsg_VideoMemoryUsageStats,
- content::GPUVideoMemoryUsageStats /* GPU memory stats */)
-
-// Message from GPU to add a GPU log message to the about:gpu page.
-IPC_MESSAGE_CONTROL3(GpuHostMsg_OnLogMessage,
- int /*severity*/,
- std::string /* header */,
- std::string /* message */)
-
-
-#if defined(OS_MACOSX)
-// Tells the browser that an accelerated surface has swapped.
-IPC_MESSAGE_CONTROL1(GpuHostMsg_AcceleratedSurfaceBuffersSwapped,
- GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params)
-#endif
-
-#if defined(OS_WIN)
-IPC_MESSAGE_CONTROL2(GpuHostMsg_AcceleratedSurfaceCreatedChildWindow,
- gfx::PluginWindowHandle /* parent_window */,
- gfx::PluginWindowHandle /* child_window */)
-#endif
-
-IPC_MESSAGE_CONTROL1(GpuHostMsg_DidCreateOffscreenContext,
- GURL /* url */)
-
-IPC_MESSAGE_CONTROL3(GpuHostMsg_DidLoseContext,
- bool /* offscreen */,
- gpu::error::ContextLostReason /* reason */,
- GURL /* url */)
-
-IPC_MESSAGE_CONTROL1(GpuHostMsg_DidDestroyOffscreenContext,
- GURL /* url */)
-
-// Tells the browser about GPU memory usage statistics for UMA logging.
-IPC_MESSAGE_CONTROL1(GpuHostMsg_GpuMemoryUmaStats,
- content::GPUMemoryUmaStats /* GPU memory UMA stats */)
-
-// Tells the browser that a context has subscribed to a new target and
-// the browser should start sending the corresponding information
-IPC_MESSAGE_CONTROL2(GpuHostMsg_AddSubscription,
- int32_t /* client_id */,
- unsigned int /* target */)
-
-// Tells the browser that no contexts are subscribed to the target anymore
-// so the browser should stop sending the corresponding information
-IPC_MESSAGE_CONTROL2(GpuHostMsg_RemoveSubscription,
- int32_t /* client_id */,
- unsigned int /* target */)
-
-//------------------------------------------------------------------------------
-// GPU Channel Messages
-// These are messages from a renderer process to the GPU process.
-
-// Tells the GPU process to create a new command buffer that renders to an
-// offscreen frame buffer.
-IPC_SYNC_MESSAGE_CONTROL3_1(GpuChannelMsg_CreateOffscreenCommandBuffer,
- gfx::Size, /* size */
- GPUCreateCommandBufferConfig, /* init_params */
- int32_t, /* route_id */
- bool /* succeeded */)
-
-// The CommandBufferProxy sends this to the GpuCommandBufferStub in its
-// destructor, so that the stub deletes the actual CommandBufferService
-// object that it's hosting.
-IPC_SYNC_MESSAGE_CONTROL1_0(GpuChannelMsg_DestroyCommandBuffer,
- int32_t /* instance_id */)
-
-// Simple NOP message which can be used as a fence to ensure all previously
-// sent messages have been received.
-IPC_SYNC_MESSAGE_CONTROL0_0(GpuChannelMsg_Nop)
-
-#if defined(OS_ANDROID)
-//------------------------------------------------------------------------------
-// Stream Texture Messages
-// Tells the GPU process to create and send the Java surface texture object to
-// the renderer process through the binder thread.
-IPC_MESSAGE_ROUTED2(GpuStreamTextureMsg_EstablishPeer,
- int32_t, /* primary_id */
- int32_t /* secondary_id */)
-
-// Tells the GPU process to set the size of StreamTexture from the given
-// stream Id.
-IPC_MESSAGE_ROUTED1(GpuStreamTextureMsg_SetSize,
- gfx::Size /* size */)
-
-// Tells the service-side instance to start sending frame available
-// notifications.
-IPC_MESSAGE_ROUTED0(GpuStreamTextureMsg_StartListening)
-
-// Inform the renderer that a new frame is available.
-IPC_MESSAGE_ROUTED0(GpuStreamTextureMsg_FrameAvailable)
-
-// Inform the renderer process that the transform matrix has changed.
-IPC_MESSAGE_ROUTED1(GpuStreamTextureMsg_MatrixChanged,
- GpuStreamTextureMsg_MatrixChanged_Params /* params */)
-#endif
-
-//------------------------------------------------------------------------------
-// GPU Command Buffer Messages
-// These are messages between a renderer process and the GPU process relating to
-// a single OpenGL context.
-// Initialize a command buffer with the given number of command entries.
-// Returns the shared memory handle for the command buffer mapped to the
-// calling process.
-IPC_SYNC_MESSAGE_ROUTED1_2(GpuCommandBufferMsg_Initialize,
- base::SharedMemoryHandle /* shared_state */,
- bool /* result */,
- gpu::Capabilities /* capabilities */)
-
-// Sets the shared memory buffer used for commands.
-IPC_SYNC_MESSAGE_ROUTED1_0(GpuCommandBufferMsg_SetGetBuffer,
- int32_t /* shm_id */)
-
-// Produces the front buffer into a mailbox. This allows another context to draw
-// the output of this context.
-IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_ProduceFrontBuffer,
- gpu::Mailbox /* mailbox */)
-
-// Wait until the token is in a specific range, inclusive.
-IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_WaitForTokenInRange,
- int32_t /* start */,
- int32_t /* end */,
- gpu::CommandBuffer::State /* state */)
-
-// Wait until the get offset is in a specific range, inclusive.
-IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_WaitForGetOffsetInRange,
- int32_t /* start */,
- int32_t /* end */,
- gpu::CommandBuffer::State /* state */)
-
-// Asynchronously synchronize the put and get offsets of both processes.
-// Caller passes its current put offset. Current state (including get offset)
-// is returned in shared memory. The input latency info for the current
-// frame is also sent to the GPU process.
-IPC_MESSAGE_ROUTED3(GpuCommandBufferMsg_AsyncFlush,
- int32_t /* put_offset */,
- uint32_t /* flush_count */,
- std::vector<ui::LatencyInfo> /* latency_info */)
-
-// Sent by the GPU process to display messages in the console.
-IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_ConsoleMsg,
- GPUCommandBufferConsoleMessage /* msg */)
-
-// Register an existing shared memory transfer buffer. The id can then be
-// used to identify the transfer buffer from a command buffer.
-IPC_MESSAGE_ROUTED3(GpuCommandBufferMsg_RegisterTransferBuffer,
- int32_t /* id */,
- base::SharedMemoryHandle /* transfer_buffer */,
- uint32_t /* size */)
-
-// Destroy a previously created transfer buffer.
-IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_DestroyTransferBuffer, int32_t /* id */)
-
-// Create and initialize a hardware video decoder using the specified route_id.
-// Created decoders should be freed with AcceleratedVideoDecoderMsg_Destroy when
-// no longer needed.
-IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_CreateVideoDecoder,
- media::VideoDecodeAccelerator::Config, /* config */
- int32_t, /* route_id */
- bool /* succeeded */)
-
-// Create and initialize a hardware video encoder using the specified route_id.
-// Created encoders should be freed with AcceleratedVideoEncoderMsg_Destroy when
-// no longer needed.
-IPC_SYNC_MESSAGE_ROUTED5_1(GpuCommandBufferMsg_CreateVideoEncoder,
- media::VideoPixelFormat /* input_format */,
- gfx::Size /* input_visible_size */,
- media::VideoCodecProfile /* output_profile */,
- uint32_t /* initial_bitrate */,
- int32_t, /* route_id */
- bool /* succeeded */)
-
-// Tells the proxy that there was an error and the command buffer had to be
-// destroyed for some reason.
-IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_Destroyed,
- gpu::error::ContextLostReason, /* reason */
- gpu::error::Error /* error */)
-
-// Tells the browser that SwapBuffers returned and passes latency info
-IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_SwapBuffersCompleted,
- std::vector<ui::LatencyInfo> /* latency_info */,
- gfx::SwapResult /* result */)
-
-// Tells the browser about updated parameters for vsync alignment.
-IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_UpdateVSyncParameters,
- base::TimeTicks /* timebase */,
- base::TimeDelta /* interval */)
-
-// Inserts a sync point into the channel. This is handled on the IO thread, so
-// it can be expected to be reasonably fast, but the sync point is actually
-// retired in order with respect to the other calls. The sync point is shared
-// across channels.
-IPC_SYNC_MESSAGE_ROUTED1_1(GpuCommandBufferMsg_InsertSyncPoint,
- bool /* retire */,
- uint32_t /* sync_point */)
-
-// Retires the sync point.
-IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_RetireSyncPoint,
- uint32_t /* sync_point */)
-
-// Makes this command buffer signal when a sync point is reached, by sending
-// back a GpuCommandBufferMsg_SignalAck message with the same
-// signal_id.
-IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_SignalSyncPoint,
- uint32_t /* sync_point */,
- uint32_t /* signal_id */)
-
-IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_SignalSyncToken,
- gpu::SyncToken /* sync_token */,
- uint32_t /* signal_id */)
-
-// Makes this command buffer signal when a query is reached, by sending
-// back a GpuCommandBufferMsg_SignalAck message with the same
-// signal_id.
-IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_SignalQuery,
- uint32_t /* query */,
- uint32_t /* signal_id */)
-
-// Response to SignalSyncPoint, SignalSyncToken, and SignalQuery.
-IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_SignalAck, uint32_t /* signal_id */)
-
-// Create an image from an existing gpu memory buffer. The id can be used to
-// identify the image from a command buffer.
-IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_CreateImage,
- GpuCommandBufferMsg_CreateImage_Params /* params */)
-
-// Destroy a previously created image.
-IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_DestroyImage, int32_t /* id */)
-
-// Attaches an external image stream to the client texture.
-IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_CreateStreamTexture,
- uint32_t, /* client_texture_id */
- int32_t, /* stream_id */
- bool /* succeeded */)
-
-//------------------------------------------------------------------------------
-// Accelerated Video Decoder Messages
-// These messages are sent from Renderer process to GPU process.
-
-// Set a CDM on the decoder to handle encrypted buffers.
-IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderMsg_SetCdm,
- int32_t) /* CDM ID */
-
-// Send input buffer for decoding.
-IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderMsg_Decode,
- AcceleratedVideoDecoderMsg_Decode_Params)
-
-// Give the texture IDs for the textures the decoder will use for output.
-IPC_MESSAGE_ROUTED2(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
- std::vector<int32_t>, /* Picture buffer ID */
- std::vector<uint32_t>) /* Texture ID */
-
-// Sent from the Renderer process to the GPU process to recycle the given picture
-// buffer for further decoding.
-IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
- int32_t) /* Picture buffer ID */
-
-// Send flush request to the decoder.
-IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderMsg_Flush)
-
-// Send reset request to the decoder.
-IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderMsg_Reset)
-
-// Send destroy request to the decoder.
-IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderMsg_Destroy)
-
-//------------------------------------------------------------------------------
-// Accelerated Video Decoder Host Messages
-// These messages are sent from GPU process to Renderer process.
-// Inform AcceleratedVideoDecoderHost that AcceleratedVideoDecoder has been
-// created.
-
-// Notify the CDM setting result.
-IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderHostMsg_CdmAttached,
- bool) /* success */
-
-// Accelerated video decoder has consumed input buffer from transfer buffer.
-IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed,
- int32_t) /* Processed buffer ID */
-
-// Allocate video frames for output of the hardware video decoder.
-IPC_MESSAGE_ROUTED3(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers,
- int32_t, /* Number of video frames to generate */
- gfx::Size, /* Requested size of buffer */
- uint32_t) /* Texture target */
-
-// Decoder reports that a picture is ready and buffer does not need to be passed
-// back to the decoder.
-IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer,
- int32_t) /* Picture buffer ID */
-
-// Decoder reports that a picture is ready.
-IPC_MESSAGE_ROUTED4(AcceleratedVideoDecoderHostMsg_PictureReady,
- int32_t, /* Picture buffer ID */
- int32_t, /* Bitstream buffer ID */
- gfx::Rect, /* Visible rectangle */
- bool) /* Buffer is HW overlay capable */
-
-// Confirm decoder has been flushed.
-IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderHostMsg_FlushDone)
-
-// Confirm decoder has been reset.
-IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderHostMsg_ResetDone)
-
-// Video decoder has encountered an error.
-IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderHostMsg_ErrorNotification,
- uint32_t) /* Error ID */
-
-//------------------------------------------------------------------------------
-// Accelerated Video Encoder Messages
-// These messages are sent from the Renderer process to GPU process.
-
-// Queue a video frame to the encoder to encode. |frame_id| will be returned
-// by AcceleratedVideoEncoderHostMsg_NotifyInputDone.
-IPC_MESSAGE_ROUTED1(AcceleratedVideoEncoderMsg_Encode,
- AcceleratedVideoEncoderMsg_Encode_Params)
-
-// Queue a GpuMemoryBuffer backed video frame to the encoder to encode.
-// |frame_id| will be returned by
-// AcceleratedVideoEncoderHostMsg_NotifyInputDone.
-IPC_MESSAGE_ROUTED1(AcceleratedVideoEncoderMsg_Encode2,
- AcceleratedVideoEncoderMsg_Encode_Params2)
-
-// Queue a buffer to the encoder for use in returning output. |buffer_id| will
-// be returned by AcceleratedVideoEncoderHostMsg_BitstreamBufferReady.
-IPC_MESSAGE_ROUTED3(AcceleratedVideoEncoderMsg_UseOutputBitstreamBuffer,
- int32_t /* buffer_id */,
- base::SharedMemoryHandle /* buffer_handle */,
- uint32_t /* buffer_size */)
-
-// Request a runtime encoding parameter change.
-IPC_MESSAGE_ROUTED2(AcceleratedVideoEncoderMsg_RequestEncodingParametersChange,
- uint32_t /* bitrate */,
- uint32_t /* framerate */)
-
-//------------------------------------------------------------------------------
-// Accelerated Video Encoder Host Messages
-// These messages are sent from GPU process to Renderer process.
-
-// Notify renderer of the input/output buffer requirements of the encoder.
-IPC_MESSAGE_ROUTED3(AcceleratedVideoEncoderHostMsg_RequireBitstreamBuffers,
- uint32_t /* input_count */,
- gfx::Size /* input_coded_size */,
- uint32_t /* output_buffer_size */)
-
-// Notify the renderer that the encoder has finished using an input buffer.
-// There is no congruent entry point in the media::VideoEncodeAccelerator
-// interface; in VEA this same done condition is indicated by dropping the
-// reference to the media::VideoFrame passed to VEA::Encode().
-IPC_MESSAGE_ROUTED1(AcceleratedVideoEncoderHostMsg_NotifyInputDone,
- int32_t /* frame_id */)
-
-// Notify the renderer that an output buffer has been filled with encoded data.
-IPC_MESSAGE_ROUTED3(AcceleratedVideoEncoderHostMsg_BitstreamBufferReady,
- int32_t /* bitstream_buffer_id */,
- uint32_t /* payload_size */,
- bool /* key_frame */)
-
-// Report error condition.
-IPC_MESSAGE_ROUTED1(AcceleratedVideoEncoderHostMsg_NotifyError,
- media::VideoEncodeAccelerator::Error /* error */)
-
-// Send destroy request to the encoder.
-IPC_MESSAGE_ROUTED0(AcceleratedVideoEncoderMsg_Destroy)
-
-//------------------------------------------------------------------------------
-// Accelerated JPEG Decoder Messages
-// These messages are sent from the Browser process to GPU process.
-
-// Decode one JPEG image from shared memory |input_buffer_handle| with size
-// |input_buffer_size|. The input buffer is associated with |input_buffer_id|
-// and the size of the JPEG image is |coded_size|. Decoded I420 frame data will
-// be written to the shared memory associated with |output_video_frame_handle|,
-// with size limit |output_buffer_size|.
-IPC_MESSAGE_ROUTED1(AcceleratedJpegDecoderMsg_Decode,
- AcceleratedJpegDecoderMsg_Decode_Params)
-
-// Send destroy request to the decoder.
-IPC_MESSAGE_ROUTED0(AcceleratedJpegDecoderMsg_Destroy)
-
-//------------------------------------------------------------------------------
-// Accelerated JPEG Decoder Host Messages
-// These messages are sent from the GPU process to Browser process.
-//
-// Report decode status.
-IPC_MESSAGE_ROUTED2(AcceleratedJpegDecoderHostMsg_DecodeAck,
- int32_t, /* bitstream_buffer_id */
- media::JpegDecodeAccelerator::Error /* error */)
-
-#if defined(OS_CHROMEOS)
-//------------------------------------------------------------------------------
-// Arc Video Accelerator Messages
-// These messages are sent from the Browser process to GPU process.
-
-// Tells the GPU process to create a new channel for communication with
-// ArcVideoAccelerator. The channel is returned using
-// GpuHostMsg_ArcVideoAcceleratorChannelCreated message.
-IPC_MESSAGE_CONTROL0(GpuMsg_CreateArcVideoAcceleratorChannel)
-
-// Tells the GPU process to shutdown arc video service and terminate all
-// instances of ArcVideoAccelerator.
-IPC_MESSAGE_CONTROL0(GpuMsg_ShutdownArcVideoService)
-
-//------------------------------------------------------------------------------
-// Arc Video Accelerator Host Messages
-// These messages are sent from the GPU process to Browser process.
-
-// Response from GPU to a GpuMsg_CreateArcVideoAcceleratorChannel message.
-IPC_MESSAGE_CONTROL1(GpuHostMsg_ArcVideoAcceleratorChannelCreated,
- IPC::ChannelHandle /* handle to channel */)
-#endif
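The command buffer messages above form a small handshake: a synchronous Initialize that maps the shared state and returns the context capabilities, SetGetBuffer to designate the ring buffer that holds commands, and AsyncFlush to advance the put offset. A minimal renderer-side sketch of that flow follows; the helper function, the IPC::Sender pointer, and the error handling are placeholders rather than the actual proxy implementation.

// Hypothetical helper illustrating the message flow; only the message names
// come from the definitions above.
bool SetUpCommandBuffer(IPC::Sender* sender,
                        int32_t route_id,
                        const base::SharedMemoryHandle& shared_state,
                        int32_t ring_buffer_shm_id) {
  bool result = false;
  gpu::Capabilities capabilities;
  // Synchronous: the GPU process maps |shared_state| and reports capabilities.
  if (!sender->Send(new GpuCommandBufferMsg_Initialize(
          route_id, shared_state, &result, &capabilities)) ||
      !result) {
    return false;
  }
  // Point the service at the transfer buffer holding the command ring buffer.
  sender->Send(
      new GpuCommandBufferMsg_SetGetBuffer(route_id, ring_buffer_shm_id));
  return true;
}

// Each subsequent flush then advances the put offset asynchronously:
//   sender->Send(new GpuCommandBufferMsg_AsyncFlush(
//       route_id, put_offset, ++flush_count, latency_info));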
diff --git a/chromium/content/common/gpu/gpu_process_launch_causes.h b/chromium/content/common/gpu/gpu_process_launch_causes.h
deleted file mode 100644
index 1c280761ef9..00000000000
--- a/chromium/content/common/gpu/gpu_process_launch_causes.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_PROCESS_LAUNCH_CAUSES_H_
-#define CONTENT_COMMON_GPU_GPU_PROCESS_LAUNCH_CAUSES_H_
-
-namespace content {
-
-// If you change this enum you also need to update
-// tools/metrics/histograms/histograms.xml
-enum CauseForGpuLaunch {
- // Start enum from 2 to keep the same values for the histogram.
- CAUSE_FOR_GPU_LAUNCH_GPUDATAMANAGER_REQUESTCOMPLETEGPUINFOIFNEEDED = 2,
- CAUSE_FOR_GPU_LAUNCH_RENDERWIDGETFULLSCREENPEPPER_CREATECONTEXT,
- CAUSE_FOR_GPU_LAUNCH_WEBGRAPHICSCONTEXT3DCOMMANDBUFFERIMPL_INITIALIZE,
- CAUSE_FOR_GPU_LAUNCH_NO_LAUNCH,
- CAUSE_FOR_GPU_LAUNCH_VIDEODECODEACCELERATOR_INITIALIZE,
- CAUSE_FOR_GPU_LAUNCH_PEPPERPLATFORMCONTEXT3DIMPL_INITIALIZE,
- CAUSE_FOR_GPU_LAUNCH_BROWSER_STARTUP,
- CAUSE_FOR_GPU_LAUNCH_CANVAS_2D,
- CAUSE_FOR_GPU_LAUNCH_PEPPERVIDEOENCODERACCELERATOR_INITIALIZE,
- CAUSE_FOR_GPU_LAUNCH_GPU_MEMORY_BUFFER_ALLOCATE,
- CAUSE_FOR_GPU_LAUNCH_JPEGDECODEACCELERATOR_INITIALIZE,
- CAUSE_FOR_GPU_LAUNCH_MOJO_SETUP,
- CAUSE_FOR_GPU_LAUNCH_ARCVIDEOACCELERATOR,
-
- // All new values should be inserted above this point so that
- // existing values continue to match up with those in histograms.xml.
- CAUSE_FOR_GPU_LAUNCH_MAX_ENUM
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_PROCESS_LAUNCH_CAUSES_H_
diff --git a/chromium/content/common/gpu/gpu_result_codes.h b/chromium/content/common/gpu/gpu_result_codes.h
deleted file mode 100644
index 11f4272a2ae..00000000000
--- a/chromium/content/common/gpu/gpu_result_codes.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_RESULT_CODES_H_
-#define CONTENT_COMMON_GPU_GPU_RESULT_CODES_H_
-
-namespace content {
-
-enum CreateCommandBufferResult {
- CREATE_COMMAND_BUFFER_SUCCEEDED,
- CREATE_COMMAND_BUFFER_FAILED,
- CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST,
- CREATE_COMMAND_BUFFER_RESULT_LAST =
- CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_RESULT_CODES_H_
diff --git a/chromium/content/common/gpu/gpu_stream_priority.h b/chromium/content/common/gpu/gpu_stream_priority.h
deleted file mode 100644
index 089fb97e04e..00000000000
--- a/chromium/content/common/gpu/gpu_stream_priority.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_STREAM_PRIORITY_H_
-#define CONTENT_COMMON_GPU_GPU_STREAM_PRIORITY_H_
-
-namespace content {
-
-enum class GpuStreamPriority {
- REAL_TIME,
- NORMAL,
- LOW,
- INHERIT,
- LAST = INHERIT
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_STREAM_PRIORITY_H_
diff --git a/chromium/content/common/gpu/gpu_surface_lookup.cc b/chromium/content/common/gpu/gpu_surface_lookup.cc
deleted file mode 100644
index 61bbc045e5c..00000000000
--- a/chromium/content/common/gpu/gpu_surface_lookup.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_surface_lookup.h"
-
-#include "base/logging.h"
-
-namespace content {
-namespace {
-GpuSurfaceLookup* g_instance = NULL;
-} // anonymous namespace
-
-// static
-GpuSurfaceLookup* GpuSurfaceLookup::GetInstance() {
- DCHECK(g_instance);
- return g_instance;
-}
-
-// static
-void GpuSurfaceLookup::InitInstance(GpuSurfaceLookup* lookup) {
- DCHECK(!g_instance || !lookup);
- g_instance = lookup;
-}
-
-#if defined(OS_ANDROID)
-gfx::ScopedJavaSurface GpuSurfaceLookup::AcquireJavaSurface(int surface_id) {
- NOTIMPLEMENTED();
- return gfx::ScopedJavaSurface();
-}
-#endif
-
-} // namespace content
diff --git a/chromium/content/common/gpu/gpu_surface_lookup.h b/chromium/content/common/gpu/gpu_surface_lookup.h
deleted file mode 100644
index c47e7e37901..00000000000
--- a/chromium/content/common/gpu/gpu_surface_lookup.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_SURFACE_LOOKUP_H_
-#define CONTENT_COMMON_GPU_GPU_SURFACE_LOOKUP_H_
-
-#include "base/macros.h"
-#include "content/common/content_export.h"
-#include "ui/gfx/native_widget_types.h"
-
-#if defined(OS_ANDROID)
-#include "ui/gl/android/scoped_java_surface.h"
-#endif
-
-namespace content {
-
-// This class provides an interface to look up window surface handles
-// that cannot be sent through the IPC channel.
-class CONTENT_EXPORT GpuSurfaceLookup {
- public:
- GpuSurfaceLookup() { }
- virtual ~GpuSurfaceLookup() { }
-
- static GpuSurfaceLookup* GetInstance();
- static void InitInstance(GpuSurfaceLookup* lookup);
-
- virtual gfx::AcceleratedWidget AcquireNativeWidget(int surface_id) = 0;
-
-#if defined(OS_ANDROID)
- virtual gfx::ScopedJavaSurface AcquireJavaSurface(int surface_id);
-#endif
-
- private:
- DISALLOW_COPY_AND_ASSIGN(GpuSurfaceLookup);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_SURFACE_LOOKUP_H_
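GpuSurfaceLookup is a process-wide registry: the embedder installs one instance with InitInstance, and GPU-side code calls GetInstance()->AcquireNativeWidget() when a surface handle cannot be sent over the IPC channel (the Android path below relies on this). A hedged sketch of an embedder-side implementation follows; the class name, the map, and RegisterWidget are invented for illustration and assume single-threaded access.

#include <map>

// Illustrative subclass; not part of the deleted interface.
class ExampleSurfaceLookup : public content::GpuSurfaceLookup {
 public:
  gfx::AcceleratedWidget AcquireNativeWidget(int surface_id) override {
    auto it = widgets_.find(surface_id);
    return it == widgets_.end() ? gfx::kNullAcceleratedWidget : it->second;
  }
  void RegisterWidget(int surface_id, gfx::AcceleratedWidget widget) {
    widgets_[surface_id] = widget;
  }

 private:
  std::map<int, gfx::AcceleratedWidget> widgets_;
};

// Installed once, early in startup, before any GPU-side lookups:
//   content::GpuSurfaceLookup::InitInstance(new ExampleSurfaceLookup);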
diff --git a/chromium/content/common/gpu/gpu_watchdog.h b/chromium/content/common/gpu/gpu_watchdog.h
deleted file mode 100644
index 069aeb72737..00000000000
--- a/chromium/content/common/gpu/gpu_watchdog.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_WATCHDOG_H_
-#define CONTENT_COMMON_GPU_GPU_WATCHDOG_H_
-
-#include "base/macros.h"
-
-namespace content {
-
-// Interface for objects that monitor a GPUProcessor's progress. The
-// GPUProcessor will regularly invoke CheckArmed.
-class GpuWatchdog {
- public:
- virtual void CheckArmed() = 0;
-
- protected:
- GpuWatchdog() {}
- virtual ~GpuWatchdog() {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(GpuWatchdog);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_WATCHDOG_H_
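The watchdog contract is intentionally tiny: long-running GPU work calls CheckArmed() to report forward progress. Below is a minimal sketch of a conforming implementation that merely records the last progress time; the real watchdog typically runs its own thread and terminates the GPU process when progress stops, which is outside this interface.

#include "base/time/time.h"

// Illustrative only; records progress instead of enforcing a deadline.
class ExampleWatchdog : public content::GpuWatchdog {
 public:
  void CheckArmed() override { last_progress_ = base::TimeTicks::Now(); }
  base::TimeTicks last_progress() const { return last_progress_; }

 private:
  base::TimeTicks last_progress_;
};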
diff --git a/chromium/content/common/gpu/image_transport_surface.cc b/chromium/content/common/gpu/image_transport_surface.cc
deleted file mode 100644
index 192ec9620b6..00000000000
--- a/chromium/content/common/gpu/image_transport_surface.cc
+++ /dev/null
@@ -1,302 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/image_transport_surface.h"
-
-#include <stddef.h>
-#include <utility>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/command_line.h"
-#include "base/trace_event/trace_event.h"
-#include "build/build_config.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_channel_manager.h"
-#include "content/common/gpu/gpu_command_buffer_stub.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "gpu/command_buffer/service/sync_point_manager.h"
-#include "ui/gfx/vsync_provider.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_implementation.h"
-#include "ui/gl/gl_switches.h"
-
-namespace content {
-
-ImageTransportSurface::ImageTransportSurface() {}
-
-ImageTransportSurface::~ImageTransportSurface() {}
-
-scoped_refptr<gfx::GLSurface> ImageTransportSurface::CreateSurface(
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- const gfx::GLSurfaceHandle& handle) {
- scoped_refptr<gfx::GLSurface> surface;
- if (handle.transport_type == gfx::NULL_TRANSPORT) {
- surface = manager->GetDefaultOffscreenSurface();
- } else {
- surface = CreateNativeSurface(manager, stub, handle);
- if (!surface.get() || !surface->Initialize())
- return NULL;
- }
-
- return surface;
-}
-
-ImageTransportHelper::ImageTransportHelper(ImageTransportSurface* surface,
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- gfx::PluginWindowHandle handle)
- : surface_(surface),
- manager_(manager),
- stub_(stub->AsWeakPtr()),
- handle_(handle) {
- route_id_ = manager_->GenerateRouteID();
- manager_->AddRoute(route_id_, this);
-}
-
-ImageTransportHelper::~ImageTransportHelper() {
- if (stub_.get()) {
- stub_->SetLatencyInfoCallback(
- base::Callback<void(const std::vector<ui::LatencyInfo>&)>());
- }
- manager_->RemoveRoute(route_id_);
-}
-
-bool ImageTransportHelper::Initialize() {
- gpu::gles2::GLES2Decoder* decoder = Decoder();
-
- if (!decoder)
- return false;
-
- stub_->SetLatencyInfoCallback(
- base::Bind(&ImageTransportHelper::SetLatencyInfo,
- base::Unretained(this)));
-
- return true;
-}
-
-bool ImageTransportHelper::OnMessageReceived(const IPC::Message& message) {
-#if defined(OS_MACOSX)
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(ImageTransportHelper, message)
- IPC_MESSAGE_HANDLER(AcceleratedSurfaceMsg_BufferPresented,
- OnBufferPresented)
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
- return handled;
-#else
- NOTREACHED();
- return false;
-#endif
-}
-
-#if defined(OS_MACOSX)
-void ImageTransportHelper::SendAcceleratedSurfaceBuffersSwapped(
- GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params) {
- // TRACE_EVENT for gpu tests:
- TRACE_EVENT_INSTANT2("test_gpu", "SwapBuffers",
- TRACE_EVENT_SCOPE_THREAD,
- "GLImpl", static_cast<int>(gfx::GetGLImplementation()),
- "width", params.size.width());
- // On mac, handle_ is a surface id. See
- // GpuProcessTransportFactory::CreatePerCompositorData
- params.surface_id = handle_;
- params.route_id = route_id_;
- manager_->Send(new GpuHostMsg_AcceleratedSurfaceBuffersSwapped(params));
-}
-#endif
-
-bool ImageTransportHelper::MakeCurrent() {
- gpu::gles2::GLES2Decoder* decoder = Decoder();
- if (!decoder)
- return false;
- return decoder->MakeCurrent();
-}
-
-void ImageTransportHelper::SetSwapInterval(gfx::GLContext* context) {
- if (base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableGpuVsync))
- context->ForceSwapIntervalZero(true);
- else
- context->SetSwapInterval(1);
-}
-
-gpu::gles2::GLES2Decoder* ImageTransportHelper::Decoder() {
- if (!stub_.get())
- return NULL;
- return stub_->decoder();
-}
-
-#if defined(OS_MACOSX)
-void ImageTransportHelper::OnBufferPresented(
- const AcceleratedSurfaceMsg_BufferPresented_Params& params) {
- surface_->OnBufferPresented(params);
-}
-#endif
-
-void ImageTransportHelper::SetLatencyInfo(
- const std::vector<ui::LatencyInfo>& latency_info) {
- surface_->SetLatencyInfo(latency_info);
-}
-
-PassThroughImageTransportSurface::PassThroughImageTransportSurface(
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- gfx::GLSurface* surface)
- : GLSurfaceAdapter(surface),
- did_set_swap_interval_(false),
- weak_ptr_factory_(this) {
- helper_.reset(new ImageTransportHelper(this,
- manager,
- stub,
- gfx::kNullPluginWindow));
-}
-
-bool PassThroughImageTransportSurface::Initialize() {
- // The surface is assumed to have already been initialized.
- return helper_->Initialize();
-}
-
-void PassThroughImageTransportSurface::Destroy() {
- GLSurfaceAdapter::Destroy();
-}
-
-void PassThroughImageTransportSurface::SetLatencyInfo(
- const std::vector<ui::LatencyInfo>& latency_info) {
- for (size_t i = 0; i < latency_info.size(); i++)
- latency_info_.push_back(latency_info[i]);
-}
-
-gfx::SwapResult PassThroughImageTransportSurface::SwapBuffers() {
- scoped_ptr<std::vector<ui::LatencyInfo>> latency_info = StartSwapBuffers();
- gfx::SwapResult result = gfx::GLSurfaceAdapter::SwapBuffers();
- FinishSwapBuffers(std::move(latency_info), result);
- return result;
-}
-
-void PassThroughImageTransportSurface::SwapBuffersAsync(
- const GLSurface::SwapCompletionCallback& callback) {
- scoped_ptr<std::vector<ui::LatencyInfo>> latency_info = StartSwapBuffers();
-
- // We use WeakPtr here to avoid manually managing the lifetime of an instance
- // of this class. The callback will not be called once the instance of this
- // class is destroyed. However, this also means that the callback can only be
- // run on the calling thread.
- gfx::GLSurfaceAdapter::SwapBuffersAsync(base::Bind(
- &PassThroughImageTransportSurface::FinishSwapBuffersAsync,
- weak_ptr_factory_.GetWeakPtr(), base::Passed(&latency_info), callback));
-}
-
-gfx::SwapResult PassThroughImageTransportSurface::PostSubBuffer(int x,
- int y,
- int width,
- int height) {
- scoped_ptr<std::vector<ui::LatencyInfo>> latency_info = StartSwapBuffers();
- gfx::SwapResult result =
- gfx::GLSurfaceAdapter::PostSubBuffer(x, y, width, height);
- FinishSwapBuffers(std::move(latency_info), result);
- return result;
-}
-
-void PassThroughImageTransportSurface::PostSubBufferAsync(
- int x,
- int y,
- int width,
- int height,
- const GLSurface::SwapCompletionCallback& callback) {
- scoped_ptr<std::vector<ui::LatencyInfo>> latency_info = StartSwapBuffers();
- gfx::GLSurfaceAdapter::PostSubBufferAsync(
- x, y, width, height,
- base::Bind(&PassThroughImageTransportSurface::FinishSwapBuffersAsync,
- weak_ptr_factory_.GetWeakPtr(), base::Passed(&latency_info),
- callback));
-}
-
-gfx::SwapResult PassThroughImageTransportSurface::CommitOverlayPlanes() {
- scoped_ptr<std::vector<ui::LatencyInfo>> latency_info = StartSwapBuffers();
- gfx::SwapResult result = gfx::GLSurfaceAdapter::CommitOverlayPlanes();
- FinishSwapBuffers(std::move(latency_info), result);
- return result;
-}
-
-void PassThroughImageTransportSurface::CommitOverlayPlanesAsync(
- const GLSurface::SwapCompletionCallback& callback) {
- scoped_ptr<std::vector<ui::LatencyInfo>> latency_info = StartSwapBuffers();
- gfx::GLSurfaceAdapter::CommitOverlayPlanesAsync(base::Bind(
- &PassThroughImageTransportSurface::FinishSwapBuffersAsync,
- weak_ptr_factory_.GetWeakPtr(), base::Passed(&latency_info), callback));
-}
-
-bool PassThroughImageTransportSurface::OnMakeCurrent(gfx::GLContext* context) {
- if (!did_set_swap_interval_) {
- ImageTransportHelper::SetSwapInterval(context);
- did_set_swap_interval_ = true;
- }
- return true;
-}
-
-#if defined(OS_MACOSX)
-void PassThroughImageTransportSurface::OnBufferPresented(
- const AcceleratedSurfaceMsg_BufferPresented_Params& /* params */) {
- NOTREACHED();
-}
-#endif
-
-gfx::Size PassThroughImageTransportSurface::GetSize() {
- return GLSurfaceAdapter::GetSize();
-}
-
-PassThroughImageTransportSurface::~PassThroughImageTransportSurface() {}
-
-void PassThroughImageTransportSurface::SendVSyncUpdateIfAvailable() {
- gfx::VSyncProvider* vsync_provider = GetVSyncProvider();
- if (vsync_provider) {
- vsync_provider->GetVSyncParameters(
- base::Bind(&GpuCommandBufferStub::SendUpdateVSyncParameters,
- helper_->stub()->AsWeakPtr()));
- }
-}
-
-scoped_ptr<std::vector<ui::LatencyInfo>>
-PassThroughImageTransportSurface::StartSwapBuffers() {
- // GetVsyncValues before SwapBuffers to work around Mali driver bug:
- // crbug.com/223558.
- SendVSyncUpdateIfAvailable();
-
- base::TimeTicks swap_time = base::TimeTicks::Now();
- for (auto& latency : latency_info_) {
- latency.AddLatencyNumberWithTimestamp(
- ui::INPUT_EVENT_GPU_SWAP_BUFFER_COMPONENT, 0, 0, swap_time, 1);
- }
-
- scoped_ptr<std::vector<ui::LatencyInfo>> latency_info(
- new std::vector<ui::LatencyInfo>());
- latency_info->swap(latency_info_);
-
- return latency_info;
-}
-
-void PassThroughImageTransportSurface::FinishSwapBuffers(
- scoped_ptr<std::vector<ui::LatencyInfo>> latency_info,
- gfx::SwapResult result) {
- base::TimeTicks swap_ack_time = base::TimeTicks::Now();
- for (auto& latency : *latency_info) {
- latency.AddLatencyNumberWithTimestamp(
- ui::INPUT_EVENT_LATENCY_TERMINATED_FRAME_SWAP_COMPONENT, 0, 0,
- swap_ack_time, 1);
- }
-
- helper_->stub()->SendSwapBuffersCompleted(*latency_info, result);
-}
-
-void PassThroughImageTransportSurface::FinishSwapBuffersAsync(
- scoped_ptr<std::vector<ui::LatencyInfo>> latency_info,
- GLSurface::SwapCompletionCallback callback,
- gfx::SwapResult result) {
- FinishSwapBuffers(std::move(latency_info), result);
- callback.Run(result);
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/image_transport_surface.h b/chromium/content/common/gpu/image_transport_surface.h
deleted file mode 100644
index bfb68928cf6..00000000000
--- a/chromium/content/common/gpu/image_transport_surface.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_IMAGE_TRANSPORT_SURFACE_H_
-#define CONTENT_COMMON_GPU_IMAGE_TRANSPORT_SURFACE_H_
-
-#include <stdint.h>
-
-#include <vector>
-
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "build/build_config.h"
-#include "content/common/content_export.h"
-#include "ipc/ipc_listener.h"
-#include "ipc/ipc_message.h"
-#include "ui/events/latency_info.h"
-#include "ui/gfx/geometry/rect.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gfx/swap_result.h"
-#include "ui/gl/gl_surface.h"
-
-#if defined(OS_MACOSX)
-struct AcceleratedSurfaceMsg_BufferPresented_Params;
-struct GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params;
-#endif
-
-namespace gfx {
-class GLSurface;
-}
-
-namespace gpu {
-class PreemptionFlag;
-namespace gles2 {
-class GLES2Decoder;
-}
-}
-
-namespace content {
-class GpuChannelManager;
-class GpuCommandBufferStub;
-
-// The GPU process is agnostic as to how it displays results. On some platforms
-// it renders directly to a window. On others it renders offscreen and transports
-// the results to the browser process to display. This file provides a simple
-// framework for making the offscreen path seem more like the onscreen path.
-//
-// The ImageTransportSurface class defines a simple interface for events that
-// should be responded to. The factory returns an offscreen surface that looks
-// a lot like an onscreen surface to the GPU process.
-//
-// The ImageTransportHelper provides some glue to the outside world:
-// making sure outside events reach the ImageTransportSurface and
-// allowing the ImageTransportSurface to send events to the outside world.
-
-class ImageTransportSurface {
- public:
- ImageTransportSurface();
-
-#if defined(OS_MACOSX)
- virtual void OnBufferPresented(
- const AcceleratedSurfaceMsg_BufferPresented_Params& params) = 0;
-#endif
- virtual void SetLatencyInfo(
- const std::vector<ui::LatencyInfo>& latency_info) = 0;
-
- // Creates a surface with the given attributes.
- static scoped_refptr<gfx::GLSurface> CreateSurface(
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- const gfx::GLSurfaceHandle& handle);
-
-#if defined(OS_MACOSX)
- CONTENT_EXPORT static void SetAllowOSMesaForTesting(bool allow);
-#endif
-
- virtual gfx::Size GetSize() = 0;
-
- protected:
- virtual ~ImageTransportSurface();
-
- private:
- // Creates the appropriate native surface depending on the GL implementation.
- // This will be implemented separately by each platform.
- //
- // This will not be called for texture transport surfaces which are
- // cross-platform. The platform implementation should only create the
- // surface and should not initialize it. On failure, a null scoped_refptr
- // should be returned.
- static scoped_refptr<gfx::GLSurface> CreateNativeSurface(
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- const gfx::GLSurfaceHandle& handle);
-
- DISALLOW_COPY_AND_ASSIGN(ImageTransportSurface);
-};
-
-class ImageTransportHelper
- : public IPC::Listener,
- public base::SupportsWeakPtr<ImageTransportHelper> {
- public:
- // Takes weak pointers to objects that outlive the helper.
- ImageTransportHelper(ImageTransportSurface* surface,
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- gfx::PluginWindowHandle handle);
- ~ImageTransportHelper() override;
-
- bool Initialize();
-
- // IPC::Listener implementation:
- bool OnMessageReceived(const IPC::Message& message) override;
-
- // Helper send functions. Caller fills in the surface specific params
- // like size and surface id. The helper fills in the rest.
-#if defined(OS_MACOSX)
- void SendAcceleratedSurfaceBuffersSwapped(
- GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params);
-#endif
-
- // Make the surface's context current.
- bool MakeCurrent();
-
- // Set the default swap interval on the surface.
- static void SetSwapInterval(gfx::GLContext* context);
-
- GpuChannelManager* manager() const { return manager_; }
- GpuCommandBufferStub* stub() const { return stub_.get(); }
-
- private:
- gpu::gles2::GLES2Decoder* Decoder();
-
- // IPC::Message handlers.
-#if defined(OS_MACOSX)
- void OnBufferPresented(
- const AcceleratedSurfaceMsg_BufferPresented_Params& params);
-#endif
-
- // Backbuffer resize callback.
- void Resize(gfx::Size size, float scale_factor);
-
- void SetLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info);
-
- // Weak pointers that point to objects that outlive this helper.
- ImageTransportSurface* surface_;
- GpuChannelManager* manager_;
-
- base::WeakPtr<GpuCommandBufferStub> stub_;
- int32_t route_id_;
- gfx::PluginWindowHandle handle_;
-
- DISALLOW_COPY_AND_ASSIGN(ImageTransportHelper);
-};
-
-// An implementation of ImageTransportSurface that implements GLSurface through
-// GLSurfaceAdapter, thereby forwarding GLSurface methods through to it.
-class PassThroughImageTransportSurface
- : public gfx::GLSurfaceAdapter,
- public ImageTransportSurface {
- public:
- PassThroughImageTransportSurface(GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- gfx::GLSurface* surface);
-
- // GLSurface implementation.
- bool Initialize() override;
- void Destroy() override;
- gfx::SwapResult SwapBuffers() override;
- void SwapBuffersAsync(const SwapCompletionCallback& callback) override;
- gfx::SwapResult PostSubBuffer(int x, int y, int width, int height) override;
- void PostSubBufferAsync(int x,
- int y,
- int width,
- int height,
- const SwapCompletionCallback& callback) override;
- gfx::SwapResult CommitOverlayPlanes() override;
- void CommitOverlayPlanesAsync(
- const SwapCompletionCallback& callback) override;
- bool OnMakeCurrent(gfx::GLContext* context) override;
-
- // ImageTransportSurface implementation.
-#if defined(OS_MACOSX)
- void OnBufferPresented(
- const AcceleratedSurfaceMsg_BufferPresented_Params& params) override;
-#endif
- gfx::Size GetSize() override;
- void SetLatencyInfo(
- const std::vector<ui::LatencyInfo>& latency_info) override;
-
- protected:
- ~PassThroughImageTransportSurface() override;
-
- // If updated vsync parameters can be determined, send this information to
- // the browser.
- virtual void SendVSyncUpdateIfAvailable();
-
- scoped_ptr<std::vector<ui::LatencyInfo>> StartSwapBuffers();
- void FinishSwapBuffers(scoped_ptr<std::vector<ui::LatencyInfo>> latency_info,
- gfx::SwapResult result);
- void FinishSwapBuffersAsync(
- scoped_ptr<std::vector<ui::LatencyInfo>> latency_info,
- GLSurface::SwapCompletionCallback callback,
- gfx::SwapResult result);
-
- ImageTransportHelper* GetHelper() { return helper_.get(); }
-
- private:
- scoped_ptr<ImageTransportHelper> helper_;
- bool did_set_swap_interval_;
- std::vector<ui::LatencyInfo> latency_info_;
- base::WeakPtrFactory<PassThroughImageTransportSurface> weak_ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(PassThroughImageTransportSurface);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_IMAGE_TRANSPORT_SURFACE_H_
diff --git a/chromium/content/common/gpu/image_transport_surface_android.cc b/chromium/content/common/gpu/image_transport_surface_android.cc
deleted file mode 100644
index 79a16c0bbdf..00000000000
--- a/chromium/content/common/gpu/image_transport_surface_android.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/image_transport_surface.h"
-
-#include "base/command_line.h"
-#include "base/logging.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_channel_manager.h"
-#include "content/common/gpu/gpu_command_buffer_stub.h"
-#include "content/common/gpu/gpu_surface_lookup.h"
-#include "content/public/common/content_switches.h"
-#include "ui/gl/gl_surface_egl.h"
-
-namespace content {
-
-// static
-scoped_refptr<gfx::GLSurface> ImageTransportSurface::CreateNativeSurface(
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- const gfx::GLSurfaceHandle& handle) {
- DCHECK(GpuSurfaceLookup::GetInstance());
- DCHECK_EQ(handle.transport_type, gfx::NATIVE_DIRECT);
- ANativeWindow* window =
- GpuSurfaceLookup::GetInstance()->AcquireNativeWidget(handle.handle);
- if (!window) {
- LOG(WARNING) << "Failed to acquire native widget.";
- return scoped_refptr<gfx::GLSurface>();
- }
- scoped_refptr<gfx::GLSurface> surface =
- new gfx::NativeViewGLSurfaceEGL(window);
- bool initialize_success = surface->Initialize();
- ANativeWindow_release(window);
- if (!initialize_success)
- return scoped_refptr<gfx::GLSurface>();
-
- return scoped_refptr<gfx::GLSurface>(
- new PassThroughImageTransportSurface(manager, stub, surface.get()));
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/image_transport_surface_linux.cc b/chromium/content/common/gpu/image_transport_surface_linux.cc
deleted file mode 100644
index db36efea135..00000000000
--- a/chromium/content/common/gpu/image_transport_surface_linux.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/image_transport_surface.h"
-
-namespace content {
-
-// static
-scoped_refptr<gfx::GLSurface> ImageTransportSurface::CreateNativeSurface(
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- const gfx::GLSurfaceHandle& handle) {
- DCHECK(handle.handle);
- DCHECK(handle.transport_type == gfx::NATIVE_DIRECT);
- scoped_refptr<gfx::GLSurface> surface;
-#if defined(USE_OZONE)
- surface = gfx::GLSurface::CreateSurfacelessViewGLSurface(handle.handle);
-#endif
- if (!surface)
- surface = gfx::GLSurface::CreateViewGLSurface(handle.handle);
- if (!surface)
- return surface;
- return scoped_refptr<gfx::GLSurface>(new PassThroughImageTransportSurface(
- manager, stub, surface.get()));
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/image_transport_surface_mac.mm b/chromium/content/common/gpu/image_transport_surface_mac.mm
deleted file mode 100644
index 1059589b084..00000000000
--- a/chromium/content/common/gpu/image_transport_surface_mac.mm
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/image_transport_surface.h"
-
-#include "base/macros.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_implementation.h"
-#include "ui/gl/gl_surface_osmesa.h"
-
-namespace content {
-
-scoped_refptr<gfx::GLSurface> ImageTransportSurfaceCreateNativeSurface(
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- gfx::PluginWindowHandle handle);
-
-namespace {
-
-// A subclass of GLSurfaceOSMesa that doesn't print an error message when
-// SwapBuffers() is called.
-class DRTSurfaceOSMesa : public gfx::GLSurfaceOSMesa {
- public:
- // Size doesn't matter, the surface is resized to the right size later.
- DRTSurfaceOSMesa()
- : GLSurfaceOSMesa(gfx::OSMesaSurfaceFormatRGBA, gfx::Size(1, 1)) {}
-
- // Implement a subset of GLSurface.
- gfx::SwapResult SwapBuffers() override;
-
- private:
- ~DRTSurfaceOSMesa() override {}
- DISALLOW_COPY_AND_ASSIGN(DRTSurfaceOSMesa);
-};
-
-gfx::SwapResult DRTSurfaceOSMesa::SwapBuffers() {
- return gfx::SwapResult::SWAP_ACK;
-}
-
-bool g_allow_os_mesa = false;
-
-} // namespace
-
-// static
-scoped_refptr<gfx::GLSurface> ImageTransportSurface::CreateNativeSurface(
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- const gfx::GLSurfaceHandle& surface_handle) {
- DCHECK(surface_handle.transport_type == gfx::NATIVE_DIRECT ||
- surface_handle.transport_type == gfx::NULL_TRANSPORT);
-
- switch (gfx::GetGLImplementation()) {
- case gfx::kGLImplementationDesktopGL:
- case gfx::kGLImplementationDesktopGLCoreProfile:
- case gfx::kGLImplementationAppleGL:
- return ImageTransportSurfaceCreateNativeSurface(manager, stub,
- surface_handle.handle);
- default:
- // Content shell in DRT mode spins up a gpu process which needs an
- // image transport surface, but that surface isn't used to read pixel
- // baselines. So this is mostly a dummy surface.
- if (!g_allow_os_mesa) {
- NOTREACHED();
- return scoped_refptr<gfx::GLSurface>();
- }
- scoped_refptr<gfx::GLSurface> surface(new DRTSurfaceOSMesa());
- if (!surface.get() || !surface->Initialize())
- return surface;
- return scoped_refptr<gfx::GLSurface>(new PassThroughImageTransportSurface(
- manager, stub, surface.get()));
- }
-}
-
-// static
-void ImageTransportSurface::SetAllowOSMesaForTesting(bool allow) {
- g_allow_os_mesa = allow;
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/image_transport_surface_overlay_mac.h b/chromium/content/common/gpu/image_transport_surface_overlay_mac.h
deleted file mode 100644
index 2103bbf7108..00000000000
--- a/chromium/content/common/gpu/image_transport_surface_overlay_mac.h
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_IMAGE_TRANSPORT_SURFACE_OVERLAY_MAC_H_
-#define CONTENT_COMMON_GPU_IMAGE_TRANSPORT_SURFACE_OVERLAY_MAC_H_
-
-#include <list>
-#include <vector>
-
-#import "base/mac/scoped_nsobject.h"
-#include "base/timer/timer.h"
-#include "content/common/gpu/gpu_command_buffer_stub.h"
-#include "content/common/gpu/image_transport_surface.h"
-#include "ui/gl/gl_surface.h"
-#include "ui/gl/gpu_switching_observer.h"
-
-@class CAContext;
-@class CALayer;
-
-namespace content {
-
-class CALayerTree;
-class CALayerPartialDamageTree;
-
-class ImageTransportSurfaceOverlayMac : public gfx::GLSurface,
- public ImageTransportSurface,
- public ui::GpuSwitchingObserver {
- public:
- ImageTransportSurfaceOverlayMac(GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- gfx::PluginWindowHandle handle);
-
- // GLSurface implementation
- bool Initialize() override;
- void Destroy() override;
- bool Resize(const gfx::Size& size,
- float scale_factor,
- bool has_alpha) override;
- bool IsOffscreen() override;
- gfx::SwapResult SwapBuffers() override;
- gfx::SwapResult PostSubBuffer(int x, int y, int width, int height) override;
- bool SupportsPostSubBuffer() override;
- gfx::Size GetSize() override;
- void* GetHandle() override;
- bool OnMakeCurrent(gfx::GLContext* context) override;
- bool SetBackbufferAllocation(bool allocated) override;
- bool ScheduleOverlayPlane(int z_order,
- gfx::OverlayTransform transform,
- gl::GLImage* image,
- const gfx::Rect& bounds_rect,
- const gfx::RectF& crop_rect) override;
- bool ScheduleCALayer(gl::GLImage* contents_image,
- const gfx::RectF& contents_rect,
- float opacity,
- unsigned background_color,
- unsigned edge_aa_mask,
- const gfx::RectF& rect,
- bool is_clipped,
- const gfx::RectF& clip_rect,
- const gfx::Transform& transform,
- int sorting_context_id) override;
- bool IsSurfaceless() const override;
-
- // ImageTransportSurface implementation
- void OnBufferPresented(
- const AcceleratedSurfaceMsg_BufferPresented_Params& params) override;
- void SetLatencyInfo(const std::vector<ui::LatencyInfo>&) override;
-
- // ui::GpuSwitchingObserver implementation.
- void OnGpuSwitched() override;
-
- private:
- class PendingSwap;
- class OverlayPlane;
-
- ~ImageTransportSurfaceOverlayMac() override;
-
- gfx::SwapResult SwapBuffersInternal(const gfx::Rect& pixel_damage_rect);
-
- // Returns true if the front of |pending_swaps_| has completed, or has timed
- // out by |now|.
- bool IsFirstPendingSwapReadyToDisplay(
- const base::TimeTicks& now);
- // Sets the CALayer contents to the IOSurface for the front of
- // |pending_swaps_|, and removes it from the queue.
- void DisplayFirstPendingSwapImmediately();
- // Force all of |pending_swaps_| to be displayed immediately, and clear the
- // list.
- void DisplayAndClearAllPendingSwaps();
- // Callback issued during the next vsync period after a SwapBuffers call,
- // to check if the swap is completed, and display the frame. Note that if
- // another SwapBuffers happens before this callback, the pending swap will
- // be tested at that time, too.
- void CheckPendingSwapsCallback();
- // Function to post the above callback. The argument |now| is passed as an
- // argument to avoid redundant calls to base::TimeTicks::Now.
- void PostCheckPendingSwapsCallbackIfNeeded(const base::TimeTicks& now);
-
- // Return the time of |interval_fraction| of the way through the next
- // vsync period that starts after |from|. If the vsync parameters are not
- // valid then return |from|.
- base::TimeTicks GetNextVSyncTimeAfter(
- const base::TimeTicks& from, double interval_fraction);
-
- scoped_ptr<ImageTransportHelper> helper_;
- bool use_remote_layer_api_;
- base::scoped_nsobject<CAContext> ca_context_;
- base::scoped_nsobject<CALayer> ca_root_layer_;
-
- gfx::Size pixel_size_;
- float scale_factor_;
- std::vector<ui::LatencyInfo> latency_info_;
-
- // The renderer ID that all contexts made current to this surface should be
- // targeting.
- GLint gl_renderer_id_;
-
- // Planes that have been scheduled, but have not had a subsequent SwapBuffers
- // call made yet.
- scoped_ptr<CALayerPartialDamageTree> pending_partial_damage_tree_;
- scoped_ptr<CALayerTree> pending_ca_layer_tree_;
-
- // A queue of all frames that have been created by SwapBuffersInternal but
- // have not yet been displayed. This queue is checked at the beginning of
- // every swap and also by a callback.
- std::deque<linked_ptr<PendingSwap>> pending_swaps_;
-
- // The planes that are currently being displayed on the screen.
- scoped_ptr<CALayerPartialDamageTree> current_partial_damage_tree_;
- scoped_ptr<CALayerTree> current_ca_layer_tree_;
-
- // The time at which the last swap was issued. If this was more than two
- // vsyncs ago, then use the simpler non-smooth animation path.
- base::TimeTicks last_swap_time_;
-
- // The vsync information provided by the browser.
- bool vsync_parameters_valid_;
- base::TimeTicks vsync_timebase_;
- base::TimeDelta vsync_interval_;
-
- base::Timer display_pending_swap_timer_;
- base::WeakPtrFactory<ImageTransportSurfaceOverlayMac> weak_factory_;
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_IMAGE_TRANSPORT_SURFACE_OVERLAY_MAC_H_
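GetNextVSyncTimeAfter, declared above, reduces to modular arithmetic on the vsync timebase and interval supplied by the browser: find the first vsync period starting after |from|, then step |interval_fraction| of the way into it, falling back to |from| when the parameters are invalid. A standalone sketch of that computation follows; the free-function form and parameter list are invented here, whereas the real method is a member that reads vsync_parameters_valid_, vsync_timebase_, and vsync_interval_.

// Sketch only; |timebase| and |interval| stand in for the browser-provided
// vsync parameters kept in the members above.
base::TimeTicks NextVSyncTimeAfter(base::TimeTicks from,
                                   base::TimeTicks timebase,
                                   base::TimeDelta interval,
                                   double interval_fraction) {
  int64_t interval_us = interval.InMicroseconds();
  if (interval_us <= 0)
    return from;  // Invalid vsync parameters: fall back to |from|.
  // Index of the first vsync period that starts after |from|.
  int64_t periods_elapsed = (from - timebase).InMicroseconds() / interval_us;
  base::TimeTicks next_period_start =
      timebase +
      base::TimeDelta::FromMicroseconds((periods_elapsed + 1) * interval_us);
  return next_period_start +
         base::TimeDelta::FromMicroseconds(
             static_cast<int64_t>(interval_fraction * interval_us));
}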
diff --git a/chromium/content/common/gpu/image_transport_surface_overlay_mac.mm b/chromium/content/common/gpu/image_transport_surface_overlay_mac.mm
deleted file mode 100644
index 1b0e3372e2d..00000000000
--- a/chromium/content/common/gpu/image_transport_surface_overlay_mac.mm
+++ /dev/null
@@ -1,522 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/image_transport_surface_overlay_mac.h"
-
-#include <CoreGraphics/CoreGraphics.h>
-#include <IOSurface/IOSurface.h>
-#include <OpenGL/CGLRenderers.h>
-#include <OpenGL/CGLTypes.h>
-#include <OpenGL/gl.h>
-#include <stddef.h>
-
-#include <algorithm>
-
-// This type consistently causes problems on Mac, and needs to be dealt with
-// in a systemic way.
-// http://crbug.com/517208
-#ifndef GL_OES_EGL_image
-typedef void* GLeglImageOES;
-#endif
-
-#include "base/mac/scoped_cftyperef.h"
-#include "base/trace_event/trace_event.h"
-#include "content/common/gpu/ca_layer_partial_damage_tree_mac.h"
-#include "content/common/gpu/ca_layer_tree_mac.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "ui/accelerated_widget_mac/io_surface_context.h"
-#include "ui/base/cocoa/animation_utils.h"
-#include "ui/base/cocoa/remote_layer_api.h"
-#include "ui/gfx/geometry/rect_conversions.h"
-#include "ui/gfx/transform.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_fence.h"
-#include "ui/gl/gl_image_io_surface.h"
-#include "ui/gl/gpu_switching_manager.h"
-#include "ui/gl/scoped_api.h"
-#include "ui/gl/scoped_cgl.h"
-
-namespace {
-
-// Don't let a frame draw until 5% of the way through the next vsync interval
-// after the call to SwapBuffers. This slight offset is to ensure that skew
-// doesn't result in the frame being presented in the previous vsync interval.
-const double kVSyncIntervalFractionForEarliestDisplay = 0.05;
-
-// After doing a glFlush and putting in a fence in SwapBuffers, post a task to
-// query the fence 50% of the way through the next vsync interval. If we are
-// trying to animate smoothly, then we want to query the fence at the next
-// SwapBuffers. For this reason we schedule the callback for a long way into
-// the next frame.
-const double kVSyncIntervalFractionForDisplayCallback = 0.5;
-
-// If swaps arrive regularly and nearly at the vsync rate, then attempt to
-// make animation smooth (each frame is shown for one vsync interval) by sending
-// them to the window server only when their GL work completes. If frames are
-// not coming in with each vsync, then just throw them at the window server as
-// they come.
-const double kMaximumVSyncsBetweenSwapsForSmoothAnimation = 1.5;
-
-void CheckGLErrors(const char* msg) {
- GLenum gl_error;
- while ((gl_error = glGetError()) != GL_NO_ERROR) {
- LOG(ERROR) << "OpenGL error hit " << msg << ": " << gl_error;
- }
-}
-
-void IOSurfaceContextNoOp(scoped_refptr<ui::IOSurfaceContext>) {
-}
-
-} // namespace
-
-@interface CALayer(Private)
--(void)setContentsChanged;
-@end
-
-namespace content {
-
-scoped_refptr<gfx::GLSurface> ImageTransportSurfaceCreateNativeSurface(
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- gfx::PluginWindowHandle handle) {
- return new ImageTransportSurfaceOverlayMac(manager, stub, handle);
-}
-
-class ImageTransportSurfaceOverlayMac::PendingSwap {
- public:
- PendingSwap() {}
- ~PendingSwap() { DCHECK(!gl_fence); }
-
- gfx::Size pixel_size;
- float scale_factor;
- gfx::Rect pixel_damage_rect;
-
- scoped_ptr<CALayerPartialDamageTree> partial_damage_tree;
- scoped_ptr<CALayerTree> ca_layer_tree;
- std::vector<ui::LatencyInfo> latency_info;
-
- // A fence object, and the CGL context it was issued in.
- base::ScopedTypeRef<CGLContextObj> cgl_context;
- scoped_ptr<gfx::GLFence> gl_fence;
-
- // The earliest time that this frame may be drawn. A frame is not allowed
- // to draw until a fraction of the way through the vsync interval after its
- // swap is issued. This extra latency is to allow wiggle-room for smoothness.
- base::TimeTicks earliest_display_time_allowed;
-
- // The time that this will wake up and draw, if a following swap does not
- // cause it to draw earlier.
- base::TimeTicks target_display_time;
-};
-
-ImageTransportSurfaceOverlayMac::ImageTransportSurfaceOverlayMac(
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- gfx::PluginWindowHandle handle)
- : use_remote_layer_api_(ui::RemoteLayerAPISupported()),
- scale_factor_(1),
- gl_renderer_id_(0),
- vsync_parameters_valid_(false),
- display_pending_swap_timer_(true, false),
- weak_factory_(this) {
- helper_.reset(new ImageTransportHelper(this, manager, stub, handle));
- ui::GpuSwitchingManager::GetInstance()->AddObserver(this);
-}
-
-ImageTransportSurfaceOverlayMac::~ImageTransportSurfaceOverlayMac() {
- ui::GpuSwitchingManager::GetInstance()->RemoveObserver(this);
- Destroy();
-}
-
-bool ImageTransportSurfaceOverlayMac::Initialize() {
- if (!helper_->Initialize())
- return false;
-
- // Create the CAContext to send this to the GPU process, and the layer for
- // the context.
- if (use_remote_layer_api_) {
- CGSConnectionID connection_id = CGSMainConnectionID();
- ca_context_.reset([
- [CAContext contextWithCGSConnection:connection_id options:@{}] retain]);
- ca_root_layer_.reset([[CALayer alloc] init]);
- [ca_root_layer_ setGeometryFlipped:YES];
- [ca_root_layer_ setOpaque:YES];
- [ca_context_ setLayer:ca_root_layer_];
- }
- return true;
-}
-
-void ImageTransportSurfaceOverlayMac::Destroy() {
- DisplayAndClearAllPendingSwaps();
-
- current_partial_damage_tree_.reset();
- current_ca_layer_tree_.reset();
-}
-
-bool ImageTransportSurfaceOverlayMac::IsOffscreen() {
- return false;
-}
-
-gfx::SwapResult ImageTransportSurfaceOverlayMac::SwapBuffersInternal(
- const gfx::Rect& pixel_damage_rect) {
- TRACE_EVENT0("gpu", "ImageTransportSurfaceOverlayMac::SwapBuffersInternal");
-
- // Use the same concept of 'now' for the entire function. The duration of
- // this function only affects the result if this function lasts across a vsync
- // boundary, in which case smooth animation is out the window anyway.
- const base::TimeTicks now = base::TimeTicks::Now();
-
- // Decide if the frame should be drawn immediately, or if we should wait until
- // its work finishes before drawing it.
- bool display_immediately = false;
- if (vsync_parameters_valid_ &&
- now - last_swap_time_ >
- kMaximumVSyncsBetweenSwapsForSmoothAnimation * vsync_interval_) {
- display_immediately = true;
- }
- last_swap_time_ = now;
-
- // If the previous swap is ready to display, do it before flushing the
- // new swap. It is desirable to always be hitting this path when trying to
- // animate smoothly with vsync.
- if (!pending_swaps_.empty()) {
- if (IsFirstPendingSwapReadyToDisplay(now))
- DisplayFirstPendingSwapImmediately();
- }
-
- // The remainder of the function will populate the PendingSwap structure and
- // then enqueue it.
- linked_ptr<PendingSwap> new_swap(new PendingSwap);
- new_swap->pixel_size = pixel_size_;
- new_swap->scale_factor = scale_factor_;
- new_swap->pixel_damage_rect = pixel_damage_rect;
- new_swap->partial_damage_tree.swap(pending_partial_damage_tree_);
- new_swap->ca_layer_tree.swap(pending_ca_layer_tree_);
- new_swap->latency_info.swap(latency_info_);
-
- // A flush is required to ensure that all content appears in the layer.
- {
- gfx::ScopedSetGLToRealGLApi scoped_set_gl_api;
- TRACE_EVENT0("gpu", "ImageTransportSurfaceOverlayMac::glFlush");
- CheckGLErrors("before flushing frame");
- new_swap->cgl_context.reset(CGLGetCurrentContext(),
- base::scoped_policy::RETAIN);
- if (gfx::GLFence::IsSupported() && !display_immediately)
- new_swap->gl_fence.reset(gfx::GLFence::Create());
- else
- glFlush();
- CheckGLErrors("while flushing frame");
- }
-
- // Compute the deadlines for drawing this frame.
- if (display_immediately) {
- new_swap->earliest_display_time_allowed = now;
- new_swap->target_display_time = now;
- } else {
- new_swap->earliest_display_time_allowed =
- GetNextVSyncTimeAfter(now, kVSyncIntervalFractionForEarliestDisplay);
- new_swap->target_display_time =
- GetNextVSyncTimeAfter(now, kVSyncIntervalFractionForDisplayCallback);
- }
-
- pending_swaps_.push_back(new_swap);
- if (display_immediately)
- DisplayFirstPendingSwapImmediately();
- else
- PostCheckPendingSwapsCallbackIfNeeded(now);
- return gfx::SwapResult::SWAP_ACK;
-}
-
-bool ImageTransportSurfaceOverlayMac::IsFirstPendingSwapReadyToDisplay(
- const base::TimeTicks& now) {
- DCHECK(!pending_swaps_.empty());
- linked_ptr<PendingSwap> swap = pending_swaps_.front();
-
- // Frames are disallowed from drawing until the vsync interval after their
- // swap is issued.
- if (now < swap->earliest_display_time_allowed)
- return false;
-
- // If we've passed that marker, then wait for the work behind the fence to
- // complete.
- if (swap->gl_fence) {
- gfx::ScopedSetGLToRealGLApi scoped_set_gl_api;
- gfx::ScopedCGLSetCurrentContext scoped_set_current(swap->cgl_context);
-
- CheckGLErrors("before waiting on fence");
- if (!swap->gl_fence->HasCompleted()) {
- TRACE_EVENT0("gpu", "ImageTransportSurfaceOverlayMac::ClientWait");
- swap->gl_fence->ClientWait();
- }
- swap->gl_fence.reset();
- CheckGLErrors("after waiting on fence");
- }
- return true;
-}
-
-void ImageTransportSurfaceOverlayMac::DisplayFirstPendingSwapImmediately() {
- TRACE_EVENT0("gpu",
- "ImageTransportSurfaceOverlayMac::DisplayFirstPendingSwapImmediately");
- DCHECK(!pending_swaps_.empty());
- linked_ptr<PendingSwap> swap = pending_swaps_.front();
-
- // If there is a fence for this object, delete it.
- if (swap->gl_fence) {
- gfx::ScopedSetGLToRealGLApi scoped_set_gl_api;
- gfx::ScopedCGLSetCurrentContext scoped_set_current(swap->cgl_context);
-
- CheckGLErrors("before deleting active fence");
- swap->gl_fence.reset();
- CheckGLErrors("while deleting active fence");
- }
-
- // Update the CALayer hierarchy.
- {
- gfx::RectF pixel_damage_rect = gfx::RectF(swap->pixel_damage_rect);
- ScopedCAActionDisabler disabler;
- if (swap->ca_layer_tree) {
- swap->ca_layer_tree->CommitScheduledCALayers(
- ca_root_layer_.get(), std::move(current_ca_layer_tree_),
- swap->scale_factor);
- current_ca_layer_tree_.swap(swap->ca_layer_tree);
- current_partial_damage_tree_.reset();
- } else if (swap->partial_damage_tree) {
- swap->partial_damage_tree->CommitCALayers(
- ca_root_layer_.get(), std::move(current_partial_damage_tree_),
- swap->scale_factor, swap->pixel_damage_rect);
- current_partial_damage_tree_.swap(swap->partial_damage_tree);
- current_ca_layer_tree_.reset();
- } else {
- TRACE_EVENT0("gpu", "Blank frame: No overlays or CALayers");
- [ca_root_layer_ setSublayers:nil];
- current_partial_damage_tree_.reset();
- current_ca_layer_tree_.reset();
- }
- swap->ca_layer_tree.reset();
- swap->partial_damage_tree.reset();
- }
-
- // Update the latency info to reflect the swap time.
- base::TimeTicks swap_time = base::TimeTicks::Now();
- for (auto& latency_info : swap->latency_info) {
- latency_info.AddLatencyNumberWithTimestamp(
- ui::INPUT_EVENT_GPU_SWAP_BUFFER_COMPONENT, 0, 0, swap_time, 1);
- latency_info.AddLatencyNumberWithTimestamp(
- ui::INPUT_EVENT_LATENCY_TERMINATED_FRAME_SWAP_COMPONENT, 0, 0,
- swap_time, 1);
- }
-
- // Send acknowledgement to the browser.
- GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params;
- if (use_remote_layer_api_) {
- params.ca_context_id = [ca_context_ contextId];
- } else if (current_partial_damage_tree_) {
- params.io_surface.reset(IOSurfaceCreateMachPort(
- current_partial_damage_tree_->RootLayerIOSurface()));
- }
- params.size = swap->pixel_size;
- params.scale_factor = swap->scale_factor;
- params.latency_info.swap(swap->latency_info);
- helper_->SendAcceleratedSurfaceBuffersSwapped(params);
-
- // Remove this from the queue, and reset any callback timers.
- pending_swaps_.pop_front();
-}
-
-void ImageTransportSurfaceOverlayMac::DisplayAndClearAllPendingSwaps() {
- TRACE_EVENT0("gpu",
- "ImageTransportSurfaceOverlayMac::DisplayAndClearAllPendingSwaps");
- while (!pending_swaps_.empty())
- DisplayFirstPendingSwapImmediately();
-}
-
-void ImageTransportSurfaceOverlayMac::CheckPendingSwapsCallback() {
- TRACE_EVENT0("gpu",
- "ImageTransportSurfaceOverlayMac::CheckPendingSwapsCallback");
-
- if (pending_swaps_.empty())
- return;
-
- const base::TimeTicks now = base::TimeTicks::Now();
- if (IsFirstPendingSwapReadyToDisplay(now))
- DisplayFirstPendingSwapImmediately();
- PostCheckPendingSwapsCallbackIfNeeded(now);
-}
-
-void ImageTransportSurfaceOverlayMac::PostCheckPendingSwapsCallbackIfNeeded(
- const base::TimeTicks& now) {
- TRACE_EVENT0("gpu",
- "ImageTransportSurfaceOverlayMac::PostCheckPendingSwapsCallbackIfNeeded");
-
- if (pending_swaps_.empty()) {
- display_pending_swap_timer_.Stop();
- } else {
- display_pending_swap_timer_.Start(
- FROM_HERE,
- pending_swaps_.front()->target_display_time - now,
- base::Bind(&ImageTransportSurfaceOverlayMac::CheckPendingSwapsCallback,
- weak_factory_.GetWeakPtr()));
- }
-}
-
-gfx::SwapResult ImageTransportSurfaceOverlayMac::SwapBuffers() {
- return SwapBuffersInternal(
- gfx::Rect(0, 0, pixel_size_.width(), pixel_size_.height()));
-}
-
-gfx::SwapResult ImageTransportSurfaceOverlayMac::PostSubBuffer(int x,
- int y,
- int width,
- int height) {
- return SwapBuffersInternal(gfx::Rect(x, y, width, height));
-}
-
-bool ImageTransportSurfaceOverlayMac::SupportsPostSubBuffer() {
- return true;
-}
-
-gfx::Size ImageTransportSurfaceOverlayMac::GetSize() {
- return gfx::Size();
-}
-
-void* ImageTransportSurfaceOverlayMac::GetHandle() {
- return nullptr;
-}
-
-bool ImageTransportSurfaceOverlayMac::OnMakeCurrent(gfx::GLContext* context) {
- // Ensure that the context is on the appropriate GL renderer. The GL renderer
- // will generally only change when the GPU changes.
- if (gl_renderer_id_ && context)
- context->share_group()->SetRendererID(gl_renderer_id_);
- return true;
-}
-
-bool ImageTransportSurfaceOverlayMac::SetBackbufferAllocation(bool allocated) {
- if (!allocated) {
- DisplayAndClearAllPendingSwaps();
- last_swap_time_ = base::TimeTicks();
- }
- return true;
-}
-
-bool ImageTransportSurfaceOverlayMac::ScheduleOverlayPlane(
- int z_order,
- gfx::OverlayTransform transform,
- gl::GLImage* image,
- const gfx::Rect& pixel_frame_rect,
- const gfx::RectF& crop_rect) {
- if (transform != gfx::OVERLAY_TRANSFORM_NONE) {
- DLOG(ERROR) << "Invalid overlay plane transform.";
- return false;
- }
- if (z_order) {
- DLOG(ERROR) << "Invalid non-zero Z order.";
- return false;
- }
- if (pending_partial_damage_tree_) {
- DLOG(ERROR) << "Only one overlay per swap is allowed.";
- return false;
- }
- pending_partial_damage_tree_.reset(new CALayerPartialDamageTree(
- use_remote_layer_api_,
- static_cast<gl::GLImageIOSurface*>(image)->io_surface(),
- pixel_frame_rect));
- return true;
-}
-
-bool ImageTransportSurfaceOverlayMac::ScheduleCALayer(
- gl::GLImage* contents_image,
- const gfx::RectF& contents_rect,
- float opacity,
- unsigned background_color,
- unsigned edge_aa_mask,
- const gfx::RectF& rect,
- bool is_clipped,
- const gfx::RectF& clip_rect,
- const gfx::Transform& transform,
- int sorting_context_id) {
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface;
- if (contents_image) {
- io_surface =
- static_cast<gl::GLImageIOSurface*>(contents_image)->io_surface();
- }
- if (!pending_ca_layer_tree_)
- pending_ca_layer_tree_.reset(new CALayerTree);
- return pending_ca_layer_tree_->ScheduleCALayer(
- is_clipped, gfx::ToEnclosingRect(clip_rect), sorting_context_id,
- transform, io_surface, contents_rect, gfx::ToEnclosingRect(rect),
- background_color, edge_aa_mask, opacity);
-}
-
-bool ImageTransportSurfaceOverlayMac::IsSurfaceless() const {
- return true;
-}
-
-void ImageTransportSurfaceOverlayMac::OnBufferPresented(
- const AcceleratedSurfaceMsg_BufferPresented_Params& params) {
- vsync_timebase_ = params.vsync_timebase;
- vsync_interval_ = params.vsync_interval;
- vsync_parameters_valid_ = (vsync_interval_ != base::TimeDelta());
-
- // Compute |vsync_timebase_| to be the first vsync after time zero.
- if (vsync_parameters_valid_) {
- vsync_timebase_ -=
- vsync_interval_ *
- ((vsync_timebase_ - base::TimeTicks()) / vsync_interval_);
- }
-}
-
-bool ImageTransportSurfaceOverlayMac::Resize(const gfx::Size& pixel_size,
- float scale_factor,
- bool has_alpha) {
- // Flush through any pending frames.
- DisplayAndClearAllPendingSwaps();
- pixel_size_ = pixel_size;
- scale_factor_ = scale_factor;
- return true;
-}
-
-void ImageTransportSurfaceOverlayMac::SetLatencyInfo(
- const std::vector<ui::LatencyInfo>& latency_info) {
- latency_info_.insert(
- latency_info_.end(), latency_info.begin(), latency_info.end());
-}
-
-void ImageTransportSurfaceOverlayMac::OnGpuSwitched() {
- // Create a new context, and use the GL renderer ID that the new context gets.
- scoped_refptr<ui::IOSurfaceContext> context_on_new_gpu =
- ui::IOSurfaceContext::Get(ui::IOSurfaceContext::kCALayerContext);
- if (!context_on_new_gpu)
- return;
- GLint context_renderer_id = -1;
- if (CGLGetParameter(context_on_new_gpu->cgl_context(),
- kCGLCPCurrentRendererID,
- &context_renderer_id) != kCGLNoError) {
- LOG(ERROR) << "Failed to create test context after GPU switch";
- return;
- }
- gl_renderer_id_ = context_renderer_id & kCGLRendererIDMatchingMask;
-
- // Post a task holding a reference to the new GL context. The reason for
- // this is to avoid creating-then-destroying the context for every image
- // transport surface that is observing the GPU switch.
- base::MessageLoop::current()->PostTask(
- FROM_HERE, base::Bind(&IOSurfaceContextNoOp, context_on_new_gpu));
-}
-
-base::TimeTicks ImageTransportSurfaceOverlayMac::GetNextVSyncTimeAfter(
- const base::TimeTicks& from, double interval_fraction) {
- if (!vsync_parameters_valid_)
- return from;
-
- // Compute the previous vsync time.
- base::TimeTicks previous_vsync =
- vsync_interval_ * ((from - vsync_timebase_) / vsync_interval_) +
- vsync_timebase_;
-
- // Return |interval_fraction| through the next vsync.
- return previous_vsync + (1 + interval_fraction) * vsync_interval_;
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/image_transport_surface_win.cc b/chromium/content/common/gpu/image_transport_surface_win.cc
deleted file mode 100644
index 15f43d2a00f..00000000000
--- a/chromium/content/common/gpu/image_transport_surface_win.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/image_transport_surface.h"
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/win/windows_version.h"
-#include "content/common/gpu/child_window_surface_win.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/public/common/content_switches.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_implementation.h"
-#include "ui/gl/gl_surface_egl.h"
-#include "ui/gl/vsync_provider_win.h"
-
-namespace content {
-
-// static
-scoped_refptr<gfx::GLSurface> ImageTransportSurface::CreateNativeSurface(
- GpuChannelManager* manager,
- GpuCommandBufferStub* stub,
- const gfx::GLSurfaceHandle& handle) {
- DCHECK(handle.handle);
- DCHECK_EQ(handle.transport_type, gfx::NATIVE_DIRECT);
-
- scoped_refptr<gfx::GLSurface> surface;
- if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2 &&
- gfx::GLSurfaceEGL::IsDirectCompositionSupported()) {
- scoped_refptr<ChildWindowSurfaceWin> egl_surface(
- new ChildWindowSurfaceWin(manager, handle.handle));
- surface = egl_surface;
-
- // TODO(jbauman): Get frame statistics from DirectComposition
- scoped_ptr<gfx::VSyncProvider> vsync_provider(
- new gfx::VSyncProviderWin(handle.handle));
- if (!egl_surface->Initialize(std::move(vsync_provider)))
- return nullptr;
- } else {
- surface = gfx::GLSurface::CreateViewGLSurface(handle.handle);
- }
-
- return scoped_refptr<gfx::GLSurface>(new PassThroughImageTransportSurface(
- manager, stub, surface.get()));
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/media/OWNERS b/chromium/content/common/gpu/media/OWNERS
index 633a1877529..9999d737345 100644
--- a/chromium/content/common/gpu/media/OWNERS
+++ b/chromium/content/common/gpu/media/OWNERS
@@ -2,3 +2,16 @@ dalecurtis@chromium.org
posciak@chromium.org
sandersd@chromium.org
wuchengli@chromium.org
+
+# For security review of IPC message files.
+per-file *_messages*.h=set noparent
+per-file *_messages*.h=dcheng@chromium.org
+per-file *_messages*.h=inferno@chromium.org
+per-file *_messages*.h=jln@chromium.org
+per-file *_messages*.h=jschuh@chromium.org
+per-file *_messages*.h=kenrb@chromium.org
+per-file *_messages*.h=mkwst@chromium.org
+per-file *_messages*.h=nasko@chromium.org
+per-file *_messages*.h=palmer@chromium.org
+per-file *_messages*.h=tsepez@chromium.org
+per-file *_messages*.h=wfh@chromium.org
diff --git a/chromium/content/common/gpu/media/android_copying_backing_strategy.cc b/chromium/content/common/gpu/media/android_copying_backing_strategy.cc
index f80a16f3d72..b5216154829 100644
--- a/chromium/content/common/gpu/media/android_copying_backing_strategy.cc
+++ b/chromium/content/common/gpu/media/android_copying_backing_strategy.cc
@@ -8,6 +8,7 @@
#include "base/logging.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/media/avda_return_on_failure.h"
+#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "media/base/limits.h"
@@ -17,24 +18,47 @@
namespace content {
-const static GLfloat kIdentityMatrix[16] = {1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f,
- 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 1.0f};
-
-AndroidCopyingBackingStrategy::AndroidCopyingBackingStrategy()
- : state_provider_(nullptr), surface_texture_id_(0), media_codec_(nullptr) {}
+AndroidCopyingBackingStrategy::AndroidCopyingBackingStrategy(
+ AVDAStateProvider* state_provider)
+ : state_provider_(state_provider),
+ surface_texture_id_(0),
+ media_codec_(nullptr) {}
AndroidCopyingBackingStrategy::~AndroidCopyingBackingStrategy() {}
-void AndroidCopyingBackingStrategy::Initialize(
- AVDAStateProvider* state_provider) {
- state_provider_ = state_provider;
+gfx::ScopedJavaSurface AndroidCopyingBackingStrategy::Initialize(
+ int surface_view_id) {
+ if (surface_view_id != media::VideoDecodeAccelerator::Config::kNoSurfaceID) {
+ LOG(ERROR) << "The copying strategy should not be initialized with a "
+ "surface id.";
+ return gfx::ScopedJavaSurface();
+ }
+
+ // Create a texture and attach the SurfaceTexture to it.
+ glGenTextures(1, &surface_texture_id_);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, surface_texture_id_);
+
+ // Note that the target will be correctly sized, so nearest filtering is all
+ // that's needed.
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ state_provider_->GetGlDecoder()->RestoreTextureUnitBindings(0);
+ state_provider_->GetGlDecoder()->RestoreActiveTexture();
+
+ surface_texture_ = gfx::SurfaceTexture::Create(surface_texture_id_);
+
+ return gfx::ScopedJavaSurface(surface_texture_.get());
}
void AndroidCopyingBackingStrategy::Cleanup(
bool have_context,
const AndroidVideoDecodeAccelerator::OutputBufferMap&) {
DCHECK(state_provider_->ThreadChecker().CalledOnValidThread());
+
if (copier_)
copier_->Destroy();
@@ -42,26 +66,17 @@ void AndroidCopyingBackingStrategy::Cleanup(
glDeleteTextures(1, &surface_texture_id_);
}
+scoped_refptr<gfx::SurfaceTexture>
+AndroidCopyingBackingStrategy::GetSurfaceTexture() const {
+ return surface_texture_;
+}
+
uint32_t AndroidCopyingBackingStrategy::GetTextureTarget() const {
return GL_TEXTURE_2D;
}
-scoped_refptr<gfx::SurfaceTexture>
-AndroidCopyingBackingStrategy::CreateSurfaceTexture() {
- glGenTextures(1, &surface_texture_id_);
- glActiveTexture(GL_TEXTURE0);
- glBindTexture(GL_TEXTURE_EXTERNAL_OES, surface_texture_id_);
-
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- state_provider_->GetGlDecoder()->RestoreTextureUnitBindings(0);
- state_provider_->GetGlDecoder()->RestoreActiveTexture();
-
- surface_texture_ = gfx::SurfaceTexture::Create(surface_texture_id_);
-
- return surface_texture_;
+gfx::Size AndroidCopyingBackingStrategy::GetPictureBufferSize() const {
+ return state_provider_->GetSize();
}
void AndroidCopyingBackingStrategy::UseCodecBufferForPictureBuffer(
@@ -99,16 +114,19 @@ void AndroidCopyingBackingStrategy::UseCodecBufferForPictureBuffer(
surface_texture_->UpdateTexImage();
}
- float transfrom_matrix[16];
- surface_texture_->GetTransformMatrix(transfrom_matrix);
+ float transform_matrix[16];
+ surface_texture_->GetTransformMatrix(transform_matrix);
- uint32_t picture_buffer_texture_id = picture_buffer.texture_id();
+ DCHECK_LE(1u, picture_buffer.texture_ids().size());
+ uint32_t picture_buffer_texture_id = picture_buffer.texture_ids()[0];
// Defer initializing the CopyTextureCHROMIUMResourceManager until it is
// needed because it takes 10s of milliseconds to initialize.
if (!copier_) {
copier_.reset(new gpu::CopyTextureCHROMIUMResourceManager());
- copier_->Initialize(state_provider_->GetGlDecoder().get());
+ copier_->Initialize(state_provider_->GetGlDecoder().get(),
+ state_provider_->GetGlDecoder()->GetContextGroup()->
+ feature_info()->feature_flags());
}
// Here, we copy |surface_texture_id_| to the picture buffer instead of
@@ -118,13 +136,11 @@ void AndroidCopyingBackingStrategy::UseCodecBufferForPictureBuffer(
// attached.
// 2. SurfaceTexture requires us to apply a transform matrix when we show
// the texture.
- // TODO(hkuang): get the StreamTexture transform matrix in GPU process
- // instead of using default matrix crbug.com/226218.
copier_->DoCopyTextureWithTransform(
state_provider_->GetGlDecoder().get(), GL_TEXTURE_EXTERNAL_OES,
surface_texture_id_, GL_TEXTURE_2D, picture_buffer_texture_id,
state_provider_->GetSize().width(), state_provider_->GetSize().height(),
- false, false, false, kIdentityMatrix);
+ true, false, false, transform_matrix);
}
void AndroidCopyingBackingStrategy::CodecChanged(
@@ -140,4 +156,37 @@ void AndroidCopyingBackingStrategy::OnFrameAvailable() {
// instead preserve the old behavior.
}
+bool AndroidCopyingBackingStrategy::ArePicturesOverlayable() {
+ return false;
+}
+
+void AndroidCopyingBackingStrategy::UpdatePictureBufferSize(
+ media::PictureBuffer* picture_buffer,
+ const gfx::Size& new_size) {
+  // This strategy uses 2D textures whose allocated memory depends on the
+ // size. To update size in all places, we must:
+ // 1) Update the PictureBuffer meta-data
+ picture_buffer->set_size(new_size);
+
+ // 2) Update the GL texture via glTexImage2D. This step assumes the caller
+ // has made our GL context current.
+ DCHECK_LE(1u, picture_buffer->texture_ids().size());
+ glBindTexture(GL_TEXTURE_2D, picture_buffer->texture_ids()[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, new_size.width(), new_size.height(),
+ 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+ state_provider_->GetGlDecoder()->RestoreActiveTextureUnitBinding(
+ GL_TEXTURE_2D);
+
+ // 3) Update the CHROMIUM Texture's size.
+ gpu::gles2::TextureRef* texture_ref =
+ state_provider_->GetTextureForPicture(*picture_buffer);
+ RETURN_IF_NULL(texture_ref);
+ gpu::gles2::TextureManager* texture_manager =
+ state_provider_->GetGlDecoder()->GetContextGroup()->texture_manager();
+ RETURN_IF_NULL(texture_manager);
+ texture_manager->SetLevelInfo(texture_ref, GetTextureTarget(), 0, GL_RGBA,
+ new_size.width(), new_size.height(), 1, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, gfx::Rect(new_size));
+}
+
} // namespace content
diff --git a/chromium/content/common/gpu/media/android_copying_backing_strategy.h b/chromium/content/common/gpu/media/android_copying_backing_strategy.h
index 17b096aecd5..8980404dfdb 100644
--- a/chromium/content/common/gpu/media/android_copying_backing_strategy.h
+++ b/chromium/content/common/gpu/media/android_copying_backing_strategy.h
@@ -28,21 +28,25 @@ class AVDAStateProvider;
class CONTENT_EXPORT AndroidCopyingBackingStrategy
: public AndroidVideoDecodeAccelerator::BackingStrategy {
public:
- AndroidCopyingBackingStrategy();
+ explicit AndroidCopyingBackingStrategy(AVDAStateProvider* state_provider);
~AndroidCopyingBackingStrategy() override;
// AndroidVideoDecodeAccelerator::BackingStrategy
- void Initialize(AVDAStateProvider*) override;
+ gfx::ScopedJavaSurface Initialize(int surface_view_id) override;
void Cleanup(bool have_context,
const AndroidVideoDecodeAccelerator::OutputBufferMap&) override;
+ scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture() const override;
uint32_t GetTextureTarget() const override;
- scoped_refptr<gfx::SurfaceTexture> CreateSurfaceTexture() override;
+ gfx::Size GetPictureBufferSize() const override;
void UseCodecBufferForPictureBuffer(int32_t codec_buffer_index,
const media::PictureBuffer&) override;
void CodecChanged(
media::VideoCodecBridge*,
const AndroidVideoDecodeAccelerator::OutputBufferMap&) override;
void OnFrameAvailable() override;
+ bool ArePicturesOverlayable() override;
+ void UpdatePictureBufferSize(media::PictureBuffer* picture_buffer,
+ const gfx::Size& new_size) override;
private:
   // Used to copy the texture from the surface texture to picture buffers.
diff --git a/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc b/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc
index 660785eea90..3e62629745d 100644
--- a/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc
+++ b/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc
@@ -4,30 +4,41 @@
#include "content/common/gpu/media/android_deferred_rendering_backing_strategy.h"
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+
+#include "base/android/build_info.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
-#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/media/avda_codec_image.h"
#include "content/common/gpu/media/avda_return_on_failure.h"
#include "content/common/gpu/media/avda_shared_state.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/gl_stream_texture_image.h"
+#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/ipc/common/gpu_surface_lookup.h"
+#include "gpu/ipc/service/gpu_channel.h"
#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/egl_util.h"
#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_surface_egl.h"
+#include "ui/gl/scoped_binders.h"
+#include "ui/gl/scoped_make_current.h"
namespace content {
AndroidDeferredRenderingBackingStrategy::
- AndroidDeferredRenderingBackingStrategy()
- : state_provider_(nullptr), media_codec_(nullptr) {}
+ AndroidDeferredRenderingBackingStrategy(AVDAStateProvider* state_provider)
+ : state_provider_(state_provider), media_codec_(nullptr) {}
AndroidDeferredRenderingBackingStrategy::
~AndroidDeferredRenderingBackingStrategy() {}
-void AndroidDeferredRenderingBackingStrategy::Initialize(
- AVDAStateProvider* state_provider) {
- state_provider_ = state_provider;
+gfx::ScopedJavaSurface AndroidDeferredRenderingBackingStrategy::Initialize(
+ int surface_view_id) {
shared_state_ = new AVDASharedState();
// Create a texture for the SurfaceTexture to use. We don't attach it here
@@ -36,6 +47,27 @@ void AndroidDeferredRenderingBackingStrategy::Initialize(
glGenTextures(1, &service_id);
DCHECK(service_id);
shared_state_->set_surface_texture_service_id(service_id);
+
+ gfx::ScopedJavaSurface surface;
+ if (surface_view_id != media::VideoDecodeAccelerator::Config::kNoSurfaceID) {
+ surface = gpu::GpuSurfaceLookup::GetInstance()->AcquireJavaSurface(
+ surface_view_id);
+ } else {
+ if (DoesSurfaceTextureDetachWork()) {
+ // Create a detached SurfaceTexture. Detaching it will silently fail to
+ // delete texture 0.
+ surface_texture_ = gfx::SurfaceTexture::Create(0);
+ surface_texture_->DetachFromGLContext();
+ } else {
+ // Detach doesn't work so well on all platforms. Just attach the
+ // SurfaceTexture here, and probably context switch later.
+ surface_texture_ = gfx::SurfaceTexture::Create(service_id);
+ shared_state_->DidAttachSurfaceTexture();
+ }
+ surface = gfx::ScopedJavaSurface(surface_texture_.get());
+ }
+
+ return surface;
}
void AndroidDeferredRenderingBackingStrategy::Cleanup(
@@ -50,6 +82,11 @@ void AndroidDeferredRenderingBackingStrategy::Cleanup(
for (const std::pair<int, media::PictureBuffer>& entry : buffers)
SetImageForPicture(entry.second, nullptr);
+ // If we're rendering to a SurfaceTexture we can make a copy of the current
+ // front buffer so that the PictureBuffer textures are still valid.
+ if (surface_texture_ && have_context && ShouldCopyPictures())
+ CopySurfaceTextureToPictures(buffers);
+
// Now that no AVDACodecImages refer to the SurfaceTexture's texture, delete
// the texture name.
GLuint service_id = shared_state_->surface_texture_service_id();
@@ -57,38 +94,35 @@ void AndroidDeferredRenderingBackingStrategy::Cleanup(
glDeleteTextures(1, &service_id);
}
-uint32_t AndroidDeferredRenderingBackingStrategy::GetTextureTarget() const {
- return GL_TEXTURE_EXTERNAL_OES;
-}
-
scoped_refptr<gfx::SurfaceTexture>
-AndroidDeferredRenderingBackingStrategy::CreateSurfaceTexture() {
- // AVDACodecImage will handle attaching this to a texture later.
- surface_texture_ = gfx::SurfaceTexture::Create(0);
- // Detach from our GL context so that the GLImages can attach. It will
- // silently fail to delete texture 0.
- surface_texture_->DetachFromGLContext();
-
+AndroidDeferredRenderingBackingStrategy::GetSurfaceTexture() const {
return surface_texture_;
}
-gpu::gles2::TextureRef*
-AndroidDeferredRenderingBackingStrategy::GetTextureForPicture(
- const media::PictureBuffer& picture_buffer) {
- RETURN_NULL_IF_NULL(state_provider_->GetGlDecoder());
- gpu::gles2::TextureManager* texture_manager =
- state_provider_->GetGlDecoder()->GetContextGroup()->texture_manager();
- RETURN_NULL_IF_NULL(texture_manager);
- gpu::gles2::TextureRef* texture_ref =
- texture_manager->GetTexture(picture_buffer.internal_texture_id());
- RETURN_NULL_IF_NULL(texture_ref);
+uint32_t AndroidDeferredRenderingBackingStrategy::GetTextureTarget() const {
+ // If we're using a surface texture, then we need an external texture target
+ // to sample from it. If not, then we'll use 2D transparent textures to draw
+ // a transparent hole through which to see the SurfaceView. This is normally
+ // needed only for the devtools inspector, since the overlay mechanism handles
+ // it otherwise.
+ return surface_texture_ ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+}
- return texture_ref;
+gfx::Size AndroidDeferredRenderingBackingStrategy::GetPictureBufferSize()
+ const {
+ // For SurfaceView, request a 1x1 2D texture to reduce memory during
+ // initialization. For SurfaceTexture, allocate a picture buffer that is the
+ // actual frame size. Note that it will be an external texture anyway, so it
+ // doesn't allocate an image of that size. However, it's still important to
+ // get the coded size right, so that VideoLayerImpl doesn't try to scale the
+ // texture when building the quad for it.
+ return surface_texture_ ? state_provider_->GetSize() : gfx::Size(1, 1);
}
AVDACodecImage* AndroidDeferredRenderingBackingStrategy::GetImageForPicture(
const media::PictureBuffer& picture_buffer) {
- gpu::gles2::TextureRef* texture_ref = GetTextureForPicture(picture_buffer);
+ gpu::gles2::TextureRef* texture_ref =
+ state_provider_->GetTextureForPicture(picture_buffer);
RETURN_NULL_IF_NULL(texture_ref);
gl::GLImage* image =
texture_ref->texture()->GetLevelImage(GetTextureTarget(), 0);
@@ -97,8 +131,9 @@ AVDACodecImage* AndroidDeferredRenderingBackingStrategy::GetImageForPicture(
void AndroidDeferredRenderingBackingStrategy::SetImageForPicture(
const media::PictureBuffer& picture_buffer,
- const scoped_refptr<gl::GLImage>& image) {
- gpu::gles2::TextureRef* texture_ref = GetTextureForPicture(picture_buffer);
+ const scoped_refptr<gpu::gles2::GLStreamTextureImage>& image) {
+ gpu::gles2::TextureRef* texture_ref =
+ state_provider_->GetTextureForPicture(picture_buffer);
RETURN_IF_NULL(texture_ref);
gpu::gles2::TextureManager* texture_manager =
@@ -120,15 +155,25 @@ void AndroidDeferredRenderingBackingStrategy::SetImageForPicture(
shared_state_->surface_texture_service_id());
static_cast<AVDACodecImage*>(image.get())
- ->setTexture(texture_ref->texture());
+ ->SetTexture(texture_ref->texture());
} else {
// Clear the unowned service_id, so that this texture is no longer going
// to depend on the surface texture at all.
texture_ref->texture()->SetUnownedServiceId(0);
}
- texture_manager->SetLevelImage(texture_ref, GetTextureTarget(), 0,
- image.get(), gpu::gles2::Texture::UNBOUND);
+ // For SurfaceTexture we set the image to UNBOUND so that the implementation
+ // will call CopyTexImage, which is where AVDACodecImage updates the
+ // SurfaceTexture to the right frame.
+ // For SurfaceView we set the image to be BOUND because ScheduleOverlayPlane
+ // expects it. If something tries to sample from this texture it won't work,
+ // but there's no way to sample from a SurfaceView anyway, so it doesn't
+ // matter. The only way to use this texture is to schedule it as an overlay.
+ const gpu::gles2::Texture::ImageState image_state =
+ surface_texture_ ? gpu::gles2::Texture::UNBOUND
+ : gpu::gles2::Texture::BOUND;
+ texture_manager->SetLevelStreamTextureImage(texture_ref, GetTextureTarget(),
+ 0, image.get(), image_state);
}
void AndroidDeferredRenderingBackingStrategy::UseCodecBufferForPictureBuffer(
@@ -139,36 +184,53 @@ void AndroidDeferredRenderingBackingStrategy::UseCodecBufferForPictureBuffer(
// Notify the AVDACodecImage for picture_buffer that it should use the
// decoded buffer codec_buf_index to render this frame.
- AVDACodecImage* avImage = GetImageForPicture(picture_buffer);
- RETURN_IF_NULL(avImage);
- DCHECK_EQ(avImage->GetMediaCodecBufferIndex(), -1);
+ AVDACodecImage* avda_image = GetImageForPicture(picture_buffer);
+ RETURN_IF_NULL(avda_image);
+ DCHECK_EQ(avda_image->GetMediaCodecBufferIndex(), -1);
// Note that this is not a race, since we do not re-use a PictureBuffer
// until after the CC is done drawing it.
- avImage->SetMediaCodecBufferIndex(codec_buf_index);
- avImage->SetSize(state_provider_->GetSize());
+ avda_image->SetMediaCodecBufferIndex(codec_buf_index);
+ avda_image->SetSize(state_provider_->GetSize());
}
void AndroidDeferredRenderingBackingStrategy::AssignOnePictureBuffer(
- const media::PictureBuffer& picture_buffer) {
+ const media::PictureBuffer& picture_buffer,
+ bool have_context) {
// Attach a GLImage to each texture that will use the surface texture.
// We use a refptr here in case SetImageForPicture fails.
- scoped_refptr<gl::GLImage> gl_image(
+ scoped_refptr<gpu::gles2::GLStreamTextureImage> gl_image =
new AVDACodecImage(shared_state_, media_codec_,
- state_provider_->GetGlDecoder(), surface_texture_));
+ state_provider_->GetGlDecoder(), surface_texture_);
SetImageForPicture(picture_buffer, gl_image);
+
+ if (!surface_texture_ && have_context) {
+ // To make devtools work, we're using a 2D texture. Make it transparent,
+  // so that it draws a hole for the SurfaceView to show through. This is only
+ // because devtools draws and reads back, which skips overlay processing.
+ // It's unclear why devtools renders twice -- once normally, and once
+ // including a readback layer. The result is that the device screen
+ // flashes as we alternately draw the overlay hole and this texture,
+ // unless we make the texture transparent.
+ static const uint8_t rgba[] = {0, 0, 0, 0};
+ const gfx::Size size(1, 1);
+ DCHECK_LE(1u, picture_buffer.texture_ids().size());
+ glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_ids()[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.width(), size.height(), 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, rgba);
+ }
}
void AndroidDeferredRenderingBackingStrategy::ReleaseCodecBufferForPicture(
const media::PictureBuffer& picture_buffer) {
- AVDACodecImage* avImage = GetImageForPicture(picture_buffer);
+ AVDACodecImage* avda_image = GetImageForPicture(picture_buffer);
// See if there is a media codec buffer still attached to this image.
- const int32_t codec_buffer = avImage->GetMediaCodecBufferIndex();
+ const int32_t codec_buffer = avda_image->GetMediaCodecBufferIndex();
if (codec_buffer >= 0) {
// PictureBuffer wasn't displayed, so release the buffer.
media_codec_->ReleaseOutputBuffer(codec_buffer, false);
- avImage->SetMediaCodecBufferIndex(-1);
+ avda_image->SetMediaCodecBufferIndex(-1);
}
}
@@ -181,18 +243,6 @@ void AndroidDeferredRenderingBackingStrategy::ReuseOnePictureBuffer(
ReleaseCodecBufferForPicture(picture_buffer);
}
-void AndroidDeferredRenderingBackingStrategy::DismissOnePictureBuffer(
- const media::PictureBuffer& picture_buffer) {
- // If there is an outstanding codec buffer attached to this image, then
- // release it.
- ReleaseCodecBufferForPicture(picture_buffer);
-
- // This makes sure that the Texture no longer refers to the codec or to the
- // SurfaceTexture's service_id. That's important, so that it doesn't refer
- // to the texture by name after we've deleted it.
- SetImageForPicture(picture_buffer, nullptr);
-}
-
void AndroidDeferredRenderingBackingStrategy::CodecChanged(
media::VideoCodecBridge* codec,
const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {
@@ -200,9 +250,9 @@ void AndroidDeferredRenderingBackingStrategy::CodecChanged(
// doesn't know about them.
media_codec_ = codec;
for (const std::pair<int, media::PictureBuffer>& entry : buffers) {
- AVDACodecImage* avImage = GetImageForPicture(entry.second);
- avImage->SetMediaCodec(codec);
- avImage->SetMediaCodecBufferIndex(-1);
+ AVDACodecImage* avda_image = GetImageForPicture(entry.second);
+ avda_image->SetMediaCodec(codec);
+ avda_image->SetMediaCodecBufferIndex(-1);
}
}
@@ -210,4 +260,163 @@ void AndroidDeferredRenderingBackingStrategy::OnFrameAvailable() {
shared_state_->SignalFrameAvailable();
}
+bool AndroidDeferredRenderingBackingStrategy::ArePicturesOverlayable() {
+ // SurfaceView frames are always overlayable because that's the only way to
+ // display them.
+ return !surface_texture_;
+}
+
+void AndroidDeferredRenderingBackingStrategy::UpdatePictureBufferSize(
+ media::PictureBuffer* picture_buffer,
+ const gfx::Size& new_size) {
+ // This strategy uses EGL images which manage the texture size for us. We
+ // simply update the PictureBuffer meta-data and leave the texture as-is.
+ picture_buffer->set_size(new_size);
+}
+
+void AndroidDeferredRenderingBackingStrategy::CopySurfaceTextureToPictures(
+ const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {
+ DVLOG(3) << __FUNCTION__;
+
+ // Don't try to copy if the SurfaceTexture was never attached because that
+ // means it was never updated.
+ if (!shared_state_->surface_texture_is_attached())
+ return;
+
+ gpu::gles2::GLES2Decoder* gl_decoder = state_provider_->GetGlDecoder().get();
+ if (!gl_decoder)
+ return;
+
+ const gfx::Size size = state_provider_->GetSize();
+
+ // Create a 2D texture to hold a copy of the SurfaceTexture's front buffer.
+ GLuint tmp_texture_id;
+ glGenTextures(1, &tmp_texture_id);
+ {
+ gfx::ScopedTextureBinder texture_binder(GL_TEXTURE_2D, tmp_texture_id);
+ // The target texture's size will exactly match the source.
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.width(), size.height(), 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+ }
+
+ float transform_matrix[16];
+ surface_texture_->GetTransformMatrix(transform_matrix);
+
+ gpu::CopyTextureCHROMIUMResourceManager copier;
+ copier.Initialize(
+ gl_decoder,
+ gl_decoder->GetContextGroup()->feature_info()->feature_flags());
+ copier.DoCopyTextureWithTransform(gl_decoder, GL_TEXTURE_EXTERNAL_OES,
+ shared_state_->surface_texture_service_id(),
+ GL_TEXTURE_2D, tmp_texture_id, size.width(),
+ size.height(), true, false, false,
+ transform_matrix);
+
+ // Create an EGLImage from the 2D texture we just copied into. By associating
+ // the EGLImage with the PictureBuffer textures they will remain valid even
+ // after we delete the 2D texture and EGLImage.
+ const EGLImageKHR egl_image = eglCreateImageKHR(
+ gfx::GLSurfaceEGL::GetHardwareDisplay(), eglGetCurrentContext(),
+ EGL_GL_TEXTURE_2D_KHR, reinterpret_cast<EGLClientBuffer>(tmp_texture_id),
+ nullptr /* attrs */);
+
+ glDeleteTextures(1, &tmp_texture_id);
+ DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ if (egl_image == EGL_NO_IMAGE_KHR) {
+ DLOG(ERROR) << "Failed creating EGLImage: " << ui::GetLastEGLErrorString();
+ return;
+ }
+
+ for (const std::pair<int, media::PictureBuffer>& entry : buffers) {
+ gpu::gles2::TextureRef* texture_ref =
+ state_provider_->GetTextureForPicture(entry.second);
+ if (!texture_ref)
+ continue;
+ gfx::ScopedTextureBinder texture_binder(
+ GL_TEXTURE_EXTERNAL_OES, texture_ref->texture()->service_id());
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, egl_image);
+ DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ }
+
+ EGLBoolean result =
+ eglDestroyImageKHR(gfx::GLSurfaceEGL::GetHardwareDisplay(), egl_image);
+ if (result == EGL_FALSE) {
+ DLOG(ERROR) << "Error destroying EGLImage: "
+ << ui::GetLastEGLErrorString();
+ }
+}
+
+bool AndroidDeferredRenderingBackingStrategy::DoesSurfaceTextureDetachWork()
+ const {
+ bool surface_texture_detach_works = true;
+ if (gpu::gles2::GLES2Decoder* gl_decoder =
+ state_provider_->GetGlDecoder().get()) {
+ if (gpu::gles2::ContextGroup* group = gl_decoder->GetContextGroup()) {
+ if (gpu::gles2::FeatureInfo* feature_info = group->feature_info()) {
+ surface_texture_detach_works =
+ !feature_info->workarounds().surface_texture_cant_detach;
+ }
+ }
+ }
+
+ // As a special case, the MicroMax A114 doesn't get the workaround, even
+ // though it should. Hardcode it here until we get a device and figure out
+ // why. crbug.com/591600
+ if (base::android::BuildInfo::GetInstance()->sdk_int() <= 18) { // JB
+ const std::string brand(
+ base::ToLowerASCII(base::android::BuildInfo::GetInstance()->brand()));
+ if (brand == "micromax") {
+ const std::string model(
+ base::ToLowerASCII(base::android::BuildInfo::GetInstance()->model()));
+ if (model.find("a114") != std::string::npos)
+ surface_texture_detach_works = false;
+ }
+ }
+
+ return surface_texture_detach_works;
+}
+
+bool AndroidDeferredRenderingBackingStrategy::ShouldCopyPictures() const {
+ // Mali + <= KitKat crashes when we try to do this. We don't know if it's
+ // due to detaching a surface texture, but it's the same set of devices.
+ if (!DoesSurfaceTextureDetachWork())
+ return false;
+
+ // Other devices are unreliable for other reasons (e.g., EGLImage).
+ if (gpu::gles2::GLES2Decoder* gl_decoder =
+ state_provider_->GetGlDecoder().get()) {
+ if (gpu::gles2::ContextGroup* group = gl_decoder->GetContextGroup()) {
+ if (gpu::gles2::FeatureInfo* feature_info = group->feature_info()) {
+ return !feature_info->workarounds().avda_dont_copy_pictures;
+ }
+ }
+ }
+
+ // Samsung Galaxy Tab A, J3, and J1 Mini all like to crash on Lollipop in
+  // glEGLImageTargetTexture2DOES. Exact models were SM-T280, SM-J320F,
+  // and SM-J105H.
+ if (base::android::BuildInfo::GetInstance()->sdk_int() <= 22) { // L MR1
+ const std::string brand(
+ base::ToLowerASCII(base::android::BuildInfo::GetInstance()->brand()));
+ if (brand == "samsung") {
+ const std::string model(
+ base::ToLowerASCII(base::android::BuildInfo::GetInstance()->model()));
+ if (model.find("sm-t280") != std::string::npos ||
+ model.find("sm-j320f") != std::string::npos ||
+ model.find("sm-j105") != std::string::npos)
+ return false;
+ }
+ }
+
+ // Assume so.
+ return true;
+}
+
} // namespace content
diff --git a/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.h b/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.h
index 6fc1873cf5a..733b25b0a45 100644
--- a/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.h
+++ b/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.h
@@ -17,6 +17,7 @@ class GLImage;
namespace gpu {
namespace gles2 {
+class GLStreamTextureImage;
class TextureRef;
}
}
@@ -33,42 +34,61 @@ class AVDASharedState;
class CONTENT_EXPORT AndroidDeferredRenderingBackingStrategy
: public AndroidVideoDecodeAccelerator::BackingStrategy {
public:
- AndroidDeferredRenderingBackingStrategy();
+ explicit AndroidDeferredRenderingBackingStrategy(
+ AVDAStateProvider* state_provider);
~AndroidDeferredRenderingBackingStrategy() override;
// AndroidVideoDecodeAccelerator::BackingStrategy
- void Initialize(AVDAStateProvider*) override;
+ gfx::ScopedJavaSurface Initialize(int surface_view_id) override;
void Cleanup(bool have_context,
const AndroidVideoDecodeAccelerator::OutputBufferMap&) override;
+ scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture() const override;
uint32_t GetTextureTarget() const override;
- scoped_refptr<gfx::SurfaceTexture> CreateSurfaceTexture() override;
+ gfx::Size GetPictureBufferSize() const override;
void UseCodecBufferForPictureBuffer(int32_t codec_buffer_index,
const media::PictureBuffer&) override;
- void AssignOnePictureBuffer(const media::PictureBuffer&) override;
+ void AssignOnePictureBuffer(const media::PictureBuffer&, bool) override;
void ReuseOnePictureBuffer(const media::PictureBuffer&) override;
- void DismissOnePictureBuffer(const media::PictureBuffer&) override;
void CodecChanged(
media::VideoCodecBridge*,
const AndroidVideoDecodeAccelerator::OutputBufferMap&) override;
void OnFrameAvailable() override;
+ bool ArePicturesOverlayable() override;
+ void UpdatePictureBufferSize(media::PictureBuffer* picture_buffer,
+ const gfx::Size& new_size) override;
private:
// Release any codec buffer that is associated with the given picture buffer
// back to the codec. It is okay if there is no such buffer.
void ReleaseCodecBufferForPicture(const media::PictureBuffer& picture_buffer);
- // Return the TextureRef for a given PictureBuffer's texture.
- gpu::gles2::TextureRef* GetTextureForPicture(const media::PictureBuffer&);
-
// Return the AVDACodecImage for a given PictureBuffer's texture.
AVDACodecImage* GetImageForPicture(const media::PictureBuffer&);
- void SetImageForPicture(const media::PictureBuffer& picture_buffer,
- const scoped_refptr<gl::GLImage>& image);
+ void SetImageForPicture(
+ const media::PictureBuffer& picture_buffer,
+ const scoped_refptr<gpu::gles2::GLStreamTextureImage>& image);
+
+ // Make a copy of the SurfaceTexture's front buffer and associate all given
+ // picture buffer textures with it. The picture buffer textures will not
+  // depend on |this|, the SurfaceTexture, the MediaCodec or the VDA, so it's
+ // used to back the picture buffers when the VDA is being destroyed.
+ void CopySurfaceTextureToPictures(
+ const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers);
+
+ // Return true if and only if the surface_texture_cant_detach workaround is
+ // not set.
+ bool DoesSurfaceTextureDetachWork() const;
+
+ // Return true if and only if CopySurfaceTextureToPictures is expected to work
+ // on this device.
+ bool ShouldCopyPictures() const;
scoped_refptr<AVDASharedState> shared_state_;
AVDAStateProvider* state_provider_;
+ // The SurfaceTexture to render to. Non-null after Initialize() if
+ // we're not rendering to a SurfaceView.
scoped_refptr<gfx::SurfaceTexture> surface_texture_;
media::VideoCodecBridge* media_codec_;
diff --git a/chromium/content/common/gpu/media/android_video_decode_accelerator.cc b/chromium/content/common/gpu/media/android_video_decode_accelerator.cc
index 832d8dca218..e4f45ca73b3 100644
--- a/chromium/content/common/gpu/media/android_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/android_video_decode_accelerator.cc
@@ -7,21 +7,30 @@
#include <stddef.h>
#include "base/android/build_info.h"
+#include "base/auto_reset.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
+#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
+#include "base/task_runner_util.h"
+#include "base/threading/thread_checker.h"
#include "base/trace_event/trace_event.h"
-#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/media/android_copying_backing_strategy.h"
#include "content/common/gpu/media/android_deferred_rendering_backing_strategy.h"
+#include "content/common/gpu/media/avda_return_on_failure.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_codec_util.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/limits.h"
-#include "media/base/media_switches.h"
+#include "media/base/media.h"
#include "media/base/timestamp_constants.h"
#include "media/base/video_decoder_config.h"
#include "media/video/picture.h"
@@ -30,7 +39,6 @@
#include "ui/gl/gl_bindings.h"
#if defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
-#include "media/base/media_keys.h"
#include "media/mojo/services/mojo_cdm_service.h"
#endif
@@ -92,6 +100,23 @@ static inline const base::TimeDelta IdleTimerTimeOut() {
return base::TimeDelta::FromSeconds(1);
}
+// Time between when we notice an error, and when we actually notify somebody.
+// This is to prevent codec errors caused by SurfaceView fullscreen transitions
+// from breaking the pipeline, if we're about to be reset anyway.
+static inline const base::TimeDelta ErrorPostingDelay() {
+ return base::TimeDelta::FromSeconds(2);
+}
+
+// For RecordFormatChangedMetric.
+enum FormatChangedValue {
+ CodecInitialized = false,
+ MissingFormatChanged = true
+};
+
+static inline void RecordFormatChangedMetric(FormatChangedValue value) {
+ UMA_HISTOGRAM_BOOLEAN("Media.AVDA.MissingFormatChanged", !!value);
+}
+
// Handle OnFrameAvailable callbacks safely. Since they occur asynchronously,
// we take care that the AVDA that wants them still exists. A WeakPtr to
// the AVDA would be preferable, except that OnFrameAvailable callbacks can
@@ -143,34 +168,166 @@ class AndroidVideoDecodeAccelerator::OnFrameAvailableHandler
DISALLOW_COPY_AND_ASSIGN(OnFrameAvailableHandler);
};
+// Helper class to share an IO timer for DoIOTask() execution; prevents each
+// AVDA instance from starting its own high frequency timer. The intuition
+// behind this is that, if we're waiting for long enough, then either (a)
+// MediaCodec is broken or (b) MediaCodec is waiting on us to change state
+// (e.g., get new demuxed data / get a free picture buffer / return an output
+// buffer to MediaCodec). This is inherently a race, since we don't know if
+// MediaCodec is broken or just slow. Since the MediaCodec API doesn't let
+// us wait on MediaCodec state changes prior to L, we more or less have to
+// time out or keep polling forever in some common cases.
+class AVDATimerManager {
+ public:
+ // Make sure that the construction thread is started for |avda_instance|.
+ bool StartThread(AndroidVideoDecodeAccelerator* avda_instance) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (thread_avda_instances_.empty()) {
+ if (!construction_thread_.Start()) {
+ LOG(ERROR) << "Failed to start construction thread.";
+ return false;
+ }
+ }
+
+ thread_avda_instances_.insert(avda_instance);
+ return true;
+ }
+
+ // |avda_instance| will no longer need the construction thread. Stop the
+ // thread if this is the last instance.
+ void StopThread(AndroidVideoDecodeAccelerator* avda_instance) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ thread_avda_instances_.erase(avda_instance);
+ if (thread_avda_instances_.empty())
+ construction_thread_.Stop();
+ }
+
+ // Request periodic callback of |avda_instance|->DoIOTask(). Does nothing if
+ // the instance is already registered and the timer started. The first request
+ // will start the repeating timer on an interval of DecodePollDelay().
+ void StartTimer(AndroidVideoDecodeAccelerator* avda_instance) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ timer_avda_instances_.insert(avda_instance);
+
+    // If the timer is running, StopTimer() might have been called earlier; if
+    // so, remove the instance from the pending erasures.
+ if (timer_running_)
+ pending_erase_.erase(avda_instance);
+
+ if (io_timer_.IsRunning())
+ return;
+ io_timer_.Start(FROM_HERE, DecodePollDelay(), this,
+ &AVDATimerManager::RunTimer);
+ }
+
+ // Stop callbacks to |avda_instance|->DoIOTask(). Does nothing if the instance
+ // is not registered. If there are no instances left, the repeating timer will
+ // be stopped.
+ void StopTimer(AndroidVideoDecodeAccelerator* avda_instance) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // If the timer is running, defer erasures to avoid iterator invalidation.
+ if (timer_running_) {
+ pending_erase_.insert(avda_instance);
+ return;
+ }
+
+ timer_avda_instances_.erase(avda_instance);
+ if (timer_avda_instances_.empty())
+ io_timer_.Stop();
+ }
+
+ // Eventually, we should run the timer on this thread. For now, we just keep
+ // it as a convenience for construction.
+ scoped_refptr<base::SingleThreadTaskRunner> ConstructionTaskRunner() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return construction_thread_.task_runner();
+ }
+
+ private:
+ friend struct base::DefaultLazyInstanceTraits<AVDATimerManager>;
+
+ AVDATimerManager() : construction_thread_("AVDAThread") {}
+ ~AVDATimerManager() { NOTREACHED(); }
+
+ void RunTimer() {
+ {
+ // Call out to all AVDA instances, some of which may attempt to remove
+ // themselves from the list during this operation; those removals will be
+ // deferred until after all iterations are complete.
+ base::AutoReset<bool> scoper(&timer_running_, true);
+ for (auto* avda : timer_avda_instances_)
+ avda->DoIOTask(false);
+ }
+
+ // Take care of any deferred erasures.
+ for (auto* avda : pending_erase_)
+ StopTimer(avda);
+ pending_erase_.clear();
+
+ // TODO(dalecurtis): We may want to consider chunking this if task execution
+ // takes too long for the combined timer.
+ }
+
+ // All AVDA instances that would like us to poll DoIOTask.
+ std::set<AndroidVideoDecodeAccelerator*> timer_avda_instances_;
+
+ // All AVDA instances that might like to use the construction thread.
+ std::set<AndroidVideoDecodeAccelerator*> thread_avda_instances_;
+
+ // Since we can't delete while iterating when using a set, defer erasure until
+  // after iteration completes.
+ bool timer_running_ = false;
+ std::set<AndroidVideoDecodeAccelerator*> pending_erase_;
+
+ // Repeating timer responsible for draining pending IO to the codecs.
+ base::RepeatingTimer io_timer_;
+
+ base::Thread construction_thread_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(AVDATimerManager);
+};
+
+static base::LazyInstance<AVDATimerManager>::Leaky g_avda_timer =
+ LAZY_INSTANCE_INITIALIZER;
+
+AndroidVideoDecodeAccelerator::CodecConfig::CodecConfig() {}
+
+AndroidVideoDecodeAccelerator::CodecConfig::~CodecConfig() {}
+
AndroidVideoDecodeAccelerator::AndroidVideoDecodeAccelerator(
- const base::WeakPtr<gpu::gles2::GLES2Decoder> decoder,
- const base::Callback<bool(void)>& make_context_current)
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb)
: client_(NULL),
- make_context_current_(make_context_current),
- codec_(media::kCodecH264),
+ make_context_current_cb_(make_context_current_cb),
+ get_gles2_decoder_cb_(get_gles2_decoder_cb),
is_encrypted_(false),
- needs_protected_surface_(false),
state_(NO_ERROR),
picturebuffers_requested_(false),
- gl_decoder_(decoder),
+ media_drm_bridge_cdm_context_(nullptr),
cdm_registration_id_(0),
- weak_this_factory_(this) {
- if (UseDeferredRenderingStrategy())
- strategy_.reset(new AndroidDeferredRenderingBackingStrategy());
- else
- strategy_.reset(new AndroidCopyingBackingStrategy());
-}
+ pending_input_buf_index_(-1),
+ error_sequence_token_(0),
+ defer_errors_(false),
+ deferred_initialization_pending_(false),
+ weak_this_factory_(this) {}
AndroidVideoDecodeAccelerator::~AndroidVideoDecodeAccelerator() {
DCHECK(thread_checker_.CalledOnValidThread());
+ g_avda_timer.Pointer()->StopTimer(this);
+ g_avda_timer.Pointer()->StopThread(this);
#if defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
- if (cdm_) {
- DCHECK(cdm_registration_id_);
- static_cast<media::MediaDrmBridge*>(cdm_.get())
- ->UnregisterPlayer(cdm_registration_id_);
- }
+ if (!media_drm_bridge_cdm_context_)
+ return;
+
+ DCHECK(cdm_registration_id_);
+ media_drm_bridge_cdm_context_->UnregisterPlayer(cdm_registration_id_);
#endif // defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
}
@@ -182,76 +339,130 @@ bool AndroidVideoDecodeAccelerator::Initialize(const Config& config,
DVLOG(1) << __FUNCTION__ << ": " << config.AsHumanReadableString();
+ if (make_context_current_cb_.is_null() || get_gles2_decoder_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
DCHECK(client);
client_ = client;
- codec_ = VideoCodecProfileToVideoCodec(config.profile);
+ codec_config_ = new CodecConfig();
+ codec_config_->codec_ = VideoCodecProfileToVideoCodec(config.profile);
+ codec_config_->initial_expected_coded_size_ =
+ config.initial_expected_coded_size;
is_encrypted_ = config.is_encrypted;
- bool profile_supported = codec_ == media::kCodecVP8 ||
- codec_ == media::kCodecVP9 ||
- codec_ == media::kCodecH264;
+ bool profile_supported = codec_config_->codec_ == media::kCodecVP8 ||
+ codec_config_->codec_ == media::kCodecVP9 ||
+ codec_config_->codec_ == media::kCodecH264;
+
+ // We signalled that we support deferred initialization, so see if the client
+ // does also.
+ deferred_initialization_pending_ = config.is_deferred_initialization_allowed;
if (!profile_supported) {
LOG(ERROR) << "Unsupported profile: " << config.profile;
return false;
}
+ // For encrypted streams we postpone configuration until MediaCrypto is
+ // available.
+ DCHECK(!is_encrypted_ || deferred_initialization_pending_);
+
// Only use MediaCodec for VP8/9 if it's likely backed by hardware
// or if the stream is encrypted.
- if ((codec_ == media::kCodecVP8 || codec_ == media::kCodecVP9) &&
- !is_encrypted_) {
- if (media::VideoCodecBridge::IsKnownUnaccelerated(
- codec_, media::MEDIA_CODEC_DECODER)) {
- DVLOG(1) << "Initialization failed: "
- << (codec_ == media::kCodecVP8 ? "vp8" : "vp9")
- << " is not hardware accelerated";
- return false;
- }
+ if ((codec_config_->codec_ == media::kCodecVP8 ||
+ codec_config_->codec_ == media::kCodecVP9) &&
+ !is_encrypted_ &&
+ media::VideoCodecBridge::IsKnownUnaccelerated(
+ codec_config_->codec_, media::MEDIA_CODEC_DECODER)) {
+ DVLOG(1) << "Initialization failed: "
+ << (codec_config_->codec_ == media::kCodecVP8 ? "vp8" : "vp9")
+ << " is not hardware accelerated";
+ return false;
}
- if (!make_context_current_.Run()) {
+ auto gles_decoder = get_gles2_decoder_cb_.Run();
+ if (!gles_decoder) {
+ LOG(ERROR) << "Failed to get gles2 decoder instance.";
+ return false;
+ }
+
+ const gpu::GpuPreferences& gpu_preferences =
+ gles_decoder->GetContextGroup()->gpu_preferences();
+
+ if (UseDeferredRenderingStrategy(gpu_preferences)) {
+ // TODO(liberato, watk): Figure out what we want to do about zero copy for
+ // fullscreen external SurfaceView in WebView. http://crbug.com/582170.
+ DCHECK(!gles_decoder->GetContextGroup()->mailbox_manager()->UsesSync());
+ DVLOG(1) << __FUNCTION__ << ", using deferred rendering strategy.";
+ strategy_.reset(new AndroidDeferredRenderingBackingStrategy(this));
+ } else {
+ DVLOG(1) << __FUNCTION__ << ", using copy back strategy.";
+ strategy_.reset(new AndroidCopyingBackingStrategy(this));
+ }
+
+ if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "Failed to make this decoder's GL context current.";
return false;
}
- if (!gl_decoder_) {
- LOG(ERROR) << "Failed to get gles2 decoder instance.";
+ codec_config_->surface_ = strategy_->Initialize(config.surface_id);
+ if (codec_config_->surface_.IsEmpty()) {
+ LOG(ERROR) << "Failed to initialize the backing strategy. The returned "
+ "Java surface is empty.";
return false;
}
- strategy_->Initialize(this);
+ // TODO(watk,liberato): move this into the strategy.
+ scoped_refptr<gfx::SurfaceTexture> surface_texture =
+ strategy_->GetSurfaceTexture();
+ if (surface_texture) {
+ on_frame_available_handler_ =
+ new OnFrameAvailableHandler(this, surface_texture);
+ }
- surface_texture_ = strategy_->CreateSurfaceTexture();
- on_frame_available_handler_ =
- new OnFrameAvailableHandler(this, surface_texture_);
+ // Start the thread for async configuration, even if we don't need it now.
+ // ResetCodecState might rebuild the codec later, for example.
+ if (!g_avda_timer.Pointer()->StartThread(this)) {
+ LOG(ERROR) << "Failed to start thread for AVDA timer";
+ return false;
+ }
- // For encrypted streams we postpone configuration until MediaCrypto is
- // available.
+ // If we are encrypted, then we aren't able to create the codec yet.
if (is_encrypted_)
return true;
- return ConfigureMediaCodec();
+ if (deferred_initialization_pending_) {
+ ConfigureMediaCodecAsynchronously();
+ return true;
+ }
+
+ // If the client doesn't support deferred initialization (WebRTC), then we
+ // should complete it now and return a meaningful result.
+ return ConfigureMediaCodecSynchronously();
}
void AndroidVideoDecodeAccelerator::SetCdm(int cdm_id) {
DVLOG(2) << __FUNCTION__ << ": " << cdm_id;
#if defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
- using media::MediaDrmBridge;
-
DCHECK(client_) << "SetCdm() must be called after Initialize().";
- if (cdm_) {
+ if (media_drm_bridge_cdm_context_) {
NOTREACHED() << "We do not support resetting CDM.";
- NotifyCdmAttached(false);
+ NotifyInitializationComplete(false);
return;
}
- cdm_ = media::MojoCdmService::GetCdm(cdm_id);
- DCHECK(cdm_);
+ // Store the CDM to hold a reference to it.
+ cdm_for_reference_holding_only_ = media::MojoCdmService::LegacyGetCdm(cdm_id);
+ DCHECK(cdm_for_reference_holding_only_);
- // On Android platform the MediaKeys will be its subclass MediaDrmBridge.
- MediaDrmBridge* drm_bridge = static_cast<MediaDrmBridge*>(cdm_.get());
+ // On Android platform the CdmContext must be a MediaDrmBridgeCdmContext.
+ media_drm_bridge_cdm_context_ = static_cast<media::MediaDrmBridgeCdmContext*>(
+ cdm_for_reference_holding_only_->GetCdmContext());
+ DCHECK(media_drm_bridge_cdm_context_);
// Register CDM callbacks. The callbacks registered will be posted back to
// this thread via BindToCurrentLoop.
@@ -261,31 +472,30 @@ void AndroidVideoDecodeAccelerator::SetCdm(int cdm_id) {
// destructed as well. So the |cdm_unset_cb| will never have a chance to be
// called.
// TODO(xhwang): Remove |cdm_unset_cb| after it's not used on all platforms.
- cdm_registration_id_ =
- drm_bridge->RegisterPlayer(media::BindToCurrentLoop(base::Bind(
- &AndroidVideoDecodeAccelerator::OnKeyAdded,
- weak_this_factory_.GetWeakPtr())),
- base::Bind(&base::DoNothing));
+ cdm_registration_id_ = media_drm_bridge_cdm_context_->RegisterPlayer(
+ media::BindToCurrentLoop(
+ base::Bind(&AndroidVideoDecodeAccelerator::OnKeyAdded,
+ weak_this_factory_.GetWeakPtr())),
+ base::Bind(&base::DoNothing));
- drm_bridge->SetMediaCryptoReadyCB(media::BindToCurrentLoop(
+ media_drm_bridge_cdm_context_->SetMediaCryptoReadyCB(media::BindToCurrentLoop(
base::Bind(&AndroidVideoDecodeAccelerator::OnMediaCryptoReady,
weak_this_factory_.GetWeakPtr())));
- // Postpone NotifyCdmAttached() call till we create the MediaCodec after
- // OnMediaCryptoReady().
-
+  // Postpone NotifyInitializationComplete() call till we create the MediaCodec
+  // after OnMediaCryptoReady().
#else
NOTIMPLEMENTED();
- NotifyCdmAttached(false);
+ NotifyInitializationComplete(false);
#endif // !defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
}
-void AndroidVideoDecodeAccelerator::DoIOTask() {
+void AndroidVideoDecodeAccelerator::DoIOTask(bool start_timer) {
DCHECK(thread_checker_.CalledOnValidThread());
TRACE_EVENT0("media", "AVDA::DoIOTask");
- if (state_ == ERROR) {
+ if (state_ == ERROR || state_ == WAITING_FOR_CODEC) {
return;
}
@@ -293,49 +503,69 @@ void AndroidVideoDecodeAccelerator::DoIOTask() {
while (DequeueOutput())
did_work = true;
- ManageTimer(did_work);
+ ManageTimer(did_work || start_timer);
}
bool AndroidVideoDecodeAccelerator::QueueInput() {
DCHECK(thread_checker_.CalledOnValidThread());
TRACE_EVENT0("media", "AVDA::QueueInput");
+ base::AutoReset<bool> auto_reset(&defer_errors_, true);
if (bitstreams_notified_in_advance_.size() > kMaxBitstreamsNotifiedInAdvance)
return false;
if (pending_bitstream_buffers_.empty())
return false;
+ if (state_ == WAITING_FOR_KEY)
+ return false;
- int input_buf_index = 0;
- media::MediaCodecStatus status =
- media_codec_->DequeueInputBuffer(NoWaitTimeOut(), &input_buf_index);
+ int input_buf_index = pending_input_buf_index_;
- if (status == media::MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER)
- return false;
- if (status == media::MEDIA_CODEC_ERROR) {
- POST_ERROR(PLATFORM_FAILURE, "Failed to DequeueInputBuffer");
- return false;
+ // Do not dequeue a new input buffer if we failed with MEDIA_CODEC_NO_KEY.
+  // That status does not return the buffer to the pool of available input
+  // buffers, so we have to reuse it in QueueSecureInputBuffer().
+ if (input_buf_index == -1) {
+ media::MediaCodecStatus status =
+ media_codec_->DequeueInputBuffer(NoWaitTimeOut(), &input_buf_index);
+ switch (status) {
+ case media::MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER:
+ return false;
+ case media::MEDIA_CODEC_ERROR:
+ POST_ERROR(PLATFORM_FAILURE, "Failed to DequeueInputBuffer");
+ return false;
+ case media::MEDIA_CODEC_OK:
+ break;
+ default:
+ NOTREACHED() << "Unknown DequeueInputBuffer status " << status;
+ return false;
+ }
}
- DCHECK_EQ(status, media::MEDIA_CODEC_OK);
- base::Time queued_time = pending_bitstream_buffers_.front().second;
- UMA_HISTOGRAM_TIMES("Media.AVDA.InputQueueTime",
- base::Time::Now() - queued_time);
- media::BitstreamBuffer bitstream_buffer =
- pending_bitstream_buffers_.front().first;
+ DCHECK_NE(input_buf_index, -1);
+
+ media::BitstreamBuffer bitstream_buffer = pending_bitstream_buffers_.front();
if (bitstream_buffer.id() == -1) {
pending_bitstream_buffers_.pop();
TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
pending_bitstream_buffers_.size());
+ DCHECK_NE(state_, ERROR);
+ state_ = WAITING_FOR_EOS;
media_codec_->QueueEOS(input_buf_index);
return true;
}
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(bitstream_buffer.handle(), true));
- if (!shm->Map(bitstream_buffer.size())) {
- POST_ERROR(UNREADABLE_INPUT, "Failed to SharedMemory::Map()");
- return false;
+ scoped_ptr<SharedMemoryRegion> shm;
+
+ if (pending_input_buf_index_ == -1) {
+ // When |pending_input_buf_index_| is not -1, the buffer is already dequeued
+ // from MediaCodec, filled with data and bitstream_buffer.handle() is
+ // closed.
+ shm.reset(new SharedMemoryRegion(bitstream_buffer, true));
+
+ if (!shm->Map()) {
+ POST_ERROR(UNREADABLE_INPUT, "Failed to SharedMemoryRegion::Map()");
+ return false;
+ }
}
const base::TimeDelta presentation_timestamp =
@@ -351,12 +581,16 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
// result in them finding the right timestamp.
bitstream_buffers_in_decoder_[presentation_timestamp] = bitstream_buffer.id();
- const uint8_t* memory = static_cast<const uint8_t*>(shm->memory());
+  // Notice that |memory| will be null if we repeatedly enqueue the same
+  // buffer; this happens after MEDIA_CODEC_NO_KEY.
+ const uint8_t* memory =
+ shm ? static_cast<const uint8_t*>(shm->memory()) : nullptr;
const std::string& key_id = bitstream_buffer.key_id();
const std::string& iv = bitstream_buffer.iv();
const std::vector<media::SubsampleEntry>& subsamples =
bitstream_buffer.subsamples();
+ media::MediaCodecStatus status;
if (key_id.empty() || iv.empty()) {
status = media_codec_->QueueInputBuffer(input_buf_index, memory,
bitstream_buffer.size(),
@@ -372,24 +606,18 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
<< " status:" << status;
if (status == media::MEDIA_CODEC_NO_KEY) {
- // Keep trying to enqueue the front pending buffer.
- //
- // TODO(timav): Figure out whether stopping the pipeline in response to
- // this error and restarting it in OnKeyAdded() has significant benefits
- // (e.g. saving power).
+ // Keep trying to enqueue the same input buffer.
+ // The buffer is owned by us (not the MediaCodec) and is filled with data.
DVLOG(1) << "QueueSecureInputBuffer failed: NO_KEY";
- return true;
+ pending_input_buf_index_ = input_buf_index;
+ state_ = WAITING_FOR_KEY;
+ return false;
}
+ pending_input_buf_index_ = -1;
pending_bitstream_buffers_.pop();
TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
pending_bitstream_buffers_.size());
-
- if (status != media::MEDIA_CODEC_OK) {
- POST_ERROR(PLATFORM_FAILURE, "Failed to QueueInputBuffer: " << status);
- return false;
- }
-
// We should call NotifyEndOfBitstreamBuffer(), when no more decoded output
// will be returned from the bitstream buffer. However, MediaCodec API is
// not enough to guarantee it.
@@ -403,12 +631,18 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
weak_this_factory_.GetWeakPtr(), bitstream_buffer.id()));
bitstreams_notified_in_advance_.push_back(bitstream_buffer.id());
+ if (status != media::MEDIA_CODEC_OK) {
+ POST_ERROR(PLATFORM_FAILURE, "Failed to QueueInputBuffer: " << status);
+ return false;
+ }
+
return true;
}
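The base::AutoReset at the top of QueueInput() (and of DequeueOutput() below) is what scopes the |defer_errors_| flag: while either method is on the stack, POST_ERROR goes through the delayed path in PostError() instead of notifying immediately. A minimal sketch of base::AutoReset itself, with hypothetical names:

#include "base/auto_reset.h"

class Worker {
 public:
  void DoPass() {
    // |defer_errors_| is true only while DoPass() is on the stack; the
    // AutoReset destructor restores the previous value on every exit path,
    // including early returns.
    base::AutoReset<bool> auto_reset(&defer_errors_, true);
    // ... work that may report errors ...
  }

 private:
  bool defer_errors_ = false;
};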
bool AndroidVideoDecodeAccelerator::DequeueOutput() {
DCHECK(thread_checker_.CalledOnValidThread());
TRACE_EVENT0("media", "AVDA::DequeueOutput");
+ base::AutoReset<bool> auto_reset(&defer_errors_, true);
if (picturebuffers_requested_ && output_picture_buffers_.empty())
return false;
@@ -432,10 +666,6 @@ bool AndroidVideoDecodeAccelerator::DequeueOutput() {
"presentation_timestamp (ms)",
presentation_timestamp.InMilliseconds());
- DVLOG(3) << "AVDA::DequeueOutput: pts:" << presentation_timestamp
- << " buf_index:" << buf_index << " offset:" << offset
- << " size:" << size << " eos:" << eos;
-
switch (status) {
case media::MEDIA_CODEC_ERROR:
POST_ERROR(PLATFORM_FAILURE, "DequeueOutputBuffer failed.");
@@ -445,23 +675,30 @@ bool AndroidVideoDecodeAccelerator::DequeueOutput() {
return false;
case media::MEDIA_CODEC_OUTPUT_FORMAT_CHANGED: {
- if (!output_picture_buffers_.empty()) {
- // TODO(chcunningham): This will likely dismiss a handful of decoded
- // frames that have not yet been drawn and returned to us for re-use.
- // Consider a more complicated design that would wait for them to be
- // drawn before dismissing.
- DismissPictureBuffers();
+ if (media_codec_->GetOutputSize(&size_) != media::MEDIA_CODEC_OK) {
+ POST_ERROR(PLATFORM_FAILURE, "GetOutputSize failed.");
+ return false;
+ }
+ DVLOG(3) << __FUNCTION__
+ << " OUTPUT_FORMAT_CHANGED, new size: " << size_.ToString();
+
+ // Don't request picture buffers if we already have some. This avoids
+ // having to dismiss the existing buffers which may actively reference
+ // decoded images. Breaking their connection to the decoded image will
+ // cause rendering of black frames. Instead, we let the existing
+ // PictureBuffers live on and we simply update their size the next time
+      // they're attached to an image of the new resolution. See the
+ // size update in |SendDecodedFrameToClient| and https://crbug/587994.
+ if (output_picture_buffers_.empty() && !picturebuffers_requested_) {
+ picturebuffers_requested_ = true;
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&AndroidVideoDecodeAccelerator::RequestPictureBuffers,
+ weak_this_factory_.GetWeakPtr()));
+ return false;
}
- picturebuffers_requested_ = true;
- int32_t width, height;
- media_codec_->GetOutputFormat(&width, &height);
- size_ = gfx::Size(width, height);
- base::MessageLoop::current()->PostTask(
- FROM_HERE,
- base::Bind(&AndroidVideoDecodeAccelerator::RequestPictureBuffers,
- weak_this_factory_.GetWeakPtr()));
- return false;
+ return true;
}
case media::MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
@@ -469,6 +706,9 @@ bool AndroidVideoDecodeAccelerator::DequeueOutput() {
case media::MEDIA_CODEC_OK:
DCHECK_GE(buf_index, 0);
+ DVLOG(3) << __FUNCTION__ << ": pts:" << presentation_timestamp
+ << " buf_index:" << buf_index << " offset:" << offset
+ << " size:" << size << " eos:" << eos;
break;
default:
@@ -478,12 +718,36 @@ bool AndroidVideoDecodeAccelerator::DequeueOutput() {
} while (buf_index < 0);
if (eos) {
- DVLOG(3) << "AVDA::DequeueOutput: Resetting codec state after EOS";
+ DVLOG(3) << __FUNCTION__ << ": Resetting codec state after EOS";
+
+ // If we were waiting for an EOS, clear the state and reset the MediaCodec
+ // as normal. Otherwise, enter the ERROR state which will force destruction
+ // of MediaCodec during ResetCodecState().
+ //
+ // Some Android platforms seem to send an EOS buffer even when we're not
+    // expecting it. In this case, destroy and reset the codec, but don't
+    // notify flush done, since doing so would violate the state machine.
+    // http://crbug.com/585959.
+ const bool was_waiting_for_eos = state_ == WAITING_FOR_EOS;
+ state_ = was_waiting_for_eos ? NO_ERROR : ERROR;
+
ResetCodecState();
+ // |media_codec_| might still be null.
+ if (was_waiting_for_eos) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyFlushDone,
+ weak_this_factory_.GetWeakPtr()));
+ }
+ return false;
+ }
- base::MessageLoop::current()->PostTask(
- FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyFlushDone,
- weak_this_factory_.GetWeakPtr()));
+ if (!picturebuffers_requested_) {
+ // If, somehow, we get a decoded frame back before a FORMAT_CHANGED
+ // message, then we might not have any picture buffers to use. This
+ // isn't supposed to happen (see EncodeDecodeTest.java#617).
+ // Log a metric to see how common this is.
+ RecordFormatChangedMetric(FormatChangedValue::MissingFormatChanged);
+ media_codec_->ReleaseOutputBuffer(buf_index, false);
+ POST_ERROR(PLATFORM_FAILURE, "Dequeued buffers before FORMAT_CHANGED.");
return false;
}
@@ -515,7 +779,7 @@ bool AndroidVideoDecodeAccelerator::DequeueOutput() {
// correction and provides a non-decreasing timestamp sequence, which might
// result in timestamp duplicates. Discard the frame if we cannot get the
// corresponding buffer id.
- DVLOG(3) << "AVDA::DequeueOutput: Releasing buffer with unexpected PTS: "
+ DVLOG(3) << __FUNCTION__ << ": Releasing buffer with unexpected PTS: "
<< presentation_timestamp;
media_codec_->ReleaseOutputBuffer(buf_index, false);
}
@@ -532,7 +796,7 @@ void AndroidVideoDecodeAccelerator::SendDecodedFrameToClient(
DCHECK(!free_picture_ids_.empty());
TRACE_EVENT0("media", "AVDA::SendDecodedFrameToClient");
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
POST_ERROR(PLATFORM_FAILURE, "Failed to make the GL context current.");
return;
}
@@ -541,46 +805,71 @@ void AndroidVideoDecodeAccelerator::SendDecodedFrameToClient(
free_picture_ids_.pop();
TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
- OutputBufferMap::const_iterator i =
- output_picture_buffers_.find(picture_buffer_id);
+ const auto& i = output_picture_buffers_.find(picture_buffer_id);
if (i == output_picture_buffers_.end()) {
POST_ERROR(PLATFORM_FAILURE,
"Can't find PictureBuffer id: " << picture_buffer_id);
return;
}
+ bool size_changed = false;
+ if (i->second.size() != size_) {
+ // Size may have changed due to resolution change since the last time this
+ // PictureBuffer was used.
+ strategy_->UpdatePictureBufferSize(&i->second, size_);
+ size_changed = true;
+ }
+
// Connect the PictureBuffer to the decoded frame, via whatever
// mechanism the strategy likes.
strategy_->UseCodecBufferForPictureBuffer(codec_buffer_index, i->second);
+ const bool allow_overlay = strategy_->ArePicturesOverlayable();
+
+ media::Picture picture(picture_buffer_id, bitstream_id, gfx::Rect(size_),
+ allow_overlay);
+ picture.set_size_changed(size_changed);
+
base::MessageLoop::current()->PostTask(
FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyPictureReady,
- weak_this_factory_.GetWeakPtr(),
- media::Picture(picture_buffer_id, bitstream_id,
- gfx::Rect(size_), false)));
+ weak_this_factory_.GetWeakPtr(), picture));
}
void AndroidVideoDecodeAccelerator::Decode(
const media::BitstreamBuffer& bitstream_buffer) {
DCHECK(thread_checker_.CalledOnValidThread());
- if (bitstream_buffer.id() != -1 && bitstream_buffer.size() == 0) {
+
+ if (bitstream_buffer.id() >= 0 && bitstream_buffer.size() > 0) {
+ DecodeBuffer(bitstream_buffer);
+ return;
+ }
+
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+
+ if (bitstream_buffer.id() < 0) {
+    POST_ERROR(INVALID_ARGUMENT,
+               "Invalid bitstream_buffer, id: " << bitstream_buffer.id());
+ } else {
base::MessageLoop::current()->PostTask(
FROM_HERE,
base::Bind(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer,
weak_this_factory_.GetWeakPtr(), bitstream_buffer.id()));
- return;
}
+}
- pending_bitstream_buffers_.push(
- std::make_pair(bitstream_buffer, base::Time::Now()));
+void AndroidVideoDecodeAccelerator::DecodeBuffer(
+ const media::BitstreamBuffer& bitstream_buffer) {
+ pending_bitstream_buffers_.push(bitstream_buffer);
TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
pending_bitstream_buffers_.size());
- DoIOTask();
+ DoIOTask(true);
}
void AndroidVideoDecodeAccelerator::RequestPictureBuffers() {
- client_->ProvidePictureBuffers(kNumPictureBuffers, size_,
+ client_->ProvidePictureBuffers(kNumPictureBuffers, 1,
+ strategy_->GetPictureBufferSize(),
strategy_->GetTextureTarget());
}
@@ -595,8 +884,12 @@ void AndroidVideoDecodeAccelerator::AssignPictureBuffers(
return;
}
+ const bool have_context = make_context_current_cb_.Run();
+ LOG_IF(WARNING, !have_context)
+ << "Failed to make GL context current for Assign, continuing.";
+
for (size_t i = 0; i < buffers.size(); ++i) {
- if (buffers[i].size() != size_) {
+ if (buffers[i].size() != strategy_->GetPictureBufferSize()) {
POST_ERROR(INVALID_ARGUMENT,
"Invalid picture buffer size assigned. Wanted "
<< size_.ToString() << ", but got "
@@ -606,29 +899,17 @@ void AndroidVideoDecodeAccelerator::AssignPictureBuffers(
int32_t id = buffers[i].id();
output_picture_buffers_.insert(std::make_pair(id, buffers[i]));
free_picture_ids_.push(id);
- // Since the client might be re-using |picture_buffer_id| values, forget
- // about previously-dismissed IDs now. See ReusePictureBuffer() comment
- // about "zombies" for why we maintain this set in the first place.
- dismissed_picture_ids_.erase(id);
- strategy_->AssignOnePictureBuffer(buffers[i]);
+ strategy_->AssignOnePictureBuffer(buffers[i], have_context);
}
TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
-
- DoIOTask();
+ DoIOTask(true);
}
void AndroidVideoDecodeAccelerator::ReusePictureBuffer(
int32_t picture_buffer_id) {
DCHECK(thread_checker_.CalledOnValidThread());
- // This ReusePictureBuffer() might have been in a pipe somewhere (queued in
- // IPC, or in a PostTask either at the sender or receiver) when we sent a
- // DismissPictureBuffer() for this |picture_buffer_id|. Account for such
- // potential "zombie" IDs here.
- if (dismissed_picture_ids_.erase(picture_buffer_id))
- return;
-
free_picture_ids_.push(picture_buffer_id);
TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
@@ -641,58 +922,139 @@ void AndroidVideoDecodeAccelerator::ReusePictureBuffer(
}
strategy_->ReuseOnePictureBuffer(i->second);
-
- DoIOTask();
+ DoIOTask(true);
}
void AndroidVideoDecodeAccelerator::Flush() {
DCHECK(thread_checker_.CalledOnValidThread());
- Decode(media::BitstreamBuffer(-1, base::SharedMemoryHandle(), 0));
+ DecodeBuffer(media::BitstreamBuffer(-1, base::SharedMemoryHandle(), 0));
}
-bool AndroidVideoDecodeAccelerator::ConfigureMediaCodec() {
+void AndroidVideoDecodeAccelerator::ConfigureMediaCodecAsynchronously() {
DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(surface_texture_.get());
- TRACE_EVENT0("media", "AVDA::ConfigureMediaCodec");
- gfx::ScopedJavaSurface surface(surface_texture_.get());
+ // It's probably okay just to return here, since the codec will be configured
+ // asynchronously. It's unclear that any state for the new request could
+ // be different, unless somebody modifies |codec_config_| while we're already
+  // waiting for a codec. One shouldn't do that, for thread-safety reasons.
+ DCHECK_NE(state_, WAITING_FOR_CODEC);
+
+ state_ = WAITING_FOR_CODEC;
+
+ // Tell the strategy that we're changing codecs. The codec itself could be
+ // used normally, since we don't replace it until we're back on the main
+ // thread. However, if we're using an output surface, then the incoming codec
+ // might access that surface while the main thread is drawing. Telling the
+ // strategy to forget the codec avoids this.
+ if (media_codec_) {
+ media_codec_.reset();
+ strategy_->CodecChanged(nullptr, output_picture_buffers_);
+ }
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner =
+ g_avda_timer.Pointer()->ConstructionTaskRunner();
+ CHECK(task_runner);
+
+ base::PostTaskAndReplyWithResult(
+ task_runner.get(), FROM_HERE,
+ base::Bind(&AndroidVideoDecodeAccelerator::ConfigureMediaCodecOnAnyThread,
+ codec_config_),
+ base::Bind(&AndroidVideoDecodeAccelerator::OnCodecConfigured,
+ weak_this_factory_.GetWeakPtr()));
+}
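The asynchronous path above is a plain base::PostTaskAndReplyWithResult(): the task callback runs on the construction thread and its return value is passed to the reply callback back on the posting thread. A minimal sketch of that shape, with hypothetical names; binding the reply to a WeakPtr means it is silently dropped if the owner is destroyed before the task finishes:

#include "base/bind.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "base/task_runner_util.h"

// Runs on |worker_runner|; does the slow or blocking work.
int CountSlowly() {
  return 42;
}

class Owner {
 public:
  Owner() : weak_factory_(this) {}

  void Start(scoped_refptr<base::SingleThreadTaskRunner> worker_runner) {
    base::PostTaskAndReplyWithResult(
        worker_runner.get(), FROM_HERE, base::Bind(&CountSlowly),
        base::Bind(&Owner::OnCounted, weak_factory_.GetWeakPtr()));
  }

 private:
  // Runs back on the thread that called Start(), unless |this| is gone.
  void OnCounted(int count) {}

  base::WeakPtrFactory<Owner> weak_factory_;
};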
+
+bool AndroidVideoDecodeAccelerator::ConfigureMediaCodecSynchronously() {
+ state_ = WAITING_FOR_CODEC;
+ scoped_ptr<media::VideoCodecBridge> media_codec =
+ ConfigureMediaCodecOnAnyThread(codec_config_);
+ OnCodecConfigured(std::move(media_codec));
+ return !!media_codec_;
+}
+
+scoped_ptr<media::VideoCodecBridge>
+AndroidVideoDecodeAccelerator::ConfigureMediaCodecOnAnyThread(
+ scoped_refptr<CodecConfig> codec_config) {
+ TRACE_EVENT0("media", "AVDA::ConfigureMediaCodec");
- jobject media_crypto = media_crypto_ ? media_crypto_->obj() : nullptr;
+ jobject media_crypto = codec_config->media_crypto_
+ ? codec_config->media_crypto_->obj()
+ : nullptr;
// |needs_protected_surface_| implies encrypted stream.
- DCHECK(!needs_protected_surface_ || media_crypto);
+ DCHECK(!codec_config->needs_protected_surface_ || media_crypto);
+
+ return scoped_ptr<media::VideoCodecBridge>(
+ media::VideoCodecBridge::CreateDecoder(
+ codec_config->codec_, codec_config->needs_protected_surface_,
+ codec_config->initial_expected_coded_size_,
+ codec_config->surface_.j_surface().obj(), media_crypto, true));
+}
+
+void AndroidVideoDecodeAccelerator::OnCodecConfigured(
+ scoped_ptr<media::VideoCodecBridge> media_codec) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(state_, WAITING_FOR_CODEC);
+
+ media_codec_ = std::move(media_codec);
+
+ // Record one instance of the codec being initialized.
+ RecordFormatChangedMetric(FormatChangedValue::CodecInitialized);
- // Pass a dummy 320x240 canvas size and let the codec signal the real size
- // when it's known from the bitstream.
- media_codec_.reset(media::VideoCodecBridge::CreateDecoder(
- codec_, needs_protected_surface_, gfx::Size(320, 240),
- surface.j_surface().obj(), media_crypto));
strategy_->CodecChanged(media_codec_.get(), output_picture_buffers_);
+
+ // If we are supposed to notify that initialization is complete, then do so
+ // now. Otherwise, this is a reconfiguration.
+ if (deferred_initialization_pending_) {
+ NotifyInitializationComplete(!!media_codec_);
+ deferred_initialization_pending_ = false;
+ }
+
if (!media_codec_) {
- LOG(ERROR) << "Failed to create MediaCodec instance.";
- return false;
+ POST_ERROR(PLATFORM_FAILURE, "Failed to create MediaCodec.");
+ return;
}
+ state_ = NO_ERROR;
+
ManageTimer(true);
- return true;
}
void AndroidVideoDecodeAccelerator::ResetCodecState() {
DCHECK(thread_checker_.CalledOnValidThread());
+
+  // If a codec reconfiguration is already in flight, it counts as the reset.
+  // This can really only happen if somebody calls Reset().
+ if (state_ == WAITING_FOR_CODEC)
+ return;
+
bitstream_buffers_in_decoder_.clear();
- // We don't dismiss picture buffers here since we might not get a format
- // changed message to re-request them, such as during a seek. In that case,
- // we want to reuse the existing buffers. However, we're about to invalidate
- // all the output buffers, so we must be sure that the strategy no longer
- // refers to them.
+ if (pending_input_buf_index_ != -1) {
+    // The data for that index exists in the input buffer, but the
+    // corresponding shm block has been deleted. Check that it is safe to
+    // flush the codec, i.e. |pending_bitstream_buffers_| is empty.
+ // TODO(timav): keep shm block for that buffer and remove this restriction.
+ DCHECK(pending_bitstream_buffers_.empty());
+ pending_input_buf_index_ = -1;
+ }
+
+ if (state_ == WAITING_FOR_KEY)
+ state_ = NO_ERROR;
+
+ // We might increment error_sequence_token here to cancel any delayed errors,
+ // but right now it's unclear that it's safe to do so. If we are in an error
+ // state because of a codec error, then it would be okay. Otherwise, it's
+ // less obvious that we are exiting the error state. Since deferred errors
+ // are only intended for fullscreen transitions right now, we take the more
+ // conservative approach and let the errors post.
+ // TODO(liberato): revisit this once we sort out the error state a bit more.
// When codec is not in error state we can quickly reset (internally calls
// flush()) for JB-MR2 and beyond. Prior to JB-MR2, flush() had several bugs
- // (b/8125974, b/8347958) so we must stop() and reconfigure MediaCodec. The
- // full reconfigure is much slower and may cause visible freezing if done
- // mid-stream.
+ // (b/8125974, b/8347958) so we must delete the MediaCodec and create a new
+ // one. The full reconfigure is much slower and may cause visible freezing if
+ // done mid-stream.
if (state_ == NO_ERROR &&
base::android::BuildInfo::GetInstance()->sdk_int() >= 18) {
DVLOG(3) << __FUNCTION__ << " Doing fast MediaCodec reset (flush).";
@@ -702,37 +1064,21 @@ void AndroidVideoDecodeAccelerator::ResetCodecState() {
strategy_->CodecChanged(media_codec_.get(), output_picture_buffers_);
} else {
DVLOG(3) << __FUNCTION__
- << " Doing slow MediaCodec reset (stop/re-configure).";
- io_timer_.Stop();
- media_codec_->Stop();
+ << " Deleting the MediaCodec and creating a new one.";
+ g_avda_timer.Pointer()->StopTimer(this);
// Changing the codec will also notify the strategy to forget about any
// output buffers it has currently.
- ConfigureMediaCodec();
state_ = NO_ERROR;
+ ConfigureMediaCodecAsynchronously();
}
}
-void AndroidVideoDecodeAccelerator::DismissPictureBuffers() {
- DCHECK(thread_checker_.CalledOnValidThread());
- DVLOG(3) << __FUNCTION__;
-
- for (const auto& pb : output_picture_buffers_) {
- strategy_->DismissOnePictureBuffer(pb.second);
- client_->DismissPictureBuffer(pb.first);
- dismissed_picture_ids_.insert(pb.first);
- }
- output_picture_buffers_.clear();
- std::queue<int32_t> empty;
- std::swap(free_picture_ids_, empty);
- picturebuffers_requested_ = false;
-}
-
void AndroidVideoDecodeAccelerator::Reset() {
DCHECK(thread_checker_.CalledOnValidThread());
TRACE_EVENT0("media", "AVDA::Reset");
while (!pending_bitstream_buffers_.empty()) {
- int32_t bitstream_buffer_id = pending_bitstream_buffers_.front().first.id();
+ int32_t bitstream_buffer_id = pending_bitstream_buffers_.front().id();
pending_bitstream_buffers_.pop();
if (bitstream_buffer_id != -1) {
@@ -745,8 +1091,13 @@ void AndroidVideoDecodeAccelerator::Reset() {
TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount", 0);
bitstreams_notified_in_advance_.clear();
+ // Any error that is waiting to post can be ignored.
+ error_sequence_token_++;
+
ResetCodecState();
+ // Note that |media_codec_| might not yet be ready, but we can still post
+ // this anyway.
base::MessageLoop::current()->PostTask(
FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyResetDone,
weak_this_factory_.GetWeakPtr()));
@@ -755,11 +1106,12 @@ void AndroidVideoDecodeAccelerator::Reset() {
void AndroidVideoDecodeAccelerator::Destroy() {
DCHECK(thread_checker_.CalledOnValidThread());
- bool have_context = make_context_current_.Run();
+ bool have_context = make_context_current_cb_.Run();
if (!have_context)
LOG(WARNING) << "Failed make GL context current for Destroy, continuing.";
- strategy_->Cleanup(have_context, output_picture_buffers_);
+ if (strategy_)
+ strategy_->Cleanup(have_context, output_picture_buffers_);
// If we have an OnFrameAvailable handler, tell it that we're going away.
if (on_frame_available_handler_) {
@@ -767,15 +1119,20 @@ void AndroidVideoDecodeAccelerator::Destroy() {
on_frame_available_handler_ = nullptr;
}
+  // Note that async codec construction might still be in progress. In that
+  // case, once we invalidate all our weak refs below, the codec will simply
+  // be deleted when construction completes.
weak_this_factory_.InvalidateWeakPtrs();
if (media_codec_) {
- io_timer_.Stop();
- media_codec_->Stop();
+ g_avda_timer.Pointer()->StopTimer(this);
+ media_codec_.reset();
}
delete this;
}
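Why InvalidateWeakPtrs() above makes an in-flight OnCodecConfigured() reply harmless: a base::Callback bound to a WeakPtr receiver becomes a no-op once the factory is invalidated, and whatever it still holds (here, the freshly constructed codec) is destroyed with it. A minimal sketch with hypothetical names:

#include "base/bind.h"
#include "base/callback.h"
#include "base/memory/weak_ptr.h"

class Listener {
 public:
  Listener() : weak_factory_(this) {}

  void OnDone(int result) {}

  base::WeakPtr<Listener> AsWeakPtr() { return weak_factory_.GetWeakPtr(); }
  void Invalidate() { weak_factory_.InvalidateWeakPtrs(); }

 private:
  base::WeakPtrFactory<Listener> weak_factory_;
};

void Example() {
  Listener listener;
  base::Closure reply = base::Bind(&Listener::OnDone, listener.AsWeakPtr(), 7);
  listener.Invalidate();
  reply.Run();  // No-op: the bound WeakPtr is now null, so OnDone() never runs.
}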
-bool AndroidVideoDecodeAccelerator::CanDecodeOnIOThread() {
+bool AndroidVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
return false;
}
@@ -790,7 +1147,29 @@ const base::ThreadChecker& AndroidVideoDecodeAccelerator::ThreadChecker()
base::WeakPtr<gpu::gles2::GLES2Decoder>
AndroidVideoDecodeAccelerator::GetGlDecoder() const {
- return gl_decoder_;
+ return get_gles2_decoder_cb_.Run();
+}
+
+gpu::gles2::TextureRef* AndroidVideoDecodeAccelerator::GetTextureForPicture(
+ const media::PictureBuffer& picture_buffer) {
+ auto gles_decoder = GetGlDecoder();
+ RETURN_ON_FAILURE(this, gles_decoder, "Failed to get GL decoder",
+ ILLEGAL_STATE, nullptr);
+ RETURN_ON_FAILURE(this, gles_decoder->GetContextGroup(),
+ "Null gles_decoder->GetContextGroup()", ILLEGAL_STATE,
+ nullptr);
+ gpu::gles2::TextureManager* texture_manager =
+ gles_decoder->GetContextGroup()->texture_manager();
+ RETURN_ON_FAILURE(this, texture_manager, "Null texture_manager",
+ ILLEGAL_STATE, nullptr);
+
+ DCHECK_LE(1u, picture_buffer.internal_texture_ids().size());
+ gpu::gles2::TextureRef* texture_ref =
+ texture_manager->GetTexture(picture_buffer.internal_texture_ids()[0]);
+  RETURN_ON_FAILURE(this, texture_ref, "Null texture_ref", ILLEGAL_STATE,
+ nullptr);
+
+ return texture_ref;
}
void AndroidVideoDecodeAccelerator::OnFrameAvailable() {
@@ -802,20 +1181,24 @@ void AndroidVideoDecodeAccelerator::OnFrameAvailable() {
void AndroidVideoDecodeAccelerator::PostError(
const ::tracked_objects::Location& from_here,
media::VideoDecodeAccelerator::Error error) {
- base::MessageLoop::current()->PostTask(
- from_here, base::Bind(&AndroidVideoDecodeAccelerator::NotifyError,
- weak_this_factory_.GetWeakPtr(), error));
+ base::MessageLoop::current()->PostDelayedTask(
+ from_here,
+ base::Bind(&AndroidVideoDecodeAccelerator::NotifyError,
+ weak_this_factory_.GetWeakPtr(), error, error_sequence_token_),
+ (defer_errors_ ? ErrorPostingDelay() : base::TimeDelta()));
state_ = ERROR;
}
void AndroidVideoDecodeAccelerator::OnMediaCryptoReady(
- media::MediaDrmBridge::JavaObjectPtr media_crypto,
+ media::MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto,
bool needs_protected_surface) {
DVLOG(1) << __FUNCTION__;
if (!media_crypto) {
LOG(ERROR) << "MediaCrypto is not available, can't play encrypted stream.";
- NotifyCdmAttached(false);
+ cdm_for_reference_holding_only_ = nullptr;
+ media_drm_bridge_cdm_context_ = nullptr;
+ NotifyInitializationComplete(false);
return;
}
@@ -825,23 +1208,24 @@ void AndroidVideoDecodeAccelerator::OnMediaCryptoReady(
// is not created yet.
DCHECK(!media_codec_);
- media_crypto_ = std::move(media_crypto);
- needs_protected_surface_ = needs_protected_surface;
+ codec_config_->media_crypto_ = std::move(media_crypto);
+ codec_config_->needs_protected_surface_ = needs_protected_surface;
// After receiving |media_crypto_| we can configure MediaCodec.
- const bool success = ConfigureMediaCodec();
- NotifyCdmAttached(success);
+ ConfigureMediaCodecAsynchronously();
}
void AndroidVideoDecodeAccelerator::OnKeyAdded() {
DVLOG(1) << __FUNCTION__;
- // TODO(timav): Figure out whether stopping the pipeline in response to
- // NO_KEY error and restarting it here has significant benefits (e.g. saving
- // power). Right now do nothing here.
+
+ if (state_ == WAITING_FOR_KEY)
+ state_ = NO_ERROR;
+
+ DoIOTask(true);
}
-void AndroidVideoDecodeAccelerator::NotifyCdmAttached(bool success) {
- client_->NotifyCdmAttached(success);
+void AndroidVideoDecodeAccelerator::NotifyInitializationComplete(bool success) {
+ client_->NotifyInitializationComplete(success);
}
void AndroidVideoDecodeAccelerator::NotifyPictureReady(
@@ -863,7 +1247,13 @@ void AndroidVideoDecodeAccelerator::NotifyResetDone() {
}
void AndroidVideoDecodeAccelerator::NotifyError(
- media::VideoDecodeAccelerator::Error error) {
+ media::VideoDecodeAccelerator::Error error,
+ int token) {
+ DVLOG(1) << __FUNCTION__ << ": error: " << error << " token: " << token
+ << " current: " << error_sequence_token_;
+ if (token != error_sequence_token_)
+ return;
+
client_->NotifyError(error);
}
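The token comparison above is the entire cancellation mechanism for deferred errors: PostError() captures |error_sequence_token_| when it posts (possibly with a delay while |defer_errors_| is set), Reset() increments the member, and a stale token means the error predates the reset and is dropped. A minimal sketch of the pattern with hypothetical names:

#include "base/bind.h"
#include "base/location.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/time/time.h"

class Reporter {
 public:
  Reporter() : weak_factory_(this) {}

  void PostFailure() {
    base::MessageLoop::current()->PostDelayedTask(
        FROM_HERE,
        base::Bind(&Reporter::ReportFailure, weak_factory_.GetWeakPtr(),
                   sequence_token_),
        base::TimeDelta::FromSeconds(1));
  }

  void Reset() { sequence_token_++; }  // Invalidates anything posted earlier.

 private:
  void ReportFailure(int token) {
    if (token != sequence_token_)
      return;  // Stale: a Reset() happened after this failure was posted.
    // ... notify the client here ...
  }

  int sequence_token_ = 0;
  base::WeakPtrFactory<Reporter> weak_factory_;
};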
@@ -871,45 +1261,72 @@ void AndroidVideoDecodeAccelerator::ManageTimer(bool did_work) {
bool should_be_running = true;
base::TimeTicks now = base::TimeTicks::Now();
- if (!did_work) {
+ if (!did_work && !most_recent_work_.is_null()) {
// Make sure that we have done work recently enough, else stop the timer.
- if (now - most_recent_work_ > IdleTimerTimeOut())
+ if (now - most_recent_work_ > IdleTimerTimeOut()) {
+ most_recent_work_ = base::TimeTicks();
should_be_running = false;
+ }
} else {
most_recent_work_ = now;
}
- if (should_be_running && !io_timer_.IsRunning()) {
- io_timer_.Start(FROM_HERE, DecodePollDelay(), this,
- &AndroidVideoDecodeAccelerator::DoIOTask);
- } else if (!should_be_running && io_timer_.IsRunning()) {
- io_timer_.Stop();
- }
+ if (should_be_running)
+ g_avda_timer.Pointer()->StartTimer(this);
+ else
+ g_avda_timer.Pointer()->StopTimer(this);
}
// static
-bool AndroidVideoDecodeAccelerator::UseDeferredRenderingStrategy() {
- return base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableUnifiedMediaPipeline);
+bool AndroidVideoDecodeAccelerator::UseDeferredRenderingStrategy(
+ const gpu::GpuPreferences& gpu_preferences) {
+ // TODO(liberato, watk): Figure out what we want to do about zero copy for
+ // fullscreen external SurfaceView in WebView. http://crbug.com/582170.
+ return !gpu_preferences.enable_threaded_texture_mailboxes;
}
// static
media::VideoDecodeAccelerator::Capabilities
-AndroidVideoDecodeAccelerator::GetCapabilities() {
+AndroidVideoDecodeAccelerator::GetCapabilities(
+ const gpu::GpuPreferences& gpu_preferences) {
Capabilities capabilities;
SupportedProfiles& profiles = capabilities.supported_profiles;
- SupportedProfile profile;
-
- profile.profile = media::VP8PROFILE_ANY;
- profile.min_resolution.SetSize(0, 0);
- profile.max_resolution.SetSize(1920, 1088);
- profiles.push_back(profile);
+ if (media::MediaCodecUtil::IsVp8DecoderAvailable()) {
+ SupportedProfile profile;
+ profile.profile = media::VP8PROFILE_ANY;
+ profile.min_resolution.SetSize(0, 0);
+ profile.max_resolution.SetSize(1920, 1088);
+ // If we know MediaCodec will just create a software codec, prefer our
+ // internal software decoder instead. It's more up to date and secured
+    // within the renderer sandbox. However, if the content is encrypted, we
+    // must use MediaCodec anyway, since MediaDrm offers no way to decrypt
+ // the buffers and let us use our internal software decoders.
+ profile.encrypted_only = media::VideoCodecBridge::IsKnownUnaccelerated(
+ media::kCodecVP8, media::MEDIA_CODEC_DECODER);
+ profiles.push_back(profile);
+ }
- profile.profile = media::VP9PROFILE_ANY;
- profile.min_resolution.SetSize(0, 0);
- profile.max_resolution.SetSize(1920, 1088);
- profiles.push_back(profile);
+ if (media::MediaCodecUtil::IsVp9DecoderAvailable()) {
+ SupportedProfile profile;
+ profile.min_resolution.SetSize(0, 0);
+ profile.max_resolution.SetSize(1920, 1088);
+ // If we know MediaCodec will just create a software codec, prefer our
+ // internal software decoder instead. It's more up to date and secured
+    // within the renderer sandbox. However, if the content is encrypted, we
+    // must use MediaCodec anyway, since MediaDrm offers no way to decrypt
+ // the buffers and let us use our internal software decoders.
+ profile.encrypted_only = media::VideoCodecBridge::IsKnownUnaccelerated(
+ media::kCodecVP9, media::MEDIA_CODEC_DECODER);
+ profile.profile = media::VP9PROFILE_PROFILE0;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE1;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE2;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE3;
+ profiles.push_back(profile);
+ }
for (const auto& supported_profile : kSupportedH264Profiles) {
SupportedProfile profile;
@@ -922,9 +1339,15 @@ AndroidVideoDecodeAccelerator::GetCapabilities() {
profiles.push_back(profile);
}
- if (UseDeferredRenderingStrategy()) {
- capabilities.flags = media::VideoDecodeAccelerator::Capabilities::
+ capabilities.flags = media::VideoDecodeAccelerator::Capabilities::
+ SUPPORTS_DEFERRED_INITIALIZATION;
+ if (UseDeferredRenderingStrategy(gpu_preferences)) {
+ capabilities.flags |= media::VideoDecodeAccelerator::Capabilities::
NEEDS_ALL_PICTURE_BUFFERS_TO_DECODE;
+ if (media::MediaCodecUtil::IsSurfaceViewOutputSupported()) {
+ capabilities.flags |= media::VideoDecodeAccelerator::Capabilities::
+ SUPPORTS_EXTERNAL_OUTPUT_SURFACE;
+ }
}
return capabilities;
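Since |capabilities.flags| is a bitmask, callers read the bits set above back with bitwise tests. A sketch of a hypothetical caller-side check (assuming a |gpu_preferences| in scope); the enum names are the same ones used in this function:

const media::VideoDecodeAccelerator::Capabilities caps =
    AndroidVideoDecodeAccelerator::GetCapabilities(gpu_preferences);

const bool deferred_init_supported =
    (caps.flags & media::VideoDecodeAccelerator::Capabilities::
                      SUPPORTS_DEFERRED_INITIALIZATION) != 0;
const bool surface_view_supported =
    (caps.flags & media::VideoDecodeAccelerator::Capabilities::
                      SUPPORTS_EXTERNAL_OUTPUT_SURFACE) != 0;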
diff --git a/chromium/content/common/gpu/media/android_video_decode_accelerator.h b/chromium/content/common/gpu/media/android_video_decode_accelerator.h
index 1dd6816a72d..1e0543d3fc5 100644
--- a/chromium/content/common/gpu/media/android_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/android_video_decode_accelerator.h
@@ -18,11 +18,14 @@
#include "base/timer/timer.h"
#include "content/common/content_export.h"
#include "content/common/gpu/media/avda_state_provider.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
-#include "media/base/android/media_drm_bridge.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "media/base/android/media_drm_bridge_cdm_context.h"
#include "media/base/android/sdk_media_codec_bridge.h"
#include "media/base/media_keys.h"
#include "media/video/video_decode_accelerator.h"
+#include "ui/gl/android/scoped_java_surface.h"
namespace gfx {
class SurfaceTexture;
@@ -39,7 +42,7 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
: public media::VideoDecodeAccelerator,
public AVDAStateProvider {
public:
- typedef std::map<int32_t, media::PictureBuffer> OutputBufferMap;
+ using OutputBufferMap = std::map<int32_t, media::PictureBuffer>;
// A BackingStrategy is responsible for making a PictureBuffer's texture
// contain the image that a MediaCodec decoder buffer tells it to.
@@ -47,20 +50,26 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
public:
virtual ~BackingStrategy() {}
- // Called after the state provider is given, but before any other
- // calls to the BackingStrategy.
- virtual void Initialize(AVDAStateProvider* provider) = 0;
+ // Must be called before anything else. If surface_view_id is not equal to
+ // |kNoSurfaceID| it refers to a SurfaceView that the strategy must render
+ // to.
+ // Returns the Java surface to configure MediaCodec with.
+ virtual gfx::ScopedJavaSurface Initialize(int surface_view_id) = 0;
// Called before the AVDA does any Destroy() work. This will be
// the last call that the BackingStrategy receives.
virtual void Cleanup(bool have_context,
const OutputBufferMap& buffer_map) = 0;
+ // This returns the SurfaceTexture created by Initialize, or nullptr if
+ // the strategy was initialized with a SurfaceView.
+ virtual scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture() const = 0;
+
// Return the GL texture target that the PictureBuffer textures use.
virtual uint32_t GetTextureTarget() const = 0;
- // Create and return a surface texture for the MediaCodec to use.
- virtual scoped_refptr<gfx::SurfaceTexture> CreateSurfaceTexture() = 0;
+ // Return the size to use when requesting picture buffers.
+ virtual gfx::Size GetPictureBufferSize() const = 0;
// Make the provided PictureBuffer draw the image that is represented by
// the decoded output buffer at codec_buffer_index.
@@ -70,16 +79,13 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// Notify strategy that a picture buffer has been assigned.
virtual void AssignOnePictureBuffer(
- const media::PictureBuffer& picture_buffer) {}
+ const media::PictureBuffer& picture_buffer,
+ bool have_context) {}
// Notify strategy that a picture buffer has been reused.
virtual void ReuseOnePictureBuffer(
const media::PictureBuffer& picture_buffer) {}
- // Notify strategy that we are about to dismiss a picture buffer.
- virtual void DismissOnePictureBuffer(
- const media::PictureBuffer& picture_buffer) {}
-
// Notify strategy that we have a new android MediaCodec instance. This
// happens when we're starting up or re-configuring mid-stream. Any
// previously provided codec should no longer be referenced.
@@ -91,11 +97,22 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// Notify the strategy that a frame is available. This callback can happen
// on any thread at any time.
virtual void OnFrameAvailable() = 0;
+
+ // Whether the pictures produced by this backing strategy are overlayable.
+ virtual bool ArePicturesOverlayable() = 0;
+
+ // Size may have changed due to resolution change since the last time this
+ // PictureBuffer was used. Update the size of the picture buffer to
+ // |new_size| and also update any size-dependent state (e.g. size of
+ // associated texture). Callers should set the correct GL context prior to
+ // calling.
+ virtual void UpdatePictureBufferSize(media::PictureBuffer* picture_buffer,
+ const gfx::Size& new_size) = 0;
};
AndroidVideoDecodeAccelerator(
- const base::WeakPtr<gpu::gles2::GLES2Decoder> decoder,
- const base::Callback<bool(void)>& make_context_current);
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb);
~AndroidVideoDecodeAccelerator() override;
@@ -109,31 +126,98 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
// AVDAStateProvider implementation:
const gfx::Size& GetSize() const override;
const base::ThreadChecker& ThreadChecker() const override;
base::WeakPtr<gpu::gles2::GLES2Decoder> GetGlDecoder() const override;
+ gpu::gles2::TextureRef* GetTextureForPicture(
+ const media::PictureBuffer& picture_buffer) override;
void PostError(const ::tracked_objects::Location& from_here,
media::VideoDecodeAccelerator::Error error) override;
- static media::VideoDecodeAccelerator::Capabilities GetCapabilities();
+ static media::VideoDecodeAccelerator::Capabilities GetCapabilities(
+ const gpu::GpuPreferences& gpu_preferences);
// Notifies about SurfaceTexture::OnFrameAvailable. This can happen on any
// thread at any time!
void OnFrameAvailable();
private:
+ friend class AVDATimerManager;
+
+ // TODO(timav): evaluate the need for more states in the AVDA state machine.
enum State {
NO_ERROR,
ERROR,
+ // Set when we are asynchronously constructing the codec. Will transition
+ // to NO_ERROR or ERROR depending on success.
+ WAITING_FOR_CODEC,
+ // Set when we have a codec, but it doesn't yet have a key.
+ WAITING_FOR_KEY,
+ WAITING_FOR_EOS,
};
- static const base::TimeDelta kDecodePollDelay;
+ // Configuration info for MediaCodec.
+ // This is used to shuttle configuration info between threads without needing
+  // to worry about the lifetime of the AVDA instance. None of these fields
+  // should be modified while |state_| is WAITING_FOR_CODEC.
+ class CodecConfig : public base::RefCountedThreadSafe<CodecConfig> {
+ public:
+ CodecConfig();
+
+ // Codec type. Used when we configure media codec.
+ media::VideoCodec codec_ = media::kUnknownVideoCodec;
+
+ // Whether encryption scheme requires to use protected surface.
+    // Whether the encryption scheme requires the use of a protected surface.
+
+ // The surface that MediaCodec is configured to output to. It's created by
+ // the backing strategy.
+ gfx::ScopedJavaSurface surface_;
+
+ // The MediaCrypto object is used in the MediaCodec.configure() in case of
+ // an encrypted stream.
+ media::MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto_;
+
+ // Initial coded size. The actual size might change at any time, so this
+ // is only a hint.
+ gfx::Size initial_expected_coded_size_;
+
+ protected:
+ friend class base::RefCountedThreadSafe<CodecConfig>;
+ virtual ~CodecConfig();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CodecConfig);
+ };
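CodecConfig follows the usual base::RefCountedThreadSafe recipe: the AVDA keeps one reference while a task bound on the construction thread holds another, and whichever side releases last deletes the object, so neither thread needs to reason about the other's lifetime. A minimal sketch of the recipe with hypothetical names:

#include "base/bind.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/task_runner.h"

class WorkConfig : public base::RefCountedThreadSafe<WorkConfig> {
 public:
  WorkConfig() {}

  int width = 0;
  int height = 0;

 private:
  friend class base::RefCountedThreadSafe<WorkConfig>;
  ~WorkConfig() {}

  DISALLOW_COPY_AND_ASSIGN(WorkConfig);
};

// May run on any thread; the scoped_refptr parameter keeps |config| alive.
void UseConfig(scoped_refptr<WorkConfig> config) {}

void PostWork(base::TaskRunner* worker_runner) {
  scoped_refptr<WorkConfig> config(new WorkConfig());
  config->width = 640;
  config->height = 480;
  // Bind() retains a second reference for the posted task.
  worker_runner->PostTask(FROM_HERE, base::Bind(&UseConfig, config));
}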
// Configures |media_codec_| with the given codec parameters from the client.
- bool ConfigureMediaCodec();
+ // This configuration will (probably) not be complete before this call
+ // returns. Multiple calls before completion will be ignored. |state_|
+ // must be NO_ERROR or WAITING_FOR_CODEC. Note that, once you call this,
+ // you should be careful to avoid modifying members of |codec_config_| until
+ // |state_| is no longer WAITING_FOR_CODEC.
+ void ConfigureMediaCodecAsynchronously();
+
+ // Like ConfigureMediaCodecAsynchronously, but synchronous. Returns true if
+ // and only if |media_codec_| is non-null. Since all configuration is done
+ // synchronously, there is no concern with modifying |codec_config_| after
+ // this returns.
+ bool ConfigureMediaCodecSynchronously();
+
+ // Instantiate a media codec using |codec_config|.
+ // This may be called on any thread.
+ static scoped_ptr<media::VideoCodecBridge> ConfigureMediaCodecOnAnyThread(
+ scoped_refptr<CodecConfig> codec_config);
+
+ // Called on the main thread to update |media_codec_| and complete codec
+ // configuration. |media_codec| will be null if configuration failed.
+ void OnCodecConfigured(scoped_ptr<media::VideoCodecBridge> media_codec);
// Sends the decoded frame specified by |codec_buffer_index| to the client.
void SendDecodedFrameToClient(int32_t codec_buffer_index,
@@ -142,7 +226,7 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// Does pending IO tasks if any. Once this is called, it polls |media_codec_|
// until it finishes pending tasks. For the polling, |kDecodePollDelay| is
// used.
- void DoIOTask();
+ void DoIOTask(bool start_timer);
// Feeds input data to |media_codec_|. This checks
// |pending_bitstream_buffers_| and queues a buffer to |media_codec_|.
@@ -157,15 +241,20 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// Requests picture buffers from the client.
void RequestPictureBuffers();
+ // Decode the content in the |bitstream_buffer|. Note that a
+  // |bitstream_buffer| with an id of -1 indicates a flush command.
+ void DecodeBuffer(const media::BitstreamBuffer& bitstream_buffer);
+
// This callback is called after CDM obtained a MediaCrypto object.
- void OnMediaCryptoReady(media::MediaDrmBridge::JavaObjectPtr media_crypto,
- bool needs_protected_surface);
+ void OnMediaCryptoReady(
+ media::MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto,
+ bool needs_protected_surface);
// This callback is called when a new key is added to CDM.
void OnKeyAdded();
- // Notifies the client of the CDM setting result.
- void NotifyCdmAttached(bool success);
+ // Notifies the client of the result of deferred initialization.
+ void NotifyInitializationComplete(bool success);
// Notifies the client about the availability of a picture.
void NotifyPictureReady(const media::Picture& picture);
@@ -181,7 +270,12 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
void NotifyResetDone();
// Notifies about decoding errors.
- void NotifyError(media::VideoDecodeAccelerator::Error error);
+ // Note: you probably don't want to call this directly. Use PostError or
+ // RETURN_ON_FAILURE, since we can defer error reporting to keep the pipeline
+ // from breaking. NotifyError will do so immediately, PostError may wait.
+  // from breaking. NotifyError will do so immediately; PostError may wait.
+  // |token| has to match |error_sequence_token_|, or else the error is assumed
+  // to come from a post that preceded the most recent reset, and is ignored.
// Start or stop our work-polling timer based on whether we did any work, and
// how long it has been since we've done work. Calling this with true will
@@ -194,12 +288,9 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// is still valid and should be processed.
void ResetCodecState();
- // Dismiss all |output_picture_buffers_| in preparation for requesting new
- // ones.
- void DismissPictureBuffers();
-
// Return true if and only if we should use deferred rendering.
- static bool UseDeferredRenderingStrategy();
+ static bool UseDeferredRenderingStrategy(
+ const gpu::GpuPreferences& gpu_preferences);
// Used to DCHECK that we are called on the correct thread.
base::ThreadChecker thread_checker_;
@@ -208,17 +299,14 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
Client* client_;
// Callback to set the correct gl context.
- base::Callback<bool(void)> make_context_current_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
- // Codec type. Used when we configure media codec.
- media::VideoCodec codec_;
+ // Callback to get the GLES2Decoder instance.
+ GetGLES2DecoderCallback get_gles2_decoder_cb_;
// Whether the stream is encrypted.
bool is_encrypted_;
- // Whether encryption scheme requires to use protected surface.
- bool needs_protected_surface_;
-
// The current state of this class. For now, this is used only for setting
// error state.
State state_;
@@ -231,17 +319,9 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// decoded frames to the client.
std::queue<int32_t> free_picture_ids_;
- // Picture buffer ids which have been dismissed and not yet re-assigned. Used
- // to ignore ReusePictureBuffer calls that were in flight when the
- // DismissPictureBuffer call was made.
- std::set<int32_t> dismissed_picture_ids_;
-
// The low-level decoder which Android SDK provides.
scoped_ptr<media::VideoCodecBridge> media_codec_;
- // A container of texture. Used to set a texture to |media_codec_|.
- scoped_refptr<gfx::SurfaceTexture> surface_texture_;
-
// Set to true after requesting picture buffers to the client.
bool picturebuffers_requested_;
@@ -249,11 +329,8 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
gfx::Size size_;
// Encoded bitstream buffers to be passed to media codec, queued until an
- // input buffer is available, along with the time when they were first
- // enqueued.
- typedef std::queue<std::pair<media::BitstreamBuffer, base::Time> >
- PendingBitstreamBuffers;
- PendingBitstreamBuffers pending_bitstream_buffers_;
+ // input buffer is available.
+ std::queue<media::BitstreamBuffer> pending_bitstream_buffers_;
// A map of presentation timestamp to bitstream buffer id for the bitstream
// buffers that have been submitted to the decoder but haven't yet produced an
@@ -265,12 +342,6 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// NotifyEndOfBitstreamBuffer() before getting output from the bitstream.
std::list<int32_t> bitstreams_notified_in_advance_;
- // Owner of the GL context. Used to restore the context state.
- base::WeakPtr<gpu::gles2::GLES2Decoder> gl_decoder_;
-
- // Repeating timer responsible for draining pending IO to the codec.
- base::RepeatingTimer io_timer_;
-
// Backing strategy that we'll use to connect PictureBuffers to frames.
scoped_ptr<BackingStrategy> strategy_;
@@ -283,16 +354,33 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// CDM related stuff.
- // Holds a ref-count to the CDM.
- scoped_refptr<media::MediaKeys> cdm_;
+ // Holds a ref-count to the CDM to avoid using the CDM after it's destroyed.
+ scoped_refptr<media::MediaKeys> cdm_for_reference_holding_only_;
+
+ media::MediaDrmBridgeCdmContext* media_drm_bridge_cdm_context_;
// MediaDrmBridge requires registration/unregistration of the player, this
// registration id is used for this.
int cdm_registration_id_;
- // The MediaCrypto object is used in the MediaCodec.configure() in case of
- // an encrypted stream.
- media::MediaDrmBridge::JavaObjectPtr media_crypto_;
+ // Configuration that we use for MediaCodec.
+ // Do not update any of its members while |state_| is WAITING_FOR_CODEC.
+ scoped_refptr<CodecConfig> codec_config_;
+
+ // Index of the dequeued and filled buffer that we keep trying to enqueue.
+  // Such a buffer appears during MEDIA_CODEC_NO_KEY processing.
+ int pending_input_buf_index_;
+
+ // Monotonically increasing value that is used to prevent old, delayed errors
+ // from being sent after a reset.
+ int error_sequence_token_;
+
+ // PostError will defer sending an error if and only if this is true.
+ bool defer_errors_;
+
+ // True if and only if VDA initialization is deferred, and we have not yet
+ // called NotifyInitializationComplete.
+ bool deferred_initialization_pending_;
// WeakPtrFactory for posting tasks back to |this|.
base::WeakPtrFactory<AndroidVideoDecodeAccelerator> weak_this_factory_;
diff --git a/chromium/content/common/gpu/media/android_video_decode_accelerator_unittest.cc b/chromium/content/common/gpu/media/android_video_decode_accelerator_unittest.cc
index 3cd79157162..d21ad9e58a8 100644
--- a/chromium/content/common/gpu/media/android_video_decode_accelerator_unittest.cc
+++ b/chromium/content/common/gpu/media/android_video_decode_accelerator_unittest.cc
@@ -27,13 +27,15 @@ bool MockMakeContextCurrent() {
return true;
}
+static base::WeakPtr<gpu::gles2::GLES2Decoder> MockGetGLES2Decoder(
+ const base::WeakPtr<gpu::gles2::GLES2Decoder>& decoder) {
+ return decoder;
+}
+
} // namespace
namespace content {
-// TODO(felipeg): Add more unit tests to test the ordinary behavior of
-// AndroidVideoDecodeAccelerator.
-// http://crbug.com/178647
class MockVideoDecodeAcceleratorClient
: public media::VideoDecodeAccelerator::Client {
public:
@@ -42,6 +44,7 @@ class MockVideoDecodeAcceleratorClient
// VideoDecodeAccelerator::Client implementation.
void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
+ uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) override {}
void DismissPictureBuffer(int32_t picture_buffer_id) override {}
@@ -60,8 +63,6 @@ class AndroidVideoDecodeAcceleratorTest : public testing::Test {
void SetUp() override {
JNIEnv* env = base::android::AttachCurrentThread();
media::RegisterJni(env);
- // TODO(felipeg): fix GL bindings, so that the decoder can perform GL
- // calls.
// Start message loop because
// AndroidVideoDecodeAccelerator::ConfigureMediaCodec() starts a timer task.
@@ -72,15 +73,19 @@ class AndroidVideoDecodeAcceleratorTest : public testing::Test {
scoped_ptr<MockVideoDecodeAcceleratorClient> client(
new MockVideoDecodeAcceleratorClient());
accelerator_.reset(new AndroidVideoDecodeAccelerator(
- decoder->AsWeakPtr(), base::Bind(&MockMakeContextCurrent)));
+ base::Bind(&MockMakeContextCurrent),
+ base::Bind(&MockGetGLES2Decoder, decoder->AsWeakPtr())));
}
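The two Bind() calls above adapt the mocks to the callback types the new constructor expects. MockMakeContextCurrent() already has the right zero-argument shape; MockGetGLES2Decoder() takes the weak decoder pointer as a parameter, and binding that argument up front curries it away. Spelled out with explicit callback types (a sketch; the MakeGLContextCurrentCallback and GetGLES2DecoderCallback typedefs used by the constructor presumably match these and live in gpu_video_decode_accelerator_helpers.h):

base::Callback<bool(void)> make_context_current_cb =
    base::Bind(&MockMakeContextCurrent);

base::Callback<base::WeakPtr<gpu::gles2::GLES2Decoder>(void)>
    get_gles2_decoder_cb =
        base::Bind(&MockGetGLES2Decoder, decoder->AsWeakPtr());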
bool Configure(media::VideoCodec codec) {
AndroidVideoDecodeAccelerator* accelerator =
static_cast<AndroidVideoDecodeAccelerator*>(accelerator_.get());
- accelerator->surface_texture_ = gfx::SurfaceTexture::Create(0);
- accelerator->codec_ = codec;
- return accelerator->ConfigureMediaCodec();
+ scoped_refptr<gfx::SurfaceTexture> surface_texture =
+ gfx::SurfaceTexture::Create(0);
+ accelerator->codec_config_->surface_ =
+ gfx::ScopedJavaSurface(surface_texture.get());
+ accelerator->codec_config_->codec_ = codec;
+ return accelerator->ConfigureMediaCodecSynchronously();
}
private:
diff --git a/chromium/content/common/gpu/media/android_video_encode_accelerator.cc b/chromium/content/common/gpu/media/android_video_encode_accelerator.cc
index eb383081d7f..ac2ff39e9b7 100644
--- a/chromium/content/common/gpu/media/android_video_encode_accelerator.cc
+++ b/chromium/content/common/gpu/media/android_video_encode_accelerator.cc
@@ -7,13 +7,12 @@
#include <set>
#include "base/bind.h"
-#include "base/command_line.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/public/common/content_switches.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/android/media_codec_util.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/limits.h"
@@ -95,8 +94,6 @@ static bool GetSupportedColorFormatForMime(const std::string& mime,
AndroidVideoEncodeAccelerator::AndroidVideoEncodeAccelerator()
: num_buffers_at_codec_(0),
- num_output_buffers_(-1),
- output_buffers_capacity_(0),
last_set_bitrate_(0) {}
AndroidVideoEncodeAccelerator::~AndroidVideoEncodeAccelerator() {
@@ -107,12 +104,6 @@ media::VideoEncodeAccelerator::SupportedProfiles
AndroidVideoEncodeAccelerator::GetSupportedProfiles() {
SupportedProfiles profiles;
-#if defined(ENABLE_WEBRTC)
- const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kDisableWebRtcHWEncoding))
- return profiles;
-#endif
-
const struct {
const media::VideoCodec codec;
const media::VideoCodecProfile profile;
@@ -123,6 +114,11 @@ AndroidVideoEncodeAccelerator::GetSupportedProfiles() {
};
for (const auto& supported_codec : kSupportedCodecs) {
+ if (supported_codec.codec == media::kCodecVP8 &&
+ !media::MediaCodecUtil::IsVp8EncoderAvailable()) {
+ continue;
+ }
+
if (VideoCodecBridge::IsKnownUnaccelerated(supported_codec.codec,
media::MEDIA_CODEC_ENCODER)) {
continue;
@@ -164,17 +160,24 @@ bool AndroidVideoEncodeAccelerator::Initialize(
std::string mime_type;
media::VideoCodec codec;
+ // The client should be prepared to feed at least this many frames into the
+ // encoder before being returned any output frames, since the encoder may
+ // need to hold onto some subset of inputs as reference pictures.
+ uint32_t frame_input_count;
if (output_profile == media::VP8PROFILE_ANY) {
codec = media::kCodecVP8;
mime_type = "video/x-vnd.on2.vp8";
+ frame_input_count = 1;
} else if (output_profile == media::H264PROFILE_BASELINE ||
output_profile == media::H264PROFILE_MAIN) {
codec = media::kCodecH264;
mime_type = "video/avc";
+ frame_input_count = 30;
} else {
return false;
}
+ frame_size_ = input_visible_size;
last_set_bitrate_ = initial_bitrate;
// Only consider using MediaCodec if it's likely backed by hardware.
@@ -202,15 +205,16 @@ bool AndroidVideoEncodeAccelerator::Initialize(
return false;
}
- num_output_buffers_ = media_codec_->GetOutputBuffersCount();
- output_buffers_capacity_ = media_codec_->GetOutputBuffersCapacity();
+ // Conservative upper bound for output buffer size: decoded size + 2KB.
+ const size_t output_buffer_capacity =
+ VideoFrame::AllocationSize(format, input_visible_size) + 2048;
base::MessageLoop::current()->PostTask(
FROM_HERE,
base::Bind(&VideoEncodeAccelerator::Client::RequireBitstreamBuffers,
client_ptr_factory_->GetWeakPtr(),
- num_output_buffers_,
+ frame_input_count,
input_visible_size,
- output_buffers_capacity_));
+ output_buffer_capacity));
return true;
}
@@ -238,7 +242,8 @@ void AndroidVideoEncodeAccelerator::Encode(
DCHECK(thread_checker_.CalledOnValidThread());
RETURN_ON_FAILURE(frame->format() == media::PIXEL_FORMAT_I420,
"Unexpected format", kInvalidArgumentError);
-
+ RETURN_ON_FAILURE(frame->visible_rect().size() == frame_size_,
+ "Unexpected resolution", kInvalidArgumentError);
// MediaCodec doesn't have a way to specify stride for non-Packed formats, so
// we insist on being called with packed frames and no cropping :(
RETURN_ON_FAILURE(frame->row_bytes(VideoFrame::kYPlane) ==
@@ -260,9 +265,6 @@ void AndroidVideoEncodeAccelerator::UseOutputBitstreamBuffer(
const media::BitstreamBuffer& buffer) {
DVLOG(3) << __PRETTY_FUNCTION__ << ": bitstream_buffer_id=" << buffer.id();
DCHECK(thread_checker_.CalledOnValidThread());
- RETURN_ON_FAILURE(buffer.size() >= media_codec_->GetOutputBuffersCapacity(),
- "Output buffers too small!",
- kInvalidArgumentError);
available_bitstream_buffers_.push_back(buffer);
DoIOTask();
}
@@ -331,7 +333,9 @@ void AndroidVideoEncodeAccelerator::QueueInput() {
uint8_t* buffer = NULL;
size_t capacity = 0;
- media_codec_->GetInputBuffer(input_buf_index, &buffer, &capacity);
+ status = media_codec_->GetInputBuffer(input_buf_index, &buffer, &capacity);
+ RETURN_ON_FAILURE(status == media::MEDIA_CODEC_OK, "GetInputBuffer failed.",
+ kPlatformFailureError);
size_t queued_size =
VideoFrame::AllocationSize(media::PIXEL_FORMAT_I420, frame->coded_size());
@@ -373,21 +377,6 @@ void AndroidVideoEncodeAccelerator::QueueInput() {
pending_frames_.pop();
}
-bool AndroidVideoEncodeAccelerator::DoOutputBuffersSuffice() {
- // If this returns false ever, then the VEA::Client interface will need to
- // grow a DismissBitstreamBuffer() call, and VEA::Client impls will have to be
- // prepared to field multiple requests to RequireBitstreamBuffers().
- int count = media_codec_->GetOutputBuffersCount();
- size_t capacity = media_codec_->GetOutputBuffersCapacity();
- bool ret = count <= num_output_buffers_ &&
- capacity <= output_buffers_capacity_;
- LOG_IF(ERROR, !ret) << "Need more/bigger buffers; before: "
- << num_output_buffers_ << "x" << output_buffers_capacity_
- << ", now: " << count << "x" << capacity;
- UMA_HISTOGRAM_BOOLEAN("Media.AVEA.OutputBuffersSuffice", ret);
- return ret;
-}
-
void AndroidVideoEncodeAccelerator::DequeueOutput() {
if (!client_ptr_factory_->GetWeakPtr() ||
available_bitstream_buffers_.empty() || num_buffers_at_codec_ == 0) {
@@ -410,13 +399,14 @@ void AndroidVideoEncodeAccelerator::DequeueOutput() {
// Unreachable because of previous statement, but included for clarity.
return;
- case media::MEDIA_CODEC_OUTPUT_FORMAT_CHANGED: // Fall-through.
- case media::MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
- RETURN_ON_FAILURE(DoOutputBuffersSuffice(),
- "Bitstream now requires more/larger buffers",
+ case media::MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
+ RETURN_ON_FAILURE(false, "Unexpected output format change",
kPlatformFailureError);
break;
+ case media::MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
+ break;
+
case media::MEDIA_CODEC_OK:
DCHECK_GE(buf_index, 0);
break;
@@ -429,17 +419,17 @@ void AndroidVideoEncodeAccelerator::DequeueOutput() {
media::BitstreamBuffer bitstream_buffer = available_bitstream_buffers_.back();
available_bitstream_buffers_.pop_back();
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(bitstream_buffer.handle(), false));
- RETURN_ON_FAILURE(shm->Map(bitstream_buffer.size()),
- "Failed to map SHM",
- kPlatformFailureError);
- RETURN_ON_FAILURE(size <= shm->mapped_size(),
- "Encoded buffer too large: " << size << ">"
- << shm->mapped_size(),
+ scoped_ptr<SharedMemoryRegion> shm(
+ new SharedMemoryRegion(bitstream_buffer, false));
+ RETURN_ON_FAILURE(shm->Map(), "Failed to map SHM", kPlatformFailureError);
+ RETURN_ON_FAILURE(size <= shm->size(),
+ "Encoded buffer too large: " << size << ">" << shm->size(),
kPlatformFailureError);
- media_codec_->CopyFromOutputBuffer(buf_index, offset, shm->memory(), size);
+ media::MediaCodecStatus status = media_codec_->CopyFromOutputBuffer(
+ buf_index, offset, shm->memory(), size);
+ RETURN_ON_FAILURE(status == media::MEDIA_CODEC_OK,
+ "CopyFromOutputBuffer failed", kPlatformFailureError);
media_codec_->ReleaseOutputBuffer(buf_index, false);
--num_buffers_at_codec_;
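The sizing logic above is easy to restate in isolation: the requested output buffer capacity is a conservative bound of the raw I420 frame size plus a small fixed overhead, and every encoded chunk is bounds-checked against its destination before being copied. A minimal standalone sketch of that pattern in plain C++ (the helper names are illustrative, not part of the patch):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Conservative output-buffer capacity for an I420 frame: raw frame size plus
// 2 KB, mirroring the "decoded size + 2048" bound used in Initialize() above.
size_t ConservativeOutputCapacity(int width, int height) {
  const size_t raw_i420_bytes = static_cast<size_t>(width) * height * 3 / 2;
  return raw_i420_bytes + 2048;
}

// Copy an encoded chunk into a client-supplied buffer only if it fits;
// otherwise report failure, as the RETURN_ON_FAILURE branch above does.
bool CopyEncodedChunk(const uint8_t* src, size_t src_size,
                      std::vector<uint8_t>* dst) {
  if (src_size > dst->size())
    return false;
  std::memcpy(dst->data(), src, src_size);
  return true;
}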
diff --git a/chromium/content/common/gpu/media/android_video_encode_accelerator.h b/chromium/content/common/gpu/media/android_video_encode_accelerator.h
index 426360dca7c..0de3d1866b1 100644
--- a/chromium/content/common/gpu/media/android_video_encode_accelerator.h
+++ b/chromium/content/common/gpu/media/android_video_encode_accelerator.h
@@ -67,9 +67,6 @@ class CONTENT_EXPORT AndroidVideoEncodeAccelerator
void QueueInput();
void DequeueOutput();
- // Returns true if we don't need more or bigger output buffers.
- bool DoOutputBuffersSuffice();
-
// Start & stop |io_timer_| if the time seems right.
void MaybeStartIOTimer();
void MaybeStopIOTimer();
@@ -103,9 +100,9 @@ class CONTENT_EXPORT AndroidVideoEncodeAccelerator
// appearing to move forward.
base::TimeDelta fake_input_timestamp_;
- // Number of requested output buffers and their capacity.
- int num_output_buffers_; // -1 until RequireBitstreamBuffers.
- size_t output_buffers_capacity_; // 0 until RequireBitstreamBuffers.
+ // Resolution of input stream. Set once in initialization and not allowed to
+ // change after.
+ gfx::Size frame_size_;
uint32_t last_set_bitrate_; // In bps.
diff --git a/chromium/content/common/gpu/media/avda_codec_image.cc b/chromium/content/common/gpu/media/avda_codec_image.cc
index 1df753d167e..5830433cdf2 100644
--- a/chromium/content/common/gpu/media/avda_codec_image.cc
+++ b/chromium/content/common/gpu/media/avda_codec_image.cc
@@ -24,16 +24,17 @@ AVDACodecImage::AVDACodecImage(
const base::WeakPtr<gpu::gles2::GLES2Decoder>& decoder,
const scoped_refptr<gfx::SurfaceTexture>& surface_texture)
: shared_state_(shared_state),
- codec_buffer_index_(-1),
+ codec_buffer_index_(kInvalidCodecBufferIndex),
media_codec_(codec),
decoder_(decoder),
surface_texture_(surface_texture),
detach_surface_texture_on_destruction_(false),
- texture_(0),
- need_shader_info_(true),
- texmatrix_uniform_location_(-1) {
+ texture_(0) {
+ // Default to a sane guess of "flip Y", just in case we can't get
+ // the matrix on the first call.
memset(gl_matrix_, 0, sizeof(gl_matrix_));
- gl_matrix_[0] = gl_matrix_[5] = gl_matrix_[10] = gl_matrix_[15] = 1.0f;
+ gl_matrix_[0] = gl_matrix_[10] = gl_matrix_[15] = 1.0f;
+ gl_matrix_[5] = -1.0f;
}
AVDACodecImage::~AVDACodecImage() {}
@@ -55,38 +56,35 @@ bool AVDACodecImage::BindTexImage(unsigned target) {
void AVDACodecImage::ReleaseTexImage(unsigned target) {}
bool AVDACodecImage::CopyTexImage(unsigned target) {
+ if (!surface_texture_)
+ return false;
+
if (target != GL_TEXTURE_EXTERNAL_OES)
return false;
- // Verify that the currently bound texture is the right one. If we're not
- // copying to a Texture that shares our service_id, then we can't do much.
- // This will force a copy.
- // TODO(liberato): Fall back to a copy that uses the texture matrix.
GLint bound_service_id = 0;
glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &bound_service_id);
+ // We insist that the currently bound texture is the right one. We could
+  // make a new GLImage from a 2D image.
if (bound_service_id != shared_state_->surface_texture_service_id())
return false;
- // Attach the surface texture to our GL context if needed.
+ // If the surface texture isn't attached yet, then attach it. Note that this
+ // will be to the texture in |shared_state_|, because of the checks above.
if (!shared_state_->surface_texture_is_attached())
AttachSurfaceTextureToContext();
- // Make sure that we have the right image in the front buffer.
- UpdateSurfaceTexture();
-
- InstallTextureMatrix();
-
- // TODO(liberato): Handle the texture matrix properly.
- // Either we can update the shader with it or we can move all of the logic
- // to updateTexImage() to the right place in the cc to send it to the shader.
- // For now, we just skip it. crbug.com/530681
+ // Make sure that we have the right image in the front buffer. Note that the
+ // bound_service_id is guaranteed to be equal to the surface texture's client
+ // texture id, so we can skip preserving it if the right context is current.
+ UpdateSurfaceTexture(kDontRestoreBindings);
// By setting image state to UNBOUND instead of COPIED we ensure that
// CopyTexImage() is called each time the surface texture is used for drawing.
// It would be nice if we could do this via asking for the currently bound
// Texture, but the active unit never seems to change.
- texture_->SetLevelImage(GL_TEXTURE_EXTERNAL_OES, 0, this,
- gpu::gles2::Texture::UNBOUND);
+ texture_->SetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES, 0, this,
+ gpu::gles2::Texture::UNBOUND);
return true;
}
@@ -102,16 +100,29 @@ bool AVDACodecImage::ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
gfx::OverlayTransform transform,
const gfx::Rect& bounds_rect,
const gfx::RectF& crop_rect) {
- return false;
+ // This should only be called when we're rendering to a SurfaceView.
+ if (surface_texture_) {
+ DVLOG(1) << "Invalid call to ScheduleOverlayPlane; this image is "
+ "SurfaceTexture backed.";
+ return false;
+ }
+
+ if (codec_buffer_index_ != kInvalidCodecBufferIndex) {
+ media_codec_->ReleaseOutputBuffer(codec_buffer_index_, true);
+ codec_buffer_index_ = kInvalidCodecBufferIndex;
+ }
+ return true;
}
void AVDACodecImage::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
uint64_t process_tracing_id,
const std::string& dump_name) {}
-void AVDACodecImage::UpdateSurfaceTexture() {
+void AVDACodecImage::UpdateSurfaceTexture(RestoreBindingsMode mode) {
+ DCHECK(surface_texture_);
+
// Render via the media codec if needed.
- if (codec_buffer_index_ <= -1 || !media_codec_)
+ if (!IsCodecBufferOutstanding())
return;
// The decoder buffer is still pending.
@@ -123,15 +134,24 @@ void AVDACodecImage::UpdateSurfaceTexture() {
}
// Don't bother to check if we're rendered again.
- codec_buffer_index_ = -1;
+ codec_buffer_index_ = kInvalidCodecBufferIndex;
// Swap the rendered image to the front.
- scoped_ptr<ui::ScopedMakeCurrent> scoped_make_current;
- if (!shared_state_->context()->IsCurrent(NULL)) {
- scoped_make_current.reset(new ui::ScopedMakeCurrent(
- shared_state_->context(), shared_state_->surface()));
- }
+ scoped_ptr<ui::ScopedMakeCurrent> scoped_make_current = MakeCurrentIfNeeded();
+
+ // If we changed contexts, then we always want to restore it, since the caller
+ // doesn't know that we're switching contexts.
+ if (scoped_make_current)
+ mode = kDoRestoreBindings;
+
+ // Save the current binding if requested.
+ GLint bound_service_id = 0;
+ if (mode == kDoRestoreBindings)
+ glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &bound_service_id);
+
surface_texture_->UpdateTexImage();
+ if (mode == kDoRestoreBindings)
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, bound_service_id);
// Helpfully, this is already column major.
surface_texture_->GetTransformMatrix(gl_matrix_);
@@ -153,16 +173,19 @@ void AVDACodecImage::SetMediaCodec(media::MediaCodecBridge* codec) {
media_codec_ = codec;
}
-void AVDACodecImage::setTexture(gpu::gles2::Texture* texture) {
+void AVDACodecImage::SetTexture(gpu::gles2::Texture* texture) {
texture_ = texture;
}
void AVDACodecImage::AttachSurfaceTextureToContext() {
+ DCHECK(surface_texture_);
+
+ // We assume that the currently bound texture is the intended one.
+
// Attach the surface texture to the first context we're bound on, so that
// no context switch is needed later.
-
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
@@ -170,35 +193,48 @@ void AVDACodecImage::AttachSurfaceTextureToContext() {
// We could do this earlier, but SurfaceTexture has context affinity, and we
// don't want to require a context switch.
surface_texture_->AttachToGLContext();
- shared_state_->did_attach_surface_texture();
-}
-
-void AVDACodecImage::InstallTextureMatrix() {
- // glUseProgram() has been run already -- just modify the uniform.
- // Updating this via VideoFrameProvider::Client::DidUpdateMatrix() would
- // be a better solution, except that we'd definitely miss a frame at this
- // point in drawing.
- // Our current method assumes that we'll end up being a stream resource,
- // and that the program has a texMatrix uniform that does what we want.
- if (need_shader_info_) {
- GLint program_id = -1;
- glGetIntegerv(GL_CURRENT_PROGRAM, &program_id);
-
- if (program_id >= 0) {
- // This is memorized from cc/output/shader.cc .
- const char* uniformName = "texMatrix";
- texmatrix_uniform_location_ =
- glGetUniformLocation(program_id, uniformName);
- DCHECK(texmatrix_uniform_location_ != -1);
- }
+ shared_state_->DidAttachSurfaceTexture();
+}
- // Only try once.
- need_shader_info_ = false;
+scoped_ptr<ui::ScopedMakeCurrent> AVDACodecImage::MakeCurrentIfNeeded() {
+ DCHECK(shared_state_->context());
+ scoped_ptr<ui::ScopedMakeCurrent> scoped_make_current;
+ if (!shared_state_->context()->IsCurrent(NULL)) {
+ scoped_make_current.reset(new ui::ScopedMakeCurrent(
+ shared_state_->context(), shared_state_->surface()));
}
- if (texmatrix_uniform_location_ >= 0) {
- glUniformMatrix4fv(texmatrix_uniform_location_, 1, false, gl_matrix_);
+ return scoped_make_current;
+}
+
+void AVDACodecImage::GetTextureMatrix(float matrix[16]) {
+ if (IsCodecBufferOutstanding() && shared_state_ && surface_texture_) {
+ // Our current matrix may be stale. Update it if possible.
+ if (!shared_state_->surface_texture_is_attached()) {
+ // Don't attach the surface texture permanently. Perhaps we should
+ // just attach the surface texture in avda and be done with it.
+ GLuint service_id = 0;
+ glGenTextures(1, &service_id);
+ GLint bound_service_id = 0;
+ glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &bound_service_id);
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, service_id);
+ AttachSurfaceTextureToContext();
+ UpdateSurfaceTexture(kDontRestoreBindings);
+ // Detach the surface texture, which deletes the generated texture.
+ surface_texture_->DetachFromGLContext();
+ shared_state_->DidDetachSurfaceTexture();
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, bound_service_id);
+ } else {
+ // Surface texture is already attached, so just update it.
+ UpdateSurfaceTexture(kDoRestoreBindings);
+ }
}
+
+ memcpy(matrix, gl_matrix_, sizeof(gl_matrix_));
+}
+
+bool AVDACodecImage::IsCodecBufferOutstanding() const {
+ return codec_buffer_index_ != kInvalidCodecBufferIndex && media_codec_;
}
} // namespace content
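The "already column major" matrix above is consumed by applying the 4x4 transform to 2-D texture coordinates extended to a homogeneous vector, the way a shader would compute (matrix * vec4(s, t, 0, 1)).st. A small sketch of that application (the function name is illustrative); note that with only m[5] = -1, as in the constructor's default guess, t is merely negated, whereas the transform typically reported by GetTransformMatrix pairs the -1 scale with a translation in m[13] to flip within [0, 1]:

// Apply a column-major 4x4 texture transform to an (s, t) coordinate.
// Column-major storage: element (row, col) lives at m[col * 4 + row].
void TransformTexCoord(const float m[16], float s, float t,
                       float* out_s, float* out_t) {
  *out_s = m[0] * s + m[4] * t + m[12];  // Row 0, with z = 0 and w = 1.
  *out_t = m[1] * s + m[5] * t + m[13];  // Row 1.
}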
diff --git a/chromium/content/common/gpu/media/avda_codec_image.h b/chromium/content/common/gpu/media/avda_codec_image.h
index ef0456a9fba..46547e478c8 100644
--- a/chromium/content/common/gpu/media/avda_codec_image.h
+++ b/chromium/content/common/gpu/media/avda_codec_image.h
@@ -9,13 +9,17 @@
#include "base/macros.h"
#include "content/common/gpu/media/avda_shared_state.h"
-#include "ui/gl/gl_image.h"
+#include "gpu/command_buffer/service/gl_stream_texture_image.h"
+
+namespace ui {
+class ScopedMakeCurrent;
+}
namespace content {
-// GLImage that renders MediaCodec buffers to a SurfaceTexture as needed
-// in order to draw them.
-class AVDACodecImage : public gl::GLImage {
+// GLImage that renders MediaCodec buffers to a SurfaceTexture or SurfaceView as
+// needed in order to draw them.
+class AVDACodecImage : public gpu::gles2::GLStreamTextureImage {
public:
AVDACodecImage(const scoped_refptr<AVDASharedState>&,
media::VideoCodecBridge* codec,
@@ -44,6 +48,8 @@ class AVDACodecImage : public gl::GLImage {
void OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
uint64_t process_tracing_id,
const std::string& dump_name) override;
+ // gpu::gles2::GLStreamTextureMatrix implementation
+ void GetTextureMatrix(float xform[16]) override;
public:
// Decoded buffer index that has the image for us to display.
@@ -58,24 +64,42 @@ class AVDACodecImage : public gl::GLImage {
void SetMediaCodec(media::MediaCodecBridge* codec);
- void setTexture(gpu::gles2::Texture* texture);
+ void SetTexture(gpu::gles2::Texture* texture);
private:
- // Make sure that the surface texture's front buffer is current.
- void UpdateSurfaceTexture();
-
- // Attach the surface texture to our GL context, with a texture that we
- // create for it.
+ enum { kInvalidCodecBufferIndex = -1 };
+
+ // Make sure that the surface texture's front buffer is current. This will
+ // save / restore the current context. It will optionally restore the texture
+ // bindings in the surface texture's context, based on |mode|. This is
+ // intended as a hint if we don't need to change contexts. If we do need to
+  // change contexts, then we'll always preserve the texture bindings in
+  // both contexts. In other words, the caller is telling us whether it's
+ // okay to change the binding in the current context.
+ enum RestoreBindingsMode { kDontRestoreBindings, kDoRestoreBindings };
+ void UpdateSurfaceTexture(RestoreBindingsMode mode);
+
+  // Attach the surface texture to our GL context, using whatever texture is
+  // bound on the active unit.
void AttachSurfaceTextureToContext();
- // Install the current texture matrix into the shader.
- void InstallTextureMatrix();
+ // Make shared_state_->context() current if it isn't already.
+ scoped_ptr<ui::ScopedMakeCurrent> MakeCurrentIfNeeded();
+
+ // Return whether or not the current context is in the same share group as
+ // |surface_texture_|'s client texture.
+ // TODO(liberato): is this needed?
+ bool IsCorrectShareGroup() const;
+
+ // Return whether there is a codec buffer that we haven't rendered yet. Will
+ // return false also if there's no codec or we otherwise can't update.
+ bool IsCodecBufferOutstanding() const;
// Shared state between the AVDA and all AVDACodecImages.
scoped_refptr<AVDASharedState> shared_state_;
- // Codec's buffer index that we should render to the surface texture,
- // or <0 if none.
+ // The MediaCodec buffer index that we should render. Only valid if not equal
+ // to |kInvalidCodecBufferIndex|.
int codec_buffer_index_;
// Our image size.
@@ -86,6 +110,8 @@ class AVDACodecImage : public gl::GLImage {
const base::WeakPtr<gpu::gles2::GLES2Decoder> decoder_;
+ // The SurfaceTexture to render to. This is null when rendering to a
+ // SurfaceView.
const scoped_refptr<gfx::SurfaceTexture> surface_texture_;
// Should we detach |surface_texture_| from its GL context when we are
@@ -95,12 +121,6 @@ class AVDACodecImage : public gl::GLImage {
// The texture that we're attached to.
gpu::gles2::Texture* texture_;
- // Have we cached |texmatrix_uniform_location_| yet?
- bool need_shader_info_;
-
- // Uniform ID of the texture matrix in the shader.
- GLint texmatrix_uniform_location_;
-
// Texture matrix of the front buffer of the surface texture.
float gl_matrix_[16];
diff --git a/chromium/content/common/gpu/media/avda_shared_state.cc b/chromium/content/common/gpu/media/avda_shared_state.cc
index c182bf05385..7746254fee9 100644
--- a/chromium/content/common/gpu/media/avda_shared_state.cc
+++ b/chromium/content/common/gpu/media/avda_shared_state.cc
@@ -4,6 +4,7 @@
#include "content/common/gpu/media/avda_shared_state.h"
+#include "base/time/time.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/scoped_make_current.h"
@@ -21,10 +22,13 @@ void AVDASharedState::SignalFrameAvailable() {
}
void AVDASharedState::WaitForFrameAvailable() {
- frame_available_event_.Wait();
+ // 10msec covers >99.9% of cases, so just wait for up to that much before
+ // giving up. If an error occurs, we might not ever get a notification.
+ const base::TimeDelta max_wait_time(base::TimeDelta::FromMilliseconds(10));
+ frame_available_event_.TimedWait(max_wait_time);
}
-void AVDASharedState::did_attach_surface_texture() {
+void AVDASharedState::DidAttachSurfaceTexture() {
context_ = gfx::GLContext::GetCurrent();
surface_ = gfx::GLSurface::GetCurrent();
DCHECK(context_);
@@ -33,4 +37,10 @@ void AVDASharedState::did_attach_surface_texture() {
surface_texture_is_attached_ = true;
}
+void AVDASharedState::DidDetachSurfaceTexture() {
+ context_ = nullptr;
+ surface_ = nullptr;
+ surface_texture_is_attached_ = false;
+}
+
} // namespace content
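The bounded wait above is the usual base::WaitableEvent pairing: the MediaCodec frame-available callback signals, and the render path waits for at most 10 ms before carrying on. A minimal sketch of that pairing (the class name is illustrative, and the bool-based WaitableEvent constructor is assumed for this Chromium revision):

#include "base/synchronization/waitable_event.h"
#include "base/time/time.h"

class FrameAvailableEvent {
 public:
  FrameAvailableEvent()
      : event_(false /* manual_reset */, false /* initially_signaled */) {}

  // Called from the onFrameAvailable callback thread.
  void Signal() { event_.Signal(); }

  // Called from the rendering path. Returns true if a frame arrived before
  // the 10 ms deadline; a timeout is not treated as fatal, matching the
  // WaitForFrameAvailable() change above.
  bool WaitWithTimeout() {
    return event_.TimedWait(base::TimeDelta::FromMilliseconds(10));
  }

 private:
  base::WaitableEvent event_;
};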
diff --git a/chromium/content/common/gpu/media/avda_shared_state.h b/chromium/content/common/gpu/media/avda_shared_state.h
index eb62681fcd5..5f80c44d729 100644
--- a/chromium/content/common/gpu/media/avda_shared_state.h
+++ b/chromium/content/common/gpu/media/avda_shared_state.h
@@ -50,10 +50,19 @@ class AVDASharedState : public base::RefCounted<AVDASharedState> {
return surface_texture_is_attached_;
}
+ // TODO(liberato): move the surface texture here and make these calls
+ // attach / detach it also. There are several changes going on in avda
+ // concurrently, so I don't want to change that until the dust settles.
+ // AVDACodecImage would no longer hold the surface texture.
+
// Call this when the SurfaceTexture is attached to a GL context. This will
// update surface_texture_is_attached(), and set the context() and surface()
// to match.
- void did_attach_surface_texture();
+ void DidAttachSurfaceTexture();
+
+ // Call this when the SurfaceTexture is detached from its GL context. This
+ // will cause us to forget the last binding.
+ void DidDetachSurfaceTexture();
private:
// Platform gl texture Id for |surface_texture_|. This will be zero if
diff --git a/chromium/content/common/gpu/media/avda_state_provider.h b/chromium/content/common/gpu/media/avda_state_provider.h
index 2c84f2ed04a..e7dfac62ded 100644
--- a/chromium/content/common/gpu/media/avda_state_provider.h
+++ b/chromium/content/common/gpu/media/avda_state_provider.h
@@ -8,6 +8,7 @@
#include "base/compiler_specific.h"
#include "base/threading/thread_checker.h"
#include "content/common/content_export.h"
+#include "gpu/command_buffer/service/texture_manager.h"
#include "media/video/video_decode_accelerator.h"
namespace gfx {
@@ -36,6 +37,8 @@ class AVDAStateProvider {
virtual const gfx::Size& GetSize() const = 0;
virtual const base::ThreadChecker& ThreadChecker() const = 0;
virtual base::WeakPtr<gpu::gles2::GLES2Decoder> GetGlDecoder() const = 0;
+ virtual gpu::gles2::TextureRef* GetTextureForPicture(
+ const media::PictureBuffer& picture_buffer) = 0;
// Helper function to report an error condition and stop decoding.
// This will post NotifyError(), and transition to the error state.
diff --git a/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.cc b/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.cc
index 40a3239cb25..e55c9009720 100644
--- a/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.cc
+++ b/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.cc
@@ -21,7 +21,6 @@
#include "base/base_paths_win.h"
#include "base/bind.h"
#include "base/callback.h"
-#include "base/command_line.h"
#include "base/debug/alias.h"
#include "base/file_version_info.h"
#include "base/files/file_path.h"
@@ -34,15 +33,14 @@
#include "base/trace_event/trace_event.h"
#include "base/win/windows_version.h"
#include "build/build_config.h"
-#include "content/public/common/content_switches.h"
#include "media/base/win/mf_initializer.h"
#include "media/video/video_decode_accelerator.h"
#include "third_party/angle/include/EGL/egl.h"
#include "third_party/angle/include/EGL/eglext.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_surface_egl.h"
-#include "ui/gl/gl_switches.h"
namespace {
@@ -113,6 +111,91 @@ DEFINE_GUID(CLSID_VideoProcessorMFT,
DEFINE_GUID(MF_XVP_PLAYBACK_MODE, 0x3c5d293f, 0xad67, 0x4e29, 0xaf, 0x12,
0xcf, 0x3e, 0x23, 0x8a, 0xcc, 0xe9);
+// Defines the GUID for the Intel H264 DXVA device.
+static const GUID DXVA2_Intel_ModeH264_E = {
+ 0x604F8E68, 0x4951, 0x4c54,{ 0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6}
+};
+
+// R600, R700, Evergreen and Cayman AMD cards. These support DXVA via UVD3
+// or earlier, and don't handle resolutions higher than 1920 x 1088 well.
+static const DWORD g_AMDUVD3GPUList[] = {
+ 0x9400, 0x9401, 0x9402, 0x9403, 0x9405, 0x940a, 0x940b, 0x940f, 0x94c0,
+ 0x94c1, 0x94c3, 0x94c4, 0x94c5, 0x94c6, 0x94c7, 0x94c8, 0x94c9, 0x94cb,
+ 0x94cc, 0x94cd, 0x9580, 0x9581, 0x9583, 0x9586, 0x9587, 0x9588, 0x9589,
+ 0x958a, 0x958b, 0x958c, 0x958d, 0x958e, 0x958f, 0x9500, 0x9501, 0x9504,
+ 0x9505, 0x9506, 0x9507, 0x9508, 0x9509, 0x950f, 0x9511, 0x9515, 0x9517,
+ 0x9519, 0x95c0, 0x95c2, 0x95c4, 0x95c5, 0x95c6, 0x95c7, 0x95c9, 0x95cc,
+ 0x95cd, 0x95ce, 0x95cf, 0x9590, 0x9591, 0x9593, 0x9595, 0x9596, 0x9597,
+ 0x9598, 0x9599, 0x959b, 0x9610, 0x9611, 0x9612, 0x9613, 0x9614, 0x9615,
+ 0x9616, 0x9710, 0x9711, 0x9712, 0x9713, 0x9714, 0x9715, 0x9440, 0x9441,
+ 0x9442, 0x9443, 0x9444, 0x9446, 0x944a, 0x944b, 0x944c, 0x944e, 0x9450,
+ 0x9452, 0x9456, 0x945a, 0x945b, 0x945e, 0x9460, 0x9462, 0x946a, 0x946b,
+ 0x947a, 0x947b, 0x9480, 0x9487, 0x9488, 0x9489, 0x948a, 0x948f, 0x9490,
+ 0x9491, 0x9495, 0x9498, 0x949c, 0x949e, 0x949f, 0x9540, 0x9541, 0x9542,
+ 0x954e, 0x954f, 0x9552, 0x9553, 0x9555, 0x9557, 0x955f, 0x94a0, 0x94a1,
+ 0x94a3, 0x94b1, 0x94b3, 0x94b4, 0x94b5, 0x94b9, 0x68e0, 0x68e1, 0x68e4,
+ 0x68e5, 0x68e8, 0x68e9, 0x68f1, 0x68f2, 0x68f8, 0x68f9, 0x68fa, 0x68fe,
+ 0x68c0, 0x68c1, 0x68c7, 0x68c8, 0x68c9, 0x68d8, 0x68d9, 0x68da, 0x68de,
+ 0x68a0, 0x68a1, 0x68a8, 0x68a9, 0x68b0, 0x68b8, 0x68b9, 0x68ba, 0x68be,
+ 0x68bf, 0x6880, 0x6888, 0x6889, 0x688a, 0x688c, 0x688d, 0x6898, 0x6899,
+ 0x689b, 0x689e, 0x689c, 0x689d, 0x9802, 0x9803, 0x9804, 0x9805, 0x9806,
+ 0x9807, 0x9808, 0x9809, 0x980a, 0x9640, 0x9641, 0x9647, 0x9648, 0x964a,
+ 0x964b, 0x964c, 0x964e, 0x964f, 0x9642, 0x9643, 0x9644, 0x9645, 0x9649,
+ 0x6720, 0x6721, 0x6722, 0x6723, 0x6724, 0x6725, 0x6726, 0x6727, 0x6728,
+ 0x6729, 0x6738, 0x6739, 0x673e, 0x6740, 0x6741, 0x6742, 0x6743, 0x6744,
+ 0x6745, 0x6746, 0x6747, 0x6748, 0x6749, 0x674a, 0x6750, 0x6751, 0x6758,
+ 0x6759, 0x675b, 0x675d, 0x675f, 0x6840, 0x6841, 0x6842, 0x6843, 0x6849,
+ 0x6850, 0x6858, 0x6859, 0x6760, 0x6761, 0x6762, 0x6763, 0x6764, 0x6765,
+ 0x6766, 0x6767, 0x6768, 0x6770, 0x6771, 0x6772, 0x6778, 0x6779, 0x677b,
+ 0x6700, 0x6701, 0x6702, 0x6703, 0x6704, 0x6705, 0x6706, 0x6707, 0x6708,
+ 0x6709, 0x6718, 0x6719, 0x671c, 0x671d, 0x671f, 0x683D, 0x9900, 0x9901,
+ 0x9903, 0x9904, 0x9905, 0x9906, 0x9907, 0x9908, 0x9909, 0x990a, 0x990b,
+ 0x990c, 0x990d, 0x990e, 0x990f, 0x9910, 0x9913, 0x9917, 0x9918, 0x9919,
+ 0x9990, 0x9991, 0x9992, 0x9993, 0x9994, 0x9995, 0x9996, 0x9997, 0x9998,
+ 0x9999, 0x999a, 0x999b, 0x999c, 0x999d, 0x99a0, 0x99a2, 0x99a4,
+};
+
+// Legacy Intel GPUs (second generation) which have trouble with resolutions
+// higher than 1920 x 1088.
+static const DWORD g_IntelLegacyGPUList[] = {
+ 0x102, 0x106, 0x116, 0x126,
+};
+
+// Provides scoped access to the underlying buffer in an IMFMediaBuffer
+// instance.
+class MediaBufferScopedPointer {
+ public:
+ MediaBufferScopedPointer(IMFMediaBuffer* media_buffer)
+ : media_buffer_(media_buffer),
+ buffer_(nullptr),
+ max_length_(0),
+ current_length_(0) {
+ HRESULT hr = media_buffer_->Lock(&buffer_, &max_length_, &current_length_);
+ CHECK(SUCCEEDED(hr));
+ }
+
+ ~MediaBufferScopedPointer() {
+ HRESULT hr = media_buffer_->Unlock();
+ CHECK(SUCCEEDED(hr));
+ }
+
+ uint8_t* get() {
+ return buffer_;
+ }
+
+ DWORD current_length() const {
+ return current_length_;
+ }
+
+ private:
+ base::win::ScopedComPtr<IMFMediaBuffer> media_buffer_;
+ uint8_t* buffer_;
+ DWORD max_length_;
+ DWORD current_length_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaBufferScopedPointer);
+};
+
} // namespace
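MediaBufferScopedPointer above is a small RAII wrapper over IMFMediaBuffer::Lock/Unlock. A hedged usage sketch follows: ConvertToContiguousBuffer is a standard IMFSample call, while ReadSamplePayload is purely illustrative and not part of the patch:

#include <stdint.h>
#include <vector>

#include "base/win/scoped_comptr.h"

// Read the payload of an IMFSample through the RAII wrapper defined above.
// Returns false if no contiguous buffer can be obtained.
bool ReadSamplePayload(IMFSample* sample, std::vector<uint8_t>* out) {
  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = sample->ConvertToContiguousBuffer(buffer.Receive());
  if (FAILED(hr))
    return false;
  MediaBufferScopedPointer scoped(buffer.get());  // Locks the buffer.
  out->assign(scoped.get(), scoped.get() + scoped.current_length());
  return true;  // The buffer is unlocked when |scoped| goes out of scope.
}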
namespace content {
@@ -122,7 +205,10 @@ static const media::VideoCodecProfile kSupportedProfiles[] = {
media::H264PROFILE_MAIN,
media::H264PROFILE_HIGH,
media::VP8PROFILE_ANY,
- media::VP9PROFILE_ANY
+ media::VP9PROFILE_PROFILE0,
+ media::VP9PROFILE_PROFILE1,
+ media::VP9PROFILE_PROFILE2,
+ media::VP9PROFILE_PROFILE3
};
CreateDXGIDeviceManager DXVAVideoDecodeAccelerator::create_dxgi_device_manager_
@@ -162,10 +248,16 @@ enum {
kFlushDecoderSurfaceTimeoutMs = 1,
// Maximum iterations where we try to flush the d3d device.
kMaxIterationsForD3DFlush = 4,
+ // Maximum iterations where we try to flush the ANGLE device before reusing
+ // the texture.
+ kMaxIterationsForANGLEReuseFlush = 16,
// We only request 5 picture buffers from the client which are used to hold
// the decoded samples. These buffers are then reused when the client tells
// us that it is done with the buffer.
kNumPictureBuffers = 5,
+ // The keyed mutex should always be released before the other thread
+ // attempts to acquire it, so AcquireSync should always return immediately.
+ kAcquireSyncWaitMs = 0,
};
static IMFSample* CreateEmptySample() {
@@ -177,8 +269,9 @@ static IMFSample* CreateEmptySample() {
// Creates a Media Foundation sample with one buffer of length |buffer_length|
// on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0.
-static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
- CHECK_GT(buffer_length, 0);
+static IMFSample* CreateEmptySampleWithBuffer(uint32_t buffer_length,
+ int align) {
+ CHECK_GT(buffer_length, 0U);
base::win::ScopedComPtr<IMFSample> sample;
sample.Attach(CreateEmptySample());
@@ -209,11 +302,11 @@ static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
// |min_size| specifies the minimum size of the buffer (might be required by
// the decoder for input). If no alignment is required, provide 0.
static IMFSample* CreateInputSample(const uint8_t* stream,
- int size,
- int min_size,
+ uint32_t size,
+ uint32_t min_size,
int alignment) {
CHECK(stream);
- CHECK_GT(size, 0);
+ CHECK_GT(size, 0U);
base::win::ScopedComPtr<IMFSample> sample;
sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
alignment));
@@ -230,28 +323,16 @@ static IMFSample* CreateInputSample(const uint8_t* stream,
RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL);
CHECK_EQ(current_length, 0u);
- CHECK_GE(static_cast<int>(max_length), size);
+ CHECK_GE(max_length, size);
memcpy(destination, stream, size);
- hr = buffer->Unlock();
- RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL);
-
hr = buffer->SetCurrentLength(size);
RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", NULL);
- return sample.Detach();
-}
-
-static IMFSample* CreateSampleFromInputBuffer(
- const media::BitstreamBuffer& bitstream_buffer,
- DWORD stream_size,
- DWORD alignment) {
- base::SharedMemory shm(bitstream_buffer.handle(), true);
- RETURN_ON_FAILURE(shm.Map(bitstream_buffer.size()),
- "Failed in base::SharedMemory::Map", NULL);
+ hr = buffer->Unlock();
+ RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL);
- return CreateInputSample(reinterpret_cast<const uint8_t*>(shm.memory()),
- bitstream_buffer.size(), stream_size, alignment);
+ return sample.Detach();
}
// Helper function to create a COM object instance from a DLL. The alternative
@@ -289,55 +370,188 @@ template<class T>
base::win::ScopedComPtr<T> QueryDeviceObjectFromANGLE(int object_type) {
base::win::ScopedComPtr<T> device_object;
- EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display = nullptr;
intptr_t egl_device = 0;
intptr_t device = 0;
+ {
+ TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. GetHardwareDisplay");
+ egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
+ }
+
RETURN_ON_FAILURE(
gfx::GLSurfaceEGL::HasEGLExtension("EGL_EXT_device_query"),
"EGL_EXT_device_query missing",
device_object);
- PFNEGLQUERYDISPLAYATTRIBEXTPROC QueryDisplayAttribEXT =
- reinterpret_cast<PFNEGLQUERYDISPLAYATTRIBEXTPROC>(eglGetProcAddress(
- "eglQueryDisplayAttribEXT"));
+ PFNEGLQUERYDISPLAYATTRIBEXTPROC QueryDisplayAttribEXT = nullptr;
- RETURN_ON_FAILURE(
- QueryDisplayAttribEXT,
- "Failed to get the eglQueryDisplayAttribEXT function from ANGLE",
- device_object);
+ {
+ TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress");
- PFNEGLQUERYDEVICEATTRIBEXTPROC QueryDeviceAttribEXT =
- reinterpret_cast<PFNEGLQUERYDEVICEATTRIBEXTPROC>(eglGetProcAddress(
- "eglQueryDeviceAttribEXT"));
+ QueryDisplayAttribEXT =
+ reinterpret_cast<PFNEGLQUERYDISPLAYATTRIBEXTPROC>(eglGetProcAddress(
+ "eglQueryDisplayAttribEXT"));
- RETURN_ON_FAILURE(
- QueryDeviceAttribEXT,
- "Failed to get the eglQueryDeviceAttribEXT function from ANGLE",
- device_object);
+ RETURN_ON_FAILURE(
+ QueryDisplayAttribEXT,
+ "Failed to get the eglQueryDisplayAttribEXT function from ANGLE",
+ device_object);
+ }
- RETURN_ON_FAILURE(
- QueryDisplayAttribEXT(egl_display, EGL_DEVICE_EXT, &egl_device),
- "The eglQueryDisplayAttribEXT function failed to get the EGL device",
- device_object);
+ PFNEGLQUERYDEVICEATTRIBEXTPROC QueryDeviceAttribEXT = nullptr;
+
+ {
+ TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress");
+
+ QueryDeviceAttribEXT =
+ reinterpret_cast<PFNEGLQUERYDEVICEATTRIBEXTPROC>(eglGetProcAddress(
+ "eglQueryDeviceAttribEXT"));
+
+ RETURN_ON_FAILURE(
+ QueryDeviceAttribEXT,
+ "Failed to get the eglQueryDeviceAttribEXT function from ANGLE",
+ device_object);
+ }
+
+ {
+ TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. QueryDisplayAttribEXT");
+
+ RETURN_ON_FAILURE(
+ QueryDisplayAttribEXT(egl_display, EGL_DEVICE_EXT, &egl_device),
+ "The eglQueryDisplayAttribEXT function failed to get the EGL device",
+ device_object);
+ }
RETURN_ON_FAILURE(
egl_device,
"Failed to get the EGL device",
device_object);
- RETURN_ON_FAILURE(
- QueryDeviceAttribEXT(
- reinterpret_cast<EGLDeviceEXT>(egl_device), object_type, &device),
- "The eglQueryDeviceAttribEXT function failed to get the device",
- device_object);
+ {
+ TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. QueryDisplayAttribEXT");
- RETURN_ON_FAILURE(device, "Failed to get the ANGLE device", device_object);
+ RETURN_ON_FAILURE(
+ QueryDeviceAttribEXT(
+ reinterpret_cast<EGLDeviceEXT>(egl_device), object_type, &device),
+ "The eglQueryDeviceAttribEXT function failed to get the device",
+ device_object);
+
+ RETURN_ON_FAILURE(device, "Failed to get the ANGLE device", device_object);
+ }
device_object = reinterpret_cast<T*>(device);
return device_object;
}
+H264ConfigChangeDetector::H264ConfigChangeDetector()
+ : last_sps_id_(0),
+ last_pps_id_(0),
+ config_changed_(false),
+ pending_config_changed_(false) {
+}
+
+H264ConfigChangeDetector::~H264ConfigChangeDetector() {
+}
+
+bool H264ConfigChangeDetector::DetectConfig(const uint8_t* stream,
+ unsigned int size) {
+ std::vector<uint8_t> sps;
+ std::vector<uint8_t> pps;
+ media::H264NALU nalu;
+ bool idr_seen = false;
+
+ if (!parser_.get())
+ parser_.reset(new media::H264Parser);
+
+ parser_->SetStream(stream, size);
+ config_changed_ = false;
+
+ while (true) {
+ media::H264Parser::Result result = parser_->AdvanceToNextNALU(&nalu);
+
+ if (result == media::H264Parser::kEOStream)
+ break;
+
+ if (result == media::H264Parser::kUnsupportedStream) {
+ DLOG(ERROR) << "Unsupported H.264 stream";
+ return false;
+ }
+
+ if (result != media::H264Parser::kOk) {
+ DLOG(ERROR) << "Failed to parse H.264 stream";
+ return false;
+ }
+
+ switch (nalu.nal_unit_type) {
+ case media::H264NALU::kSPS:
+ result = parser_->ParseSPS(&last_sps_id_);
+ if (result == media::H264Parser::kUnsupportedStream) {
+ DLOG(ERROR) << "Unsupported SPS";
+ return false;
+ }
+
+ if (result != media::H264Parser::kOk) {
+ DLOG(ERROR) << "Could not parse SPS";
+ return false;
+ }
+
+ sps.assign(nalu.data, nalu.data + nalu.size);
+ break;
+
+ case media::H264NALU::kPPS:
+ result = parser_->ParsePPS(&last_pps_id_);
+ if (result == media::H264Parser::kUnsupportedStream) {
+ DLOG(ERROR) << "Unsupported PPS";
+ return false;
+ }
+ if (result != media::H264Parser::kOk) {
+ DLOG(ERROR) << "Could not parse PPS";
+ return false;
+ }
+ pps.assign(nalu.data, nalu.data + nalu.size);
+ break;
+
+ case media::H264NALU::kIDRSlice:
+ idr_seen = true;
+ // If we previously detected a configuration change, and see an IDR
+ // slice next time around, we need to flag a configuration change.
+ if (pending_config_changed_) {
+ config_changed_ = true;
+ pending_config_changed_ = false;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!sps.empty() && sps != last_sps_) {
+ if (!last_sps_.empty()) {
+ // Flag configuration changes after we see an IDR slice.
+ if (idr_seen) {
+ config_changed_ = true;
+ } else {
+ pending_config_changed_ = true;
+ }
+ }
+ last_sps_.swap(sps);
+ }
+
+ if (!pps.empty() && pps != last_pps_) {
+ if (!last_pps_.empty()) {
+ // Flag configuration changes after we see an IDR slice.
+ if (idr_seen) {
+ config_changed_ = true;
+ } else {
+ pending_config_changed_ = true;
+ }
+ }
+ last_pps_.swap(pps);
+ }
+ return true;
+}
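A sketch of how a decode path might consult the detector once per bitstream buffer. The config_changed() accessor is assumed from the class header (it is not visible in this hunk), and both the function name and the drain/re-create step are only indicative:

bool CheckForConfigChange(H264ConfigChangeDetector* detector,
                          const uint8_t* stream, unsigned int size) {
  if (!detector->DetectConfig(stream, size))
    return false;  // Parse failure; surface as a platform error upstream.
  if (detector->config_changed()) {
    // SPS/PPS changed at an IDR boundary: drain the decoder, then
    // re-create it with the new configuration before feeding this buffer.
  }
  return true;
}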
// Maintains information about a DXVA picture buffer, i.e. whether it is
// available for rendering, the texture information, etc.
@@ -349,7 +563,11 @@ struct DXVAVideoDecodeAccelerator::DXVAPictureBuffer {
EGLConfig egl_config);
~DXVAPictureBuffer();
- void ReusePictureBuffer();
+ bool InitializeTexture(const DXVAVideoDecodeAccelerator& decoder,
+ bool use_rgb);
+
+ bool ReusePictureBuffer();
+ void ResetReuseFence();
// Copies the output sample data to the picture buffer provided by the
// client.
// The dest_surface parameter contains the decoded bits.
@@ -375,20 +593,37 @@ struct DXVAVideoDecodeAccelerator::DXVAPictureBuffer {
return picture_buffer_.size();
}
+ bool waiting_to_reuse() const { return waiting_to_reuse_; }
+
+ gfx::GLFence* reuse_fence() { return reuse_fence_.get(); }
+
// Called when the source surface |src_surface| is copied to the destination
// |dest_surface|
- void CopySurfaceComplete(IDirect3DSurface9* src_surface,
+ bool CopySurfaceComplete(IDirect3DSurface9* src_surface,
IDirect3DSurface9* dest_surface);
private:
explicit DXVAPictureBuffer(const media::PictureBuffer& buffer);
bool available_;
+
+ // This is true if the decoder is currently waiting on the fence before
+ // reusing the buffer.
+ bool waiting_to_reuse_;
media::PictureBuffer picture_buffer_;
EGLSurface decoding_surface_;
+ scoped_ptr<gfx::GLFence> reuse_fence_;
+
+ HANDLE texture_share_handle_;
base::win::ScopedComPtr<IDirect3DTexture9> decoding_texture_;
base::win::ScopedComPtr<ID3D11Texture2D> dx11_decoding_texture_;
+ base::win::ScopedComPtr<IDXGIKeyedMutex> egl_keyed_mutex_;
+ base::win::ScopedComPtr<IDXGIKeyedMutex> dx11_keyed_mutex_;
+
+ // This is the last value that was used to release the keyed mutex.
+ uint64_t keyed_mutex_value_;
+
// The following |IDirect3DSurface9| interface pointers are used to hold
// references on the surfaces during the course of a StretchRect operation
// to copy the source surface to the target. The references are released
@@ -422,6 +657,9 @@ DXVAVideoDecodeAccelerator::DXVAPictureBuffer::Create(
eglGetConfigAttrib(egl_display, egl_config, EGL_BIND_TO_TEXTURE_RGB,
&use_rgb);
+ if (!picture_buffer->InitializeTexture(decoder, !!use_rgb))
+ return linked_ptr<DXVAPictureBuffer>(nullptr);
+
EGLint attrib_list[] = {
EGL_WIDTH, buffer.size().width(),
EGL_HEIGHT, buffer.size().height(),
@@ -430,59 +668,84 @@ DXVAVideoDecodeAccelerator::DXVAPictureBuffer::Create(
EGL_NONE
};
- picture_buffer->decoding_surface_ = eglCreatePbufferSurface(
- egl_display,
- egl_config,
- attrib_list);
+ picture_buffer->decoding_surface_ = eglCreatePbufferFromClientBuffer(
+ egl_display, EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE,
+ picture_buffer->texture_share_handle_, egl_config, attrib_list);
RETURN_ON_FAILURE(picture_buffer->decoding_surface_,
"Failed to create surface",
linked_ptr<DXVAPictureBuffer>(NULL));
+ if (decoder.d3d11_device_ && decoder.use_keyed_mutex_) {
+ void* keyed_mutex = nullptr;
+ EGLBoolean ret = eglQuerySurfacePointerANGLE(
+ egl_display, picture_buffer->decoding_surface_,
+ EGL_DXGI_KEYED_MUTEX_ANGLE, &keyed_mutex);
+ RETURN_ON_FAILURE(keyed_mutex && ret == EGL_TRUE,
+ "Failed to query ANGLE keyed mutex",
+ linked_ptr<DXVAPictureBuffer>(nullptr));
+ picture_buffer->egl_keyed_mutex_ = base::win::ScopedComPtr<IDXGIKeyedMutex>(
+ static_cast<IDXGIKeyedMutex*>(keyed_mutex));
+ }
+ picture_buffer->use_rgb_ = !!use_rgb;
+ return picture_buffer;
+}
- HANDLE share_handle = NULL;
- EGLBoolean ret = eglQuerySurfacePointerANGLE(
- egl_display,
- picture_buffer->decoding_surface_,
- EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE,
- &share_handle);
+bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::InitializeTexture(
+ const DXVAVideoDecodeAccelerator& decoder,
+ bool use_rgb) {
+ DCHECK(!texture_share_handle_);
+ if (decoder.d3d11_device_) {
+ D3D11_TEXTURE2D_DESC desc;
+ desc.Width = picture_buffer_.size().width();
+ desc.Height = picture_buffer_.size().height();
+ desc.MipLevels = 1;
+ desc.ArraySize = 1;
+ desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
+ desc.SampleDesc.Count = 1;
+ desc.SampleDesc.Quality = 0;
+ desc.Usage = D3D11_USAGE_DEFAULT;
+ desc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
+ desc.CPUAccessFlags = 0;
+ desc.MiscFlags = decoder.use_keyed_mutex_
+ ? D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX
+ : D3D11_RESOURCE_MISC_SHARED;
+
+ HRESULT hr = decoder.d3d11_device_->CreateTexture2D(
+ &desc, nullptr, dx11_decoding_texture_.Receive());
+ RETURN_ON_HR_FAILURE(hr, "Failed to create texture", false);
+ if (decoder.use_keyed_mutex_) {
+ hr = dx11_keyed_mutex_.QueryFrom(dx11_decoding_texture_.get());
+ RETURN_ON_HR_FAILURE(hr, "Failed to get keyed mutex", false);
+ }
- RETURN_ON_FAILURE(share_handle && ret == EGL_TRUE,
- "Failed to query ANGLE surface pointer",
- linked_ptr<DXVAPictureBuffer>(NULL));
+ base::win::ScopedComPtr<IDXGIResource> resource;
+ hr = resource.QueryFrom(dx11_decoding_texture_.get());
+ DCHECK(SUCCEEDED(hr));
+ hr = resource->GetSharedHandle(&texture_share_handle_);
+ RETURN_ON_FAILURE(SUCCEEDED(hr) && texture_share_handle_,
+ "Failed to query shared handle", false);
- HRESULT hr = E_FAIL;
- if (decoder.d3d11_device_) {
- base::win::ScopedComPtr<ID3D11Resource> resource;
- hr = decoder.d3d11_device_->OpenSharedResource(
- share_handle,
- __uuidof(ID3D11Resource),
- reinterpret_cast<void**>(resource.Receive()));
- RETURN_ON_HR_FAILURE(hr, "Failed to open shared resource",
- linked_ptr<DXVAPictureBuffer>(NULL));
- hr = picture_buffer->dx11_decoding_texture_.QueryFrom(resource.get());
} else {
+ HRESULT hr = E_FAIL;
hr = decoder.d3d9_device_ex_->CreateTexture(
- buffer.size().width(),
- buffer.size().height(),
- 1,
- D3DUSAGE_RENDERTARGET,
- use_rgb ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8,
- D3DPOOL_DEFAULT,
- picture_buffer->decoding_texture_.Receive(),
- &share_handle);
- }
- RETURN_ON_HR_FAILURE(hr, "Failed to create texture",
- linked_ptr<DXVAPictureBuffer>(NULL));
- picture_buffer->use_rgb_ = !!use_rgb;
- return picture_buffer;
+ picture_buffer_.size().width(), picture_buffer_.size().height(), 1,
+ D3DUSAGE_RENDERTARGET, use_rgb ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8,
+ D3DPOOL_DEFAULT, decoding_texture_.Receive(), &texture_share_handle_);
+ RETURN_ON_HR_FAILURE(hr, "Failed to create texture", false);
+ RETURN_ON_FAILURE(texture_share_handle_, "Failed to query shared handle",
+ false);
+ }
+ return true;
}
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
const media::PictureBuffer& buffer)
: available_(true),
+ waiting_to_reuse_(false),
picture_buffer_(buffer),
decoding_surface_(NULL),
- use_rgb_(true) {
-}
+ texture_share_handle_(nullptr),
+ keyed_mutex_value_(0),
+ use_rgb_(true) {}
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::~DXVAPictureBuffer() {
if (decoding_surface_) {
@@ -500,7 +763,7 @@ DXVAVideoDecodeAccelerator::DXVAPictureBuffer::~DXVAPictureBuffer() {
}
}
-void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() {
+bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() {
DCHECK(decoding_surface_);
EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
eglReleaseTexImage(
@@ -510,7 +773,21 @@ void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() {
decoder_surface_.Release();
target_surface_.Release();
decoder_dx11_texture_.Release();
+ waiting_to_reuse_ = false;
set_available(true);
+ if (egl_keyed_mutex_) {
+ HRESULT hr = egl_keyed_mutex_->ReleaseSync(++keyed_mutex_value_);
+ RETURN_ON_FAILURE(hr == S_OK, "Could not release sync mutex", false);
+ }
+ return true;
+}
+
+void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ResetReuseFence() {
+ if (!reuse_fence_ || !reuse_fence_->ResetSupported())
+ reuse_fence_.reset(gfx::GLFence::Create());
+ else
+ reuse_fence_->ResetState();
+ waiting_to_reuse_ = true;
}
bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::
@@ -525,8 +802,9 @@ bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::
// when we receive a notification that the copy was completed or when the
// DXVAPictureBuffer instance is destroyed.
decoder_dx11_texture_ = dx11_texture;
- decoder->CopyTexture(dx11_texture, dx11_decoding_texture_.get(), NULL,
- id(), input_buffer_id);
+ decoder->CopyTexture(dx11_texture, dx11_decoding_texture_.get(),
+ dx11_keyed_mutex_, keyed_mutex_value_, NULL, id(),
+ input_buffer_id);
return true;
}
D3DSURFACE_DESC surface_desc;
@@ -566,7 +844,7 @@ bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::
return true;
}
-void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
+bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
IDirect3DSurface9* src_surface,
IDirect3DSurface9* dest_surface) {
DCHECK(!available());
@@ -574,7 +852,7 @@ void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
GLint current_texture = 0;
glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);
- glBindTexture(GL_TEXTURE_2D, picture_buffer_.texture_id());
+ glBindTexture(GL_TEXTURE_2D, picture_buffer_.texture_ids()[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
@@ -587,6 +865,12 @@ void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
DCHECK(decoder_dx11_texture_.get());
decoder_dx11_texture_.Release();
}
+ if (egl_keyed_mutex_) {
+ keyed_mutex_value_++;
+ HRESULT result =
+ egl_keyed_mutex_->AcquireSync(keyed_mutex_value_, kAcquireSyncWaitMs);
+ RETURN_ON_FAILURE(result == S_OK, "Could not acquire sync mutex", false);
+ }
EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
eglBindTexImage(
@@ -596,6 +880,7 @@ void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, current_texture);
+ return true;
}
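The keyed-mutex handshake above alternates AcquireSync/ReleaseSync on a shared, monotonically increasing key: one side releases with key N, the other acquires with that same key and later releases with N + 1. A minimal sketch of one side of the handshake (AcquireSync and ReleaseSync are standard IDXGIKeyedMutex methods; the helper names are illustrative, and the zero timeout mirrors kAcquireSyncWaitMs):

#include <dxgi.h>
#include <stdint.h>

// |key| plays the role of keyed_mutex_value_ in the hunks above.
bool AcquireSharedTexture(IDXGIKeyedMutex* keyed_mutex, uint64_t* key) {
  // The other side must already have released with (*key + 1), so a zero
  // wait is expected to succeed immediately.
  return keyed_mutex->AcquireSync(++(*key), 0) == S_OK;
}

bool ReleaseSharedTexture(IDXGIKeyedMutex* keyed_mutex, uint64_t* key) {
  return keyed_mutex->ReleaseSync(++(*key)) == S_OK;
}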
DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
@@ -608,8 +893,9 @@ DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}
DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- gfx::GLContext* gl_context)
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ bool enable_accelerated_vpx_decode)
: client_(NULL),
dev_manager_reset_token_(0),
dx11_dev_manager_reset_token_(0),
@@ -618,14 +904,16 @@ DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
pictures_requested_(false),
inputs_before_decode_(0),
sent_drain_message_(false),
- make_context_current_(make_context_current),
+ get_gl_context_cb_(get_gl_context_cb),
+ make_context_current_cb_(make_context_current_cb),
codec_(media::kUnknownVideoCodec),
decoder_thread_("DXVAVideoDecoderThread"),
pending_flush_(false),
use_dx11_(false),
+ use_keyed_mutex_(false),
dx11_video_format_converter_media_type_needs_init_(true),
- gl_context_(gl_context),
using_angle_device_(false),
+ enable_accelerated_vpx_decode_(enable_accelerated_vpx_decode),
weak_this_factory_(this) {
weak_ptr_ = weak_this_factory_.GetWeakPtr();
memset(&input_stream_info_, 0, sizeof(input_stream_info_));
@@ -638,6 +926,11 @@ DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
Client* client) {
+ if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
if (config.is_encrypted) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
return false;
@@ -695,6 +988,10 @@ bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
PLATFORM_FAILURE,
false);
+ RETURN_AND_NOTIFY_ON_FAILURE(gfx::GLFence::IsSupported(),
+ "GL fences are unsupported", PLATFORM_FAILURE,
+ false);
+
State state = GetState();
RETURN_AND_NOTIFY_ON_FAILURE((state == kUninitialized),
"Initialize: invalid state: " << state, ILLEGAL_STATE, false);
@@ -717,6 +1014,10 @@ bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
"Send MFT_MESSAGE_NOTIFY_START_OF_STREAM notification failed",
PLATFORM_FAILURE, false);
+ config_ = config;
+
+ config_change_detector_.reset(new H264ConfigChangeDetector);
+
SetState(kNormal);
StartDecoderThread();
@@ -883,15 +1184,28 @@ void DXVAVideoDecodeAccelerator::Decode(
const media::BitstreamBuffer& bitstream_buffer) {
DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
+  // SharedMemory will take over ownership of the handle.
+ base::SharedMemory shm(bitstream_buffer.handle(), true);
+
State state = GetState();
RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped ||
state == kFlushing),
"Invalid state: " << state, ILLEGAL_STATE,);
+ if (bitstream_buffer.id() < 0) {
+ RETURN_AND_NOTIFY_ON_FAILURE(
+ false, "Invalid bitstream_buffer, id: " << bitstream_buffer.id(),
+ INVALID_ARGUMENT, );
+ }
base::win::ScopedComPtr<IMFSample> sample;
- sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer,
- input_stream_info_.cbSize,
- input_stream_info_.cbAlignment));
+ RETURN_AND_NOTIFY_ON_FAILURE(shm.Map(bitstream_buffer.size()),
+ "Failed in base::SharedMemory::Map",
+ PLATFORM_FAILURE, );
+
+ sample.Attach(CreateInputSample(
+ reinterpret_cast<const uint8_t*>(shm.memory()), bitstream_buffer.size(),
+ std::min<uint32_t>(bitstream_buffer.size(), input_stream_info_.cbSize),
+ input_stream_info_.cbAlignment));
RETURN_AND_NOTIFY_ON_FAILURE(sample.get(), "Failed to create input sample",
PLATFORM_FAILURE, );
@@ -919,6 +1233,7 @@ void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
// and mark these buffers as available for use.
for (size_t buffer_index = 0; buffer_index < buffers.size();
++buffer_index) {
+ DCHECK_LE(1u, buffers[buffer_index].texture_ids().size());
linked_ptr<DXVAPictureBuffer> picture_buffer =
DXVAPictureBuffer::Create(*this, buffers[buffer_index], egl_config_);
RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer.get(),
@@ -956,17 +1271,70 @@ void DXVAVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
// us that we can now recycle this picture buffer, so if we were waiting to
// dispose of it we now can.
if (it == output_picture_buffers_.end()) {
- it = stale_output_picture_buffers_.find(picture_buffer_id);
- RETURN_AND_NOTIFY_ON_FAILURE(it != stale_output_picture_buffers_.end(),
- "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);
- main_thread_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer,
- weak_this_factory_.GetWeakPtr(), picture_buffer_id));
+ if (!stale_output_picture_buffers_.empty()) {
+ it = stale_output_picture_buffers_.find(picture_buffer_id);
+ RETURN_AND_NOTIFY_ON_FAILURE(it != stale_output_picture_buffers_.end(),
+ "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);
+ main_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer,
+ weak_this_factory_.GetWeakPtr(), picture_buffer_id));
+ }
+ return;
+ }
+
+ if (it->second->available() || it->second->waiting_to_reuse())
+ return;
+
+ if (use_keyed_mutex_ || using_angle_device_) {
+ RETURN_AND_NOTIFY_ON_FAILURE(it->second->ReusePictureBuffer(),
+ "Failed to reuse picture buffer",
+ PLATFORM_FAILURE, );
+
+ ProcessPendingSamples();
+ if (pending_flush_) {
+ decoder_thread_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
+ base::Unretained(this)));
+ }
+ } else {
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
+ it->second->ResetReuseFence();
+
+ WaitForOutputBuffer(picture_buffer_id, 0);
+ }
+}
+
+void DXVAVideoDecodeAccelerator::WaitForOutputBuffer(int32_t picture_buffer_id,
+ int count) {
+ DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
+ OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
+ if (it == output_picture_buffers_.end())
+ return;
+
+ DXVAPictureBuffer* picture_buffer = it->second.get();
+
+ DCHECK(!picture_buffer->available());
+ DCHECK(picture_buffer->waiting_to_reuse());
+
+ gfx::GLFence* fence = picture_buffer->reuse_fence();
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
+ if (count <= kMaxIterationsForANGLEReuseFlush && !fence->HasCompleted()) {
+ main_thread_task_runner_->PostDelayedTask(
+ FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::WaitForOutputBuffer,
+ weak_this_factory_.GetWeakPtr(),
+ picture_buffer_id, count + 1),
+ base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
return;
}
+ RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer->ReusePictureBuffer(),
+ "Failed to reuse picture buffer",
+ PLATFORM_FAILURE, );
- it->second->ReusePictureBuffer();
ProcessPendingSamples();
if (pending_flush_) {
decoder_thread_task_runner_->PostTask(
@@ -1046,7 +1414,9 @@ void DXVAVideoDecodeAccelerator::Destroy() {
delete this;
}
-bool DXVAVideoDecodeAccelerator::CanDecodeOnIOThread() {
+bool DXVAVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
return false;
}
@@ -1057,17 +1427,19 @@ GLenum DXVAVideoDecodeAccelerator::GetSurfaceInternalFormat() const {
// static
media::VideoDecodeAccelerator::SupportedProfiles
DXVAVideoDecodeAccelerator::GetSupportedProfiles() {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::GetSupportedProfiles");
+
// TODO(henryhsu): Need to ensure the profiles are actually supported.
SupportedProfiles profiles;
for (const auto& supported_profile : kSupportedProfiles) {
+ std::pair<int, int> min_resolution = GetMinResolution(supported_profile);
+ std::pair<int, int> max_resolution = GetMaxResolution(supported_profile);
+
SupportedProfile profile;
profile.profile = supported_profile;
- // Windows Media Foundation H.264 decoding does not support decoding videos
- // with any dimension smaller than 48 pixels:
- // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815
- profile.min_resolution.SetSize(48, 48);
- // Use 1088 to account for 16x16 macroblocks.
- profile.max_resolution.SetSize(1920, 1088);
+ profile.min_resolution.SetSize(min_resolution.first, min_resolution.second);
+ profile.max_resolution.SetSize(max_resolution.first, max_resolution.second);
profiles.push_back(profile);
}
return profiles;
@@ -1077,17 +1449,224 @@ DXVAVideoDecodeAccelerator::GetSupportedProfiles() {
void DXVAVideoDecodeAccelerator::PreSandboxInitialization() {
::LoadLibrary(L"MFPlat.dll");
::LoadLibrary(L"msmpeg2vdec.dll");
+ ::LoadLibrary(L"mf.dll");
+ ::LoadLibrary(L"dxva2.dll");
if (base::win::GetVersion() > base::win::VERSION_WIN7) {
LoadLibrary(L"msvproc.dll");
} else {
- LoadLibrary(L"dxva2.dll");
#if defined(ENABLE_DX11_FOR_WIN7)
LoadLibrary(L"mshtmlmedia.dll");
#endif
}
}
+// static
+std::pair<int, int> DXVAVideoDecodeAccelerator::GetMinResolution(
+ media::VideoCodecProfile profile) {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::GetMinResolution");
+ std::pair<int, int> min_resolution;
+ if (profile >= media::H264PROFILE_BASELINE &&
+ profile <= media::H264PROFILE_HIGH) {
+ // Windows Media Foundation H.264 decoding does not support decoding videos
+ // with any dimension smaller than 48 pixels:
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815
+ min_resolution = std::make_pair(48, 48);
+ } else {
+ // TODO(ananta)
+ // Detect this properly for VP8/VP9 profiles.
+ min_resolution = std::make_pair(16, 16);
+ }
+ return min_resolution;
+}
+
+// static
+std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxResolution(
+ const media::VideoCodecProfile profile) {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::GetMaxResolution");
+ std::pair<int, int> max_resolution;
+ if (profile >= media::H264PROFILE_BASELINE &&
+ profile <= media::H264PROFILE_HIGH) {
+ max_resolution = GetMaxH264Resolution();
+ } else {
+ // TODO(ananta)
+ // Detect this properly for VP8/VP9 profiles.
+ max_resolution = std::make_pair(4096, 2160);
+ }
+ return max_resolution;
+}
+
+std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxH264Resolution() {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::GetMaxH264Resolution");
+ // The H.264 resolution detection operation is expensive. This static flag
+ // allows us to run the detection once.
+ static bool resolution_detected = false;
+ // Use 1088 to account for 16x16 macroblocks.
+ static std::pair<int, int> max_resolution = std::make_pair(1920, 1088);
+ if (resolution_detected)
+ return max_resolution;
+
+ resolution_detected = true;
+
+ // On Windows 7, the maximum resolution supported by Media Foundation is
+ // 1920 x 1088.
+ if (base::win::GetVersion() == base::win::VERSION_WIN7)
+ return max_resolution;
+
+ // To detect whether a driver supports the desired resolutions, we try to
+ // create a DXVA decoder instance for that resolution and profile. If that
+ // succeeds, we assume that the driver supports H/W H.264 decoding for that
+ // resolution.
+ HRESULT hr = E_FAIL;
+ base::win::ScopedComPtr<ID3D11Device> device;
+
+ {
+ TRACE_EVENT0("gpu,startup",
+ "GetMaxH264Resolution. QueryDeviceObjectFromANGLE");
+
+ device = QueryDeviceObjectFromANGLE<ID3D11Device>(EGL_D3D11_DEVICE_ANGLE);
+ if (!device.get())
+ return max_resolution;
+ }
+
+ base::win::ScopedComPtr<ID3D11VideoDevice> video_device;
+ hr = device.QueryInterface(IID_ID3D11VideoDevice,
+ video_device.ReceiveVoid());
+ if (FAILED(hr))
+ return max_resolution;
+
+ GUID decoder_guid = {};
+
+ {
+ TRACE_EVENT0("gpu,startup",
+ "GetMaxH264Resolution. H.264 guid search begin");
+ // Enumerate supported video profiles and look for the H264 profile.
+ bool found = false;
+ UINT profile_count = video_device->GetVideoDecoderProfileCount();
+ for (UINT profile_idx = 0; profile_idx < profile_count; profile_idx++) {
+ GUID profile_id = {};
+ hr = video_device->GetVideoDecoderProfile(profile_idx, &profile_id);
+ if (SUCCEEDED(hr) &&
+ (profile_id == DXVA2_ModeH264_E ||
+ profile_id == DXVA2_Intel_ModeH264_E)) {
+ decoder_guid = profile_id;
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return max_resolution;
+ }
+
+ // Legacy AMD drivers with UVD3 or earlier and some Intel GPUs crash while
+ // creating surfaces larger than 1920 x 1088.
+ if (IsLegacyGPU(device.get()))
+ return max_resolution;
+
+ // We look for the following resolutions in the driver.
+ // TODO(ananta)
+ // Look into whether this list needs to be expanded.
+ static std::pair<int, int> resolution_array[] = {
+ // Use 1088 to account for 16x16 macroblocks.
+ std::make_pair(1920, 1088),
+ std::make_pair(2560, 1440),
+ std::make_pair(3840, 2160),
+ std::make_pair(4096, 2160),
+ std::make_pair(4096, 2304),
+ };
+
+ {
+ TRACE_EVENT0("gpu,startup",
+ "GetMaxH264Resolution. Resolution search begin");
+
+ for (size_t res_idx = 0; res_idx < arraysize(resolution_array);
+ res_idx++) {
+ D3D11_VIDEO_DECODER_DESC desc = {};
+ desc.Guid = decoder_guid;
+ desc.SampleWidth = resolution_array[res_idx].first;
+ desc.SampleHeight = resolution_array[res_idx].second;
+ desc.OutputFormat = DXGI_FORMAT_NV12;
+ UINT config_count = 0;
+ hr = video_device->GetVideoDecoderConfigCount(&desc, &config_count);
+ if (FAILED(hr) || config_count == 0)
+ return max_resolution;
+
+ D3D11_VIDEO_DECODER_CONFIG config = {};
+ hr = video_device->GetVideoDecoderConfig(&desc, 0, &config);
+ if (FAILED(hr))
+ return max_resolution;
+
+ base::win::ScopedComPtr<ID3D11VideoDecoder> video_decoder;
+ hr = video_device->CreateVideoDecoder(&desc, &config,
+ video_decoder.Receive());
+ if (!video_decoder.get())
+ return max_resolution;
+
+ max_resolution = resolution_array[res_idx];
+ }
+ }
+ return max_resolution;
+}
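For context, QueryDeviceObjectFromANGLE (defined earlier in this file) retrieves the D3D11 device that ANGLE renders with. The following is only a rough sketch of the underlying EGL_EXT_device_query / EGL_ANGLE_device_d3d lookup, not the patch's helper, and assumes those extensions are available:

// Illustrative sketch only; the real helper is QueryDeviceObjectFromANGLE.
ID3D11Device* QueryD3D11DeviceFromANGLESketch() {
  EGLDisplay display = gfx::GLSurfaceEGL::GetHardwareDisplay();
  EGLAttrib device_attrib = 0;
  if (!eglQueryDisplayAttribEXT(display, EGL_DEVICE_EXT, &device_attrib))
    return nullptr;
  EGLDeviceEXT egl_device = reinterpret_cast<EGLDeviceEXT>(device_attrib);
  EGLAttrib d3d11_device = 0;
  if (!eglQueryDeviceAttribEXT(egl_device, EGL_D3D11_DEVICE_ANGLE,
                               &d3d11_device))
    return nullptr;
  // The returned pointer is not AddRef'd; a caller that keeps it must AddRef.
  return reinterpret_cast<ID3D11Device*>(d3d11_device);
}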
+
+// static
+bool DXVAVideoDecodeAccelerator::IsLegacyGPU(ID3D11Device* device) {
+ static const int kAMDGPUId1 = 0x1002;
+ static const int kAMDGPUId2 = 0x1022;
+ static const int kIntelGPU = 0x8086;
+
+ static bool legacy_gpu = true;
+ // This flag ensures that we determine the GPU type once.
+ static bool legacy_gpu_determined = false;
+
+ if (legacy_gpu_determined)
+ return legacy_gpu;
+
+ legacy_gpu_determined = true;
+
+ base::win::ScopedComPtr<IDXGIDevice> dxgi_device;
+ HRESULT hr = dxgi_device.QueryFrom(device);
+ if (FAILED(hr))
+ return legacy_gpu;
+
+ base::win::ScopedComPtr<IDXGIAdapter> adapter;
+ hr = dxgi_device->GetAdapter(adapter.Receive());
+ if (FAILED(hr))
+ return legacy_gpu;
+
+ DXGI_ADAPTER_DESC adapter_desc = {};
+ hr = adapter->GetDesc(&adapter_desc);
+ if (FAILED(hr))
+ return legacy_gpu;
+
+ // We check whether the device is an Intel or an AMD device and whether it
+ // is in the global lists defined by the g_AMDUVD3GPUList and
+ // g_IntelLegacyGPUList arrays above. If so, the device is treated as a
+ // legacy device.
+ if ((adapter_desc.VendorId == kAMDGPUId1) ||
+ adapter_desc.VendorId == kAMDGPUId2) {
+ {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::IsLegacyGPU. AMD check");
+ for (size_t i = 0; i < arraysize(g_AMDUVD3GPUList); i++) {
+ if (adapter_desc.DeviceId == g_AMDUVD3GPUList[i])
+ return legacy_gpu;
+ }
+ }
+ } else if (adapter_desc.VendorId == kIntelGPU) {
+ {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::IsLegacyGPU. Intel check");
+ for (size_t i = 0; i < arraysize(g_IntelLegacyGPUList); i++) {
+ if (adapter_desc.DeviceId == g_IntelLegacyGPUList[i])
+ return legacy_gpu;
+ }
+ }
+ }
+ legacy_gpu = false;
+ return legacy_gpu;
+}
+
bool DXVAVideoDecodeAccelerator::InitDecoder(media::VideoCodecProfile profile) {
HMODULE decoder_dll = NULL;
@@ -1104,24 +1683,26 @@ bool DXVAVideoDecodeAccelerator::InitDecoder(media::VideoCodecProfile profile) {
"msmpeg2vdec.dll required for decoding is not loaded",
false);
- // Check version of DLL, version 6.7.7140 is blacklisted due to high crash
+ // Check version of DLL, version 6.1.7140 is blacklisted due to high crash
// rates in browsers loading that DLL. If that is the version installed we
// fall back to software decoding. See crbug/403440.
- FileVersionInfo* version_info =
- FileVersionInfo::CreateFileVersionInfoForModule(decoder_dll);
+ scoped_ptr<FileVersionInfo> version_info(
+ FileVersionInfo::CreateFileVersionInfoForModule(decoder_dll));
RETURN_ON_FAILURE(version_info,
"unable to get version of msmpeg2vdec.dll",
false);
base::string16 file_version = version_info->file_version();
RETURN_ON_FAILURE(file_version.find(L"6.1.7140") == base::string16::npos,
- "blacklisted version of msmpeg2vdec.dll 6.7.7140",
+ "blacklisted version of msmpeg2vdec.dll 6.1.7140",
false);
codec_ = media::kCodecH264;
clsid = __uuidof(CMSH264DecoderMFT);
- } else if ((profile == media::VP8PROFILE_ANY ||
- profile == media::VP9PROFILE_ANY) &&
- base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableAcceleratedVpxDecode)) {
+ } else if (enable_accelerated_vpx_decode_ &&
+ (profile == media::VP8PROFILE_ANY ||
+ profile == media::VP9PROFILE_PROFILE0 ||
+ profile == media::VP9PROFILE_PROFILE1 ||
+ profile == media::VP9PROFILE_PROFILE2 ||
+ profile == media::VP9PROFILE_PROFILE3)) {
int program_files_key = base::DIR_PROGRAM_FILES;
if (base::win::OSInfo::GetInstance()->wow64_status() ==
base::win::OSInfo::WOW64_ENABLED) {
@@ -1230,19 +1811,24 @@ bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
DVLOG(1) << "Failed to set Low latency mode on decoder. Error: " << hr;
}
+ auto gl_context = get_gl_context_cb_.Run();
+ RETURN_ON_FAILURE(gl_context, "Couldn't get GL context", false);
+
// The decoder should use DX11 iff
// 1. The underlying H/W decoder supports it.
// 2. We have a pointer to the MFCreateDXGIDeviceManager function needed for
// this. This should always be true for Windows 8+.
// 3. ANGLE is using DX11.
- DCHECK(gl_context_);
if (create_dxgi_device_manager_ &&
- (gl_context_->GetGLRenderer().find("Direct3D11") !=
- std::string::npos)) {
+ (gl_context->GetGLRenderer().find("Direct3D11") != std::string::npos)) {
UINT32 dx11_aware = 0;
attributes->GetUINT32(MF_SA_D3D11_AWARE, &dx11_aware);
use_dx11_ = !!dx11_aware;
}
+
+ use_keyed_mutex_ =
+ use_dx11_ && gfx::GLSurfaceEGL::HasEGLExtension("EGL_ANGLE_keyed_mutex");
+
return true;
}
@@ -1436,8 +2022,9 @@ void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
if (!output_picture_buffers_.size())
return;
- RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_.Run(),
- "Failed to make context current", PLATFORM_FAILURE,);
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
OutputBuffers::iterator index;
@@ -1449,7 +2036,6 @@ void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
PendingSampleInfo* pending_sample = NULL;
{
base::AutoLock lock(decoder_lock_);
-
PendingSampleInfo& sample_info = pending_output_samples_.front();
if (sample_info.picture_buffer_id != -1)
continue;
@@ -1533,13 +2119,22 @@ void DXVAVideoDecodeAccelerator::Invalidate() {
if (GetState() == kUninitialized)
return;
+ // Best effort to make the GL context current.
+ make_context_current_cb_.Run();
+
decoder_thread_.Stop();
weak_this_factory_.InvalidateWeakPtrs();
output_picture_buffers_.clear();
stale_output_picture_buffers_.clear();
pending_output_samples_.clear();
- pending_input_buffers_.clear();
+ // We want to continue processing pending input after detecting a config
+ // change.
+ if (GetState() != kConfigChange)
+ pending_input_buffers_.clear();
decoder_.Release();
+ pictures_requested_ = false;
+
+ config_change_detector_.reset();
if (use_dx11_) {
if (video_format_converter_mft_.get()) {
@@ -1552,6 +2147,7 @@ void DXVAVideoDecodeAccelerator::Invalidate() {
d3d11_device_manager_.Release();
d3d11_query_.Release();
dx11_video_format_converter_media_type_needs_init_ = true;
+ multi_threaded_.Release();
} else {
d3d9_.Release();
d3d9_device_ex_.Release();
@@ -1591,10 +2187,8 @@ void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) {
DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
// This task could execute after the decoder has been torn down.
if (GetState() != kUninitialized && client_) {
- client_->ProvidePictureBuffers(
- kNumPictureBuffers,
- gfx::Size(width, height),
- GL_TEXTURE_2D);
+ client_->ProvidePictureBuffers(kNumPictureBuffers, 1,
+ gfx::Size(width, height), GL_TEXTURE_2D);
}
}
@@ -1706,13 +2300,31 @@ void DXVAVideoDecodeAccelerator::DecodeInternal(
return;
}
+ // Check if the resolution, bit rate, etc. changed in the stream. If so, we
+ // reinitialize the decoder to ensure that the stream decodes correctly.
+ bool config_changed = false;
+
+ HRESULT hr = CheckConfigChanged(sample.get(), &config_changed);
+ RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to check video stream config",
+ PLATFORM_FAILURE,);
+
+ if (config_changed) {
+ pending_input_buffers_.push_back(sample);
+ main_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&DXVAVideoDecodeAccelerator::ConfigChanged,
+ weak_this_factory_.GetWeakPtr(),
+ config_));
+ return;
+ }
+
if (!inputs_before_decode_) {
TRACE_EVENT_ASYNC_BEGIN0("gpu", "DXVAVideoDecodeAccelerator.Decoding",
this);
}
inputs_before_decode_++;
- HRESULT hr = decoder_->ProcessInput(0, sample.get(), 0);
+ hr = decoder_->ProcessInput(0, sample.get(), 0);
// As per msdn if the decoder returns MF_E_NOTACCEPTING then it means that it
// has enough data to produce one or more output samples. In this case the
// recommended options are to
@@ -1790,7 +2402,7 @@ void DXVAVideoDecodeAccelerator::HandleResolutionChanged(int width,
main_thread_task_runner_->PostTask(
FROM_HERE,
base::Bind(&DXVAVideoDecodeAccelerator::DismissStaleBuffers,
- weak_this_factory_.GetWeakPtr()));
+ weak_this_factory_.GetWeakPtr(), false));
main_thread_task_runner_->PostTask(
FROM_HERE,
@@ -1800,13 +2412,17 @@ void DXVAVideoDecodeAccelerator::HandleResolutionChanged(int width,
height));
}
-void DXVAVideoDecodeAccelerator::DismissStaleBuffers() {
+void DXVAVideoDecodeAccelerator::DismissStaleBuffers(bool force) {
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
+
OutputBuffers::iterator index;
for (index = output_picture_buffers_.begin();
index != output_picture_buffers_.end();
++index) {
- if (index->second->available()) {
+ if (force || index->second->available()) {
DVLOG(1) << "Dismissing picture id: " << index->second->id();
client_->DismissPictureBuffer(index->second->id());
} else {
@@ -1821,6 +2437,10 @@ void DXVAVideoDecodeAccelerator::DismissStaleBuffers() {
void DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer(
int32_t picture_buffer_id) {
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
+
OutputBuffers::iterator it = stale_output_picture_buffers_.find(
picture_buffer_id);
DCHECK(it != stale_output_picture_buffers_.end());
@@ -1935,13 +2555,15 @@ void DXVAVideoDecodeAccelerator::CopySurfaceComplete(
if (picture_buffer->available())
return;
- RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_.Run(),
- "Failed to make context current", PLATFORM_FAILURE,);
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
DCHECK(!output_picture_buffers_.empty());
- picture_buffer->CopySurfaceComplete(src_surface,
- dest_surface);
+ bool result = picture_buffer->CopySurfaceComplete(src_surface, dest_surface);
+ RETURN_AND_NOTIFY_ON_FAILURE(result, "Failed to complete copying surface",
+ PLATFORM_FAILURE, );
NotifyPictureReady(picture_buffer->id(), input_buffer_id);
@@ -1964,11 +2586,14 @@ void DXVAVideoDecodeAccelerator::CopySurfaceComplete(
base::Unretained(this)));
}
-void DXVAVideoDecodeAccelerator::CopyTexture(ID3D11Texture2D* src_texture,
- ID3D11Texture2D* dest_texture,
- IMFSample* video_frame,
- int picture_buffer_id,
- int input_buffer_id) {
+void DXVAVideoDecodeAccelerator::CopyTexture(
+ ID3D11Texture2D* src_texture,
+ ID3D11Texture2D* dest_texture,
+ base::win::ScopedComPtr<IDXGIKeyedMutex> dest_keyed_mutex,
+ uint64_t keyed_mutex_value,
+ IMFSample* video_frame,
+ int picture_buffer_id,
+ int input_buffer_id) {
HRESULT hr = E_FAIL;
DCHECK(use_dx11_);
@@ -2005,14 +2630,11 @@ void DXVAVideoDecodeAccelerator::CopyTexture(ID3D11Texture2D* src_texture,
}
decoder_thread_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&DXVAVideoDecodeAccelerator::CopyTexture,
- base::Unretained(this),
- src_texture,
- dest_texture,
- input_sample_for_conversion.Detach(),
- picture_buffer_id,
- input_buffer_id));
+ FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopyTexture,
+ base::Unretained(this), src_texture, dest_texture,
+ dest_keyed_mutex, keyed_mutex_value,
+ input_sample_for_conversion.Detach(),
+ picture_buffer_id, input_buffer_id));
return;
}
@@ -2023,6 +2645,13 @@ void DXVAVideoDecodeAccelerator::CopyTexture(ID3D11Texture2D* src_texture,
DCHECK(video_format_converter_mft_.get());
+ if (dest_keyed_mutex) {
+ HRESULT hr =
+ dest_keyed_mutex->AcquireSync(keyed_mutex_value, kAcquireSyncWaitMs);
+ RETURN_AND_NOTIFY_ON_FAILURE(
+ hr == S_OK, "D3D11 failed to acquire keyed mutex for texture.",
+ PLATFORM_FAILURE, );
+ }
// The video processor MFT requires output samples to be allocated by the
// caller. We create a sample with a buffer backed with the ID3D11Texture2D
// interface exposed by ANGLE. This works nicely as this ensures that the
@@ -2077,18 +2706,27 @@ void DXVAVideoDecodeAccelerator::CopyTexture(ID3D11Texture2D* src_texture,
"Failed to convert output sample format.", PLATFORM_FAILURE,);
}
- d3d11_device_context_->Flush();
- d3d11_device_context_->End(d3d11_query_.get());
+ if (dest_keyed_mutex) {
+ HRESULT hr = dest_keyed_mutex->ReleaseSync(keyed_mutex_value + 1);
+ RETURN_AND_NOTIFY_ON_FAILURE(hr == S_OK, "Failed to release keyed mutex.",
+ PLATFORM_FAILURE, );
- decoder_thread_task_runner_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
- base::Unretained(this), 0,
- reinterpret_cast<IDirect3DSurface9*>(NULL),
- reinterpret_cast<IDirect3DSurface9*>(NULL),
- picture_buffer_id, input_buffer_id),
- base::TimeDelta::FromMilliseconds(
- kFlushDecoderSurfaceTimeoutMs));
+ main_thread_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete,
+ weak_this_factory_.GetWeakPtr(), nullptr, nullptr,
+ picture_buffer_id, input_buffer_id));
+ } else {
+ d3d11_device_context_->Flush();
+ d3d11_device_context_->End(d3d11_query_.get());
+
+ decoder_thread_task_runner_->PostDelayedTask(
+ FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
+ base::Unretained(this), 0,
+ reinterpret_cast<IDirect3DSurface9*>(NULL),
+ reinterpret_cast<IDirect3DSurface9*>(NULL),
+ picture_buffer_id, input_buffer_id),
+ base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
+ }
}
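The keyed mutex handshake above follows the standard DXGI pattern: the producer acquires the shared texture with the current key and releases it with key + 1, which is then the key the consuming device must acquire with. A minimal sketch of the consumer side (illustrative only, not code from this patch):

// Illustrative consumer-side sketch; not part of this patch. |keyed_mutex|
// wraps the shared texture on the consuming device and |key| is the value the
// producer released with (keyed_mutex_value + 1 above).
HRESULT ConsumeSharedTexture(IDXGIKeyedMutex* keyed_mutex, uint64_t key) {
  // Blocks until the producer's ReleaseSync(key) has executed.
  HRESULT hr = keyed_mutex->AcquireSync(key, INFINITE);
  if (hr != S_OK)
    return hr;
  // ... read from or copy out of the shared texture here ...
  // Hand the texture back; the next producer AcquireSync uses key + 1.
  return keyed_mutex->ReleaseSync(key + 1);
}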
void DXVAVideoDecodeAccelerator::FlushDecoder(
@@ -2290,12 +2928,6 @@ bool DXVAVideoDecodeAccelerator::SetTransformOutputType(
RETURN_ON_HR_FAILURE(hr, "Failed to set media type attributes", false);
}
hr = transform->SetOutputType(0, media_type.get(), 0); // No flags
- if (FAILED(hr)) {
- base::debug::Alias(&hr);
- // TODO(ananta)
- // Remove this CHECK when this stabilizes in the field.
- CHECK(false);
- }
RETURN_ON_HR_FAILURE(hr, "Failed to set output type", false);
return true;
}
@@ -2304,4 +2936,39 @@ bool DXVAVideoDecodeAccelerator::SetTransformOutputType(
return false;
}
+HRESULT DXVAVideoDecodeAccelerator::CheckConfigChanged(
+ IMFSample* sample, bool* config_changed) {
+ if (codec_ != media::kCodecH264)
+ return S_FALSE;
+
+ base::win::ScopedComPtr<IMFMediaBuffer> buffer;
+ HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
+ RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from input sample", hr);
+
+ MediaBufferScopedPointer scoped_media_buffer(buffer.get());
+
+ if (!config_change_detector_->DetectConfig(
+ scoped_media_buffer.get(),
+ scoped_media_buffer.current_length())) {
+ RETURN_ON_HR_FAILURE(E_FAIL, "Failed to detect H.264 stream config",
+ E_FAIL);
+ }
+ *config_changed = config_change_detector_->config_changed();
+ return S_OK;
+}
+
+void DXVAVideoDecodeAccelerator::ConfigChanged(
+ const Config& config) {
+ DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
+
+ SetState(kConfigChange);
+ DismissStaleBuffers(true);
+ Invalidate();
+ Initialize(config_, client_);
+ decoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
+ base::Unretained(this)));
+}
+
} // namespace content
diff --git a/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.h b/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.h
index d3aeda62c9b..01c15e62430 100644
--- a/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.h
+++ b/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.h
@@ -7,6 +7,7 @@
#include <d3d11.h>
#include <d3d9.h>
+#include <initguid.h>
#include <stdint.h>
// Work around bug in this header by disabling the relevant warning for it.
// https://connect.microsoft.com/VisualStudio/feedback/details/911260/dxva2api-h-in-win8-sdk-triggers-c4201-with-w4
@@ -29,6 +30,8 @@
#include "base/threading/thread.h"
#include "base/win/scoped_comptr.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
+#include "media/filters/h264_parser.h"
#include "media/video/video_decode_accelerator.h"
interface IMFSample;
@@ -44,6 +47,43 @@ typedef HRESULT (WINAPI* CreateDXGIDeviceManager)(
namespace content {
+// Provides functionality to detect H.264 stream configuration changes.
+// TODO(ananta)
+// Move this to a common place so that all VDA's can use this.
+class H264ConfigChangeDetector {
+ public:
+ H264ConfigChangeDetector();
+ ~H264ConfigChangeDetector();
+
+ // Detects stream configuration changes.
+ // Returns false on failure.
+ bool DetectConfig(const uint8_t* stream, unsigned int size);
+
+ bool config_changed() const {
+ return config_changed_;
+ }
+
+ private:
+ // These fields cache the most recently seen SPS/PPS in the H.264 bitstream.
+ // Newly parsed SPS/PPS are compared against them to detect a configuration
+ // change.
+ int last_sps_id_;
+ std::vector<uint8_t> last_sps_;
+ int last_pps_id_;
+ std::vector<uint8_t> last_pps_;
+ // Set to true if we detect a stream configuration change.
+ bool config_changed_;
+ // We want to indicate configuration changes only after we see IDR slices.
+ // This flag tracks that we potentially have a configuration change which
+ // we want to honor after we see an IDR slice.
+ bool pending_config_changed_;
+
+ scoped_ptr<media::H264Parser> parser_;
+
+ DISALLOW_COPY_AND_ASSIGN(H264ConfigChangeDetector);
+};
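DetectConfig itself is implemented in the .cc file above. A minimal sketch of the SPS-comparison idea using media::H264Parser, shown purely to illustrate the approach (the names and simplifications below are not from this patch, and PPS/IDR tracking is omitted):

// Illustrative sketch of SPS-based config change detection; not the code
// added by this patch.
bool DetectSpsChangeSketch(const uint8_t* stream, unsigned int size,
                           std::vector<uint8_t>* last_sps,
                           bool* config_changed) {
  media::H264Parser parser;
  parser.SetStream(stream, size);
  media::H264NALU nalu;
  while (true) {
    media::H264Parser::Result result = parser.AdvanceToNextNALU(&nalu);
    if (result == media::H264Parser::kEOStream)
      return true;
    if (result != media::H264Parser::kOk)
      return false;
    if (nalu.nal_unit_type != media::H264NALU::kSPS)
      continue;
    int sps_id = 0;
    if (parser.ParseSPS(&sps_id) != media::H264Parser::kOk)
      return false;
    // Compare the raw SPS bytes with the previously seen SPS; any difference
    // is treated as a potential configuration change.
    std::vector<uint8_t> sps(nalu.data, nalu.data + nalu.size);
    if (!last_sps->empty() && sps != *last_sps)
      *config_changed = true;
    last_sps->swap(sps);
  }
}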
+
+
// Class to provide a DXVA 2.0 based accelerator using the Microsoft Media
// foundation APIs via the VideoDecodeAccelerator interface.
// This class lives on a single thread and DCHECKs that it is never accessed
@@ -57,12 +97,14 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
kResetting, // upon received Reset(), before ResetDone()
kStopped, // upon output EOS received.
kFlushing, // upon flush request received.
+ kConfigChange, // stream configuration change detected.
};
// Does not take ownership of |client| which must outlive |*this|.
- explicit DXVAVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- gfx::GLContext* gl_context);
+ DXVAVideoDecodeAccelerator(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ bool enable_accelerated_vpx_decode);
~DXVAVideoDecodeAccelerator() override;
// media::VideoDecodeAccelerator implementation.
@@ -74,7 +116,10 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
GLenum GetSurfaceInternalFormat() const override;
static media::VideoDecodeAccelerator::SupportedProfiles
@@ -87,6 +132,23 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
typedef void* EGLConfig;
typedef void* EGLSurface;
+ // Returns the minimum resolution for the |profile| passed in.
+ static std::pair<int, int> GetMinResolution(
+ const media::VideoCodecProfile profile);
+
+ // Returns the maximum resolution for the |profile| passed in.
+ static std::pair<int, int> GetMaxResolution(
+ const media::VideoCodecProfile profile);
+
+ // Returns the maximum resolution for H264 video.
+ static std::pair<int, int> GetMaxH264Resolution();
+
+ // Certain AMD GPU drivers like R600, R700, Evergreen and Cayman, and some
+ // second-generation Intel GPU drivers, crash if we create a video device
+ // with a resolution higher than 1920 x 1088. This function checks whether
+ // the GPU is in this list and returns true if it is.
+ static bool IsLegacyGPU(ID3D11Device* device);
+
// Creates and initializes an instance of the D3D device and the
// corresponding device manager. The device manager instance is eventually
// passed to the IMFTransform interface implemented by the decoder.
@@ -178,7 +240,7 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
typedef std::map<int32_t, linked_ptr<DXVAPictureBuffer>> OutputBuffers;
// Tells the client to dismiss the stale picture buffers passed in.
- void DismissStaleBuffers();
+ void DismissStaleBuffers(bool force);
// Called after the client indicates we can recycle a stale picture buffer.
void DeferredDismissStaleBuffer(int32_t picture_buffer_id);
@@ -191,10 +253,6 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
// the decoder thread. Thread safe.
State GetState();
- // Worker function for the Decoder Reset functionality. Executes on the
- // decoder thread and queues tasks on the main thread as needed.
- void ResetHelper();
-
// Starts the thread used for decoding.
void StartDecoderThread();
@@ -222,6 +280,8 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
// is the sample containing the frame to be copied.
void CopyTexture(ID3D11Texture2D* src_texture,
ID3D11Texture2D* dest_texture,
+ base::win::ScopedComPtr<IDXGIKeyedMutex> dest_keyed_mutex,
+ uint64_t keyed_mutex_value,
IMFSample* video_frame,
int picture_buffer_id,
int input_buffer_id);
@@ -235,6 +295,10 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
int picture_buffer_id,
int input_buffer_id);
+ // Polls to wait for GPU commands to be finished on the picture buffer
+ // before reusing it.
+ void WaitForOutputBuffer(int32_t picture_buffer_id, int count);
+
// Initializes the DX11 Video format converter media types.
// Returns true on success.
bool InitializeDX11VideoFormatConverterMediaType(int width, int height);
@@ -257,6 +321,18 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
int width,
int height);
+ // Checks whether the resolution, bitrate, etc. of the stream changed. We do
+ // this by keeping track of the SPS/PPS in the stream; if they change, we
+ // assume that the configuration changed.
+ // Returns S_OK or S_FALSE on success.
+ // The |config_changed| parameter is set to true if we detect a change in the
+ // stream.
+ HRESULT CheckConfigChanged(IMFSample* sample, bool* config_changed);
+
+ // Called when we detect a stream configuration change. We reinitialize the
+ // decoder here.
+ void ConfigChanged(const Config& config);
+
// To expose client callbacks from VideoDecodeAccelerator.
media::VideoDecodeAccelerator::Client* client_;
@@ -340,8 +416,10 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
typedef std::list<base::win::ScopedComPtr<IMFSample>> PendingInputs;
PendingInputs pending_input_buffers_;
+ // Callback to get current GLContext.
+ GetGLContextCallback get_gl_context_cb_;
// Callback to set the correct gl context.
- base::Callback<bool(void)> make_context_current_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
// Which codec we are decoding with hardware acceleration.
media::VideoCodec codec_;
@@ -373,16 +451,30 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
// H/W decoding.
bool use_dx11_;
+ // True if we should use DXGI keyed mutexes to synchronize between the two
+ // contexts.
+ bool use_keyed_mutex_;
+
// Set to true if the DX11 video format converter input media types need to
// be initialized. Defaults to true.
bool dx11_video_format_converter_media_type_needs_init_;
- // The GLContext to be used by the decoder.
- scoped_refptr<gfx::GLContext> gl_context_;
-
// Set to true if we are sharing ANGLE's device.
bool using_angle_device_;
+ // Enables experimental hardware acceleration for VP8/VP9 video decoding.
+ const bool enable_accelerated_vpx_decode_;
+
+ // The Media Foundation H.264 decoder has problems handling changes such as
+ // resolution and bitrate changes. If we reinitialize the decoder when these
+ // changes occur, it works fine. The H264ConfigChangeDetector class provides
+ // functionality to check whether the stream configuration changed.
+ scoped_ptr<H264ConfigChangeDetector> config_change_detector_;
+
+ // Contains the initialization parameters for the video.
+ Config config_;
+
// WeakPtrFactory for posting tasks back to |this|.
base::WeakPtrFactory<DXVAVideoDecodeAccelerator> weak_this_factory_;
diff --git a/chromium/content/common/gpu/media/fake_video_decode_accelerator.cc b/chromium/content/common/gpu/media/fake_video_decode_accelerator.cc
index 7524dd18ebf..01ac07dcd3b 100644
--- a/chromium/content/common/gpu/media/fake_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/fake_video_decode_accelerator.cc
@@ -29,17 +29,14 @@ static const unsigned int kNumBuffers = media::limits::kMaxVideoFrames +
(media::limits::kMaxVideoFrames & 1u);
FakeVideoDecodeAccelerator::FakeVideoDecodeAccelerator(
- gfx::GLContext* gl,
- gfx::Size size,
- const base::Callback<bool(void)>& make_context_current)
+ const gfx::Size& size,
+ const MakeGLContextCurrentCallback& make_context_current_cb)
: child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
client_(NULL),
- make_context_current_(make_context_current),
- gl_(gl),
+ make_context_current_cb_(make_context_current_cb),
frame_buffer_size_(size),
flushing_(false),
- weak_this_factory_(this) {
-}
+ weak_this_factory_(this) {}
FakeVideoDecodeAccelerator::~FakeVideoDecodeAccelerator() {
}
@@ -59,14 +56,23 @@ bool FakeVideoDecodeAccelerator::Initialize(const Config& config,
// V4L2VideoDecodeAccelerator waits until first decode call to ask for buffers
// This class asks for it on initialization instead.
client_ = client;
- client_->ProvidePictureBuffers(kNumBuffers,
- frame_buffer_size_,
+ client_->ProvidePictureBuffers(kNumBuffers, 1, frame_buffer_size_,
kDefaultTextureTarget);
return true;
}
void FakeVideoDecodeAccelerator::Decode(
const media::BitstreamBuffer& bitstream_buffer) {
+ // We won't really read from the bitstream_buffer, close the handle.
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+
+ if (bitstream_buffer.id() < 0) {
+ LOG(ERROR) << "Invalid bitstream: id=" << bitstream_buffer.id();
+ client_->NotifyError(INVALID_ARGUMENT);
+ return;
+ }
+
int bitstream_buffer_id = bitstream_buffer.id();
queued_bitstream_ids_.push(bitstream_buffer_id);
child_task_runner_->PostTask(
@@ -93,12 +99,13 @@ void FakeVideoDecodeAccelerator::AssignPictureBuffers(
memset(black_data.get(),
0,
frame_buffer_size_.width() * frame_buffer_size_.height() * 4);
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "ReusePictureBuffer(): could not make context current";
return;
}
for (size_t index = 0; index < buffers.size(); ++index) {
- glBindTexture(GL_TEXTURE_2D, buffers[index].texture_id());
+ DCHECK_LE(1u, buffers[index].texture_ids().size());
+ glBindTexture(GL_TEXTURE_2D, buffers[index].texture_ids()[0]);
// Every other frame white and the rest black.
uint8_t* data = index % 2 ? white_data.get() : black_data.get();
glTexImage2D(GL_TEXTURE_2D,
@@ -152,8 +159,10 @@ void FakeVideoDecodeAccelerator::Destroy() {
delete this;
}
-bool FakeVideoDecodeAccelerator::CanDecodeOnIOThread() {
- return true;
+bool FakeVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+ return false;
}
void FakeVideoDecodeAccelerator::DoPictureReady() {
diff --git a/chromium/content/common/gpu/media/fake_video_decode_accelerator.h b/chromium/content/common/gpu/media/fake_video_decode_accelerator.h
index 7dcbfda2e77..10d47822b45 100644
--- a/chromium/content/common/gpu/media/fake_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/fake_video_decode_accelerator.h
@@ -13,6 +13,7 @@
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "media/video/video_decode_accelerator.h"
#include "ui/gfx/geometry/size_f.h"
#include "ui/gl/gl_context.h"
@@ -23,9 +24,8 @@ class CONTENT_EXPORT FakeVideoDecodeAccelerator
: public media::VideoDecodeAccelerator {
public:
FakeVideoDecodeAccelerator(
- gfx::GLContext* gl,
- gfx::Size size,
- const base::Callback<bool(void)>& make_context_current);
+ const gfx::Size& size,
+ const MakeGLContextCurrentCallback& make_context_current_cb);
~FakeVideoDecodeAccelerator() override;
bool Initialize(const Config& config, Client* client) override;
@@ -36,7 +36,10 @@ class CONTENT_EXPORT FakeVideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
private:
void DoPictureReady();
@@ -49,8 +52,7 @@ class CONTENT_EXPORT FakeVideoDecodeAccelerator
Client* client_;
// Make our context current before running any GL entry points.
- base::Callback<bool(void)> make_context_current_;
- gfx::GLContext* gl_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
// Output picture size.
gfx::Size frame_buffer_size_;
diff --git a/chromium/content/common/gpu/media/gpu_arc_video_service.cc b/chromium/content/common/gpu/media/gpu_arc_video_service.cc
deleted file mode 100644
index 91d36980ad1..00000000000
--- a/chromium/content/common/gpu/media/gpu_arc_video_service.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/media/gpu_arc_video_service.h"
-
-#include "base/logging.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "ipc/ipc_listener.h"
-#include "ipc/ipc_message_macros.h"
-#include "ipc/ipc_sync_channel.h"
-
-namespace content {
-
-// TODO(kcwu) implement ArcVideoAccelerator::Client.
-class GpuArcVideoService::AcceleratorStub : public IPC::Listener,
- public IPC::Sender {
- public:
- // |owner| outlives AcceleratorStub.
- explicit AcceleratorStub(GpuArcVideoService* owner) : owner_(owner) {}
-
- ~AcceleratorStub() override {
- DCHECK(thread_checker_.CalledOnValidThread());
- channel_->Close();
- }
-
- IPC::ChannelHandle CreateChannel(
- base::WaitableEvent* shutdown_event,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner) {
- IPC::ChannelHandle handle =
- IPC::Channel::GenerateVerifiedChannelID("arc-video");
- channel_ = IPC::SyncChannel::Create(handle, IPC::Channel::MODE_SERVER, this,
- io_task_runner, false, shutdown_event);
- base::ScopedFD client_fd = channel_->TakeClientFileDescriptor();
- DCHECK(client_fd.is_valid());
- handle.socket = base::FileDescriptor(std::move(client_fd));
- return handle;
- }
-
- // IPC::Sender implementation:
- bool Send(IPC::Message* msg) override {
- DCHECK(msg);
- return channel_->Send(msg);
- }
-
- // IPC::Listener implementation:
- void OnChannelError() override {
- DCHECK(thread_checker_.CalledOnValidThread());
- // RemoveClient will delete |this|.
- owner_->RemoveClient(this);
- }
-
- // IPC::Listener implementation:
- bool OnMessageReceived(const IPC::Message& msg) override {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- // TODO(kcwu) Add handlers here.
- return false;
- }
-
- private:
- base::ThreadChecker thread_checker_;
- GpuArcVideoService* const owner_;
- scoped_ptr<IPC::SyncChannel> channel_;
-};
-
-GpuArcVideoService::GpuArcVideoService(
- base::WaitableEvent* shutdown_event,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
- : shutdown_event_(shutdown_event), io_task_runner_(io_task_runner) {}
-
-GpuArcVideoService::~GpuArcVideoService() {}
-
-void GpuArcVideoService::CreateChannel(const CreateChannelCallback& callback) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- scoped_ptr<AcceleratorStub> stub(new AcceleratorStub(this));
-
- IPC::ChannelHandle handle =
- stub->CreateChannel(shutdown_event_, io_task_runner_);
- accelerator_stubs_[stub.get()] = std::move(stub);
-
- callback.Run(handle);
-}
-
-void GpuArcVideoService::RemoveClient(AcceleratorStub* stub) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- accelerator_stubs_.erase(stub);
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/media/gpu_arc_video_service.h b/chromium/content/common/gpu/media/gpu_arc_video_service.h
deleted file mode 100644
index 131150c9f94..00000000000
--- a/chromium/content/common/gpu/media/gpu_arc_video_service.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_ARC_VIDEO_SERVICE_H_
-#define CONTENT_COMMON_GPU_MEDIA_GPU_ARC_VIDEO_SERVICE_H_
-
-#include <map>
-
-#include "base/callback.h"
-#include "base/threading/thread_checker.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-class WaitableEvent;
-}
-
-namespace IPC {
-struct ChannelHandle;
-}
-
-namespace content {
-
-// GpuArcVideoService manages life-cycle and IPC message translation for
-// ArcVideoAccelerator.
-//
-// For each creation request from GpuChannelManager, GpuArcVideoService will
-// create a new IPC channel.
-class GpuArcVideoService {
- public:
- class AcceleratorStub;
- using CreateChannelCallback = base::Callback<void(const IPC::ChannelHandle&)>;
-
- // |shutdown_event| should signal an event when this process is about to be
- // shut down in order to notify our new IPC channel to terminate.
- GpuArcVideoService(
- base::WaitableEvent* shutdown_event,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
-
- // Upon deletion, all ArcVideoAccelerator will be deleted and the associated
- // IPC channels are closed.
- ~GpuArcVideoService();
-
- // Creates a new accelerator stub. The creation result will be sent back via
- // |callback|.
- void CreateChannel(const CreateChannelCallback& callback);
-
- // Removes the reference of |stub| (and trigger deletion) from this class.
- void RemoveClient(AcceleratorStub* stub);
-
- private:
- base::ThreadChecker thread_checker_;
-
- // Shutdown event of GPU process.
- base::WaitableEvent* shutdown_event_;
-
- // GPU io thread task runner.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
-
- // Bookkeeping all accelerator stubs.
- std::map<AcceleratorStub*, scoped_ptr<AcceleratorStub>> accelerator_stubs_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuArcVideoService);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_MEDIA_GPU_ARC_VIDEO_SERVICE_H_
diff --git a/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc b/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
index 7408e46d927..3e256073e84 100644
--- a/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
@@ -13,13 +13,14 @@
#include "base/memory/shared_memory.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
+#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_messages.h"
+#include "gpu/ipc/service/gpu_channel.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/message_filter.h"
#include "media/filters/jpeg_parser.h"
+#include "media/gpu/ipc/common/media_messages.h"
#include "ui/gfx/geometry/size.h"
#if defined(OS_CHROMEOS)
@@ -41,12 +42,6 @@ void DecodeFinished(scoped_ptr<base::SharedMemory> shm) {
}
bool VerifyDecodeParams(const AcceleratedJpegDecoderMsg_Decode_Params& params) {
- if (params.input_buffer_id < 0) {
- LOG(ERROR) << "BitstreamBuffer id " << params.input_buffer_id
- << " out of range";
- return false;
- }
-
const int kJpegMaxDimension = UINT16_MAX;
if (params.coded_size.IsEmpty() ||
params.coded_size.width() > kJpegMaxDimension ||
@@ -55,11 +50,6 @@ bool VerifyDecodeParams(const AcceleratedJpegDecoderMsg_Decode_Params& params) {
return false;
}
- if (!base::SharedMemory::IsHandleValid(params.input_buffer_handle)) {
- LOG(ERROR) << "invalid input_buffer_handle";
- return false;
- }
-
if (!base::SharedMemory::IsHandleValid(params.output_video_frame_handle)) {
LOG(ERROR) << "invalid output_video_frame_handle";
return false;
@@ -163,13 +153,12 @@ class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
void AddClientOnIOThread(int32_t route_id,
Client* client,
- IPC::Message* reply_msg) {
+ base::Callback<void(bool)> response) {
DCHECK(io_task_runner_->BelongsToCurrentThread());
DCHECK(client_map_.count(route_id) == 0);
client_map_[route_id] = client;
- GpuMsg_CreateJpegDecoder::WriteReplyParams(reply_msg, true);
- SendOnIOThread(reply_msg);
+ response.Run(true);
}
void OnDestroyOnIOThread(const int32_t* route_id) {
@@ -208,34 +197,28 @@ class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
if (!VerifyDecodeParams(params)) {
NotifyDecodeStatusOnIOThread(
- *route_id, params.input_buffer_id,
+ *route_id, params.input_buffer.id(),
media::JpegDecodeAccelerator::INVALID_ARGUMENT);
- if (base::SharedMemory::IsHandleValid(params.input_buffer_handle))
- base::SharedMemory::CloseHandle(params.input_buffer_handle);
if (base::SharedMemory::IsHandleValid(params.output_video_frame_handle))
base::SharedMemory::CloseHandle(params.output_video_frame_handle);
return;
}
// For handles in |params|, from now on, |params.output_video_frame_handle|
- // is taken cared by scoper. |params.input_buffer_handle| need to be closed
- // manually for early exits.
+ // is taken care of by a scoper. |params.input_buffer.handle()| needs to be
+ // closed manually for early exits.
scoped_ptr<base::SharedMemory> output_shm(
new base::SharedMemory(params.output_video_frame_handle, false));
if (!output_shm->Map(params.output_buffer_size)) {
LOG(ERROR) << "Could not map output shared memory for input buffer id "
- << params.input_buffer_id;
+ << params.input_buffer.id();
NotifyDecodeStatusOnIOThread(
- *route_id, params.input_buffer_id,
+ *route_id, params.input_buffer.id(),
media::JpegDecodeAccelerator::PLATFORM_FAILURE);
- base::SharedMemory::CloseHandle(params.input_buffer_handle);
+ base::SharedMemory::CloseHandle(params.input_buffer.handle());
return;
}
- media::BitstreamBuffer input_buffer(params.input_buffer_id,
- params.input_buffer_handle,
- params.input_buffer_size);
-
uint8_t* shm_memory = static_cast<uint8_t*>(output_shm->memory());
scoped_refptr<media::VideoFrame> frame =
media::VideoFrame::WrapExternalSharedMemory(
@@ -250,11 +233,11 @@ class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
base::TimeDelta()); // timestamp
if (!frame.get()) {
LOG(ERROR) << "Could not create VideoFrame for input buffer id "
- << params.input_buffer_id;
+ << params.input_buffer.id();
NotifyDecodeStatusOnIOThread(
- *route_id, params.input_buffer_id,
+ *route_id, params.input_buffer.id(),
media::JpegDecodeAccelerator::PLATFORM_FAILURE);
- base::SharedMemory::CloseHandle(params.input_buffer_handle);
+ base::SharedMemory::CloseHandle(params.input_buffer.handle());
return;
}
frame->AddDestructionObserver(
@@ -262,7 +245,7 @@ class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
DCHECK_GT(client_map_.count(*route_id), 0u);
Client* client = client_map_[*route_id];
- client->Decode(input_buffer, frame);
+ client->Decode(params.input_buffer, frame);
}
protected:
@@ -309,7 +292,7 @@ class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
};
GpuJpegDecodeAccelerator::GpuJpegDecodeAccelerator(
- GpuChannel* channel,
+ gpu::GpuChannel* channel,
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
: channel_(channel),
child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
@@ -325,7 +308,7 @@ GpuJpegDecodeAccelerator::~GpuJpegDecodeAccelerator() {
}
void GpuJpegDecodeAccelerator::AddClient(int32_t route_id,
- IPC::Message* reply_msg) {
+ base::Callback<void(bool)> response) {
DCHECK(CalledOnValidThread());
// When adding non-chromeos platforms, VideoCaptureGpuJpegDecoder::Initialize
@@ -350,8 +333,7 @@ void GpuJpegDecodeAccelerator::AddClient(int32_t route_id,
if (!accelerator) {
DLOG(ERROR) << "JPEG accelerator Initialize failed";
- GpuMsg_CreateJpegDecoder::WriteReplyParams(reply_msg, false);
- Send(reply_msg);
+ response.Run(false);
return;
}
client->set_accelerator(std::move(accelerator));
@@ -372,7 +354,7 @@ void GpuJpegDecodeAccelerator::AddClient(int32_t route_id,
// here instead of making the code unnecessary complicated.
io_task_runner_->PostTask(
FROM_HERE, base::Bind(&MessageFilter::AddClientOnIOThread, filter_,
- route_id, client.release(), reply_msg));
+ route_id, client.release(), response));
}
void GpuJpegDecodeAccelerator::NotifyDecodeStatus(
diff --git a/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.h b/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.h
index 0fc316e026f..680dac578e0 100644
--- a/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.h
@@ -20,9 +20,11 @@ namespace base {
class SingleThreadTaskRunner;
}
-namespace content {
+namespace gpu {
class GpuChannel;
+}
+namespace content {
class GpuJpegDecodeAccelerator
: public IPC::Sender,
public base::NonThreadSafe,
@@ -30,11 +32,11 @@ class GpuJpegDecodeAccelerator
public:
// |channel| must outlive this object.
GpuJpegDecodeAccelerator(
- GpuChannel* channel,
+ gpu::GpuChannel* channel,
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
~GpuJpegDecodeAccelerator() override;
- void AddClient(int32_t route_id, IPC::Message* reply_msg);
+ void AddClient(int32_t route_id, base::Callback<void(bool)> response);
void NotifyDecodeStatus(int32_t route_id,
int32_t bitstream_buffer_id,
@@ -61,10 +63,10 @@ class GpuJpegDecodeAccelerator
static scoped_ptr<media::JpegDecodeAccelerator> CreateVaapiJDA(
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
- // The lifetime of objects of this class is managed by a GpuChannel. The
+ // The lifetime of objects of this class is managed by a gpu::GpuChannel. The
// GpuChannels destroy all the GpuJpegDecodeAccelerator that they own when
// they are destroyed. So a raw pointer is safe.
- GpuChannel* channel_;
+ gpu::GpuChannel* channel_;
// The message filter to run JpegDecodeAccelerator::Decode on IO thread.
scoped_refptr<MessageFilter> filter_;
diff --git a/chromium/content/common/gpu/media/gpu_video_accelerator_util.cc b/chromium/content/common/gpu/media/gpu_video_accelerator_util.cc
deleted file mode 100644
index 7692fddc40a..00000000000
--- a/chromium/content/common/gpu/media/gpu_video_accelerator_util.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/media/gpu_video_accelerator_util.h"
-
-namespace content {
-
-// Make sure the enum values of media::VideoCodecProfile and
-// gpu::VideoCodecProfile match.
-#define STATIC_ASSERT_ENUM_MATCH(name) \
- static_assert( \
- media::name == static_cast<media::VideoCodecProfile>(gpu::name), \
- #name " value must match in media and gpu.")
-
-STATIC_ASSERT_ENUM_MATCH(VIDEO_CODEC_PROFILE_UNKNOWN);
-STATIC_ASSERT_ENUM_MATCH(VIDEO_CODEC_PROFILE_MIN);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_BASELINE);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_MAIN);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_EXTENDED);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_HIGH);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_HIGH10PROFILE);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_HIGH422PROFILE);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_HIGH444PREDICTIVEPROFILE);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_SCALABLEBASELINE);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_SCALABLEHIGH);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_STEREOHIGH);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_MULTIVIEWHIGH);
-STATIC_ASSERT_ENUM_MATCH(VP8PROFILE_ANY);
-STATIC_ASSERT_ENUM_MATCH(VP9PROFILE_ANY);
-STATIC_ASSERT_ENUM_MATCH(VIDEO_CODEC_PROFILE_MAX);
-
-// static
-media::VideoDecodeAccelerator::Capabilities
-GpuVideoAcceleratorUtil::ConvertGpuToMediaDecodeCapabilities(
- const gpu::VideoDecodeAcceleratorCapabilities& gpu_capabilities) {
- media::VideoDecodeAccelerator::Capabilities capabilities;
- capabilities.supported_profiles =
- ConvertGpuToMediaDecodeProfiles(gpu_capabilities.supported_profiles);
- capabilities.flags = gpu_capabilities.flags;
- return capabilities;
-}
-
-// static
-media::VideoDecodeAccelerator::SupportedProfiles
-GpuVideoAcceleratorUtil::ConvertGpuToMediaDecodeProfiles(const
- gpu::VideoDecodeAcceleratorSupportedProfiles& gpu_profiles) {
- media::VideoDecodeAccelerator::SupportedProfiles profiles;
- for (const auto& gpu_profile : gpu_profiles) {
- media::VideoDecodeAccelerator::SupportedProfile profile;
- profile.profile =
- static_cast<media::VideoCodecProfile>(gpu_profile.profile);
- profile.max_resolution = gpu_profile.max_resolution;
- profile.min_resolution = gpu_profile.min_resolution;
- profiles.push_back(profile);
- }
- return profiles;
-}
-
-// static
-gpu::VideoDecodeAcceleratorCapabilities
-GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeCapabilities(
- const media::VideoDecodeAccelerator::Capabilities& media_capabilities) {
- gpu::VideoDecodeAcceleratorCapabilities capabilities;
- capabilities.supported_profiles =
- ConvertMediaToGpuDecodeProfiles(media_capabilities.supported_profiles);
- capabilities.flags = media_capabilities.flags;
- return capabilities;
-}
-
-// static
-gpu::VideoDecodeAcceleratorSupportedProfiles
-GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeProfiles(const
- media::VideoDecodeAccelerator::SupportedProfiles& media_profiles) {
- gpu::VideoDecodeAcceleratorSupportedProfiles profiles;
- for (const auto& media_profile : media_profiles) {
- gpu::VideoDecodeAcceleratorSupportedProfile profile;
- profile.profile =
- static_cast<gpu::VideoCodecProfile>(media_profile.profile);
- profile.max_resolution = media_profile.max_resolution;
- profile.min_resolution = media_profile.min_resolution;
- profiles.push_back(profile);
- }
- return profiles;
-}
-
-// static
-media::VideoEncodeAccelerator::SupportedProfiles
-GpuVideoAcceleratorUtil::ConvertGpuToMediaEncodeProfiles(const
- gpu::VideoEncodeAcceleratorSupportedProfiles& gpu_profiles) {
- media::VideoEncodeAccelerator::SupportedProfiles profiles;
- for (const auto& gpu_profile : gpu_profiles) {
- media::VideoEncodeAccelerator::SupportedProfile profile;
- profile.profile =
- static_cast<media::VideoCodecProfile>(gpu_profile.profile);
- profile.max_resolution = gpu_profile.max_resolution;
- profile.max_framerate_numerator = gpu_profile.max_framerate_numerator;
- profile.max_framerate_denominator = gpu_profile.max_framerate_denominator;
- profiles.push_back(profile);
- }
- return profiles;
-}
-
-// static
-gpu::VideoEncodeAcceleratorSupportedProfiles
-GpuVideoAcceleratorUtil::ConvertMediaToGpuEncodeProfiles(const
- media::VideoEncodeAccelerator::SupportedProfiles& media_profiles) {
- gpu::VideoEncodeAcceleratorSupportedProfiles profiles;
- for (const auto& media_profile : media_profiles) {
- gpu::VideoEncodeAcceleratorSupportedProfile profile;
- profile.profile =
- static_cast<gpu::VideoCodecProfile>(media_profile.profile);
- profile.max_resolution = media_profile.max_resolution;
- profile.max_framerate_numerator = media_profile.max_framerate_numerator;
- profile.max_framerate_denominator = media_profile.max_framerate_denominator;
- profiles.push_back(profile);
- }
- return profiles;
-}
-
-// static
-void GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
- const media::VideoDecodeAccelerator::SupportedProfiles& new_profiles,
- media::VideoDecodeAccelerator::SupportedProfiles* media_profiles) {
- for (const auto& profile : new_profiles) {
- bool duplicate = false;
- for (const auto& media_profile : *media_profiles) {
- if (media_profile.profile == profile.profile) {
- duplicate = true;
- break;
- }
- }
- if (!duplicate)
- media_profiles->push_back(profile);
- }
-}
-
-// static
-void GpuVideoAcceleratorUtil::InsertUniqueEncodeProfiles(
- const media::VideoEncodeAccelerator::SupportedProfiles& new_profiles,
- media::VideoEncodeAccelerator::SupportedProfiles* media_profiles) {
- for (const auto& profile : new_profiles) {
- bool duplicate = false;
- for (const auto& media_profile : *media_profiles) {
- if (media_profile.profile == profile.profile) {
- duplicate = true;
- break;
- }
- }
- if (!duplicate)
- media_profiles->push_back(profile);
- }
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/media/gpu_video_accelerator_util.h b/chromium/content/common/gpu/media/gpu_video_accelerator_util.h
deleted file mode 100644
index e39034e191e..00000000000
--- a/chromium/content/common/gpu/media/gpu_video_accelerator_util.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ACCELERATOR_UTIL_H_
-#define CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ACCELERATOR_UTIL_H_
-
-#include <vector>
-
-#include "gpu/config/gpu_info.h"
-#include "media/video/video_decode_accelerator.h"
-#include "media/video/video_encode_accelerator.h"
-
-namespace content {
-
-class GpuVideoAcceleratorUtil {
- public:
- // Convert decoder gpu capabilities to media capabilities.
- static media::VideoDecodeAccelerator::Capabilities
- ConvertGpuToMediaDecodeCapabilities(
- const gpu::VideoDecodeAcceleratorCapabilities& gpu_capabilities);
-
- // Convert decoder gpu profiles to media profiles.
- static media::VideoDecodeAccelerator::SupportedProfiles
- ConvertGpuToMediaDecodeProfiles(const
- gpu::VideoDecodeAcceleratorSupportedProfiles& gpu_profiles);
-
- // Convert decoder media capabilities to gpu capabilities.
- static gpu::VideoDecodeAcceleratorCapabilities
- ConvertMediaToGpuDecodeCapabilities(
- const media::VideoDecodeAccelerator::Capabilities& media_capabilities);
-
- // Convert decoder media profiles to gpu profiles.
- static gpu::VideoDecodeAcceleratorSupportedProfiles
- ConvertMediaToGpuDecodeProfiles(const
- media::VideoDecodeAccelerator::SupportedProfiles& media_profiles);
-
- // Convert encoder gpu profiles to media profiles.
- static media::VideoEncodeAccelerator::SupportedProfiles
- ConvertGpuToMediaEncodeProfiles(const
- gpu::VideoEncodeAcceleratorSupportedProfiles& gpu_profiles);
-
- // Convert encoder media profiles to gpu profiles.
- static gpu::VideoEncodeAcceleratorSupportedProfiles
- ConvertMediaToGpuEncodeProfiles(const
- media::VideoEncodeAccelerator::SupportedProfiles& media_profiles);
-
- // Insert |new_profiles| into |media_profiles|, ensuring no duplicates are
- // inserted.
- static void InsertUniqueDecodeProfiles(
- const media::VideoDecodeAccelerator::SupportedProfiles& new_profiles,
- media::VideoDecodeAccelerator::SupportedProfiles* media_profiles);
-
- // Insert |new_profiles| into |media_profiles|, ensuring no duplicates are
- // inserted.
- static void InsertUniqueEncodeProfiles(
- const media::VideoEncodeAccelerator::SupportedProfiles& new_profiles,
- media::VideoEncodeAccelerator::SupportedProfiles* media_profiles);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ACCELERATOR_UTIL_H_
diff --git a/chromium/content/common/gpu/media/gpu_video_decode_accelerator.cc b/chromium/content/common/gpu/media/gpu_video_decode_accelerator.cc
index 5424a5ff32d..3d30266f05a 100644
--- a/chromium/content/common/gpu/media/gpu_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/gpu_video_decode_accelerator.cc
@@ -7,7 +7,6 @@
#include <vector>
#include "base/bind.h"
-#include "base/command_line.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
@@ -15,47 +14,36 @@
#include "base/stl_util.h"
#include "base/thread_task_runner_handle.h"
#include "build/build_config.h"
-
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/gpu/media/gpu_video_accelerator_util.h"
-#include "content/public/common/content_switches.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_message_utils.h"
#include "ipc/message_filter.h"
#include "media/base/limits.h"
+#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
+#include "media/gpu/ipc/common/media_messages.h"
+#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
-#include "ui/gl/gl_surface_egl.h"
-
-#if defined(OS_WIN)
-#include "base/win/windows_version.h"
-#include "content/common/gpu/media/dxva_video_decode_accelerator_win.h"
-#elif defined(OS_MACOSX)
-#include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
-#elif defined(OS_CHROMEOS)
-#if defined(USE_V4L2_CODEC)
-#include "content/common/gpu/media/v4l2_device.h"
-#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
-#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
-#endif
-#if defined(ARCH_CPU_X86_FAMILY)
-#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
-#include "ui/gl/gl_implementation.h"
-#endif
-#elif defined(USE_OZONE)
-#include "media/ozone/media_ozone_platform.h"
-#elif defined(OS_ANDROID)
-#include "content/common/gpu/media/android_video_decode_accelerator.h"
-#endif
-
-#include "ui/gfx/geometry/size.h"
namespace content {
+namespace {
+static gfx::GLContext* GetGLContext(
+ const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
+ if (!stub) {
+ DLOG(ERROR) << "Stub is gone; no GLContext.";
+ return nullptr;
+ }
+
+ return stub->decoder()->GetGLContext();
+}
+
static bool MakeDecoderContextCurrent(
- const base::WeakPtr<GpuCommandBufferStub> stub) {
+ const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
if (!stub) {
DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
return false;
@@ -69,6 +57,43 @@ static bool MakeDecoderContextCurrent(
return true;
}
+#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)) || defined(OS_MACOSX)
+static bool BindImage(const base::WeakPtr<gpu::GpuCommandBufferStub>& stub,
+ uint32_t client_texture_id,
+ uint32_t texture_target,
+ const scoped_refptr<gl::GLImage>& image,
+ bool can_bind_to_sampler) {
+ if (!stub) {
+ DLOG(ERROR) << "Stub is gone; won't BindImage().";
+ return false;
+ }
+
+ gpu::gles2::GLES2Decoder* command_decoder = stub->decoder();
+ gpu::gles2::TextureManager* texture_manager =
+ command_decoder->GetContextGroup()->texture_manager();
+ gpu::gles2::TextureRef* ref = texture_manager->GetTexture(client_texture_id);
+ if (ref) {
+ texture_manager->SetLevelImage(ref, texture_target, 0, image.get(),
+ can_bind_to_sampler
+ ? gpu::gles2::Texture::BOUND
+ : gpu::gles2::Texture::UNBOUND);
+ }
+
+ return true;
+}
+#endif
+
+static base::WeakPtr<gpu::gles2::GLES2Decoder> GetGLES2Decoder(
+ const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
+ if (!stub) {
+ DLOG(ERROR) << "Stub is gone; no GLES2Decoder.";
+ return base::WeakPtr<gpu::gles2::GLES2Decoder>();
+ }
+
+ return stub->decoder()->AsWeakPtr();
+}
+} // anonymous namespace
+
// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on.
#if DCHECK_IS_ON()
@@ -103,7 +128,7 @@ class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
GpuVideoDecodeAccelerator::OnDecode)
- IPC_MESSAGE_UNHANDLED(return false;)
+ IPC_MESSAGE_UNHANDLED(return false)
IPC_END_MESSAGE_MAP()
return true;
}
@@ -129,19 +154,25 @@ class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
int32_t host_route_id,
- GpuCommandBufferStub* stub,
+ gpu::GpuCommandBufferStub* stub,
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
: host_route_id_(host_route_id),
stub_(stub),
texture_target_(0),
+ textures_per_buffer_(0),
filter_removed_(true, false),
child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
io_task_runner_(io_task_runner),
weak_factory_for_io_(this) {
DCHECK(stub_);
stub_->AddDestructionObserver(this);
- make_context_current_ =
+ get_gl_context_cb_ = base::Bind(&GetGLContext, stub_->AsWeakPtr());
+ make_context_current_cb_ =
base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
+#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)) || defined(OS_MACOSX)
+ bind_image_cb_ = base::Bind(&BindImage, stub_->AsWeakPtr());
+#endif
+ get_gles2_decoder_cb_ = base::Bind(&GetGLES2Decoder, stub_->AsWeakPtr());
}
GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
@@ -152,41 +183,10 @@ GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
// static
gpu::VideoDecodeAcceleratorCapabilities
-GpuVideoDecodeAccelerator::GetCapabilities() {
- media::VideoDecodeAccelerator::Capabilities capabilities;
- const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kDisableAcceleratedVideoDecode))
- return gpu::VideoDecodeAcceleratorCapabilities();
-
- // Query supported profiles for each VDA. The order of querying VDAs should
- // be the same as the order of initializing VDAs. Then the returned profile
- // can be initialized by corresponding VDA successfully.
-#if defined(OS_WIN)
- capabilities.supported_profiles =
- DXVAVideoDecodeAccelerator::GetSupportedProfiles();
-#elif defined(OS_CHROMEOS)
- media::VideoDecodeAccelerator::SupportedProfiles vda_profiles;
-#if defined(USE_V4L2_CODEC)
- vda_profiles = V4L2VideoDecodeAccelerator::GetSupportedProfiles();
- GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
- vda_profiles, &capabilities.supported_profiles);
- vda_profiles = V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles();
- GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
- vda_profiles, &capabilities.supported_profiles);
-#endif
-#if defined(ARCH_CPU_X86_FAMILY)
- vda_profiles = VaapiVideoDecodeAccelerator::GetSupportedProfiles();
- GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
- vda_profiles, &capabilities.supported_profiles);
-#endif
-#elif defined(OS_MACOSX)
- capabilities.supported_profiles =
- VTVideoDecodeAccelerator::GetSupportedProfiles();
-#elif defined(OS_ANDROID)
- capabilities = AndroidVideoDecodeAccelerator::GetCapabilities();
-#endif
- return GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeCapabilities(
- capabilities);
+GpuVideoDecodeAccelerator::GetCapabilities(
+ const gpu::GpuPreferences& gpu_preferences) {
+ return GpuVideoDecodeAcceleratorFactoryImpl::GetDecoderCapabilities(
+ gpu_preferences);
}
bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
@@ -209,14 +209,16 @@ bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
return handled;
}
-void GpuVideoDecodeAccelerator::NotifyCdmAttached(bool success) {
- if (!Send(new AcceleratedVideoDecoderHostMsg_CdmAttached(host_route_id_,
- success)))
- DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_CdmAttached) failed";
+void GpuVideoDecodeAccelerator::NotifyInitializationComplete(bool success) {
+ if (!Send(new AcceleratedVideoDecoderHostMsg_InitializationComplete(
+ host_route_id_, success)))
+ DLOG(ERROR)
+ << "Send(AcceleratedVideoDecoderHostMsg_InitializationComplete) failed";
}
void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
uint32_t requested_num_of_buffers,
+ uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) {
if (dimensions.width() > media::limits::kMaxDimension ||
@@ -226,14 +228,13 @@ void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
return;
}
if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
- host_route_id_,
- requested_num_of_buffers,
- dimensions,
- texture_target))) {
+ host_route_id_, requested_num_of_buffers, textures_per_buffer,
+ dimensions, texture_target))) {
DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
<< "failed";
}
texture_dimensions_ = dimensions;
+ textures_per_buffer_ = textures_per_buffer;
texture_target_ = texture_target;
}
@@ -265,7 +266,7 @@ void GpuVideoDecodeAccelerator::PictureReady(
if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
host_route_id_, picture.picture_buffer_id(),
picture.bitstream_buffer_id(), picture.visible_rect(),
- picture.allow_overlay()))) {
+ picture.allow_overlay(), picture.size_changed()))) {
DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
}
}
@@ -327,161 +328,51 @@ bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
return stub_->channel()->Send(message);
}
-void GpuVideoDecodeAccelerator::Initialize(
- const media::VideoDecodeAccelerator::Config& config,
- IPC::Message* init_done_msg) {
+bool GpuVideoDecodeAccelerator::Initialize(
+ const media::VideoDecodeAccelerator::Config& config) {
DCHECK(!video_decode_accelerator_);
- if (!stub_->channel()->AddRoute(host_route_id_, this)) {
+ if (!stub_->channel()->AddRoute(host_route_id_, stub_->stream_id(), this)) {
DLOG(ERROR) << "Initialize(): failed to add route";
- SendCreateDecoderReply(init_done_msg, false);
+ return false;
}
#if !defined(OS_WIN)
// Ensure we will be able to get a GL context at all before initializing
// non-Windows VDAs.
- if (!make_context_current_.Run()) {
- SendCreateDecoderReply(init_done_msg, false);
- return;
- }
+ if (!make_context_current_cb_.Run())
+ return false;
#endif
- // Array of Create..VDA() function pointers, maybe applicable to the current
- // platform. This list is ordered by priority of use and it should be the
- // same as the order of querying supported profiles of VDAs.
- const GpuVideoDecodeAccelerator::CreateVDAFp create_vda_fps[] = {
- &GpuVideoDecodeAccelerator::CreateDXVAVDA,
- &GpuVideoDecodeAccelerator::CreateV4L2VDA,
- &GpuVideoDecodeAccelerator::CreateV4L2SliceVDA,
- &GpuVideoDecodeAccelerator::CreateVaapiVDA,
- &GpuVideoDecodeAccelerator::CreateVTVDA,
- &GpuVideoDecodeAccelerator::CreateOzoneVDA,
- &GpuVideoDecodeAccelerator::CreateAndroidVDA};
-
- for (const auto& create_vda_function : create_vda_fps) {
- video_decode_accelerator_ = (this->*create_vda_function)();
- if (!video_decode_accelerator_ ||
- !video_decode_accelerator_->Initialize(config, this))
- continue;
-
- if (video_decode_accelerator_->CanDecodeOnIOThread()) {
- filter_ = new MessageFilter(this, host_route_id_);
- stub_->channel()->AddFilter(filter_.get());
- }
- SendCreateDecoderReply(init_done_msg, true);
- return;
- }
- video_decode_accelerator_.reset();
- LOG(ERROR) << "HW video decode not available for profile " << config.profile
- << (config.is_encrypted ? " with encryption" : "");
- SendCreateDecoderReply(init_done_msg, false);
-}
+ scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl> vda_factory =
+ GpuVideoDecodeAcceleratorFactoryImpl::CreateWithGLES2Decoder(
+ get_gl_context_cb_, make_context_current_cb_, bind_image_cb_,
+ get_gles2_decoder_cb_);
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateDXVAVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_WIN)
- if (base::win::GetVersion() >= base::win::VERSION_WIN7) {
- DVLOG(0) << "Initializing DXVA HW decoder for windows.";
- decoder.reset(new DXVAVideoDecodeAccelerator(make_context_current_,
- stub_->decoder()->GetGLContext()));
- } else {
- NOTIMPLEMENTED() << "HW video decode acceleration not available.";
+ if (!vda_factory) {
+ LOG(ERROR) << "Failed creating the VDA factory";
+ return false;
}
-#endif
- return decoder;
-}
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateV4L2VDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
- scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
- if (device.get()) {
- decoder.reset(new V4L2VideoDecodeAccelerator(
- gfx::GLSurfaceEGL::GetHardwareDisplay(),
- stub_->decoder()->GetGLContext()->GetHandle(),
- weak_factory_for_io_.GetWeakPtr(),
- make_context_current_,
- device,
- io_task_runner_));
+ const gpu::GpuPreferences& gpu_preferences =
+ stub_->channel()->gpu_channel_manager()->gpu_preferences();
+ video_decode_accelerator_ =
+ vda_factory->CreateVDA(this, config, gpu_preferences);
+ if (!video_decode_accelerator_) {
+ LOG(ERROR) << "HW video decode not available for profile " << config.profile
+ << (config.is_encrypted ? " with encryption" : "");
+ return false;
}
-#endif
- return decoder;
-}
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateV4L2SliceVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
- scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
- if (device.get()) {
- decoder.reset(new V4L2SliceVideoDecodeAccelerator(
- device,
- gfx::GLSurfaceEGL::GetHardwareDisplay(),
- stub_->decoder()->GetGLContext()->GetHandle(),
- weak_factory_for_io_.GetWeakPtr(),
- make_context_current_,
- io_task_runner_));
+ // Attempt to set up performing decoding tasks on IO thread, if supported by
+ // the VDA.
+ if (video_decode_accelerator_->TryToSetupDecodeOnSeparateThread(
+ weak_factory_for_io_.GetWeakPtr(), io_task_runner_)) {
+ filter_ = new MessageFilter(this, host_route_id_);
+ stub_->channel()->AddFilter(filter_.get());
}
-#endif
- return decoder;
-}
-void GpuVideoDecodeAccelerator::BindImage(uint32_t client_texture_id,
- uint32_t texture_target,
- scoped_refptr<gl::GLImage> image) {
- gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
- gpu::gles2::TextureManager* texture_manager =
- command_decoder->GetContextGroup()->texture_manager();
- gpu::gles2::TextureRef* ref = texture_manager->GetTexture(client_texture_id);
- if (ref) {
- texture_manager->SetLevelImage(ref, texture_target, 0, image.get(),
- gpu::gles2::Texture::BOUND);
- }
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateVaapiVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
- decoder.reset(new VaapiVideoDecodeAccelerator(
- make_context_current_, base::Bind(&GpuVideoDecodeAccelerator::BindImage,
- base::Unretained(this))));
-#endif
- return decoder;
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateVTVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_MACOSX)
- decoder.reset(new VTVideoDecodeAccelerator(
- make_context_current_, base::Bind(&GpuVideoDecodeAccelerator::BindImage,
- base::Unretained(this))));
-#endif
- return decoder;
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateOzoneVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if !defined(OS_CHROMEOS) && defined(USE_OZONE)
- media::MediaOzonePlatform* platform =
- media::MediaOzonePlatform::GetInstance();
- decoder.reset(platform->CreateVideoDecodeAccelerator(make_context_current_));
-#endif
- return decoder;
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateAndroidVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_ANDROID)
- decoder.reset(new AndroidVideoDecodeAccelerator(stub_->decoder()->AsWeakPtr(),
- make_context_current_));
-#endif
- return decoder;
+ return true;
}
void GpuVideoDecodeAccelerator::OnSetCdm(int cdm_id) {
@@ -489,40 +380,17 @@ void GpuVideoDecodeAccelerator::OnSetCdm(int cdm_id) {
video_decode_accelerator_->SetCdm(cdm_id);
}
-// Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
-// true, otherwise on the main thread.
+// Runs on IO thread if VDA::TryToSetupDecodeOnSeparateThread() succeeded,
+// otherwise on the main thread.
void GpuVideoDecodeAccelerator::OnDecode(
- const AcceleratedVideoDecoderMsg_Decode_Params& params) {
+ const media::BitstreamBuffer& bitstream_buffer) {
DCHECK(video_decode_accelerator_);
- if (params.bitstream_buffer_id < 0) {
- DLOG(ERROR) << "BitstreamBuffer id " << params.bitstream_buffer_id
- << " out of range";
- if (child_task_runner_->BelongsToCurrentThread()) {
- NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
- } else {
- child_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
- base::Unretained(this),
- media::VideoDecodeAccelerator::INVALID_ARGUMENT));
- }
- return;
- }
-
- media::BitstreamBuffer bitstream_buffer(params.bitstream_buffer_id,
- params.buffer_handle, params.size,
- params.presentation_timestamp);
- if (!params.key_id.empty()) {
- bitstream_buffer.SetDecryptConfig(
- media::DecryptConfig(params.key_id, params.iv, params.subsamples));
- }
-
video_decode_accelerator_->Decode(bitstream_buffer);
}
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
const std::vector<int32_t>& buffer_ids,
- const std::vector<uint32_t>& texture_ids) {
+ const std::vector<media::PictureBuffer::TextureIds>& texture_ids) {
if (buffer_ids.size() != texture_ids.size()) {
NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
return;
@@ -540,51 +408,65 @@ void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
return;
}
- gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
- texture_ids[i]);
- if (!texture_ref) {
- DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
- NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
- return;
- }
- gpu::gles2::Texture* info = texture_ref->texture();
- if (info->target() != texture_target_) {
- DLOG(ERROR) << "Texture target mismatch for texture id "
- << texture_ids[i];
+ media::PictureBuffer::TextureIds buffer_texture_ids = texture_ids[i];
+ media::PictureBuffer::TextureIds service_ids;
+ if (buffer_texture_ids.size() != textures_per_buffer_) {
+ DLOG(ERROR) << "Requested " << textures_per_buffer_
+ << " textures per picture buffer, got "
+ << buffer_texture_ids.size();
NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
return;
}
- if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
- texture_target_ == GL_TEXTURE_RECTANGLE_ARB) {
- // These textures have their dimensions defined by the underlying storage.
- // Use |texture_dimensions_| for this size.
- texture_manager->SetLevelInfo(
- texture_ref, texture_target_, 0, GL_RGBA, texture_dimensions_.width(),
- texture_dimensions_.height(), 1, 0, GL_RGBA, 0, gfx::Rect());
- } else {
- // For other targets, texture dimensions should already be defined.
- GLsizei width = 0, height = 0;
- info->GetLevelSize(texture_target_, 0, &width, &height, nullptr);
- if (width != texture_dimensions_.width() ||
- height != texture_dimensions_.height()) {
- DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
+ for (size_t j = 0; j < textures_per_buffer_; j++) {
+ gpu::gles2::TextureRef* texture_ref =
+ texture_manager->GetTexture(buffer_texture_ids[j]);
+ if (!texture_ref) {
+ DLOG(ERROR) << "Failed to find texture id " << buffer_texture_ids[j];
NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
return;
}
-
- // TODO(dshwang): after moving to D3D11, remove this. crbug.com/438691
- GLenum format =
- video_decode_accelerator_.get()->GetSurfaceInternalFormat();
- if (format != GL_RGBA) {
- texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, format,
- width, height, 1, 0, format, 0,
- gfx::Rect());
+ gpu::gles2::Texture* info = texture_ref->texture();
+ if (info->target() != texture_target_) {
+ DLOG(ERROR) << "Texture target mismatch for texture id "
+ << buffer_texture_ids[j];
+ NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
+ return;
+ }
+ if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
+ texture_target_ == GL_TEXTURE_RECTANGLE_ARB) {
+ // These textures have their dimensions defined by the underlying
+ // storage. Use |texture_dimensions_| for this size.
+ texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, GL_RGBA,
+ texture_dimensions_.width(),
+ texture_dimensions_.height(), 1, 0,
+ GL_RGBA, 0, gfx::Rect());
+ } else {
+ // For other targets, texture dimensions should already be defined.
+ GLsizei width = 0, height = 0;
+ info->GetLevelSize(texture_target_, 0, &width, &height, nullptr);
+ if (width != texture_dimensions_.width() ||
+ height != texture_dimensions_.height()) {
+ DLOG(ERROR) << "Size mismatch for texture id "
+ << buffer_texture_ids[j];
+ NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
+ return;
+ }
+
+ // TODO(dshwang): after moving to D3D11, remove this. crbug.com/438691
+ GLenum format =
+ video_decode_accelerator_.get()->GetSurfaceInternalFormat();
+ if (format != GL_RGBA) {
+ texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, format,
+ width, height, 1, 0, format, 0,
+ gfx::Rect());
+ }
}
+ service_ids.push_back(texture_ref->service_id());
+ textures.push_back(texture_ref);
}
buffers.push_back(media::PictureBuffer(buffer_ids[i], texture_dimensions_,
- texture_ref->service_id(),
- texture_ids[i]));
- textures.push_back(texture_ref);
+ service_ids, buffer_texture_ids));
}
video_decode_accelerator_->AssignPictureBuffers(buffers);
DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
@@ -637,10 +519,4 @@ void GpuVideoDecodeAccelerator::SetTextureCleared(
uncleared_textures_.erase(it);
}
-void GpuVideoDecodeAccelerator::SendCreateDecoderReply(IPC::Message* message,
- bool succeeded) {
- GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(message, succeeded);
- Send(message);
-}
-
} // namespace content
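
For orientation, the following is a minimal sketch, not part of the patch, of the VDA creation flow that the rewritten Initialize() above delegates to the new factory. It assumes the callback aliases introduced by gpu_video_decode_accelerator_helpers.h later in this change; CreateVdaWithFactory is a hypothetical free function used only for illustration.

scoped_ptr<media::VideoDecodeAccelerator> CreateVdaWithFactory(
    const content::GetGLContextCallback& get_gl_context_cb,
    const content::MakeGLContextCurrentCallback& make_context_current_cb,
    const content::BindGLImageCallback& bind_image_cb,
    const content::GetGLES2DecoderCallback& get_gles2_decoder_cb,
    media::VideoDecodeAccelerator::Client* client,
    const media::VideoDecodeAccelerator::Config& config,
    const gpu::GpuPreferences& gpu_preferences) {
  // Package the stub-bound callbacks into a factory; the factory owns the
  // platform-specific VDA selection from here on.
  scoped_ptr<content::GpuVideoDecodeAcceleratorFactoryImpl> factory =
      content::GpuVideoDecodeAcceleratorFactoryImpl::CreateWithGLES2Decoder(
          get_gl_context_cb, make_context_current_cb, bind_image_cb,
          get_gles2_decoder_cb);
  if (!factory)
    return nullptr;
  // CreateVDA() walks the per-platform Create*VDA() helpers in priority order
  // and returns the first VDA whose Initialize(config, client) succeeds.
  return factory->CreateVDA(client, config, gpu_preferences);
}
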
diff --git a/chromium/content/common/gpu/media/gpu_video_decode_accelerator.h b/chromium/content/common/gpu/media/gpu_video_decode_accelerator.h
index eb6459b37c3..47859d957f9 100644
--- a/chromium/content/common/gpu/media/gpu_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/gpu_video_decode_accelerator.h
@@ -15,15 +15,19 @@
#include "base/memory/ref_counted.h"
#include "base/memory/shared_memory.h"
#include "base/synchronization/waitable_event.h"
-#include "content/common/gpu/gpu_command_buffer_stub.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/config/gpu_info.h"
+#include "gpu/ipc/service/gpu_command_buffer_stub.h"
+#include "gpu/ipc/service/gpu_command_buffer_stub.h"
#include "ipc/ipc_listener.h"
#include "ipc/ipc_sender.h"
#include "media/video/video_decode_accelerator.h"
#include "ui/gfx/geometry/size.h"
-struct AcceleratedVideoDecoderMsg_Decode_Params;
+namespace gpu {
+struct GpuPreferences;
+} // namespace gpu
namespace content {
@@ -31,27 +35,29 @@ class GpuVideoDecodeAccelerator
: public IPC::Listener,
public IPC::Sender,
public media::VideoDecodeAccelerator::Client,
- public GpuCommandBufferStub::DestructionObserver {
+ public gpu::GpuCommandBufferStub::DestructionObserver {
public:
// Each of the arguments to the constructor must outlive this object.
// |stub->decoder()| will be made current around any operation that touches
// the underlying VDA so that it can make GL calls safely.
GpuVideoDecodeAccelerator(
int32_t host_route_id,
- GpuCommandBufferStub* stub,
+ gpu::GpuCommandBufferStub* stub,
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
// Static query for the capabilities, which includes the supported profiles.
// This query calls the appropriate platform-specific version. The returned
// capabilities will not contain duplicate supported profile entries.
- static gpu::VideoDecodeAcceleratorCapabilities GetCapabilities();
+ static gpu::VideoDecodeAcceleratorCapabilities GetCapabilities(
+ const gpu::GpuPreferences& gpu_preferences);
// IPC::Listener implementation.
bool OnMessageReceived(const IPC::Message& message) override;
// media::VideoDecodeAccelerator::Client implementation.
- void NotifyCdmAttached(bool success) override;
+ void NotifyInitializationComplete(bool success) override;
void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
+ uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) override;
void DismissPictureBuffer(int32_t picture_buffer_id) override;
@@ -69,33 +75,22 @@ class GpuVideoDecodeAccelerator
// Initialize VDAs from the set of VDAs supported for current platform until
// one of them succeeds for given |config|. Send the |init_done_msg| when
- // done. filter_ is passed to GpuCommandBufferStub channel only if the chosen
- // VDA can decode on IO thread.
- void Initialize(const media::VideoDecodeAccelerator::Config& config,
- IPC::Message* init_done_msg);
+ // done. filter_ is passed to gpu::GpuCommandBufferStub channel only if the
+ // chosen VDA can decode on IO thread.
+ bool Initialize(const media::VideoDecodeAccelerator::Config& config);
private:
- typedef scoped_ptr<media::VideoDecodeAccelerator>(
- GpuVideoDecodeAccelerator::*CreateVDAFp)();
-
class MessageFilter;
- scoped_ptr<media::VideoDecodeAccelerator> CreateDXVAVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2VDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2SliceVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateVaapiVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateVTVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateOzoneVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateAndroidVDA();
-
// We only allow self-delete, from OnWillDestroyStub(), after cleanup there.
~GpuVideoDecodeAccelerator() override;
// Handlers for IPC messages.
void OnSetCdm(int cdm_id);
- void OnDecode(const AcceleratedVideoDecoderMsg_Decode_Params& params);
- void OnAssignPictureBuffers(const std::vector<int32_t>& buffer_ids,
- const std::vector<uint32_t>& texture_ids);
+ void OnDecode(const media::BitstreamBuffer& bitstream_buffer);
+ void OnAssignPictureBuffers(
+ const std::vector<int32_t>& buffer_ids,
+ const std::vector<media::PictureBuffer::TextureIds>& texture_ids);
void OnReusePictureBuffer(int32_t picture_buffer_id);
void OnFlush();
void OnReset();
@@ -107,28 +102,28 @@ class GpuVideoDecodeAccelerator
// Sets the texture to cleared.
void SetTextureCleared(const media::Picture& picture);
- // Helper for replying to the creation request.
- void SendCreateDecoderReply(IPC::Message* message, bool succeeded);
-
- // Helper to bind |image| to the texture specified by |client_texture_id|.
- void BindImage(uint32_t client_texture_id,
- uint32_t texture_target,
- scoped_refptr<gl::GLImage> image);
-
// Route ID to communicate with the host.
const int32_t host_route_id_;
- // Unowned pointer to the underlying GpuCommandBufferStub. |this| is
+ // Unowned pointer to the underlying gpu::GpuCommandBufferStub. |this| is
// registered as a DestructionObserver of |stub_| and will self-delete when
// |stub_| is destroyed.
- GpuCommandBufferStub* const stub_;
+ gpu::GpuCommandBufferStub* const stub_;
// The underlying VideoDecodeAccelerator.
scoped_ptr<media::VideoDecodeAccelerator> video_decode_accelerator_;
+ // Callback to return current GLContext, if available.
+ GetGLContextCallback get_gl_context_cb_;
+
// Callback for making the relevant context current for GL calls.
- // Returns false if failed.
- base::Callback<bool(void)> make_context_current_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
+
+ // Callback to bind a GLImage to a given texture id and target.
+ BindGLImageCallback bind_image_cb_;
+
+ // Callback to return a WeakPtr to GLES2Decoder.
+ GetGLES2DecoderCallback get_gles2_decoder_cb_;
// The texture dimensions as requested by ProvidePictureBuffers().
gfx::Size texture_dimensions_;
@@ -136,6 +131,10 @@ class GpuVideoDecodeAccelerator
// The texture target as requested by ProvidePictureBuffers().
uint32_t texture_target_;
+ // The number of textures per picture buffer as requested by
+ // ProvidePictureBuffers().
+ uint32_t textures_per_buffer_;
+
// The message filter to run VDA::Decode on IO thread if VDA supports it.
scoped_refptr<MessageFilter> filter_;
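
To illustrate the new per-buffer texture grouping, here is a small sketch (values made up, not from the patch) of the payload OnAssignPictureBuffers() now expects when textures_per_buffer_ is 2; media::PictureBuffer::TextureIds is a vector of client texture ids.

// Illustrative only: two picture buffers, two textures each.
std::vector<int32_t> buffer_ids = {1, 2};
std::vector<media::PictureBuffer::TextureIds> texture_ids = {
    {10, 11},  // client texture ids backing picture buffer 1
    {12, 13},  // client texture ids backing picture buffer 2
};
// buffer_ids.size() must equal texture_ids.size(), and each inner vector must
// contain exactly textures_per_buffer_ entries, otherwise the handler reports
// media::VideoDecodeAccelerator::INVALID_ARGUMENT.
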
diff --git a/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc
new file mode 100644
index 00000000000..048314863d9
--- /dev/null
+++ b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc
@@ -0,0 +1,242 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
+
+#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_version.h"
+#include "content/common/gpu/media/dxva_video_decode_accelerator_win.h"
+#elif defined(OS_MACOSX)
+#include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
+#elif defined(OS_CHROMEOS)
+#if defined(USE_V4L2_CODEC)
+#include "content/common/gpu/media/v4l2_device.h"
+#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
+#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
+#include "ui/gl/gl_surface_egl.h"
+#endif
+#if defined(ARCH_CPU_X86_FAMILY)
+#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
+#include "ui/gl/gl_implementation.h"
+#endif
+#elif defined(OS_ANDROID)
+#include "content/common/gpu/media/android_video_decode_accelerator.h"
+#endif
+
+namespace content {
+
+namespace {
+static base::WeakPtr<gpu::gles2::GLES2Decoder> GetEmptyGLES2Decoder() {
+ NOTREACHED() << "VDA requests a GLES2Decoder, but client did not provide it";
+ return base::WeakPtr<gpu::gles2::GLES2Decoder>();
+}
+}
+
+// static
+scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
+GpuVideoDecodeAcceleratorFactoryImpl::Create(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb) {
+ return make_scoped_ptr(new GpuVideoDecodeAcceleratorFactoryImpl(
+ get_gl_context_cb, make_context_current_cb, bind_image_cb,
+ base::Bind(&GetEmptyGLES2Decoder)));
+}
+
+// static
+scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateWithGLES2Decoder(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb) {
+ return make_scoped_ptr(new GpuVideoDecodeAcceleratorFactoryImpl(
+ get_gl_context_cb, make_context_current_cb, bind_image_cb,
+ get_gles2_decoder_cb));
+}
+
+// static
+gpu::VideoDecodeAcceleratorCapabilities
+GpuVideoDecodeAcceleratorFactoryImpl::GetDecoderCapabilities(
+ const gpu::GpuPreferences& gpu_preferences) {
+ media::VideoDecodeAccelerator::Capabilities capabilities;
+ if (gpu_preferences.disable_accelerated_video_decode)
+ return gpu::VideoDecodeAcceleratorCapabilities();
+
+ // Query VDAs for their capabilities and construct a set of supported
+ // profiles for current platform. This must be done in the same order as in
+ // CreateVDA(), as we currently preserve additional capabilities (such as
+ // resolutions supported) only for the first VDA supporting the given codec
+ // profile (instead of calculating a superset).
+ // TODO(posciak,henryhsu): improve this so that we choose a superset of
+ // resolutions and other supported profile parameters.
+#if defined(OS_WIN)
+ capabilities.supported_profiles =
+ DXVAVideoDecodeAccelerator::GetSupportedProfiles();
+#elif defined(OS_CHROMEOS)
+ media::VideoDecodeAccelerator::SupportedProfiles vda_profiles;
+#if defined(USE_V4L2_CODEC)
+ vda_profiles = V4L2VideoDecodeAccelerator::GetSupportedProfiles();
+ media::GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
+ vda_profiles, &capabilities.supported_profiles);
+ vda_profiles = V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles();
+ media::GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
+ vda_profiles, &capabilities.supported_profiles);
+#endif
+#if defined(ARCH_CPU_X86_FAMILY)
+ vda_profiles = VaapiVideoDecodeAccelerator::GetSupportedProfiles();
+ media::GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
+ vda_profiles, &capabilities.supported_profiles);
+#endif
+#elif defined(OS_MACOSX)
+ capabilities.supported_profiles =
+ VTVideoDecodeAccelerator::GetSupportedProfiles();
+#elif defined(OS_ANDROID)
+ capabilities =
+ AndroidVideoDecodeAccelerator::GetCapabilities(gpu_preferences);
+#endif
+ return media::GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeCapabilities(
+ capabilities);
+}
+
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateVDA(
+ media::VideoDecodeAccelerator::Client* client,
+ const media::VideoDecodeAccelerator::Config& config,
+ const gpu::GpuPreferences& gpu_preferences) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (gpu_preferences.disable_accelerated_video_decode)
+ return nullptr;
+
+ // Array of Create..VDA() function pointers, potentially usable on current
+ // platform. This list is ordered by priority, from most to least preferred,
+ // if applicable. This list must be in the same order as the querying order
+ // in GetDecoderCapabilities() above.
+ using CreateVDAFp = scoped_ptr<media::VideoDecodeAccelerator> (
+ GpuVideoDecodeAcceleratorFactoryImpl::*)(const gpu::GpuPreferences&)
+ const;
+ const CreateVDAFp create_vda_fps[] = {
+#if defined(OS_WIN)
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateDXVAVDA,
+#endif
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateV4L2VDA,
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateV4L2SVDA,
+#endif
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateVaapiVDA,
+#endif
+#if defined(OS_MACOSX)
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateVTVDA,
+#endif
+#if defined(OS_ANDROID)
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateAndroidVDA,
+#endif
+ };
+
+ scoped_ptr<media::VideoDecodeAccelerator> vda;
+
+ for (const auto& create_vda_function : create_vda_fps) {
+ vda = (this->*create_vda_function)(gpu_preferences);
+ if (vda && vda->Initialize(config, client))
+ return vda;
+ }
+
+ return nullptr;
+}
+
+#if defined(OS_WIN)
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateDXVAVDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ if (base::win::GetVersion() >= base::win::VERSION_WIN7) {
+ DVLOG(0) << "Initializing DXVA HW decoder for windows.";
+ decoder.reset(new DXVAVideoDecodeAccelerator(
+ get_gl_context_cb_, make_context_current_cb_,
+ gpu_preferences.enable_accelerated_vpx_decode));
+ }
+ return decoder;
+}
+#endif
+
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateV4L2VDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
+ if (device.get()) {
+ decoder.reset(new V4L2VideoDecodeAccelerator(
+ gfx::GLSurfaceEGL::GetHardwareDisplay(), get_gl_context_cb_,
+ make_context_current_cb_, device));
+ }
+ return decoder;
+}
+
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateV4L2SVDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
+ if (device.get()) {
+ decoder.reset(new V4L2SliceVideoDecodeAccelerator(
+ device, gfx::GLSurfaceEGL::GetHardwareDisplay(), get_gl_context_cb_,
+ make_context_current_cb_));
+ }
+ return decoder;
+}
+#endif
+
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateVaapiVDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ decoder.reset(new VaapiVideoDecodeAccelerator(make_context_current_cb_,
+ bind_image_cb_));
+ return decoder;
+}
+#endif
+
+#if defined(OS_MACOSX)
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateVTVDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ decoder.reset(
+ new VTVideoDecodeAccelerator(make_context_current_cb_, bind_image_cb_));
+ return decoder;
+}
+#endif
+
+#if defined(OS_ANDROID)
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateAndroidVDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ decoder.reset(new AndroidVideoDecodeAccelerator(make_context_current_cb_,
+ get_gles2_decoder_cb_));
+ return decoder;
+}
+#endif
+
+GpuVideoDecodeAcceleratorFactoryImpl::GpuVideoDecodeAcceleratorFactoryImpl(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb)
+ : get_gl_context_cb_(get_gl_context_cb),
+ make_context_current_cb_(make_context_current_cb),
+ bind_image_cb_(bind_image_cb),
+ get_gles2_decoder_cb_(get_gles2_decoder_cb) {}
+
+GpuVideoDecodeAcceleratorFactoryImpl::~GpuVideoDecodeAcceleratorFactoryImpl() {}
+
+} // namespace content
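
As a usage sketch (not part of this patch), this is how a caller might enumerate what GetDecoderCapabilities() reports for the current platform; field names follow gpu/config/gpu_info.h, and LogSupportedDecodeProfiles is a hypothetical helper.

void LogSupportedDecodeProfiles(const gpu::GpuPreferences& gpu_preferences) {
  gpu::VideoDecodeAcceleratorCapabilities capabilities =
      content::GpuVideoDecodeAcceleratorFactoryImpl::GetDecoderCapabilities(
          gpu_preferences);
  for (const auto& profile : capabilities.supported_profiles) {
    // Each entry pairs a codec profile with the resolution range supported by
    // the first VDA (in CreateVDA() order) that can decode it.
    DVLOG(1) << "profile " << profile.profile << " "
             << profile.min_resolution.ToString() << " to "
             << profile.max_resolution.ToString();
  }
}
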
diff --git a/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h
new file mode 100644
index 00000000000..2d4c10b8c32
--- /dev/null
+++ b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h
@@ -0,0 +1,123 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
+#define CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
+
+#include "base/callback.h"
+#include "base/threading/thread_checker.h"
+#include "content/common/content_export.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "gpu/config/gpu_info.h"
+#include "media/video/video_decode_accelerator.h"
+
+namespace gfx {
+class GLContext;
+}
+
+namespace gl {
+class GLImage;
+}
+
+namespace gpu {
+struct GpuPreferences;
+
+namespace gles2 {
+class GLES2Decoder;
+}
+}
+
+namespace content {
+
+// TODO(posciak): this class should be an implementation of
+// content::GpuVideoDecodeAcceleratorFactory, however that can only be achieved
+// once this is moved out of content/common, see crbug.com/597150 and related.
+class CONTENT_EXPORT GpuVideoDecodeAcceleratorFactoryImpl {
+ public:
+ ~GpuVideoDecodeAcceleratorFactoryImpl();
+
+ // Return current GLContext.
+ using GetGLContextCallback = base::Callback<gfx::GLContext*(void)>;
+
+ // Make the applicable GL context current. To be called by VDAs before
+ // executing any GL calls. Return true on success, false otherwise.
+ using MakeGLContextCurrentCallback = base::Callback<bool(void)>;
+
+ // Bind |image| to |client_texture_id| given |texture_target|. If
+ // |can_bind_to_sampler| is true, then the image may be used as a sampler
+ // directly, otherwise a copy to a staging buffer is required.
+ // Return true on success, false otherwise.
+ using BindGLImageCallback =
+ base::Callback<bool(uint32_t client_texture_id,
+ uint32_t texture_target,
+ const scoped_refptr<gl::GLImage>& image,
+ bool can_bind_to_sampler)>;
+
+ // Return a WeakPtr to a GLES2Decoder, if one is available.
+ using GetGLES2DecoderCallback =
+ base::Callback<base::WeakPtr<gpu::gles2::GLES2Decoder>(void)>;
+
+ static scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl> Create(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb);
+
+ static scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
+ CreateWithGLES2Decoder(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb);
+
+ static gpu::VideoDecodeAcceleratorCapabilities GetDecoderCapabilities(
+ const gpu::GpuPreferences& gpu_preferences);
+
+ scoped_ptr<media::VideoDecodeAccelerator> CreateVDA(
+ media::VideoDecodeAccelerator::Client* client,
+ const media::VideoDecodeAccelerator::Config& config,
+ const gpu::GpuPreferences& gpu_preferences);
+
+ private:
+ GpuVideoDecodeAcceleratorFactoryImpl(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb);
+
+#if defined(OS_WIN)
+ scoped_ptr<media::VideoDecodeAccelerator> CreateDXVAVDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+#endif
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
+ scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2VDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+ scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2SVDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+#endif
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+ scoped_ptr<media::VideoDecodeAccelerator> CreateVaapiVDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+#endif
+#if defined(OS_MACOSX)
+ scoped_ptr<media::VideoDecodeAccelerator> CreateVTVDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+#endif
+#if defined(OS_ANDROID)
+ scoped_ptr<media::VideoDecodeAccelerator> CreateAndroidVDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+#endif
+
+ const GetGLContextCallback get_gl_context_cb_;
+ const MakeGLContextCurrentCallback make_context_current_cb_;
+ const BindGLImageCallback bind_image_cb_;
+ const GetGLES2DecoderCallback get_gles2_decoder_cb_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(GpuVideoDecodeAcceleratorFactoryImpl);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
diff --git a/chromium/content/common/gpu/media/gpu_video_decode_accelerator_helpers.h b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_helpers.h
new file mode 100644
index 00000000000..1717f592603
--- /dev/null
+++ b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_helpers.h
@@ -0,0 +1,59 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
+#define CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
+
+#include "base/callback.h"
+#include "base/memory/weak_ptr.h"
+
+namespace gfx {
+class GLContext;
+}
+
+namespace gl {
+class GLImage;
+}
+
+namespace gpu {
+namespace gles2 {
+class GLES2Decoder;
+}
+}
+
+namespace content {
+
+// Helpers/defines for specific VideoDecodeAccelerator implementations in GPU
+// process. Which callbacks are required depends on the implementation.
+//
+// Note that these callbacks may be called more than once, and so must own/share
+// ownership of any objects bound to them.
+//
+// Unless specified otherwise, these callbacks must be executed on the GPU Child
+// thread (i.e. the thread which the VDAs are initialized on).
+
+// Return current GLContext.
+using GetGLContextCallback = base::Callback<gfx::GLContext*(void)>;
+
+// Make the applicable GL context current. To be called by VDAs before
+// executing any GL calls. Return true on success, false otherwise.
+using MakeGLContextCurrentCallback = base::Callback<bool(void)>;
+
+// Bind |image| to |client_texture_id| given |texture_target|. If
+// |can_bind_to_sampler| is true, then the image may be used as a sampler
+// directly, otherwise a copy to a staging buffer is required.
+// Return true on success, false otherwise.
+using BindGLImageCallback =
+ base::Callback<bool(uint32_t client_texture_id,
+ uint32_t texture_target,
+ const scoped_refptr<gl::GLImage>& image,
+ bool can_bind_to_sampler)>;
+
+// Return a WeakPtr to a GLES2Decoder, if one is available.
+using GetGLES2DecoderCallback =
+ base::Callback<base::WeakPtr<gpu::gles2::GLES2Decoder>(void)>;
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
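
The callbacks declared above are typically bound to a gpu::GpuCommandBufferStub, as done by the anonymous-namespace helpers in gpu_video_decode_accelerator.cc earlier in this change. A minimal sketch follows; MakeCurrentForStub and BuildMakeContextCurrentCallback are hypothetical names used only for illustration.

static bool MakeCurrentForStub(
    const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
  // Returning false when the stub is gone lets a VDA fail gracefully instead
  // of touching a dead GL context.
  return stub && stub->decoder()->MakeCurrent();
}

content::MakeGLContextCurrentCallback BuildMakeContextCurrentCallback(
    gpu::GpuCommandBufferStub* stub) {
  // Bind a WeakPtr, not a raw pointer: the callback may be run on the GPU
  // Child thread after the stub has been destroyed and must stay safe to call.
  return base::Bind(&MakeCurrentForStub, stub->AsWeakPtr());
}
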
diff --git a/chromium/content/common/gpu/media/gpu_video_encode_accelerator.cc b/chromium/content/common/gpu/media/gpu_video_encode_accelerator.cc
index 7dd9a082b1d..7b1457e88f1 100644
--- a/chromium/content/common/gpu/media/gpu_video_encode_accelerator.cc
+++ b/chromium/content/common/gpu/media/gpu_video_encode_accelerator.cc
@@ -11,15 +11,15 @@
#include "base/numerics/safe_math.h"
#include "base/sys_info.h"
#include "build/build_config.h"
-#include "content/common/gpu/client/gpu_memory_buffer_impl.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/gpu/media/gpu_video_accelerator_util.h"
-#include "content/public/common/content_switches.h"
+#include "gpu/ipc/client/gpu_memory_buffer_impl.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
#include "ipc/ipc_message_macros.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/limits.h"
#include "media/base/video_frame.h"
+#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
+#include "media/gpu/ipc/common/media_messages.h"
#if defined(OS_CHROMEOS)
#if defined(USE_V4L2_CODEC)
@@ -30,20 +30,14 @@
#endif
#elif defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
#include "content/common/gpu/media/android_video_encode_accelerator.h"
+#elif defined(OS_MACOSX)
+#include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
#endif
namespace content {
-namespace {
-
-// Allocation and destruction of buffer are done on the Browser process, so we
-// don't need to handle synchronization here.
-void DestroyGpuMemoryBuffer(const gpu::SyncToken& sync_token) {}
-
-} // namespace
-
static bool MakeDecoderContextCurrent(
- const base::WeakPtr<GpuCommandBufferStub> stub) {
+ const base::WeakPtr<gpu::GpuCommandBufferStub> stub) {
if (!stub) {
DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
return false;
@@ -57,8 +51,9 @@ static bool MakeDecoderContextCurrent(
return true;
}
-GpuVideoEncodeAccelerator::GpuVideoEncodeAccelerator(int32_t host_route_id,
- GpuCommandBufferStub* stub)
+GpuVideoEncodeAccelerator::GpuVideoEncodeAccelerator(
+ int32_t host_route_id,
+ gpu::GpuCommandBufferStub* stub)
: host_route_id_(host_route_id),
stub_(stub),
input_format_(media::PIXEL_FORMAT_UNKNOWN),
@@ -75,12 +70,11 @@ GpuVideoEncodeAccelerator::~GpuVideoEncodeAccelerator() {
DCHECK(!encoder_);
}
-void GpuVideoEncodeAccelerator::Initialize(
+bool GpuVideoEncodeAccelerator::Initialize(
media::VideoPixelFormat input_format,
const gfx::Size& input_visible_size,
media::VideoCodecProfile output_profile,
- uint32_t initial_bitrate,
- IPC::Message* init_done_msg) {
+ uint32_t initial_bitrate) {
DVLOG(2) << "GpuVideoEncodeAccelerator::Initialize(): "
"input_format=" << input_format
<< ", input_visible_size=" << input_visible_size.ToString()
@@ -88,11 +82,10 @@ void GpuVideoEncodeAccelerator::Initialize(
<< ", initial_bitrate=" << initial_bitrate;
DCHECK(!encoder_);
- if (!stub_->channel()->AddRoute(host_route_id_, this)) {
+ if (!stub_->channel()->AddRoute(host_route_id_, stub_->stream_id(), this)) {
DLOG(ERROR) << "GpuVideoEncodeAccelerator::Initialize(): "
"failed to add route";
- SendCreateEncoderReply(init_done_msg, false);
- return;
+ return false;
}
if (input_visible_size.width() > media::limits::kMaxDimension ||
@@ -101,12 +94,14 @@ void GpuVideoEncodeAccelerator::Initialize(
DLOG(ERROR) << "GpuVideoEncodeAccelerator::Initialize(): "
"input_visible_size " << input_visible_size.ToString()
<< " too large";
- SendCreateEncoderReply(init_done_msg, false);
- return;
+ return false;
}
- std::vector<GpuVideoEncodeAccelerator::CreateVEAFp>
- create_vea_fps = CreateVEAFps();
+ const gpu::GpuPreferences& gpu_preferences =
+ stub_->channel()->gpu_channel_manager()->gpu_preferences();
+
+ std::vector<GpuVideoEncodeAccelerator::CreateVEAFp> create_vea_fps =
+ CreateVEAFps(gpu_preferences);
// Try all possible encoders and use the first successful encoder.
for (size_t i = 0; i < create_vea_fps.size(); ++i) {
encoder_ = (*create_vea_fps[i])();
@@ -117,14 +112,13 @@ void GpuVideoEncodeAccelerator::Initialize(
this)) {
input_format_ = input_format;
input_visible_size_ = input_visible_size;
- SendCreateEncoderReply(init_done_msg, true);
- return;
+ return true;
}
}
encoder_.reset();
DLOG(ERROR)
<< "GpuVideoEncodeAccelerator::Initialize(): VEA initialization failed";
- SendCreateEncoderReply(init_done_msg, false);
+ return false;
}
bool GpuVideoEncodeAccelerator::OnMessageReceived(const IPC::Message& message) {
@@ -176,10 +170,11 @@ void GpuVideoEncodeAccelerator::OnWillDestroyStub() {
// static
gpu::VideoEncodeAcceleratorSupportedProfiles
-GpuVideoEncodeAccelerator::GetSupportedProfiles() {
+GpuVideoEncodeAccelerator::GetSupportedProfiles(
+ const gpu::GpuPreferences& gpu_preferences) {
media::VideoEncodeAccelerator::SupportedProfiles profiles;
- std::vector<GpuVideoEncodeAccelerator::CreateVEAFp>
- create_vea_fps = CreateVEAFps();
+ std::vector<GpuVideoEncodeAccelerator::CreateVEAFp> create_vea_fps =
+ CreateVEAFps(gpu_preferences);
for (size_t i = 0; i < create_vea_fps.size(); ++i) {
scoped_ptr<media::VideoEncodeAccelerator>
@@ -188,55 +183,73 @@ GpuVideoEncodeAccelerator::GetSupportedProfiles() {
continue;
media::VideoEncodeAccelerator::SupportedProfiles vea_profiles =
encoder->GetSupportedProfiles();
- GpuVideoAcceleratorUtil::InsertUniqueEncodeProfiles(
- vea_profiles, &profiles);
+ media::GpuVideoAcceleratorUtil::InsertUniqueEncodeProfiles(vea_profiles,
+ &profiles);
}
- return GpuVideoAcceleratorUtil::ConvertMediaToGpuEncodeProfiles(profiles);
+ return media::GpuVideoAcceleratorUtil::ConvertMediaToGpuEncodeProfiles(
+ profiles);
}
// static
std::vector<GpuVideoEncodeAccelerator::CreateVEAFp>
-GpuVideoEncodeAccelerator::CreateVEAFps() {
+GpuVideoEncodeAccelerator::CreateVEAFps(
+ const gpu::GpuPreferences& gpu_preferences) {
std::vector<GpuVideoEncodeAccelerator::CreateVEAFp> create_vea_fps;
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateV4L2VEA);
- create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateVaapiVEA);
- create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateAndroidVEA);
+#endif
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+ if (!gpu_preferences.disable_vaapi_accelerated_video_encode)
+ create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateVaapiVEA);
+#endif
+#if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
+ if (!gpu_preferences.disable_web_rtc_hw_encoding)
+ create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateAndroidVEA);
+#endif
+#if defined(OS_MACOSX)
+ create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateVTVEA);
+#endif
return create_vea_fps;
}
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
// static
scoped_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateV4L2VEA() {
scoped_ptr<media::VideoEncodeAccelerator> encoder;
-#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kEncoder);
if (device)
encoder.reset(new V4L2VideoEncodeAccelerator(device));
-#endif
return encoder;
}
+#endif
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
// static
scoped_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateVaapiVEA() {
- scoped_ptr<media::VideoEncodeAccelerator> encoder;
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
- const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- if (!cmd_line->HasSwitch(switches::kDisableVaapiAcceleratedVideoEncode))
- encoder.reset(new VaapiVideoEncodeAccelerator());
-#endif
- return encoder;
+ return make_scoped_ptr<media::VideoEncodeAccelerator>(
+ new VaapiVideoEncodeAccelerator());
}
+#endif
+#if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
// static
scoped_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateAndroidVEA() {
- scoped_ptr<media::VideoEncodeAccelerator> encoder;
-#if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
- encoder.reset(new AndroidVideoEncodeAccelerator());
+ return make_scoped_ptr<media::VideoEncodeAccelerator>(
+ new AndroidVideoEncodeAccelerator());
+}
#endif
- return encoder;
+
+#if defined(OS_MACOSX)
+// static
+scoped_ptr<media::VideoEncodeAccelerator>
+GpuVideoEncodeAccelerator::CreateVTVEA() {
+ return make_scoped_ptr<media::VideoEncodeAccelerator>(
+ new VTVideoEncodeAccelerator());
}
+#endif
void GpuVideoEncodeAccelerator::OnEncode(
const AcceleratedVideoEncoderMsg_Encode_Params& params) {
@@ -315,79 +328,8 @@ void GpuVideoEncodeAccelerator::OnEncode2(
<< params.frame_id << ", size=" << params.size.ToString()
<< ", force_keyframe=" << params.force_keyframe << ", handle type="
<< params.gpu_memory_buffer_handles[0].type;
- DCHECK_EQ(media::PIXEL_FORMAT_I420, input_format_);
- DCHECK_EQ(media::VideoFrame::NumPlanes(input_format_),
- params.gpu_memory_buffer_handles.size());
-
- bool map_result = true;
- uint8_t* data[media::VideoFrame::kMaxPlanes];
- int32_t strides[media::VideoFrame::kMaxPlanes];
- ScopedVector<gfx::GpuMemoryBuffer> buffers;
- const auto& handles = params.gpu_memory_buffer_handles;
- for (size_t i = 0; i < handles.size(); ++i) {
- const size_t width =
- media::VideoFrame::Columns(i, input_format_, params.size.width());
- const size_t height =
- media::VideoFrame::Rows(i, input_format_, params.size.height());
- scoped_ptr<gfx::GpuMemoryBuffer> buffer =
- GpuMemoryBufferImpl::CreateFromHandle(
- handles[i], gfx::Size(width, height), gfx::BufferFormat::R_8,
- gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
- media::BindToCurrentLoop(base::Bind(&DestroyGpuMemoryBuffer)));
-
- // TODO(emircan): Refactor such that each frame is mapped once.
- // See http://crbug/536938.
- if (!buffer.get() || !buffer->Map()) {
- map_result = false;
- continue;
- }
-
- data[i] = reinterpret_cast<uint8_t*>(buffer->memory(0));
- strides[i] = buffer->stride(0);
- buffers.push_back(buffer.release());
- }
-
- if (!map_result) {
- DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode2(): "
- << "failed to map buffers";
- NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
- return;
- }
-
- if (!encoder_)
- return;
-
- if (params.frame_id < 0) {
- DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode2(): invalid frame_id="
- << params.frame_id;
- NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
- return;
- }
-
- scoped_refptr<media::VideoFrame> frame =
- media::VideoFrame::WrapExternalYuvData(
- input_format_,
- input_coded_size_,
- gfx::Rect(input_visible_size_),
- input_visible_size_,
- strides[media::VideoFrame::kYPlane],
- strides[media::VideoFrame::kUPlane],
- strides[media::VideoFrame::kVPlane],
- data[media::VideoFrame::kYPlane],
- data[media::VideoFrame::kUPlane],
- data[media::VideoFrame::kVPlane],
- params.timestamp);
- if (!frame.get()) {
- DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode2(): "
- << "could not create a frame";
- NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
- return;
- }
- frame->AddDestructionObserver(media::BindToCurrentLoop(
- base::Bind(&GpuVideoEncodeAccelerator::EncodeFrameFinished2,
- weak_this_factory_.GetWeakPtr(), params.frame_id,
- base::Passed(&buffers))));
- encoder_->Encode(frame, params.force_keyframe);
+ // Encoding GpuMemoryBuffer backed frames is not supported.
+ NOTREACHED();
}
void GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(
@@ -439,25 +381,8 @@ void GpuVideoEncodeAccelerator::EncodeFrameFinished(
// Just let |shm| fall out of scope.
}
-void GpuVideoEncodeAccelerator::EncodeFrameFinished2(
- int32_t frame_id,
- ScopedVector<gfx::GpuMemoryBuffer> buffers) {
- // TODO(emircan): Consider calling Unmap() in dtor.
- for (const auto& buffer : buffers)
- buffer->Unmap();
- Send(new AcceleratedVideoEncoderHostMsg_NotifyInputDone(host_route_id_,
- frame_id));
- // Just let |buffers| fall out of scope.
-}
-
void GpuVideoEncodeAccelerator::Send(IPC::Message* message) {
stub_->channel()->Send(message);
}
-void GpuVideoEncodeAccelerator::SendCreateEncoderReply(IPC::Message* message,
- bool succeeded) {
- GpuCommandBufferMsg_CreateVideoEncoder::WriteReplyParams(message, succeeded);
- Send(message);
-}
-
} // namespace content
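
On the encoder side the static query mirrors the decoder one and is now gated on gpu::GpuPreferences rather than command-line switches. A short usage sketch, with QueryEncodeProfiles as a hypothetical wrapper:

gpu::VideoEncodeAcceleratorSupportedProfiles QueryEncodeProfiles(
    const gpu::GpuPreferences& gpu_preferences) {
  // Internally this instantiates each candidate VEA (V4L2, VA-API, Android,
  // VideoToolbox) allowed by the build flags and preferences, and merges
  // their supported profile lists without duplicates.
  return content::GpuVideoEncodeAccelerator::GetSupportedProfiles(
      gpu_preferences);
}
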
diff --git a/chromium/content/common/gpu/media/gpu_video_encode_accelerator.h b/chromium/content/common/gpu/media/gpu_video_encode_accelerator.h
index ecc14f28e99..2c2db293db3 100644
--- a/chromium/content/common/gpu/media/gpu_video_encode_accelerator.h
+++ b/chromium/content/common/gpu/media/gpu_video_encode_accelerator.h
@@ -13,8 +13,8 @@
#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
-#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "gpu/config/gpu_info.h"
+#include "gpu/ipc/service/gpu_command_buffer_stub.h"
#include "ipc/ipc_listener.h"
#include "media/video/video_encode_accelerator.h"
#include "ui/gfx/geometry/size.h"
@@ -26,6 +26,10 @@ namespace base {
class SharedMemory;
} // namespace base
+namespace gpu {
+struct GpuPreferences;
+} // namespace gpu
+
namespace content {
// This class encapsulates the GPU process view of a VideoEncodeAccelerator,
@@ -34,18 +38,18 @@ namespace content {
class GpuVideoEncodeAccelerator
: public IPC::Listener,
public media::VideoEncodeAccelerator::Client,
- public GpuCommandBufferStub::DestructionObserver {
+ public gpu::GpuCommandBufferStub::DestructionObserver {
public:
- GpuVideoEncodeAccelerator(int32_t host_route_id, GpuCommandBufferStub* stub);
+ GpuVideoEncodeAccelerator(int32_t host_route_id,
+ gpu::GpuCommandBufferStub* stub);
~GpuVideoEncodeAccelerator() override;
// Initialize this accelerator with the given parameters and send
// |init_done_msg| when complete.
- void Initialize(media::VideoPixelFormat input_format,
+ bool Initialize(media::VideoPixelFormat input_format,
const gfx::Size& input_visible_size,
media::VideoCodecProfile output_profile,
- uint32_t initial_bitrate,
- IPC::Message* init_done_msg);
+ uint32_t initial_bitrate);
// IPC::Listener implementation
bool OnMessageReceived(const IPC::Message& message) override;
@@ -59,23 +63,34 @@ class GpuVideoEncodeAccelerator
bool key_frame) override;
void NotifyError(media::VideoEncodeAccelerator::Error error) override;
- // GpuCommandBufferStub::DestructionObserver implementation.
+ // gpu::GpuCommandBufferStub::DestructionObserver implementation.
void OnWillDestroyStub() override;
// Static query for supported profiles. This query calls the appropriate
// platform-specific version. The returned supported profiles vector will
// not contain duplicates.
- static gpu::VideoEncodeAcceleratorSupportedProfiles GetSupportedProfiles();
+ static gpu::VideoEncodeAcceleratorSupportedProfiles GetSupportedProfiles(
+ const gpu::GpuPreferences& gpu_preferences);
private:
typedef scoped_ptr<media::VideoEncodeAccelerator>(*CreateVEAFp)();
// Return a set of VEA Create function pointers applicable to the current
// platform.
- static std::vector<CreateVEAFp> CreateVEAFps();
+ static std::vector<CreateVEAFp> CreateVEAFps(
+ const gpu::GpuPreferences& gpu_preferences);
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
static scoped_ptr<media::VideoEncodeAccelerator> CreateV4L2VEA();
+#endif
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
static scoped_ptr<media::VideoEncodeAccelerator> CreateVaapiVEA();
+#endif
+#if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
static scoped_ptr<media::VideoEncodeAccelerator> CreateAndroidVEA();
+#endif
+#if defined(OS_MACOSX)
+ static scoped_ptr<media::VideoEncodeAccelerator> CreateVTVEA();
+#endif
// IPC handlers, proxying media::VideoEncodeAccelerator for the renderer
// process.
@@ -90,19 +105,15 @@ class GpuVideoEncodeAccelerator
void EncodeFrameFinished(int32_t frame_id,
scoped_ptr<base::SharedMemory> shm);
- void EncodeFrameFinished2(int32_t frame_id,
- ScopedVector<gfx::GpuMemoryBuffer> buffers);
void Send(IPC::Message* message);
- // Helper for replying to the creation request.
- void SendCreateEncoderReply(IPC::Message* message, bool succeeded);
// Route ID to communicate with the host.
const uint32_t host_route_id_;
- // Unowned pointer to the underlying GpuCommandBufferStub. |this| is
+ // Unowned pointer to the underlying gpu::GpuCommandBufferStub. |this| is
// registered as a DestructionObserver of |stub_| and will self-delete when
// |stub_| is destroyed.
- GpuCommandBufferStub* const stub_;
+ gpu::GpuCommandBufferStub* const stub_;
// Owned pointer to the underlying VideoEncodeAccelerator.
scoped_ptr<media::VideoEncodeAccelerator> encoder_;
diff --git a/chromium/content/common/gpu/media/media_channel.cc b/chromium/content/common/gpu/media/media_channel.cc
new file mode 100644
index 00000000000..7baeba075e4
--- /dev/null
+++ b/chromium/content/common/gpu/media/media_channel.cc
@@ -0,0 +1,145 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/media_channel.h"
+
+#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
+#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "media/gpu/ipc/common/media_messages.h"
+
+namespace content {
+
+namespace {
+
+void SendCreateJpegDecoderResult(
+ scoped_ptr<IPC::Message> reply_message,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
+ base::WeakPtr<gpu::GpuChannel> channel,
+ scoped_refptr<gpu::GpuChannelMessageFilter> filter,
+ bool result) {
+ GpuChannelMsg_CreateJpegDecoder::WriteReplyParams(reply_message.get(),
+ result);
+ if (io_task_runner->BelongsToCurrentThread()) {
+ filter->Send(reply_message.release());
+ } else if (channel) {
+ channel->Send(reply_message.release());
+ }
+}
+
+} // namespace
+
+class MediaChannelDispatchHelper {
+ public:
+ MediaChannelDispatchHelper(MediaChannel* channel, int32_t routing_id)
+ : channel_(channel), routing_id_(routing_id) {}
+
+ bool Send(IPC::Message* msg) { return channel_->Send(msg); }
+
+ void OnCreateVideoDecoder(const media::VideoDecodeAccelerator::Config& config,
+ int32_t decoder_route_id,
+ IPC::Message* reply_message) {
+ channel_->OnCreateVideoDecoder(routing_id_, config, decoder_route_id,
+ reply_message);
+ }
+
+ void OnCreateVideoEncoder(const media::CreateVideoEncoderParams& params,
+ IPC::Message* reply_message) {
+ channel_->OnCreateVideoEncoder(routing_id_, params, reply_message);
+ }
+
+ private:
+ MediaChannel* const channel_;
+ const int32_t routing_id_;
+ DISALLOW_COPY_AND_ASSIGN(MediaChannelDispatchHelper);
+};
+
+MediaChannel::MediaChannel(gpu::GpuChannel* channel) : channel_(channel) {}
+
+MediaChannel::~MediaChannel() {}
+
+bool MediaChannel::Send(IPC::Message* msg) {
+ return channel_->Send(msg);
+}
+
+bool MediaChannel::OnMessageReceived(const IPC::Message& message) {
+ MediaChannelDispatchHelper helper(this, message.routing_id());
+ bool handled = true;
+ IPC_BEGIN_MESSAGE_MAP(MediaChannel, message)
+ IPC_MESSAGE_FORWARD_DELAY_REPLY(
+ GpuCommandBufferMsg_CreateVideoDecoder, &helper,
+ MediaChannelDispatchHelper::OnCreateVideoDecoder)
+ IPC_MESSAGE_FORWARD_DELAY_REPLY(
+ GpuCommandBufferMsg_CreateVideoEncoder, &helper,
+ MediaChannelDispatchHelper::OnCreateVideoEncoder)
+ IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateJpegDecoder,
+ OnCreateJpegDecoder)
+ IPC_MESSAGE_UNHANDLED(handled = false)
+ IPC_END_MESSAGE_MAP()
+ return handled;
+}
+
+void MediaChannel::OnCreateJpegDecoder(int32_t route_id,
+ IPC::Message* reply_msg) {
+ scoped_ptr<IPC::Message> msg(reply_msg);
+ if (!jpeg_decoder_) {
+ jpeg_decoder_.reset(
+ new GpuJpegDecodeAccelerator(channel_, channel_->io_task_runner()));
+ }
+ jpeg_decoder_->AddClient(
+ route_id, base::Bind(&SendCreateJpegDecoderResult, base::Passed(&msg),
+ channel_->io_task_runner(), channel_->AsWeakPtr(),
+ make_scoped_refptr(channel_->filter())));
+}
+
+void MediaChannel::OnCreateVideoDecoder(
+ int32_t command_buffer_route_id,
+ const media::VideoDecodeAccelerator::Config& config,
+ int32_t decoder_route_id,
+ IPC::Message* reply_message) {
+ TRACE_EVENT0("gpu", "MediaChannel::OnCreateVideoDecoder");
+ gpu::GpuCommandBufferStub* stub =
+ channel_->LookupCommandBuffer(command_buffer_route_id);
+ if (!stub) {
+ reply_message->set_reply_error();
+ Send(reply_message);
+ return;
+ }
+ GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
+ decoder_route_id, stub, stub->channel()->io_task_runner());
+ bool succeeded = decoder->Initialize(config);
+ GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(reply_message,
+ succeeded);
+ Send(reply_message);
+
+ // decoder is registered as a DestructionObserver of this stub and will
+ // self-delete during destruction of this stub.
+}
+
+void MediaChannel::OnCreateVideoEncoder(
+ int32_t command_buffer_route_id,
+ const media::CreateVideoEncoderParams& params,
+ IPC::Message* reply_message) {
+ TRACE_EVENT0("gpu", "MediaChannel::OnCreateVideoEncoder");
+ gpu::GpuCommandBufferStub* stub =
+ channel_->LookupCommandBuffer(command_buffer_route_id);
+ if (!stub) {
+ reply_message->set_reply_error();
+ Send(reply_message);
+ return;
+ }
+ GpuVideoEncodeAccelerator* encoder =
+ new GpuVideoEncodeAccelerator(params.encoder_route_id, stub);
+ bool succeeded =
+ encoder->Initialize(params.input_format, params.input_visible_size,
+ params.output_profile, params.initial_bitrate);
+ GpuCommandBufferMsg_CreateVideoEncoder::WriteReplyParams(reply_message,
+ succeeded);
+ Send(reply_message);
+
+ // encoder is registered as a DestructionObserver of this stub and will
+ // self-delete during destruction of this stub.
+}
+
+} // namespace content
diff --git a/chromium/content/common/gpu/media/media_channel.h b/chromium/content/common/gpu/media/media_channel.h
new file mode 100644
index 00000000000..7cfe0378587
--- /dev/null
+++ b/chromium/content/common/gpu/media/media_channel.h
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_MEDIA_CHANNEL_H_
+#define CONTENT_COMMON_GPU_MEDIA_MEDIA_CHANNEL_H_
+
+#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
+#include "ipc/ipc_listener.h"
+#include "ipc/ipc_sender.h"
+#include "media/video/video_decode_accelerator.h"
+
+namespace media {
+struct CreateVideoEncoderParams;
+}
+
+namespace gpu {
+class GpuChannel;
+class GpuCommandBufferStub;
+}
+
+namespace content {
+
+class MediaChannelDispatchHelper;
+
+class MediaChannel : public IPC::Listener, public IPC::Sender {
+ public:
+ explicit MediaChannel(gpu::GpuChannel* channel);
+ ~MediaChannel() override;
+
+ // IPC::Sender implementation:
+ bool Send(IPC::Message* msg) override;
+
+ private:
+ friend class MediaChannelDispatchHelper;
+
+ // IPC::Listener implementation:
+ bool OnMessageReceived(const IPC::Message& message) override;
+
+ // Message handlers.
+ void OnCreateJpegDecoder(int32_t route_id, IPC::Message* reply_msg);
+ void OnCreateVideoDecoder(int32_t command_buffer_route_id,
+ const media::VideoDecodeAccelerator::Config& config,
+ int32_t route_id,
+ IPC::Message* reply_message);
+ void OnCreateVideoEncoder(int32_t command_buffer_route_id,
+ const media::CreateVideoEncoderParams& params,
+ IPC::Message* reply_message);
+
+ gpu::GpuChannel* const channel_;
+ scoped_ptr<GpuJpegDecodeAccelerator> jpeg_decoder_;
+ DISALLOW_COPY_AND_ASSIGN(MediaChannel);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_MEDIA_CHANNEL_H_
diff --git a/chromium/content/common/gpu/media/media_service.cc b/chromium/content/common/gpu/media/media_service.cc
new file mode 100644
index 00000000000..89ec8b1fe50
--- /dev/null
+++ b/chromium/content/common/gpu/media/media_service.cc
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/media_service.h"
+
+#include <utility>
+
+#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
+#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
+#include "content/common/gpu/media/media_channel.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
+#include "ipc/ipc_message_macros.h"
+#include "ipc/param_traits_macros.h"
+
+namespace content {
+
+MediaService::MediaService(gpu::GpuChannelManager* channel_manager)
+ : channel_manager_(channel_manager) {}
+
+MediaService::~MediaService() {}
+
+void MediaService::AddChannel(int32_t client_id) {
+ gpu::GpuChannel* gpu_channel = channel_manager_->LookupChannel(client_id);
+ DCHECK(gpu_channel);
+ scoped_ptr<MediaChannel> media_channel(new MediaChannel(gpu_channel));
+ gpu_channel->SetUnhandledMessageListener(media_channel.get());
+ media_channels_.set(client_id, std::move(media_channel));
+}
+
+void MediaService::RemoveChannel(int32_t client_id) {
+ media_channels_.erase(client_id);
+}
+
+void MediaService::DestroyAllChannels() {
+ media_channels_.clear();
+}
+
+} // namespace content
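
As a rough illustration of how this registry is intended to be driven, a hedged caller-side sketch follows; the owning object and its channel-lifetime hook names are assumptions, not part of this patch. The GPU-process code that owns the gpu::GpuChannelManager would mirror channel lifetime into the MediaService:

  // Sketch only; |media_service_| and these hook names are hypothetical.
  void OnChannelEstablished(int32_t client_id) {
    // Attaches a MediaChannel as the unhandled-message listener for the
    // corresponding gpu::GpuChannel (see MediaService::AddChannel above).
    media_service_->AddChannel(client_id);
  }
  void OnChannelRemoved(int32_t client_id) {
    media_service_->RemoveChannel(client_id);
  }
  void OnShutdown() {
    media_service_->DestroyAllChannels();
  }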
diff --git a/chromium/content/common/gpu/media/media_service.h b/chromium/content/common/gpu/media/media_service.h
new file mode 100644
index 00000000000..15dca82260a
--- /dev/null
+++ b/chromium/content/common/gpu/media/media_service.h
@@ -0,0 +1,42 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_MEDIA_SERVICE_H_
+#define CONTENT_COMMON_GPU_MEDIA_MEDIA_SERVICE_H_
+
+#include <stdint.h>
+
+#include "base/containers/scoped_ptr_hash_map.h"
+#include "base/macros.h"
+#include "ipc/ipc_listener.h"
+#include "ipc/ipc_sender.h"
+#include "media/video/video_decode_accelerator.h"
+
+namespace gpu {
+class GpuChannel;
+class GpuChannelManager;
+}
+
+namespace content {
+
+class MediaChannel;
+
+class MediaService {
+ public:
+ explicit MediaService(gpu::GpuChannelManager* channel_manager);
+ ~MediaService();
+
+ void AddChannel(int32_t client_id);
+ void RemoveChannel(int32_t client_id);
+ void DestroyAllChannels();
+
+ private:
+ gpu::GpuChannelManager* const channel_manager_;
+ base::ScopedPtrHashMap<int32_t, scoped_ptr<MediaChannel>> media_channels_;
+ DISALLOW_COPY_AND_ASSIGN(MediaService);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_MEDIA_SERVICE_H_
diff --git a/chromium/content/common/gpu/media/rendering_helper.cc b/chromium/content/common/gpu/media/rendering_helper.cc
index 85bfe0a840d..2a19428b2d0 100644
--- a/chromium/content/common/gpu/media/rendering_helper.cc
+++ b/chromium/content/common/gpu/media/rendering_helper.cc
@@ -160,6 +160,9 @@ RenderingHelperParams::RenderingHelperParams()
: rendering_fps(0), warm_up_iterations(0), render_as_thumbnails(false) {
}
+RenderingHelperParams::RenderingHelperParams(
+ const RenderingHelperParams& other) = default;
+
RenderingHelperParams::~RenderingHelperParams() {}
VideoFrameTexture::VideoFrameTexture(uint32_t texture_target,
@@ -179,6 +182,9 @@ RenderingHelper::RenderedVideo::RenderedVideo()
: is_flushing(false), frames_to_drop(0) {
}
+RenderingHelper::RenderedVideo::RenderedVideo(const RenderedVideo& other) =
+ default;
+
RenderingHelper::RenderedVideo::~RenderedVideo() {
}
@@ -665,12 +671,8 @@ void RenderingHelper::DeleteTexture(uint32_t texture_id) {
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
}
-scoped_refptr<gfx::GLContext> RenderingHelper::GetGLContext() {
- return gl_context_;
-}
-
-void* RenderingHelper::GetGLContextHandle() {
- return gl_context_->GetHandle();
+gfx::GLContext* RenderingHelper::GetGLContext() {
+ return gl_context_.get();
}
void* RenderingHelper::GetGLDisplay() {
diff --git a/chromium/content/common/gpu/media/rendering_helper.h b/chromium/content/common/gpu/media/rendering_helper.h
index 8a6c28bd3f7..250d382ac61 100644
--- a/chromium/content/common/gpu/media/rendering_helper.h
+++ b/chromium/content/common/gpu/media/rendering_helper.h
@@ -54,6 +54,7 @@ class VideoFrameTexture : public base::RefCounted<VideoFrameTexture> {
struct RenderingHelperParams {
RenderingHelperParams();
+ RenderingHelperParams(const RenderingHelperParams& other);
~RenderingHelperParams();
// The rendering FPS.
@@ -135,10 +136,7 @@ class RenderingHelper {
void* GetGLDisplay();
// Get the GL context.
- scoped_refptr<gfx::GLContext> GetGLContext();
-
- // Get the platform specific handle to the OpenGL context.
- void* GetGLContextHandle();
+ gfx::GLContext* GetGLContext();
// Get rendered thumbnails as RGB.
// Sets alpha_solid to true if the alpha channel is entirely 0xff.
@@ -165,6 +163,7 @@ class RenderingHelper {
std::queue<scoped_refptr<VideoFrameTexture> > pending_frames;
RenderedVideo();
+ RenderedVideo(const RenderedVideo& other);
~RenderedVideo();
};
diff --git a/chromium/content/common/gpu/media/shared_memory_region.cc b/chromium/content/common/gpu/media/shared_memory_region.cc
new file mode 100644
index 00000000000..4ee6a242578
--- /dev/null
+++ b/chromium/content/common/gpu/media/shared_memory_region.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+#include "content/common/gpu/media/shared_memory_region.h"
+
+namespace content {
+
+SharedMemoryRegion::SharedMemoryRegion(const base::SharedMemoryHandle& handle,
+ off_t offset,
+ size_t size,
+ bool read_only)
+ : shm_(handle, read_only),
+ offset_(offset),
+ size_(size),
+ alignment_size_(offset % base::SysInfo::VMAllocationGranularity()) {
+ DCHECK_GE(offset_, 0) << "Invalid offset: " << offset_;
+}
+
+SharedMemoryRegion::SharedMemoryRegion(
+ const media::BitstreamBuffer& bitstream_buffer,
+ bool read_only)
+ : SharedMemoryRegion(bitstream_buffer.handle(),
+ bitstream_buffer.offset(),
+ bitstream_buffer.size(),
+ read_only) {}
+
+bool SharedMemoryRegion::Map() {
+ if (offset_ < 0) {
+ DVLOG(1) << "Invalid offset: " << offset_;
+ return false;
+ }
+ return shm_.MapAt(offset_ - alignment_size_, size_ + alignment_size_);
+}
+
+void* SharedMemoryRegion::memory() {
+ int8_t* addr = reinterpret_cast<int8_t*>(shm_.memory());
+ return addr ? addr + alignment_size_ : nullptr;
+}
+
+} // namespace content
diff --git a/chromium/content/common/gpu/media/shared_memory_region.h b/chromium/content/common/gpu/media/shared_memory_region.h
new file mode 100644
index 00000000000..f7c5db29982
--- /dev/null
+++ b/chromium/content/common/gpu/media/shared_memory_region.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_SHARED_MEMORY_REGION_H_
+#define CONTENT_COMMON_GPU_MEDIA_SHARED_MEMORY_REGION_H_
+
+#include "base/memory/shared_memory.h"
+#include "media/base/bitstream_buffer.h"
+
+namespace content {
+
+// Helper class to access a region of a SharedMemory. Unlike SharedMemory,
+// where the |offset| passed to MapAt() must be aligned to the value of
+// |SysInfo::VMAllocationGranularity()|, the |offset| of a SharedMemoryRegion
+// need not be aligned; this class hides the details and returns the mapped
+// address of the given offset.
+class SharedMemoryRegion {
+ public:
+ // Creates a SharedMemoryRegion.
+ // The mapped memory region begins at |offset| bytes from the start of the
+ // shared memory and the length is |size|. It takes ownership of |handle|
+ // and releases the resource when destroyed. Unlike SharedMemory, the
+ // |offset| need not be aligned to the value of
+ // |SysInfo::VMAllocationGranularity()|.
+ SharedMemoryRegion(const base::SharedMemoryHandle& handle,
+ off_t offset,
+ size_t size,
+ bool read_only);
+
+ // Creates a SharedMemoryRegion from the given |bitstream_buffer|.
+ SharedMemoryRegion(const media::BitstreamBuffer& bitstream_buffer,
+ bool read_only);
+
+ // Maps the shared memory into the caller's address space.
+ // Returns true on success, false otherwise.
+ bool Map();
+
+ // Gets a pointer to the mapped region if it has been mapped via Map().
+ // Returns |nullptr| if it is not mapped. The returned pointer points
+ // to the memory at the offset previously passed to the constructor.
+ void* memory();
+
+ size_t size() const { return size_; }
+
+ private:
+ base::SharedMemory shm_;
+ off_t offset_;
+ size_t size_;
+ size_t alignment_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedMemoryRegion);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_SHARED_MEMORY_REGION_H_
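
A minimal usage sketch, assuming a media::BitstreamBuffer |bitstream_buffer| as in the V4L2 decoder changes below. Map() maps the shared memory starting at the page-aligned address |offset - offset % VMAllocationGranularity()| and memory() returns that mapping advanced by the remainder, so callers never deal with the alignment fix-up themselves:

  scoped_ptr<SharedMemoryRegion> shm(
      new SharedMemoryRegion(bitstream_buffer, true /* read_only */));
  if (!shm->Map()) {
    // e.g. report UNREADABLE_INPUT, as the decoders below do.
    return;
  }
  const uint8_t* data = reinterpret_cast<const uint8_t*>(shm->memory());
  const size_t data_size = shm->size();  // size of the region, not the mapping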
diff --git a/chromium/content/common/gpu/media/v4l2_image_processor.cc b/chromium/content/common/gpu/media/v4l2_image_processor.cc
index f0cf3977774..340a1484335 100644
--- a/chromium/content/common/gpu/media/v4l2_image_processor.cc
+++ b/chromium/content/common/gpu/media/v4l2_image_processor.cc
@@ -468,24 +468,22 @@ void V4L2ImageProcessor::Enqueue() {
}
}
- // TODO(posciak): Fix this to be non-Exynos specific.
- // Exynos GSC is liable to race conditions if more than one output buffer is
- // simultaneously enqueued, so enqueue just one.
- if (output_buffer_queued_count_ == 0 && !free_output_buffers_.empty()) {
- const int old_outputs_queued = output_buffer_queued_count_;
+ const int old_outputs_queued = output_buffer_queued_count_;
+ while (!free_output_buffers_.empty()) {
if (!EnqueueOutputRecord())
return;
- if (old_outputs_queued == 0 && output_buffer_queued_count_ != 0) {
- // We just started up a previously empty queue.
- // Queue state changed; signal interrupt.
- if (!device_->SetDevicePollInterrupt())
- return;
- // Start VIDIOC_STREAMON if we haven't yet.
- if (!output_streamon_) {
- __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
- output_streamon_ = true;
- }
+ }
+
+ if (old_outputs_queued == 0 && output_buffer_queued_count_ != 0) {
+ // We just started up a previously empty queue.
+ // Queue state changed; signal interrupt.
+ if (!device_->SetDevicePollInterrupt())
+ return;
+ // Start VIDIOC_STREAMON if we haven't yet.
+ if (!output_streamon_) {
+ __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
+ output_streamon_ = true;
}
}
DCHECK_LE(output_buffer_queued_count_, 1);
diff --git a/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc b/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc
index 06091a36b4d..0121eadbc09 100644
--- a/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc
@@ -112,10 +112,11 @@ V4L2JpegDecodeAccelerator::BufferRecord::~BufferRecord() {
}
V4L2JpegDecodeAccelerator::JobRecord::JobRecord(
- media::BitstreamBuffer bitstream_buffer,
+ const media::BitstreamBuffer& bitstream_buffer,
scoped_refptr<media::VideoFrame> video_frame)
- : bitstream_buffer(bitstream_buffer), out_frame(video_frame) {
-}
+ : bitstream_buffer_id(bitstream_buffer.id()),
+ shm(bitstream_buffer, true),
+ out_frame(video_frame) {}
V4L2JpegDecodeAccelerator::JobRecord::~JobRecord() {
}
@@ -233,6 +234,14 @@ void V4L2JpegDecodeAccelerator::Decode(
<< ", size=" << bitstream_buffer.size();
DCHECK(io_task_runner_->BelongsToCurrentThread());
+ if (bitstream_buffer.id() < 0) {
+ LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+ PostNotifyError(bitstream_buffer.id(), INVALID_ARGUMENT);
+ return;
+ }
+
if (video_frame->format() != media::PIXEL_FORMAT_I420) {
PostNotifyError(bitstream_buffer.id(), UNSUPPORTED_JPEG);
return;
@@ -260,11 +269,9 @@ bool V4L2JpegDecodeAccelerator::IsSupported() {
void V4L2JpegDecodeAccelerator::DecodeTask(scoped_ptr<JobRecord> job_record) {
DCHECK(decoder_task_runner_->BelongsToCurrentThread());
- job_record->shm.reset(
- new base::SharedMemory(job_record->bitstream_buffer.handle(), true));
- if (!job_record->shm->Map(job_record->bitstream_buffer.size())) {
+ if (!job_record->shm.Map()) {
PLOG(ERROR) << __func__ << ": could not map bitstream_buffer";
- PostNotifyError(job_record->bitstream_buffer.id(), UNREADABLE_INPUT);
+ PostNotifyError(job_record->bitstream_buffer_id, UNREADABLE_INPUT);
return;
}
input_jobs_.push(make_linked_ptr(job_record.release()));
@@ -288,7 +295,7 @@ bool V4L2JpegDecodeAccelerator::ShouldRecreateInputBuffers() {
linked_ptr<JobRecord> job_record = input_jobs_.front();
// Check input buffer size is enough
return (input_buffer_map_.empty() ||
- (job_record->bitstream_buffer.size() + sizeof(kDefaultDhtSeg)) >
+ (job_record->shm.size() + sizeof(kDefaultDhtSeg)) >
input_buffer_map_.front().length);
}
@@ -333,8 +340,7 @@ bool V4L2JpegDecodeAccelerator::CreateInputBuffers() {
// The input image may miss huffman table. We didn't parse the image before,
// so we create more to avoid the situation of not enough memory.
// Reserve twice size to avoid recreating input buffer frequently.
- size_t reserve_size =
- (job_record->bitstream_buffer.size() + sizeof(kDefaultDhtSeg)) * 2;
+ size_t reserve_size = (job_record->shm.size() + sizeof(kDefaultDhtSeg)) * 2;
struct v4l2_format format;
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
@@ -711,17 +717,16 @@ void V4L2JpegDecodeAccelerator::Dequeue() {
// V4L2_PIX_FMT_YUV420.
if (!CopyOutputImage(output_buffer_pixelformat_, output_record.address,
output_buffer_coded_size_, job_record->out_frame)) {
- PostNotifyError(job_record->bitstream_buffer.id(), PLATFORM_FAILURE);
+ PostNotifyError(job_record->bitstream_buffer_id, PLATFORM_FAILURE);
return;
}
DVLOG(3) << "Decoding finished, returning bitstream buffer, id="
- << job_record->bitstream_buffer.id();
+ << job_record->bitstream_buffer_id;
child_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&V4L2JpegDecodeAccelerator::VideoFrameReady, weak_ptr_,
- job_record->bitstream_buffer.id()));
+ FROM_HERE, base::Bind(&V4L2JpegDecodeAccelerator::VideoFrameReady,
+ weak_ptr_, job_record->bitstream_buffer_id));
}
}
}
@@ -819,10 +824,9 @@ bool V4L2JpegDecodeAccelerator::EnqueueInputRecord() {
DCHECK(!input_record.at_device);
// It will add default huffman segment if it's missing.
- if (!AddHuffmanTable(job_record->shm->memory(),
- job_record->bitstream_buffer.size(),
+ if (!AddHuffmanTable(job_record->shm.memory(), job_record->shm.size(),
input_record.address, input_record.length)) {
- PostNotifyError(job_record->bitstream_buffer.id(), PARSE_JPEG_FAILED);
+ PostNotifyError(job_record->bitstream_buffer_id, PARSE_JPEG_FAILED);
return false;
}
@@ -836,8 +840,9 @@ bool V4L2JpegDecodeAccelerator::EnqueueInputRecord() {
running_jobs_.push(job_record);
free_input_buffers_.pop_back();
- DVLOG(3) << __func__ << ": enqueued frame id="
- << job_record->bitstream_buffer.id() << " to device.";
+ DVLOG(3) << __func__
+ << ": enqueued frame id=" << job_record->bitstream_buffer_id
+ << " to device.";
return true;
}
diff --git a/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h b/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h
index 435808012ec..bef33b22c10 100644
--- a/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h
@@ -18,6 +18,7 @@
#include "base/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/v4l2_device.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/video_frame.h"
@@ -58,16 +59,16 @@ class CONTENT_EXPORT V4L2JpegDecodeAccelerator
// the time of submission we may not have one available (and don't need one
// to submit input to the device).
struct JobRecord {
- JobRecord(media::BitstreamBuffer bitstream_buffer,
+ JobRecord(const media::BitstreamBuffer& bitstream_buffer,
scoped_refptr<media::VideoFrame> video_frame);
~JobRecord();
- // Input image buffer.
- media::BitstreamBuffer bitstream_buffer;
+ // Input image buffer ID.
+ int32_t bitstream_buffer_id;
+ // Memory mapped from |bitstream_buffer|.
+ SharedMemoryRegion shm;
// Output frame buffer.
scoped_refptr<media::VideoFrame> out_frame;
- // Memory mapped from |bitstream_buffer|.
- scoped_ptr<base::SharedMemory> shm;
};
void EnqueueInput();
diff --git a/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc b/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
index 4c3b724daa5..80087232b65 100644
--- a/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
@@ -19,9 +19,11 @@
#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/stringprintf.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/media_switches.h"
+#include "ui/gl/gl_context.h"
#include "ui/gl/scoped_binders.h"
#define LOGF(level) LOG(level) << __FUNCTION__ << "(): "
@@ -169,14 +171,12 @@ struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
BitstreamBufferRef(
base::WeakPtr<VideoDecodeAccelerator::Client>& client,
const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
- base::SharedMemory* shm,
- size_t size,
+ SharedMemoryRegion* shm,
int32_t input_id);
~BitstreamBufferRef();
const base::WeakPtr<VideoDecodeAccelerator::Client> client;
const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
- const scoped_ptr<base::SharedMemory> shm;
- const size_t size;
+ const scoped_ptr<SharedMemoryRegion> shm;
off_t bytes_used;
const int32_t input_id;
};
@@ -184,13 +184,11 @@ struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
base::WeakPtr<VideoDecodeAccelerator::Client>& client,
const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
- base::SharedMemory* shm,
- size_t size,
+ SharedMemoryRegion* shm,
int32_t input_id)
: client(client),
client_task_runner(client_task_runner),
shm(shm),
- size(size),
bytes_used(0),
input_id(input_id) {}
@@ -382,15 +380,11 @@ V4L2VP8Picture::~V4L2VP8Picture() {
V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
const scoped_refptr<V4L2Device>& device,
EGLDisplay egl_display,
- EGLContext egl_context,
- const base::WeakPtr<Client>& io_client,
- const base::Callback<bool(void)>& make_context_current,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb)
: input_planes_count_(0),
output_planes_count_(0),
child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
- io_task_runner_(io_task_runner),
- io_client_(io_client),
device_(device),
decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"),
device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"),
@@ -406,9 +400,9 @@ V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
surface_set_change_pending_(false),
picture_clearing_count_(0),
pictures_assigned_(false, false),
- make_context_current_(make_context_current),
egl_display_(egl_display),
- egl_context_(egl_context),
+ get_gl_context_cb_(get_gl_context_cb),
+ make_context_current_cb_(make_context_current_cb),
weak_this_factory_(this) {
weak_this_ = weak_this_factory_.GetWeakPtr();
}
@@ -444,6 +438,11 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
DCHECK(child_task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kUninitialized);
+ if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
if (config.is_encrypted) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
return false;
@@ -459,6 +458,14 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
client_ptr_factory_.reset(
new base::WeakPtrFactory<VideoDecodeAccelerator::Client>(client));
client_ = client_ptr_factory_->GetWeakPtr();
+ // If we haven't been set up to decode on a separate thread via
+ // TryToSetupDecodeOnSeparateThread(), use the main thread/client for
+ // decode tasks.
+ if (!decode_task_runner_) {
+ decode_task_runner_ = child_task_runner_;
+ DCHECK(!decode_client_);
+ decode_client_ = client_;
+ }
video_profile_ = config.profile;
@@ -485,7 +492,7 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
}
// We need the context to be initialized to query extensions.
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "Initialize(): could not make context current";
return false;
}
@@ -750,7 +757,7 @@ bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() {
child_task_runner_->PostTask(
FROM_HERE,
base::Bind(&VideoDecodeAccelerator::Client::ProvidePictureBuffers,
- client_, num_pictures, coded_size_,
+ client_, num_pictures, 1, coded_size_,
device_->GetTextureTarget()));
// Wait for the client to call AssignPictureBuffers() on the Child thread.
@@ -1182,7 +1189,15 @@ void V4L2SliceVideoDecodeAccelerator::Decode(
const media::BitstreamBuffer& bitstream_buffer) {
DVLOGF(3) << "input_id=" << bitstream_buffer.id()
<< ", size=" << bitstream_buffer.size();
- DCHECK(io_task_runner_->BelongsToCurrentThread());
+ DCHECK(decode_task_runner_->BelongsToCurrentThread());
+
+ if (bitstream_buffer.id() < 0) {
+ LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
decoder_thread_task_runner_->PostTask(
FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeTask,
@@ -1196,10 +1211,9 @@ void V4L2SliceVideoDecodeAccelerator::DecodeTask(
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
scoped_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
- io_client_, io_task_runner_,
- new base::SharedMemory(bitstream_buffer.handle(), true),
- bitstream_buffer.size(), bitstream_buffer.id()));
- if (!bitstream_record->shm->Map(bitstream_buffer.size())) {
+ decode_client_, decode_task_runner_,
+ new SharedMemoryRegion(bitstream_buffer, true), bitstream_buffer.id()));
+ if (!bitstream_record->shm->Map()) {
LOGF(ERROR) << "Could not map bitstream_buffer";
NOTIFY_ERROR(UNREADABLE_INPUT);
return;
@@ -1231,7 +1245,7 @@ bool V4L2SliceVideoDecodeAccelerator::TrySetNewBistreamBuffer() {
const uint8_t* const data = reinterpret_cast<const uint8_t*>(
decoder_current_bitstream_buffer_->shm->memory());
- const size_t data_size = decoder_current_bitstream_buffer_->size;
+ const size_t data_size = decoder_current_bitstream_buffer_->shm->size();
decoder_->SetStream(data, data_size);
return true;
@@ -1442,8 +1456,9 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
return;
}
- if (!make_context_current_.Run()) {
- DLOG(ERROR) << "could not make context current";
+ gfx::GLContext* gl_context = get_gl_context_cb_.Run();
+ if (!gl_context || !make_context_current_cb_.Run()) {
+ DLOG(ERROR) << "No GL context";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
}
@@ -1481,13 +1496,10 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
DCHECK_EQ(output_record.picture_id, -1);
DCHECK_EQ(output_record.cleared, false);
- EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
- egl_context_,
- buffers[i].texture_id(),
- coded_size_,
- i,
- output_format_fourcc_,
- output_planes_count_);
+ DCHECK_LE(1u, buffers[i].texture_ids().size());
+ EGLImageKHR egl_image = device_->CreateEGLImage(
+ egl_display_, gl_context->GetHandle(), buffers[i].texture_ids()[0],
+ buffers[i].size(), i, output_format_fourcc_, output_planes_count_);
if (egl_image == EGL_NO_IMAGE_KHR) {
LOGF(ERROR) << "Could not create EGLImageKHR";
// Ownership of EGLImages allocated in previous iterations of this loop
@@ -1511,7 +1523,7 @@ void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
DCHECK(child_task_runner_->BelongsToCurrentThread());
DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
LOGF(ERROR) << "could not make context current";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
@@ -1587,7 +1599,7 @@ void V4L2SliceVideoDecodeAccelerator::FlushTask() {
// which - when reached - will trigger flush sequence.
decoder_input_queue_.push(
linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
- io_client_, io_task_runner_, nullptr, 0, kFlushBufferId)));
+ decode_client_, decode_task_runner_, nullptr, kFlushBufferId)));
return;
}
@@ -2501,12 +2513,14 @@ void V4L2SliceVideoDecodeAccelerator::SendPictureReady() {
bool cleared = pending_picture_ready_.front().cleared;
const media::Picture& picture = pending_picture_ready_.front().picture;
if (cleared && picture_clearing_count_ == 0) {
- DVLOGF(4) << "Posting picture ready to IO for: "
+ DVLOGF(4) << "Posting picture ready to decode task runner for: "
<< picture.picture_buffer_id();
- // This picture is cleared. Post it to IO thread to reduce latency. This
- // should be the case after all pictures are cleared at the beginning.
- io_task_runner_->PostTask(
- FROM_HERE, base::Bind(&Client::PictureReady, io_client_, picture));
+ // This picture is cleared. It can be posted to a thread different than
+ // the main GPU thread to reduce latency. This should be the case after
+ // all pictures are cleared at the beginning.
+ decode_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&Client::PictureReady, decode_client_, picture));
pending_picture_ready_.pop();
} else if (!cleared || resetting_or_flushing) {
DVLOGF(3) << "cleared=" << pending_picture_ready_.front().cleared
@@ -2544,7 +2558,11 @@ void V4L2SliceVideoDecodeAccelerator::PictureCleared() {
SendPictureReady();
}
-bool V4L2SliceVideoDecodeAccelerator::CanDecodeOnIOThread() {
+bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+ decode_client_ = decode_client;
+ decode_task_runner_ = decode_task_runner;
return true;
}
diff --git a/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h b/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h
index dd72eb7a6dd..cc11da302a1 100644
--- a/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h
@@ -19,6 +19,7 @@
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "content/common/gpu/media/h264_decoder.h"
#include "content/common/gpu/media/v4l2_device.h"
#include "content/common/gpu/media/vp8_decoder.h"
@@ -38,10 +39,8 @@ class CONTENT_EXPORT V4L2SliceVideoDecodeAccelerator
V4L2SliceVideoDecodeAccelerator(
const scoped_refptr<V4L2Device>& device,
EGLDisplay egl_display,
- EGLContext egl_context,
- const base::WeakPtr<Client>& io_client_,
- const base::Callback<bool(void)>& make_context_current,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb);
~V4L2SliceVideoDecodeAccelerator() override;
// media::VideoDecodeAccelerator implementation.
@@ -53,7 +52,10 @@ class CONTENT_EXPORT V4L2SliceVideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
static media::VideoDecodeAccelerator::SupportedProfiles
GetSupportedProfiles();
@@ -282,8 +284,8 @@ class CONTENT_EXPORT V4L2SliceVideoDecodeAccelerator
// GPU Child thread task runner.
const scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
- // IO thread task runner.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+ // Task runner Decode() and PictureReady() run on.
+ scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner_;
// WeakPtr<> pointing to |this| for use in posting tasks from the decoder or
// device worker threads back to the child thread.
@@ -295,8 +297,8 @@ class CONTENT_EXPORT V4L2SliceVideoDecodeAccelerator
scoped_ptr<base::WeakPtrFactory<VideoDecodeAccelerator::Client>>
client_ptr_factory_;
base::WeakPtr<VideoDecodeAccelerator::Client> client_;
- // Callbacks to |io_client_| must be executed on |io_task_runner_|.
- base::WeakPtr<Client> io_client_;
+ // Callbacks to |decode_client_| must be executed on |decode_task_runner_|.
+ base::WeakPtr<Client> decode_client_;
// V4L2 device in use.
scoped_refptr<V4L2Device> device_;
@@ -381,12 +383,13 @@ class CONTENT_EXPORT V4L2SliceVideoDecodeAccelerator
// to avoid races with potential Reset requests.
base::WaitableEvent pictures_assigned_;
- // Make the GL context current callback.
- base::Callback<bool(void)> make_context_current_;
-
// EGL state
EGLDisplay egl_display_;
- EGLContext egl_context_;
+
+ // Callback to get the current GLContext.
+ GetGLContextCallback get_gl_context_cb_;
+ // Callback to make the GL context current.
+ MakeGLContextCurrentCallback make_context_current_cb_;
// The WeakPtrFactory for |weak_this_|.
base::WeakPtrFactory<V4L2SliceVideoDecodeAccelerator> weak_this_factory_;
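
A hedged sketch of the caller side of the new TryToSetupDecodeOnSeparateThread() hook; the caller and the |io_client|/|io_task_runner| names are assumptions, not shown in this patch. If the hook is never invoked, Initialize() above falls back to running decode callbacks on the child (main GPU) thread:

  if (vda->TryToSetupDecodeOnSeparateThread(io_client, io_task_runner)) {
    // Decode() and PictureReady() may now be driven from |io_task_runner|.
  } else {
    // Keep calling into the VDA from the child thread only.
  }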
diff --git a/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.cc b/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.cc
index f9311257ed7..719dbf7a80f 100644
--- a/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.cc
@@ -15,16 +15,17 @@
#include "base/bind.h"
#include "base/command_line.h"
#include "base/macros.h"
-#include "base/memory/shared_memory.h"
#include "base/message_loop/message_loop.h"
#include "base/numerics/safe_conversions.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
#include "media/base/media_switches.h"
#include "media/filters/h264_parser.h"
#include "ui/gfx/geometry/rect.h"
+#include "ui/gl/gl_context.h"
#include "ui/gl/scoped_binders.h"
#define NOTIFY_ERROR(x) \
@@ -65,14 +66,12 @@ struct V4L2VideoDecodeAccelerator::BitstreamBufferRef {
BitstreamBufferRef(
base::WeakPtr<Client>& client,
scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
- base::SharedMemory* shm,
- size_t size,
+ scoped_ptr<SharedMemoryRegion> shm,
int32_t input_id);
~BitstreamBufferRef();
const base::WeakPtr<Client> client;
const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
- const scoped_ptr<base::SharedMemory> shm;
- const size_t size;
+ const scoped_ptr<SharedMemoryRegion> shm;
size_t bytes_used;
const int32_t input_id;
};
@@ -94,13 +93,11 @@ struct V4L2VideoDecodeAccelerator::PictureRecord {
V4L2VideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
base::WeakPtr<Client>& client,
scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
- base::SharedMemory* shm,
- size_t size,
+ scoped_ptr<SharedMemoryRegion> shm,
int32_t input_id)
: client(client),
client_task_runner(client_task_runner),
- shm(shm),
- size(size),
+ shm(std::move(shm)),
bytes_used(0),
input_id(input_id) {}
@@ -157,14 +154,10 @@ V4L2VideoDecodeAccelerator::PictureRecord::~PictureRecord() {}
V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
EGLDisplay egl_display,
- EGLContext egl_context,
- const base::WeakPtr<Client>& io_client,
- const base::Callback<bool(void)>& make_context_current,
- const scoped_refptr<V4L2Device>& device,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const scoped_refptr<V4L2Device>& device)
: child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
- io_task_runner_(io_task_runner),
- io_client_(io_client),
decoder_thread_("V4L2DecoderThread"),
decoder_state_(kUninitialized),
device_(device),
@@ -184,9 +177,9 @@ V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
picture_clearing_count_(0),
pictures_assigned_(false, false),
device_poll_thread_("V4L2DevicePollThread"),
- make_context_current_(make_context_current),
egl_display_(egl_display),
- egl_context_(egl_context),
+ get_gl_context_cb_(get_gl_context_cb),
+ make_context_current_cb_(make_context_current_cb),
video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
output_format_fourcc_(0),
weak_this_factory_(this) {
@@ -212,6 +205,11 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
DCHECK(child_task_runner_->BelongsToCurrentThread());
DCHECK_EQ(decoder_state_, kUninitialized);
+ if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
if (config.is_encrypted) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
return false;
@@ -226,6 +224,14 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
client_ = client_ptr_factory_->GetWeakPtr();
+ // If we haven't been set up to decode on a separate thread via
+ // TryToSetupDecodeOnSeparateThread(), use the main thread/client for
+ // decode tasks.
+ if (!decode_task_runner_) {
+ decode_task_runner_ = child_task_runner_;
+ DCHECK(!decode_client_);
+ decode_client_ = client_;
+ }
video_profile_ = config.profile;
@@ -235,7 +241,7 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
}
// We need the context to be initialized to query extensions.
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "Initialize(): could not make context current";
return false;
}
@@ -253,16 +259,9 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
- // This cap combination is deprecated, but some older drivers may still be
- // returning it.
- const __u32 kCapsRequiredCompat = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE |
- V4L2_CAP_STREAMING;
- if ((caps.capabilities & kCapsRequiredCompat) != kCapsRequiredCompat) {
- LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
- ", caps check failed: 0x" << std::hex << caps.capabilities;
- return false;
- }
+ LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
+ ", caps check failed: 0x" << std::hex << caps.capabilities;
+ return false;
}
if (!SetupFormats())
@@ -303,7 +302,15 @@ void V4L2VideoDecodeAccelerator::Decode(
const media::BitstreamBuffer& bitstream_buffer) {
DVLOG(1) << "Decode(): input_id=" << bitstream_buffer.id()
<< ", size=" << bitstream_buffer.size();
- DCHECK(io_task_runner_->BelongsToCurrentThread());
+ DCHECK(decode_task_runner_->BelongsToCurrentThread());
+
+ if (bitstream_buffer.id() < 0) {
+ LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
// DecodeTask() will take care of running a DecodeBufferTask().
decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
@@ -327,7 +334,8 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffers(
return;
}
- if (!make_context_current_.Run()) {
+ gfx::GLContext* gl_context = get_gl_context_cb_.Run();
+ if (!gl_context || !make_context_current_cb_.Run()) {
LOG(ERROR) << "AssignPictureBuffers(): could not make context current";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
@@ -365,14 +373,11 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffers(
DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
DCHECK_EQ(output_record.picture_id, -1);
DCHECK_EQ(output_record.cleared, false);
+ DCHECK_LE(1u, buffers[i].texture_ids().size());
- EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
- egl_context_,
- buffers[i].texture_id(),
- coded_size_,
- i,
- output_format_fourcc_,
- output_planes_count_);
+ EGLImageKHR egl_image = device_->CreateEGLImage(
+ egl_display_, gl_context->GetHandle(), buffers[i].texture_ids()[0],
+ coded_size_, i, output_format_fourcc_, output_planes_count_);
if (egl_image == EGL_NO_IMAGE_KHR) {
LOG(ERROR) << "AssignPictureBuffers(): could not create EGLImageKHR";
// Ownership of EGLImages allocated in previous iterations of this loop
@@ -397,7 +402,7 @@ void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
// Must be run on child thread, as we'll insert a sync in the EGL context.
DCHECK(child_task_runner_->BelongsToCurrentThread());
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "ReusePictureBuffer(): could not make context current";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
@@ -458,7 +463,13 @@ void V4L2VideoDecodeAccelerator::Destroy() {
delete this;
}
-bool V4L2VideoDecodeAccelerator::CanDecodeOnIOThread() { return true; }
+bool V4L2VideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+ decode_client_ = decode_client;
+ decode_task_runner_ = decode_task_runner;
+ return true;
+}
// static
media::VideoDecodeAccelerator::SupportedProfiles
@@ -480,10 +491,11 @@ void V4L2VideoDecodeAccelerator::DecodeTask(
bitstream_buffer.id());
scoped_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
- io_client_, io_task_runner_,
- new base::SharedMemory(bitstream_buffer.handle(), true),
- bitstream_buffer.size(), bitstream_buffer.id()));
- if (!bitstream_record->shm->Map(bitstream_buffer.size())) {
+ decode_client_, decode_task_runner_,
+ scoped_ptr<SharedMemoryRegion>(
+ new SharedMemoryRegion(bitstream_buffer, true)),
+ bitstream_buffer.id()));
+ if (!bitstream_record->shm->Map()) {
LOG(ERROR) << "Decode(): could not map bitstream_buffer";
NOTIFY_ERROR(UNREADABLE_INPUT);
return;
@@ -542,54 +554,51 @@ void V4L2VideoDecodeAccelerator::DecodeBufferTask() {
// Setup to use the next buffer.
decoder_current_bitstream_buffer_.reset(buffer_ref.release());
decoder_input_queue_.pop();
- DVLOG(3) << "DecodeBufferTask(): reading input_id="
- << decoder_current_bitstream_buffer_->input_id
- << ", addr=" << (decoder_current_bitstream_buffer_->shm ?
- decoder_current_bitstream_buffer_->shm->memory() :
- NULL)
- << ", size=" << decoder_current_bitstream_buffer_->size;
+ const auto& shm = decoder_current_bitstream_buffer_->shm;
+ if (shm) {
+ DVLOG(3) << "DecodeBufferTask(): reading input_id="
+ << decoder_current_bitstream_buffer_->input_id
+ << ", addr=" << shm->memory() << ", size=" << shm->size();
+ } else {
+ DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
+ DVLOG(3) << "DecodeBufferTask(): reading input_id=kFlushBufferId";
+ }
}
bool schedule_task = false;
- const size_t size = decoder_current_bitstream_buffer_->size;
size_t decoded_size = 0;
- if (size == 0) {
- const int32_t input_id = decoder_current_bitstream_buffer_->input_id;
- if (input_id >= 0) {
- // This is a buffer queued from the client that has zero size. Skip.
+ const auto& shm = decoder_current_bitstream_buffer_->shm;
+ if (!shm) {
+ // This is a dummy buffer, queued to flush the pipe. Flush.
+ DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
+ // Enqueue a buffer guaranteed to be empty. To do that, we flush the
+ // current input, enqueue no data to the next frame, then flush that down.
+ schedule_task = true;
+ if (decoder_current_input_buffer_ != -1 &&
+ input_buffer_map_[decoder_current_input_buffer_].input_id !=
+ kFlushBufferId)
+ schedule_task = FlushInputFrame();
+
+ if (schedule_task && AppendToInputFrame(NULL, 0) && FlushInputFrame()) {
+ DVLOG(2) << "DecodeBufferTask(): enqueued flush buffer";
+ decoder_partial_frame_pending_ = false;
schedule_task = true;
} else {
- // This is a buffer of zero size, queued to flush the pipe. Flush.
- DCHECK_EQ(decoder_current_bitstream_buffer_->shm.get(),
- static_cast<base::SharedMemory*>(NULL));
- // Enqueue a buffer guaranteed to be empty. To do that, we flush the
- // current input, enqueue no data to the next frame, then flush that down.
- schedule_task = true;
- if (decoder_current_input_buffer_ != -1 &&
- input_buffer_map_[decoder_current_input_buffer_].input_id !=
- kFlushBufferId)
- schedule_task = FlushInputFrame();
-
- if (schedule_task && AppendToInputFrame(NULL, 0) && FlushInputFrame()) {
- DVLOG(2) << "DecodeBufferTask(): enqueued flush buffer";
- decoder_partial_frame_pending_ = false;
- schedule_task = true;
- } else {
- // If we failed to enqueue the empty buffer (due to pipeline
- // backpressure), don't advance the bitstream buffer queue, and don't
- // schedule the next task. This bitstream buffer queue entry will get
- // reprocessed when the pipeline frees up.
- schedule_task = false;
- }
+ // If we failed to enqueue the empty buffer (due to pipeline
+ // backpressure), don't advance the bitstream buffer queue, and don't
+ // schedule the next task. This bitstream buffer queue entry will get
+ // reprocessed when the pipeline frees up.
+ schedule_task = false;
}
+ } else if (shm->size() == 0) {
+ // This is a buffer queued from the client that has zero size. Skip.
+ schedule_task = true;
} else {
// This is a buffer queued from the client, with actual contents. Decode.
const uint8_t* const data =
- reinterpret_cast<const uint8_t*>(
- decoder_current_bitstream_buffer_->shm->memory()) +
+ reinterpret_cast<const uint8_t*>(shm->memory()) +
decoder_current_bitstream_buffer_->bytes_used;
const size_t data_size =
- decoder_current_bitstream_buffer_->size -
- decoder_current_bitstream_buffer_->bytes_used;
+ shm->size() - decoder_current_bitstream_buffer_->bytes_used;
if (!AdvanceFrameFragment(data, data_size, &decoded_size)) {
NOTIFY_ERROR(UNREADABLE_INPUT);
return;
@@ -618,8 +627,8 @@ void V4L2VideoDecodeAccelerator::DecodeBufferTask() {
if (schedule_task) {
decoder_current_bitstream_buffer_->bytes_used += decoded_size;
- if (decoder_current_bitstream_buffer_->bytes_used ==
- decoder_current_bitstream_buffer_->size) {
+ if ((shm ? shm->size() : 0) ==
+ decoder_current_bitstream_buffer_->bytes_used) {
// Our current bitstream buffer is done; return it.
int32_t input_id = decoder_current_bitstream_buffer_->input_id;
DVLOG(3) << "DecodeBufferTask(): finished input_id=" << input_id;
@@ -1023,14 +1032,7 @@ bool V4L2VideoDecodeAccelerator::DequeueResolutionChangeEvent() {
while (device_->Ioctl(VIDIOC_DQEVENT, &ev) == 0) {
if (ev.type == V4L2_EVENT_SOURCE_CHANGE) {
- uint32_t changes = ev.u.src_change.changes;
- // We used to define source change was always resolution change. The union
- // |ev.u| is not used and it is zero by default. When using the upstream
- // version of the resolution event change, we also need to check
- // |ev.u.src_change.changes| to know what is changed. For API backward
- // compatibility, event is treated as resolution change when all bits in
- // |ev.u.src_change.changes| are cleared.
- if (changes == 0 || (changes & V4L2_EVENT_SRC_CH_RESOLUTION)) {
+ if (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
DVLOG(3)
<< "DequeueResolutionChangeEvent(): got resolution change event.";
return true;
@@ -1282,7 +1284,7 @@ void V4L2VideoDecodeAccelerator::FlushTask() {
// Queue up an empty buffer -- this triggers the flush.
decoder_input_queue_.push(
linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
- io_client_, io_task_runner_, NULL, 0, kFlushBufferId)));
+ decode_client_, decode_task_runner_, nullptr, kFlushBufferId)));
decoder_flushing_ = true;
SendPictureReady(); // Send all pending PictureReady.
@@ -1886,9 +1888,9 @@ bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() {
<< "buffer_count=" << buffer_count
<< ", coded_size=" << coded_size_.ToString();
child_task_runner_->PostTask(
- FROM_HERE, base::Bind(&Client::ProvidePictureBuffers, client_,
- buffer_count, coded_size_,
- device_->GetTextureTarget()));
+ FROM_HERE,
+ base::Bind(&Client::ProvidePictureBuffers, client_, buffer_count, 1,
+ coded_size_, device_->GetTextureTarget()));
// Wait for the client to call AssignPictureBuffers() on the Child thread.
// We do this, because if we continue decoding without finishing buffer
@@ -2005,10 +2007,12 @@ void V4L2VideoDecodeAccelerator::SendPictureReady() {
bool cleared = pending_picture_ready_.front().cleared;
const media::Picture& picture = pending_picture_ready_.front().picture;
if (cleared && picture_clearing_count_ == 0) {
- // This picture is cleared. Post it to IO thread to reduce latency. This
- // should be the case after all pictures are cleared at the beginning.
- io_task_runner_->PostTask(
- FROM_HERE, base::Bind(&Client::PictureReady, io_client_, picture));
+ // This picture is cleared. It can be posted to a thread different than
+ // the main GPU thread to reduce latency. This should be the case after
+ // all pictures are cleared at the beginning.
+ decode_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&Client::PictureReady, decode_client_, picture));
pending_picture_ready_.pop();
} else if (!cleared || resetting_or_flushing) {
DVLOG(3) << "SendPictureReady()"
diff --git a/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.h b/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.h
index 3d06665e344..cb749569241 100644
--- a/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.h
@@ -23,6 +23,7 @@
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "content/common/gpu/media/v4l2_device.h"
#include "media/base/limits.h"
#include "media/base/video_decoder_config.h"
@@ -78,11 +79,9 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
public:
V4L2VideoDecodeAccelerator(
EGLDisplay egl_display,
- EGLContext egl_context,
- const base::WeakPtr<Client>& io_client_,
- const base::Callback<bool(void)>& make_context_current,
- const scoped_refptr<V4L2Device>& device,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const scoped_refptr<V4L2Device>& device);
~V4L2VideoDecodeAccelerator() override;
// media::VideoDecodeAccelerator implementation.
@@ -95,7 +94,10 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
static media::VideoDecodeAccelerator::SupportedProfiles
GetSupportedProfiles();
@@ -316,8 +318,8 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
// Our original calling task runner for the child thread.
scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
- // Task runner of the IO thread.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+ // Task runner Decode() and PictureReady() run on.
+ scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner_;
// WeakPtr<> pointing to |this| for use in posting tasks from the decoder or
// device worker threads back to the child thread. Because the worker threads
@@ -332,8 +334,8 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
// child_task_runner_.
scoped_ptr<base::WeakPtrFactory<Client> > client_ptr_factory_;
base::WeakPtr<Client> client_;
- // Callbacks to |io_client_| must be executed on |io_task_runner_|.
- base::WeakPtr<Client> io_client_;
+ // Callbacks to |decode_client_| must be executed on |decode_task_runner_|.
+ base::WeakPtr<Client> decode_client_;
//
// Decoder state, owned and operated by decoder_thread_.
@@ -438,12 +440,13 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
// Other state, held by the child (main) thread.
//
- // Make our context current before running any EGL entry points.
- base::Callback<bool(void)> make_context_current_;
-
// EGL state
EGLDisplay egl_display_;
- EGLContext egl_context_;
+
+ // Callback to get current GLContext.
+ GetGLContextCallback get_gl_context_cb_;
+ // Callback to set the correct gl context.
+ MakeGLContextCurrentCallback make_context_current_cb_;
// The codec we'll be decoding for.
media::VideoCodecProfile video_profile_;
diff --git a/chromium/content/common/gpu/media/v4l2_video_encode_accelerator.cc b/chromium/content/common/gpu/media/v4l2_video_encode_accelerator.cc
index 98f4e48db35..d724d8dea40 100644
--- a/chromium/content/common/gpu/media/v4l2_video_encode_accelerator.cc
+++ b/chromium/content/common/gpu/media/v4l2_video_encode_accelerator.cc
@@ -17,8 +17,8 @@
#include "base/numerics/safe_conversions.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/v4l2_video_encode_accelerator.h"
-#include "content/public/common/content_switches.h"
#include "media/base/bitstream_buffer.h"
#define NOTIFY_ERROR(x) \
@@ -51,13 +51,10 @@
namespace content {
struct V4L2VideoEncodeAccelerator::BitstreamBufferRef {
- BitstreamBufferRef(int32_t id,
- scoped_ptr<base::SharedMemory> shm,
- size_t size)
- : id(id), shm(std::move(shm)), size(size) {}
+ BitstreamBufferRef(int32_t id, scoped_ptr<SharedMemoryRegion> shm)
+ : id(id), shm(std::move(shm)) {}
const int32_t id;
- const scoped_ptr<base::SharedMemory> shm;
- const size_t size;
+ const scoped_ptr<SharedMemoryRegion> shm;
};
V4L2VideoEncodeAccelerator::InputRecord::InputRecord() : at_device(false) {
@@ -128,20 +125,13 @@ bool V4L2VideoEncodeAccelerator::Initialize(
const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
- // This cap combination is deprecated, but some older drivers may still be
- // returning it.
- const __u32 kCapsRequiredCompat = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE |
- V4L2_CAP_STREAMING;
- if ((caps.capabilities & kCapsRequiredCompat) != kCapsRequiredCompat) {
- LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP: "
- "caps check failed: 0x" << std::hex << caps.capabilities;
- return false;
- }
+ LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP: "
+ "caps check failed: 0x" << std::hex << caps.capabilities;
+ return false;
}
if (!SetFormats(input_format, output_profile)) {
- LOG(ERROR) << "Failed setting up formats";
+ DLOG(ERROR) << "Failed setting up formats";
return false;
}
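The capability check above now insists on the non-deprecated multiplanar memory-to-memory caps and drops the compatibility fallback. Outside Chromium's IOCTL_OR_ERROR_RETURN_FALSE wrapper, the same check is roughly this standalone sketch (plain V4L2, not part of the patch):

    #include <cstring>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    // Returns true if the opened V4L2 node advertises the multiplanar
    // memory-to-memory and streaming caps the encoder requires. Older drivers
    // that only report the deprecated CAPTURE_MPLANE|OUTPUT_MPLANE combination
    // are rejected, matching the hunk above.
    bool HasRequiredCaps(int fd) {
      struct v4l2_capability caps;
      memset(&caps, 0, sizeof(caps));
      if (ioctl(fd, VIDIOC_QUERYCAP, &caps) != 0)
        return false;
      const __u32 kRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
      return (caps.capabilities & kRequired) == kRequired;
    }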
@@ -231,15 +221,14 @@ void V4L2VideoEncodeAccelerator::UseOutputBitstreamBuffer(
return;
}
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(buffer.handle(), false));
- if (!shm->Map(buffer.size())) {
+ scoped_ptr<SharedMemoryRegion> shm(new SharedMemoryRegion(buffer, false));
+ if (!shm->Map()) {
NOTIFY_ERROR(kPlatformFailureError);
return;
}
scoped_ptr<BitstreamBufferRef> buffer_ref(
- new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
+ new BitstreamBufferRef(buffer.id(), std::move(shm)));
encoder_thread_.message_loop()->PostTask(
FROM_HERE,
base::Bind(&V4L2VideoEncodeAccelerator::UseOutputBitstreamBufferTask,
@@ -318,7 +307,13 @@ V4L2VideoEncodeAccelerator::GetSupportedProfiles() {
profiles.push_back(profile);
break;
case V4L2_PIX_FMT_VP9:
- profile.profile = media::VP9PROFILE_ANY;
+ profile.profile = media::VP9PROFILE_PROFILE0;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE1;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE2;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE3;
profiles.push_back(profile);
break;
}
@@ -365,13 +360,21 @@ void V4L2VideoEncodeAccelerator::EncodeTask(
std::vector<struct v4l2_ext_control> ctrls;
struct v4l2_ext_control ctrl;
memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE;
- ctrl.value = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME;
+ ctrl.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME;
ctrls.push_back(ctrl);
if (!SetExtCtrls(ctrls)) {
- LOG(ERROR) << "Failed requesting keyframe";
- NOTIFY_ERROR(kPlatformFailureError);
- return;
+ // Some platforms still use the old control; fall back to it until they are
+ // updated.
+ ctrls.clear();
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE;
+ ctrl.value = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME;
+ ctrls.push_back(ctrl);
+ if (!SetExtCtrls(ctrls)) {
+ LOG(ERROR) << "Failed requesting keyframe";
+ NOTIFY_ERROR(kPlatformFailureError);
+ return;
+ }
}
}
}
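Stripped of the accelerator's NOTIFY_ERROR plumbing, the keyframe request with its legacy fallback amounts to the following standalone sketch (plain V4L2; error handling is simplified and not part of the patch):

    #include <cstring>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    // Ask the encoder to make the next frame a keyframe. Newer drivers expose
    // the standard control; some older Exynos MFC drivers only understand the
    // legacy vendor control, hence the fallback.
    bool RequestKeyframe(int fd) {
      struct v4l2_ext_control ctrl;
      struct v4l2_ext_controls ctrls;
      memset(&ctrl, 0, sizeof(ctrl));
      memset(&ctrls, 0, sizeof(ctrls));
      ctrl.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME;
      ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
      ctrls.count = 1;
      ctrls.controls = &ctrl;
      if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) == 0)
        return true;

      // Legacy fallback for drivers that predate the standard control.
      memset(&ctrl, 0, sizeof(ctrl));
      ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE;
      ctrl.value = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME;
      return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) == 0;
    }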
@@ -893,7 +896,7 @@ bool V4L2VideoEncodeAccelerator::NegotiateInputFormat(
uint32_t input_format_fourcc =
V4L2Device::VideoPixelFormatToV4L2PixFmt(input_format);
if (!input_format_fourcc) {
- LOG(ERROR) << "Unsupported input format";
+ LOG(ERROR) << "Unsupported input format: " << input_format_fourcc;
return false;
}
@@ -913,8 +916,10 @@ bool V4L2VideoEncodeAccelerator::NegotiateInputFormat(
input_format_fourcc = device_->PreferredInputFormat();
input_format =
V4L2Device::V4L2PixFmtToVideoPixelFormat(input_format_fourcc);
- if (input_format == media::PIXEL_FORMAT_UNKNOWN)
+ if (input_format == media::PIXEL_FORMAT_UNKNOWN) {
+ LOG(ERROR) << "Unsupported input format: " << input_format_fourcc;
return false;
+ }
input_planes_count = media::VideoFrame::NumPlanes(input_format);
DCHECK_LE(input_planes_count, static_cast<size_t>(VIDEO_MAX_PLANES));
@@ -930,9 +935,14 @@ bool V4L2VideoEncodeAccelerator::NegotiateInputFormat(
DCHECK_EQ(format.fmt.pix_mp.num_planes, input_planes_count);
}
- // Take device-adjusted sizes for allocated size.
+ // Take device-adjusted sizes for allocated size. If the size is adjusted
+ // down, it means the input is too big and the hardware does not support it.
input_allocated_size_ = V4L2Device::CodedSizeFromV4L2Format(format);
- DCHECK(gfx::Rect(input_allocated_size_).Contains(gfx::Rect(visible_size_)));
+ if (!gfx::Rect(input_allocated_size_).Contains(gfx::Rect(visible_size_))) {
+ DVLOG(1) << "Input size too big " << visible_size_.ToString()
+ << ", adjusted to " << input_allocated_size_.ToString();
+ return false;
+ }
device_input_format_ = input_format;
input_planes_count_ = input_planes_count;
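The containment check replaces a DCHECK with a real failure path: if VIDIOC_S_FMT rounds the coded size down, the hardware cannot take frames of the requested visible size. A standalone sketch of that negotiation (plain V4L2; the NV12M pixel format and plane count are assumptions of the sketch):

    #include <cstring>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    // Negotiate the OUTPUT (raw-frames-to-encoder) format and verify that the
    // driver-adjusted coded size still contains the visible size.
    bool NegotiateInputSize(int fd, unsigned visible_w, unsigned visible_h) {
      struct v4l2_format format;
      memset(&format, 0, sizeof(format));
      format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
      format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
      format.fmt.pix_mp.width = visible_w;
      format.fmt.pix_mp.height = visible_h;
      format.fmt.pix_mp.num_planes = 2;
      if (ioctl(fd, VIDIOC_S_FMT, &format) != 0)
        return false;
      // The driver may round the size up for alignment, but a size rounded
      // down means the request exceeded the hardware limits.
      return format.fmt.pix_mp.width >= visible_w &&
             format.fmt.pix_mp.height >= visible_h;
    }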
@@ -1031,30 +1041,35 @@ bool V4L2VideoEncodeAccelerator::InitControls() {
ctrls.push_back(ctrl);
}
- // Enable "tight" bitrate mode. For this to work properly, frame- and mb-level
- // bitrate controls have to be enabled as well.
+ // Enable macroblock-level bitrate control.
memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF;
+ ctrl.id = V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE;
ctrl.value = 1;
ctrls.push_back(ctrl);
- // Force bitrate control to average over a GOP (for tight bitrate
- // tolerance).
+ // Disable periodic key frames.
memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT;
- ctrl.value = 1;
+ ctrl.id = V4L2_CID_MPEG_VIDEO_GOP_SIZE;
+ ctrl.value = 0;
ctrls.push_back(ctrl);
- // Enable macroblock-level bitrate control.
+ // Ignore return value as these controls are optional.
+ SetExtCtrls(ctrls);
+
+ // Optional Exynos specific controls.
+ ctrls.clear();
+ // Enable "tight" bitrate mode. For this to work properly, frame- and mb-level
+ // bitrate controls have to be enabled as well.
memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE;
+ ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF;
ctrl.value = 1;
ctrls.push_back(ctrl);
- // Disable periodic key frames.
+ // Force bitrate control to average over a GOP (for tight bitrate
+ // tolerance).
memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_GOP_SIZE;
- ctrl.value = 0;
+ ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT;
+ ctrl.value = 1;
ctrls.push_back(ctrl);
// Ignore return value as these controls are optional.
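The reordering above splits the optional controls into a generic batch and an Exynos MFC-specific batch, each applied best-effort. A condensed standalone sketch of that split (plain V4L2, simplified from the hunk; not part of the patch):

    #include <cstring>
    #include <sys/ioctl.h>
    #include <vector>
    #include <linux/videodev2.h>

    // Apply a batch of controls in one call; failures are tolerated because
    // all of these are optional tuning knobs.
    static void SetOptionalControls(int fd, std::vector<v4l2_ext_control> ctrls) {
      struct v4l2_ext_controls arg;
      memset(&arg, 0, sizeof(arg));
      arg.ctrl_class = V4L2_CTRL_CLASS_MPEG;
      arg.count = ctrls.size();
      arg.controls = ctrls.data();
      ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg);  // Best effort: errors ignored.
    }

    void InitOptionalEncoderControls(int fd) {
      // Generic knobs: macroblock-level rate control on, periodic keyframes
      // off. vector(2) value-initializes, i.e. zeroes, the structs.
      std::vector<v4l2_ext_control> generic(2);
      generic[0].id = V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE;
      generic[0].value = 1;
      generic[1].id = V4L2_CID_MPEG_VIDEO_GOP_SIZE;
      generic[1].value = 0;
      SetOptionalControls(fd, generic);

      // Exynos MFC "tight" bitrate controls, issued as a separate batch just
      // as in the hunk above.
      std::vector<v4l2_ext_control> exynos(2);
      exynos[0].id = V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF;
      exynos[0].value = 1;
      exynos[1].id = V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT;
      exynos[1].value = 1;
      SetOptionalControls(fd, exynos);
    }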
diff --git a/chromium/content/common/gpu/media/vaapi_drm_picture.cc b/chromium/content/common/gpu/media/vaapi_drm_picture.cc
index f20716426fd..ab5a4f28b1a 100644
--- a/chromium/content/common/gpu/media/vaapi_drm_picture.cc
+++ b/chromium/content/common/gpu/media/vaapi_drm_picture.cc
@@ -27,16 +27,16 @@ namespace content {
VaapiDrmPicture::VaapiDrmPicture(
const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)>& make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size)
: VaapiPicture(picture_buffer_id, texture_id, size),
vaapi_wrapper_(vaapi_wrapper),
- make_context_current_(make_context_current) {}
+ make_context_current_cb_(make_context_current_cb) {}
VaapiDrmPicture::~VaapiDrmPicture() {
- if (gl_image_ && make_context_current_.Run()) {
+ if (gl_image_ && make_context_current_cb_.Run()) {
gl_image_->ReleaseTexImage(GL_TEXTURE_EXTERNAL_OES);
gl_image_->Destroy(true);
@@ -67,7 +67,7 @@ bool VaapiDrmPicture::Initialize() {
pixmap_->SetProcessingCallback(
base::Bind(&VaapiWrapper::ProcessPixmap, vaapi_wrapper_));
- if (!make_context_current_.Run())
+ if (!make_context_current_cb_.Run())
return false;
gfx::ScopedTextureBinder texture_binder(GL_TEXTURE_EXTERNAL_OES,
diff --git a/chromium/content/common/gpu/media/vaapi_drm_picture.h b/chromium/content/common/gpu/media/vaapi_drm_picture.h
index 066192b25ca..7f5fc8a1780 100644
--- a/chromium/content/common/gpu/media/vaapi_drm_picture.h
+++ b/chromium/content/common/gpu/media/vaapi_drm_picture.h
@@ -11,7 +11,6 @@
#include <stdint.h>
-#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -35,7 +34,7 @@ class VaapiWrapper;
class VaapiDrmPicture : public VaapiPicture {
public:
VaapiDrmPicture(const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)>& make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size);
@@ -52,7 +51,7 @@ class VaapiDrmPicture : public VaapiPicture {
private:
scoped_refptr<VaapiWrapper> vaapi_wrapper_;
- base::Callback<bool(void)> make_context_current_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
// Ozone buffer, the storage of the EGLImage and the VASurface.
scoped_refptr<ui::NativePixmap> pixmap_;
diff --git a/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc b/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc
index 8efb362180d..a0cbc6e059d 100644
--- a/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc
@@ -13,8 +13,9 @@
#include "base/metrics/histogram.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
-#include "content/common/gpu/gpu_channel.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/vaapi_picture.h"
+#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/video_frame.h"
#include "media/filters/jpeg_parser.h"
#include "third_party/libyuv/include/libyuv.h"
@@ -76,10 +77,10 @@ static unsigned int VaSurfaceFormatForJpeg(
} // namespace
VaapiJpegDecodeAccelerator::DecodeRequest::DecodeRequest(
- const media::BitstreamBuffer& bitstream_buffer,
- scoped_ptr<base::SharedMemory> shm,
+ int32_t bitstream_buffer_id,
+ scoped_ptr<SharedMemoryRegion> shm,
const scoped_refptr<media::VideoFrame>& video_frame)
- : bitstream_buffer(bitstream_buffer),
+ : bitstream_buffer_id(bitstream_buffer_id),
shm(std::move(shm)),
video_frame(video_frame) {}
@@ -226,9 +227,9 @@ void VaapiJpegDecodeAccelerator::DecodeTask(
media::JpegParseResult parse_result;
if (!media::ParseJpegPicture(
reinterpret_cast<const uint8_t*>(request->shm->memory()),
- request->bitstream_buffer.size(), &parse_result)) {
+ request->shm->size(), &parse_result)) {
DLOG(ERROR) << "ParseJpegPicture failed";
- NotifyErrorFromDecoderThread(request->bitstream_buffer.id(),
+ NotifyErrorFromDecoderThread(request->bitstream_buffer_id,
PARSE_JPEG_FAILED);
return;
}
@@ -237,7 +238,7 @@ void VaapiJpegDecodeAccelerator::DecodeTask(
VaSurfaceFormatForJpeg(parse_result.frame_header);
if (!new_va_rt_format) {
DLOG(ERROR) << "Unsupported subsampling";
- NotifyErrorFromDecoderThread(request->bitstream_buffer.id(),
+ NotifyErrorFromDecoderThread(request->bitstream_buffer_id,
UNSUPPORTED_JPEG);
return;
}
@@ -255,7 +256,7 @@ void VaapiJpegDecodeAccelerator::DecodeTask(
if (!vaapi_wrapper_->CreateSurfaces(va_rt_format_, new_coded_size, 1,
&va_surfaces)) {
LOG(ERROR) << "Create VA surface failed";
- NotifyErrorFromDecoderThread(request->bitstream_buffer.id(),
+ NotifyErrorFromDecoderThread(request->bitstream_buffer_id,
PLATFORM_FAILURE);
return;
}
@@ -266,15 +267,15 @@ void VaapiJpegDecodeAccelerator::DecodeTask(
if (!VaapiJpegDecoder::Decode(vaapi_wrapper_.get(), parse_result,
va_surface_id_)) {
LOG(ERROR) << "Decode JPEG failed";
- NotifyErrorFromDecoderThread(request->bitstream_buffer.id(),
+ NotifyErrorFromDecoderThread(request->bitstream_buffer_id,
PLATFORM_FAILURE);
return;
}
- if (!OutputPicture(va_surface_id_, request->bitstream_buffer.id(),
+ if (!OutputPicture(va_surface_id_, request->bitstream_buffer_id,
request->video_frame)) {
LOG(ERROR) << "Output picture failed";
- NotifyErrorFromDecoderThread(request->bitstream_buffer.id(),
+ NotifyErrorFromDecoderThread(request->bitstream_buffer_id,
PLATFORM_FAILURE);
return;
}
@@ -289,17 +290,25 @@ void VaapiJpegDecodeAccelerator::Decode(
DVLOG(4) << "Mapping new input buffer id: " << bitstream_buffer.id()
<< " size: " << bitstream_buffer.size();
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(bitstream_buffer.handle(), true));
- if (!shm->Map(bitstream_buffer.size())) {
+ // SharedMemoryRegion takes ownership of |bitstream_buffer.handle()|.
+ scoped_ptr<SharedMemoryRegion> shm(
+ new SharedMemoryRegion(bitstream_buffer, true));
+
+ if (bitstream_buffer.id() < 0) {
+ LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+ NotifyErrorFromDecoderThread(bitstream_buffer.id(), INVALID_ARGUMENT);
+ return;
+ }
+
+ if (!shm->Map()) {
LOG(ERROR) << "Failed to map input buffer";
NotifyErrorFromDecoderThread(bitstream_buffer.id(), UNREADABLE_INPUT);
return;
}
scoped_ptr<DecodeRequest> request(
- new DecodeRequest(bitstream_buffer, std::move(shm), video_frame));
+ new DecodeRequest(bitstream_buffer.id(), std::move(shm), video_frame));
decoder_task_runner_->PostTask(
FROM_HERE, base::Bind(&VaapiJpegDecodeAccelerator::DecodeTask,
diff --git a/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h b/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h
index 7d78a5503e9..232b04de829 100644
--- a/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h
@@ -15,6 +15,7 @@
#include "base/threading/non_thread_safe.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/vaapi_jpeg_decoder.h"
#include "content/common/gpu/media/vaapi_wrapper.h"
#include "media/base/bitstream_buffer.h"
@@ -47,13 +48,13 @@ class CONTENT_EXPORT VaapiJpegDecodeAccelerator
// An input buffer and the corresponding output video frame awaiting
// consumption, provided by the client.
struct DecodeRequest {
- DecodeRequest(const media::BitstreamBuffer& bitstream_buffer,
- scoped_ptr<base::SharedMemory> shm,
+ DecodeRequest(int32_t bitstream_buffer_id,
+ scoped_ptr<SharedMemoryRegion> shm,
const scoped_refptr<media::VideoFrame>& video_frame);
~DecodeRequest();
- media::BitstreamBuffer bitstream_buffer;
- scoped_ptr<base::SharedMemory> shm;
+ int32_t bitstream_buffer_id;
+ scoped_ptr<SharedMemoryRegion> shm;
scoped_refptr<media::VideoFrame> video_frame;
};
diff --git a/chromium/content/common/gpu/media/vaapi_picture.cc b/chromium/content/common/gpu/media/vaapi_picture.cc
index 5222bd23504..cdf8c355974 100644
--- a/chromium/content/common/gpu/media/vaapi_picture.cc
+++ b/chromium/content/common/gpu/media/vaapi_picture.cc
@@ -18,16 +18,16 @@ namespace content {
// static
linked_ptr<VaapiPicture> VaapiPicture::CreatePicture(
const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)> make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size) {
linked_ptr<VaapiPicture> picture;
#if defined(USE_X11)
- picture.reset(new VaapiTFPPicture(vaapi_wrapper, make_context_current,
+ picture.reset(new VaapiTFPPicture(vaapi_wrapper, make_context_current_cb,
picture_buffer_id, texture_id, size));
#elif defined(USE_OZONE)
- picture.reset(new VaapiDrmPicture(vaapi_wrapper, make_context_current,
+ picture.reset(new VaapiDrmPicture(vaapi_wrapper, make_context_current_cb,
picture_buffer_id, texture_id, size));
#endif // USE_X11
diff --git a/chromium/content/common/gpu/media/vaapi_picture.h b/chromium/content/common/gpu/media/vaapi_picture.h
index 921f80344ec..4bd51e11620 100644
--- a/chromium/content/common/gpu/media/vaapi_picture.h
+++ b/chromium/content/common/gpu/media/vaapi_picture.h
@@ -12,11 +12,11 @@
#include <stdint.h>
-#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/threading/non_thread_safe.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "ui/gfx/geometry/size.h"
namespace gl {
@@ -52,10 +52,10 @@ class VaapiPicture : public base::NonThreadSafe {
// Create a VaapiPicture of |size| to be associated with
// |picture_buffer_id| and bound to |texture_id|.
- // |make_context_current| is provided for the GL operations.
+ // |make_context_current_cb| is provided for the GL operations.
static linked_ptr<VaapiPicture> CreatePicture(
const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)> make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size);
diff --git a/chromium/content/common/gpu/media/vaapi_tfp_picture.cc b/chromium/content/common/gpu/media/vaapi_tfp_picture.cc
index 3de593b62fd..074ba98ed73 100644
--- a/chromium/content/common/gpu/media/vaapi_tfp_picture.cc
+++ b/chromium/content/common/gpu/media/vaapi_tfp_picture.cc
@@ -14,18 +14,18 @@ namespace content {
VaapiTFPPicture::VaapiTFPPicture(
const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)> make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size)
: VaapiPicture(picture_buffer_id, texture_id, size),
vaapi_wrapper_(vaapi_wrapper),
- make_context_current_(make_context_current),
+ make_context_current_cb_(make_context_current_cb),
x_display_(gfx::GetXDisplay()),
x_pixmap_(0) {}
VaapiTFPPicture::~VaapiTFPPicture() {
- if (glx_image_.get() && make_context_current_.Run()) {
+ if (glx_image_.get() && make_context_current_cb_.Run()) {
glx_image_->ReleaseTexImage(GL_TEXTURE_2D);
glx_image_->Destroy(true);
DCHECK_EQ(glGetError(), static_cast<GLenum>(GL_NO_ERROR));
@@ -36,7 +36,7 @@ VaapiTFPPicture::~VaapiTFPPicture() {
}
bool VaapiTFPPicture::Initialize() {
- if (!make_context_current_.Run())
+ if (!make_context_current_cb_.Run())
return false;
XWindowAttributes win_attr;
diff --git a/chromium/content/common/gpu/media/vaapi_tfp_picture.h b/chromium/content/common/gpu/media/vaapi_tfp_picture.h
index 3b66e10800b..5ef35653202 100644
--- a/chromium/content/common/gpu/media/vaapi_tfp_picture.h
+++ b/chromium/content/common/gpu/media/vaapi_tfp_picture.h
@@ -11,7 +11,6 @@
#include <stdint.h>
-#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "content/common/gpu/media/vaapi_picture.h"
@@ -34,7 +33,7 @@ class VaapiWrapper;
class VaapiTFPPicture : public VaapiPicture {
public:
VaapiTFPPicture(const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)> make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size);
@@ -50,7 +49,7 @@ class VaapiTFPPicture : public VaapiPicture {
private:
scoped_refptr<VaapiWrapper> vaapi_wrapper_;
- base::Callback<bool(void)> make_context_current_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
Display* x_display_;
Pixmap x_pixmap_;
diff --git a/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.cc b/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.cc
index 271a0f7a1c9..d8caeec94da 100644
--- a/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.cc
@@ -14,12 +14,12 @@
#include "base/strings/string_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/trace_event/trace_event.h"
-#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/media/accelerated_video_decoder.h"
#include "content/common/gpu/media/h264_decoder.h"
#include "content/common/gpu/media/vaapi_picture.h"
#include "content/common/gpu/media/vp8_decoder.h"
#include "content/common/gpu/media/vp9_decoder.h"
+#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/bind_to_current_loop.h"
#include "media/video/picture.h"
#include "third_party/libva/va/va_dec_vp8.h"
@@ -256,8 +256,7 @@ class VaapiVideoDecodeAccelerator::VaapiVP9Accelerator
DISALLOW_COPY_AND_ASSIGN(VaapiVP9Accelerator);
};
-VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0), size(0) {
-}
+VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0) {}
VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {
}
@@ -293,11 +292,9 @@ VaapiPicture* VaapiVideoDecodeAccelerator::PictureById(
}
VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- const base::Callback<void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>&
- bind_image)
- : make_context_current_(make_context_current),
- state_(kUninitialized),
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb)
+ : state_(kUninitialized),
input_ready_(&lock_),
surfaces_available_(&lock_),
message_loop_(base::MessageLoop::current()),
@@ -307,7 +304,8 @@ VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
finish_flush_pending_(false),
awaiting_va_surfaces_recycle_(false),
requested_num_pics_(0),
- bind_image_(bind_image),
+ make_context_current_cb_(make_context_current_cb),
+ bind_image_cb_(bind_image_cb),
weak_this_factory_(this) {
weak_this_ = weak_this_factory_.GetWeakPtr();
va_surface_release_cb_ = media::BindToCurrentLoop(
@@ -322,6 +320,11 @@ bool VaapiVideoDecodeAccelerator::Initialize(const Config& config,
Client* client) {
DCHECK_EQ(message_loop_, base::MessageLoop::current());
+ if (make_context_current_cb_.is_null() || bind_image_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
if (config.is_encrypted) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
return false;
@@ -447,10 +450,10 @@ void VaapiVideoDecodeAccelerator::MapAndQueueNewInputBuffer(
DVLOG(4) << "Mapping new input buffer id: " << bitstream_buffer.id()
<< " size: " << (int)bitstream_buffer.size();
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(bitstream_buffer.handle(), true));
- RETURN_AND_NOTIFY_ON_FAILURE(shm->Map(bitstream_buffer.size()),
- "Failed to map input buffer", UNREADABLE_INPUT,);
+ scoped_ptr<SharedMemoryRegion> shm(
+ new SharedMemoryRegion(bitstream_buffer, true));
+ RETURN_AND_NOTIFY_ON_FAILURE(shm->Map(), "Failed to map input buffer",
+ UNREADABLE_INPUT, );
base::AutoLock auto_lock(lock_);
@@ -458,7 +461,6 @@ void VaapiVideoDecodeAccelerator::MapAndQueueNewInputBuffer(
linked_ptr<InputBuffer> input_buffer(new InputBuffer());
input_buffer->shm.reset(shm.release());
input_buffer->id = bitstream_buffer.id();
- input_buffer->size = bitstream_buffer.size();
++num_stream_bufs_at_decoder_;
TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
@@ -497,13 +499,12 @@ bool VaapiVideoDecodeAccelerator::GetInputBuffer_Locked() {
curr_input_buffer_ = input_buffers_.front();
input_buffers_.pop();
- DVLOG(4) << "New current bitstream buffer, id: "
- << curr_input_buffer_->id
- << " size: " << curr_input_buffer_->size;
+ DVLOG(4) << "New current bitstream buffer, id: " << curr_input_buffer_->id
+ << " size: " << curr_input_buffer_->shm->size();
decoder_->SetStream(
static_cast<uint8_t*>(curr_input_buffer_->shm->memory()),
- curr_input_buffer_->size);
+ curr_input_buffer_->shm->size());
return true;
default:
@@ -663,7 +664,7 @@ void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
message_loop_->PostTask(
FROM_HERE,
base::Bind(&Client::ProvidePictureBuffers, client_, requested_num_pics_,
- requested_pic_size_, VaapiPicture::GetGLTextureTarget()));
+ 1, requested_pic_size_, VaapiPicture::GetGLTextureTarget()));
}
void VaapiVideoDecodeAccelerator::Decode(
@@ -673,6 +674,12 @@ void VaapiVideoDecodeAccelerator::Decode(
TRACE_EVENT1("Video Decoder", "VAVDA::Decode", "Buffer id",
bitstream_buffer.id());
+ RETURN_AND_NOTIFY_ON_FAILURE(
+ bitstream_buffer.id() >= 0 &&
+ base::SharedMemory::IsHandleValid(bitstream_buffer.handle()),
+ "Invalid bitstream_buffer, id: " << bitstream_buffer.id(),
+ INVALID_ARGUMENT, );
+
// We got a new input buffer from the client, map it and queue for later use.
MapAndQueueNewInputBuffer(bitstream_buffer);
@@ -734,18 +741,22 @@ void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
DCHECK_EQ(va_surface_ids.size(), buffers.size());
for (size_t i = 0; i < buffers.size(); ++i) {
+ DCHECK_LE(1u, buffers[i].texture_ids().size());
DVLOG(2) << "Assigning picture id: " << buffers[i].id()
- << " to texture id: " << buffers[i].texture_id()
+ << " to texture id: " << buffers[i].texture_ids()[0]
<< " VASurfaceID: " << va_surface_ids[i];
linked_ptr<VaapiPicture> picture(VaapiPicture::CreatePicture(
- vaapi_wrapper_, make_context_current_, buffers[i].id(),
- buffers[i].texture_id(), requested_pic_size_));
+ vaapi_wrapper_, make_context_current_cb_, buffers[i].id(),
+ buffers[i].texture_ids()[0], requested_pic_size_));
scoped_refptr<gl::GLImage> image = picture->GetImageToBind();
if (image) {
- bind_image_.Run(buffers[i].internal_texture_id(),
- VaapiPicture::GetGLTextureTarget(), image);
+ DCHECK_LE(1u, buffers[i].internal_texture_ids().size());
+ RETURN_AND_NOTIFY_ON_FAILURE(
+ bind_image_cb_.Run(buffers[i].internal_texture_ids()[0],
+ VaapiPicture::GetGLTextureTarget(), image, true),
+ "Failed to bind image", PLATFORM_FAILURE, );
}
RETURN_AND_NOTIFY_ON_FAILURE(
@@ -960,7 +971,9 @@ void VaapiVideoDecodeAccelerator::Destroy() {
delete this;
}
-bool VaapiVideoDecodeAccelerator::CanDecodeOnIOThread() {
+bool VaapiVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
return false;
}
diff --git a/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.h b/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.h
index 11cc082a627..f9cfb90376c 100644
--- a/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.h
@@ -20,13 +20,14 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/linked_ptr.h"
-#include "base/memory/shared_memory.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/vaapi_wrapper.h"
#include "media/base/bitstream_buffer.h"
#include "media/video/picture.h"
@@ -55,9 +56,9 @@ class CONTENT_EXPORT VaapiVideoDecodeAccelerator
class VaapiDecodeSurface;
VaapiVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- const base::Callback<
- void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>& bind_image);
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb);
+
~VaapiVideoDecodeAccelerator() override;
// media::VideoDecodeAccelerator implementation.
@@ -69,7 +70,10 @@ class CONTENT_EXPORT VaapiVideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
static media::VideoDecodeAccelerator::SupportedProfiles
GetSupportedProfiles();
@@ -180,10 +184,6 @@ class CONTENT_EXPORT VaapiVideoDecodeAccelerator
// available.
scoped_refptr<VaapiDecodeSurface> CreateSurface();
-
- // Client-provided GL state.
- base::Callback<bool(void)> make_context_current_;
-
// VAVDA state.
enum State {
// Initialize() not called yet or failed.
@@ -210,8 +210,7 @@ class CONTENT_EXPORT VaapiVideoDecodeAccelerator
~InputBuffer();
int32_t id;
- size_t size;
- scoped_ptr<base::SharedMemory> shm;
+ scoped_ptr<SharedMemoryRegion> shm;
};
// Queue for incoming input buffers.
@@ -305,10 +304,11 @@ class CONTENT_EXPORT VaapiVideoDecodeAccelerator
size_t requested_num_pics_;
gfx::Size requested_pic_size_;
- // Binds the provided GLImage to a givenr client texture ID & texture target
- // combination in GLES.
- base::Callback<void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>
- bind_image_;
+ // Callback to make GL context current.
+ MakeGLContextCurrentCallback make_context_current_cb_;
+
+ // Callback to bind a GLImage to a given texture.
+ BindGLImageCallback bind_image_cb_;
// The WeakPtrFactory for |weak_this_|.
base::WeakPtrFactory<VaapiVideoDecodeAccelerator> weak_this_factory_;
diff --git a/chromium/content/common/gpu/media/vaapi_video_encode_accelerator.cc b/chromium/content/common/gpu/media/vaapi_video_encode_accelerator.cc
index 049cd7a5547..520d411e21b 100644
--- a/chromium/content/common/gpu/media/vaapi_video_encode_accelerator.cc
+++ b/chromium/content/common/gpu/media/vaapi_video_encode_accelerator.cc
@@ -13,6 +13,7 @@
#include "base/metrics/histogram.h"
#include "base/numerics/safe_conversions.h"
#include "content/common/gpu/media/h264_dpb.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "media/base/bind_to_current_loop.h"
#include "third_party/libva/va/va_enc_h264.h"
@@ -100,13 +101,10 @@ struct VaapiVideoEncodeAccelerator::InputFrameRef {
};
struct VaapiVideoEncodeAccelerator::BitstreamBufferRef {
- BitstreamBufferRef(int32_t id,
- scoped_ptr<base::SharedMemory> shm,
- size_t size)
- : id(id), shm(std::move(shm)), size(size) {}
+ BitstreamBufferRef(int32_t id, scoped_ptr<SharedMemoryRegion> shm)
+ : id(id), shm(std::move(shm)) {}
const int32_t id;
- const scoped_ptr<base::SharedMemory> shm;
- const size_t size;
+ const scoped_ptr<SharedMemoryRegion> shm;
};
media::VideoEncodeAccelerator::SupportedProfiles
@@ -176,9 +174,19 @@ bool VaapiVideoEncodeAccelerator::Initialize(
client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
client_ = client_ptr_factory_->GetWeakPtr();
- if (output_profile < media::H264PROFILE_BASELINE ||
- output_profile > media::H264PROFILE_MAIN) {
- DVLOGF(1) << "Unsupported output profile: " << output_profile;
+ const SupportedProfiles& profiles = GetSupportedProfiles();
+ auto profile = find_if(profiles.begin(), profiles.end(),
+ [output_profile](const SupportedProfile& profile) {
+ return profile.profile == output_profile;
+ });
+ if (profile == profiles.end()) {
+ DVLOGF(1) << "Unsupported output profile " << output_profile;
+ return false;
+ }
+ if (input_visible_size.width() > profile->max_resolution.width() ||
+ input_visible_size.height() > profile->max_resolution.height()) {
+ DVLOGF(1) << "Input size too big: " << input_visible_size.ToString()
+ << ", max supported size: " << profile->max_resolution.ToString();
return false;
}
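The hard-coded H.264 profile range check is replaced by a lookup in the advertised SupportedProfiles plus a resolution bound. A self-contained sketch of that validation pattern with simplified stand-in types (not the media:: definitions):

    #include <algorithm>
    #include <vector>

    struct SupportedProfile {
      int profile;
      int max_width;
      int max_height;
    };

    // Reject unsupported profiles and too-large inputs up front rather than
    // failing later in format negotiation.
    bool ValidateRequest(const std::vector<SupportedProfile>& profiles,
                         int output_profile, int width, int height) {
      auto it = std::find_if(profiles.begin(), profiles.end(),
                             [output_profile](const SupportedProfile& p) {
                               return p.profile == output_profile;
                             });
      if (it == profiles.end())
        return false;  // Unsupported output profile.
      return width <= it->max_width && height <= it->max_height;
    }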
@@ -546,11 +554,8 @@ void VaapiVideoEncodeAccelerator::TryToReturnBitstreamBuffer() {
size_t data_size = 0;
if (!vaapi_wrapper_->DownloadAndDestroyCodedBuffer(
- encode_job->coded_buffer,
- encode_job->input_surface->id(),
- target_data,
- buffer->size,
- &data_size)) {
+ encode_job->coded_buffer, encode_job->input_surface->id(),
+ target_data, buffer->shm->size(), &data_size)) {
NOTIFY_ERROR(kPlatformFailureError, "Failed downloading coded buffer");
return;
}
@@ -669,15 +674,14 @@ void VaapiVideoEncodeAccelerator::UseOutputBitstreamBuffer(
return;
}
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(buffer.handle(), false));
- if (!shm->Map(buffer.size())) {
+ scoped_ptr<SharedMemoryRegion> shm(new SharedMemoryRegion(buffer, false));
+ if (!shm->Map()) {
NOTIFY_ERROR(kPlatformFailureError, "Failed mapping shared memory.");
return;
}
scoped_ptr<BitstreamBufferRef> buffer_ref(
- new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
+ new BitstreamBufferRef(buffer.id(), std::move(shm)));
encoder_thread_task_runner_->PostTask(
FROM_HERE,
diff --git a/chromium/content/common/gpu/media/vaapi_wrapper.cc b/chromium/content/common/gpu/media/vaapi_wrapper.cc
index db38f32f7f8..19303e1e6d6 100644
--- a/chromium/content/common/gpu/media/vaapi_wrapper.cc
+++ b/chromium/content/common/gpu/media/vaapi_wrapper.cc
@@ -9,7 +9,6 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/command_line.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
@@ -18,7 +17,6 @@
// Auto-generated for dlopen libva libraries
#include "content/common/gpu/media/va_stubs.h"
#include "content/common/gpu/media/vaapi_picture.h"
-#include "content/public/common/content_switches.h"
#include "third_party/libyuv/include/libyuv.h"
#include "ui/gl/gl_bindings.h"
#if defined(USE_X11)
@@ -127,7 +125,9 @@ static const ProfileMap kProfileMap[] = {
// media::H264PROFILE_HIGH*.
{media::H264PROFILE_HIGH, VAProfileH264High},
{media::VP8PROFILE_ANY, VAProfileVP8Version0_3},
- {media::VP9PROFILE_ANY, VAProfileVP9Profile0},
+ // TODO(servolk): Need to add VP9 profiles 1,2,3 here after rolling
+ // third_party/libva to 1.7. crbug.com/598118
+ {media::VP9PROFILE_PROFILE0, VAProfileVP9Profile0},
};
static std::vector<VAConfigAttrib> GetRequiredAttribs(
@@ -214,10 +214,6 @@ scoped_refptr<VaapiWrapper> VaapiWrapper::CreateForVideoCodec(
media::VideoEncodeAccelerator::SupportedProfiles
VaapiWrapper::GetSupportedEncodeProfiles() {
media::VideoEncodeAccelerator::SupportedProfiles profiles;
- const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kDisableVaapiAcceleratedVideoEncode))
- return profiles;
-
std::vector<ProfileInfo> encode_profile_infos =
profile_infos_.Get().GetSupportedProfileInfosForCodecMode(kEncode);
@@ -369,11 +365,8 @@ bool VaapiWrapper::VaInitialize(const base::Closure& report_error_to_uma_cb) {
return false;
}
- VAStatus va_res = VA_STATUS_SUCCESS;
- if (!va_display_state->Initialize(&va_res)) {
- VA_LOG_ON_ERROR(va_res, "vaInitialize failed");
+ if (!va_display_state->Initialize())
return false;
- }
va_display_ = va_display_state->va_display();
return true;
@@ -1218,7 +1211,7 @@ VaapiWrapper::VADisplayState::VADisplayState()
VaapiWrapper::VADisplayState::~VADisplayState() {}
-bool VaapiWrapper::VADisplayState::Initialize(VAStatus* status) {
+bool VaapiWrapper::VADisplayState::Initialize() {
va_lock_.AssertAcquired();
if (refcount_++ == 0) {
#if defined(USE_X11)
@@ -1232,9 +1225,12 @@ bool VaapiWrapper::VADisplayState::Initialize(VAStatus* status) {
return false;
}
- *status = vaInitialize(va_display_, &major_version_, &minor_version_);
- if (*status != VA_STATUS_SUCCESS)
+ VAStatus va_res =
+ vaInitialize(va_display_, &major_version_, &minor_version_);
+ if (va_res != VA_STATUS_SUCCESS) {
+ LOG(WARNING) << "vaInitialize failed: " << vaErrorStr(va_res);
return false;
+ }
va_initialized_ = true;
DVLOG(1) << "VAAPI version: " << major_version_ << "." << minor_version_;
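With the VAStatus out-parameter gone, the display state now logs a human-readable error itself via vaErrorStr(). A standalone libva sketch of the same initialization flow (the DRM render-node path is an assumption of the sketch; Chromium's refcounting and locking are omitted):

    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>
    #include <va/va.h>
    #include <va/va_drm.h>

    // Open a DRM render node and bring up libva, logging a readable error
    // string on failure instead of returning the raw VAStatus to the caller.
    bool InitializeVaDisplay(int* drm_fd, VADisplay* display) {
      *drm_fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
      if (*drm_fd < 0)
        return false;
      *display = vaGetDisplayDRM(*drm_fd);
      if (!vaDisplayIsValid(*display)) {
        close(*drm_fd);
        return false;
      }
      int major = 0, minor = 0;
      VAStatus va_res = vaInitialize(*display, &major, &minor);
      if (va_res != VA_STATUS_SUCCESS) {
        std::fprintf(stderr, "vaInitialize failed: %s\n", vaErrorStr(va_res));
        close(*drm_fd);
        return false;
      }
      return true;
    }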
diff --git a/chromium/content/common/gpu/media/vaapi_wrapper.h b/chromium/content/common/gpu/media/vaapi_wrapper.h
index 7f14b49be11..4394bc36b92 100644
--- a/chromium/content/common/gpu/media/vaapi_wrapper.h
+++ b/chromium/content/common/gpu/media/vaapi_wrapper.h
@@ -247,7 +247,7 @@ class CONTENT_EXPORT VaapiWrapper
~VADisplayState();
// |va_lock_| must be held on entry.
- bool Initialize(VAStatus* status);
+ bool Initialize();
void Deinitialize(VAStatus* status);
base::Lock* va_lock() { return &va_lock_; }
diff --git a/chromium/content/common/gpu/media/video_decode_accelerator_unittest.cc b/chromium/content/common/gpu/media/video_decode_accelerator_unittest.cc
index 36466304a3c..91339668867 100644
--- a/chromium/content/common/gpu/media/video_decode_accelerator_unittest.cc
+++ b/chromium/content/common/gpu/media/video_decode_accelerator_unittest.cc
@@ -47,9 +47,10 @@
#include "base/threading/thread.h"
#include "build/build_config.h"
#include "content/common/gpu/media/fake_video_decode_accelerator.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
#include "content/common/gpu/media/rendering_helper.h"
#include "content/common/gpu/media/video_accelerator_unittest_helpers.h"
-#include "content/public/common/content_switches.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
#include "media/filters/h264_parser.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/codec/png_codec.h"
@@ -334,6 +335,7 @@ class GLRenderingVDAClient
// VideoDecodeAccelerator::Client implementation.
// The heart of the Client.
void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
+ uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) override;
void DismissPictureBuffer(int32_t picture_buffer_id) override;
@@ -359,16 +361,6 @@ class GLRenderingVDAClient
private:
typedef std::map<int32_t, scoped_refptr<TextureRef>> TextureRefMap;
- scoped_ptr<media::VideoDecodeAccelerator> CreateFakeVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateDXVAVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2VDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2SliceVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateVaapiVDA();
-
- void BindImage(uint32_t client_texture_id,
- uint32_t texture_target,
- scoped_refptr<gl::GLImage> image);
-
void SetState(ClientState new_state);
void FinishInitialization();
void ReturnPicture(int32_t picture_buffer_id);
@@ -401,8 +393,10 @@ class GLRenderingVDAClient
int next_bitstream_buffer_id_;
ClientStateNotification<ClientState>* note_;
scoped_ptr<VideoDecodeAccelerator> decoder_;
- scoped_ptr<base::WeakPtrFactory<VideoDecodeAccelerator> >
- weak_decoder_factory_;
+ base::WeakPtr<VideoDecodeAccelerator> weak_vda_;
+ scoped_ptr<base::WeakPtrFactory<VideoDecodeAccelerator>>
+ weak_vda_ptr_factory_;
+ scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl> vda_factory_;
int remaining_play_throughs_;
int reset_after_frame_num_;
int delete_decoder_state_;
@@ -440,9 +434,23 @@ class GLRenderingVDAClient
int32_t next_picture_buffer_id_;
+ base::WeakPtr<GLRenderingVDAClient> weak_this_;
+ base::WeakPtrFactory<GLRenderingVDAClient> weak_this_factory_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(GLRenderingVDAClient);
};
+static bool DoNothingReturnTrue() {
+ return true;
+}
+
+static bool DummyBindImage(uint32_t client_texture_id,
+ uint32_t texture_target,
+ const scoped_refptr<gl::GLImage>& image,
+ bool can_bind_to_sampler) {
+ return true;
+}
+
GLRenderingVDAClient::GLRenderingVDAClient(
size_t window_id,
RenderingHelper* rendering_helper,
@@ -483,7 +491,8 @@ GLRenderingVDAClient::GLRenderingVDAClient(
delay_reuse_after_frame_num_(delay_reuse_after_frame_num),
decode_calls_per_second_(decode_calls_per_second),
render_as_thumbnails_(render_as_thumbnails),
- next_picture_buffer_id_(1) {
+ next_picture_buffer_id_(1),
+ weak_this_factory_(this) {
LOG_ASSERT(num_in_flight_decodes > 0);
LOG_ASSERT(num_play_throughs > 0);
// |num_in_flight_decodes_| is unsupported if |decode_calls_per_second_| > 0.
@@ -494,6 +503,8 @@ GLRenderingVDAClient::GLRenderingVDAClient(
profile_ = (profile != media::VIDEO_CODEC_PROFILE_UNKNOWN
? profile
: media::H264PROFILE_BASELINE);
+
+ weak_this_ = weak_this_factory_.GetWeakPtr();
}
GLRenderingVDAClient::~GLRenderingVDAClient() {
@@ -502,119 +513,49 @@ GLRenderingVDAClient::~GLRenderingVDAClient() {
SetState(CS_DESTROYED);
}
-static bool DoNothingReturnTrue() { return true; }
+void GLRenderingVDAClient::CreateAndStartDecoder() {
+ LOG_ASSERT(decoder_deleted());
+ LOG_ASSERT(!decoder_.get());
-scoped_ptr<media::VideoDecodeAccelerator>
-GLRenderingVDAClient::CreateFakeVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
if (fake_decoder_) {
- decoder.reset(new FakeVideoDecodeAccelerator(
- static_cast<gfx::GLContext*> (rendering_helper_->GetGLContextHandle()),
- frame_size_,
- base::Bind(&DoNothingReturnTrue)));
- }
- return decoder;
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GLRenderingVDAClient::CreateDXVAVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_WIN)
- if (base::win::GetVersion() >= base::win::VERSION_WIN7)
- decoder.reset(
- new DXVAVideoDecodeAccelerator(
- base::Bind(&DoNothingReturnTrue),
- rendering_helper_->GetGLContext().get()));
-#endif
- return decoder;
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GLRenderingVDAClient::CreateV4L2VDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
- scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
- if (device.get()) {
- base::WeakPtr<VideoDecodeAccelerator::Client> weak_client = AsWeakPtr();
- decoder.reset(new V4L2VideoDecodeAccelerator(
- static_cast<EGLDisplay>(rendering_helper_->GetGLDisplay()),
- static_cast<EGLContext>(rendering_helper_->GetGLContextHandle()),
- weak_client, base::Bind(&DoNothingReturnTrue), device,
- base::ThreadTaskRunnerHandle::Get()));
- }
-#endif
- return decoder;
-}
+ decoder_.reset(new FakeVideoDecodeAccelerator(
+ frame_size_, base::Bind(&DoNothingReturnTrue)));
+ LOG_ASSERT(decoder_->Initialize(profile_, this));
+ } else {
+ if (!vda_factory_) {
+ vda_factory_ = GpuVideoDecodeAcceleratorFactoryImpl::Create(
+ base::Bind(&RenderingHelper::GetGLContext,
+ base::Unretained(rendering_helper_)),
+ base::Bind(&DoNothingReturnTrue), base::Bind(&DummyBindImage));
+ LOG_ASSERT(vda_factory_);
+ }
-scoped_ptr<media::VideoDecodeAccelerator>
-GLRenderingVDAClient::CreateV4L2SliceVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
- scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
- if (device.get()) {
- base::WeakPtr<VideoDecodeAccelerator::Client> weak_client = AsWeakPtr();
- decoder.reset(new V4L2SliceVideoDecodeAccelerator(
- device, static_cast<EGLDisplay>(rendering_helper_->GetGLDisplay()),
- static_cast<EGLContext>(rendering_helper_->GetGLContextHandle()),
- weak_client, base::Bind(&DoNothingReturnTrue),
- base::ThreadTaskRunnerHandle::Get()));
+ VideoDecodeAccelerator::Config config(profile_);
+ gpu::GpuPreferences gpu_preferences;
+ decoder_ = vda_factory_->CreateVDA(this, config, gpu_preferences);
}
-#endif
- return decoder;
-}
-scoped_ptr<media::VideoDecodeAccelerator>
-GLRenderingVDAClient::CreateVaapiVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
- decoder.reset(new VaapiVideoDecodeAccelerator(
- base::Bind(&DoNothingReturnTrue),
- base::Bind(&GLRenderingVDAClient::BindImage, base::Unretained(this))));
-#endif
- return decoder;
-}
+ LOG_ASSERT(decoder_) << "Failed creating a VDA";
-void GLRenderingVDAClient::BindImage(uint32_t client_texture_id,
- uint32_t texture_target,
- scoped_refptr<gl::GLImage> image) {}
+ decoder_->TryToSetupDecodeOnSeparateThread(
+ weak_this_, base::ThreadTaskRunnerHandle::Get());
-void GLRenderingVDAClient::CreateAndStartDecoder() {
- LOG_ASSERT(decoder_deleted());
- LOG_ASSERT(!decoder_.get());
-
- VideoDecodeAccelerator::Client* client = this;
-
- scoped_ptr<media::VideoDecodeAccelerator> decoders[] = {
- CreateFakeVDA(),
- CreateDXVAVDA(),
- CreateV4L2VDA(),
- CreateV4L2SliceVDA(),
- CreateVaapiVDA(),
- };
+ weak_vda_ptr_factory_.reset(
+ new base::WeakPtrFactory<VideoDecodeAccelerator>(decoder_.get()));
+ weak_vda_ = weak_vda_ptr_factory_->GetWeakPtr();
- for (size_t i = 0; i < arraysize(decoders); ++i) {
- if (!decoders[i])
- continue;
- decoder_ = std::move(decoders[i]);
- weak_decoder_factory_.reset(
- new base::WeakPtrFactory<VideoDecodeAccelerator>(decoder_.get()));
- if (decoder_->Initialize(profile_, client)) {
- SetState(CS_DECODER_SET);
- FinishInitialization();
- return;
- }
- }
- // Decoders are all initialize failed.
- LOG(ERROR) << "VideoDecodeAccelerator::Initialize() failed";
- LOG_ASSERT(false);
+ SetState(CS_DECODER_SET);
+ FinishInitialization();
}
void GLRenderingVDAClient::ProvidePictureBuffers(
uint32_t requested_num_of_buffers,
+ uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) {
if (decoder_deleted())
return;
+ LOG_ASSERT(textures_per_buffer == 1u);
std::vector<media::PictureBuffer> buffers;
requested_num_of_buffers += kExtraPictureBuffers;
@@ -637,8 +578,9 @@ void GLRenderingVDAClient::ProvidePictureBuffers(
texture_id))))
.second);
- buffers.push_back(
- media::PictureBuffer(picture_buffer_id, dimensions, texture_id));
+ media::PictureBuffer::TextureIds ids;
+ ids.push_back(texture_id);
+ buffers.push_back(media::PictureBuffer(picture_buffer_id, dimensions, ids));
}
decoder_->AssignPictureBuffers(buffers);
}
@@ -710,10 +652,8 @@ void GLRenderingVDAClient::ReturnPicture(int32_t picture_buffer_id) {
if (num_decoded_frames_ > delay_reuse_after_frame_num_) {
base::MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&VideoDecodeAccelerator::ReusePictureBuffer,
- weak_decoder_factory_->GetWeakPtr(),
- picture_buffer_id),
+ FROM_HERE, base::Bind(&VideoDecodeAccelerator::ReusePictureBuffer,
+ weak_vda_, picture_buffer_id),
kReuseDelay);
} else {
decoder_->ReusePictureBuffer(picture_buffer_id);
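The delayed ReusePictureBuffer() post is bound to a weak pointer of the decoder so the task silently no-ops if the decoder is deleted before the delay elapses. A loose standard-library analogy of that safety pattern (std::weak_ptr plus an ad-hoc timer thread; Chromium's base::WeakPtr is single-threaded and runs the task on the owning thread instead):

    #include <chrono>
    #include <memory>
    #include <thread>

    struct Decoder {
      void ReusePictureBuffer(int /*buffer_id*/) { /* requeue the buffer */ }
    };

    // The delayed task only touches the decoder if it still exists when the
    // delay elapses; otherwise lock() fails and the task does nothing.
    void PostDelayedReuse(std::weak_ptr<Decoder> weak_decoder, int buffer_id,
                          std::chrono::milliseconds delay) {
      std::thread([weak_decoder, buffer_id, delay] {
        std::this_thread::sleep_for(delay);
        if (auto decoder = weak_decoder.lock())
          decoder->ReusePictureBuffer(buffer_id);
      }).detach();
    }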
@@ -835,7 +775,7 @@ void GLRenderingVDAClient::FinishInitialization() {
void GLRenderingVDAClient::DeleteDecoder() {
if (decoder_deleted())
return;
- weak_decoder_factory_.reset();
+ weak_vda_ptr_factory_->InvalidateWeakPtrs();
decoder_.reset();
STLClearObject(&encoded_data_);
active_textures_.clear();
@@ -1196,17 +1136,6 @@ class VideoDecodeAcceleratorParamTest
base::Tuple<int, int, int, ResetPoint, ClientState, bool, bool> > {
};
-// Helper so that gtest failures emit a more readable version of the tuple than
-// its byte representation.
-::std::ostream& operator<<(
- ::std::ostream& os,
- const base::Tuple<int, int, int, ResetPoint, ClientState, bool, bool>& t) {
- return os << base::get<0>(t) << ", " << base::get<1>(t) << ", "
- << base::get<2>(t) << ", " << base::get<3>(t) << ", "
- << base::get<4>(t) << ", " << base::get<5>(t) << ", "
- << base::get<6>(t);
-}
-
// Wait for |note| to report a state and if it's not |expected_state| then
// assert |client| has deleted its decoder.
static void AssertWaitForStateOrDeleted(
diff --git a/chromium/content/common/gpu/media/video_encode_accelerator_unittest.cc b/chromium/content/common/gpu/media/video_encode_accelerator_unittest.cc
index 9224e89c72f..09f1c63ed23 100644
--- a/chromium/content/common/gpu/media/video_encode_accelerator_unittest.cc
+++ b/chromium/content/common/gpu/media/video_encode_accelerator_unittest.cc
@@ -55,6 +55,8 @@
// Status has been defined as int in Xlib.h.
#undef Status
#endif // defined(ARCH_CPU_X86_FAMILY)
+#elif defined(OS_MACOSX)
+#include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
#else
#error The VideoEncodeAcceleratorUnittest is not supported on this platform.
#endif
@@ -126,7 +128,11 @@ const unsigned int kLoggedLatencyPercentiles[] = {50, 75, 95};
// of the stream.
// Bitrate is only forced for tests that test bitrate.
const char* g_default_in_filename = "bear_320x192_40frames.yuv";
+#if !defined(OS_MACOSX)
const char* g_default_in_parameters = ":320:192:1:out.h264:200000";
+#else
+const char* g_default_in_parameters = ":320:192:0:out.h264:200000";
+#endif
// Enabled by including a --fake_encoder flag to the command line invoking the
// test.
@@ -623,8 +629,8 @@ class VideoFrameQualityValidator {
private:
void InitializeCB(bool success);
- void DecodeDone(media::VideoDecoder::Status status);
- void FlushDone(media::VideoDecoder::Status status);
+ void DecodeDone(media::DecodeStatus status);
+ void FlushDone(media::DecodeStatus status);
void VerifyOutputFrame(const scoped_refptr<media::VideoFrame>& output_frame);
void Decode();
@@ -670,16 +676,18 @@ void VideoFrameQualityValidator::Initialize(const gfx::Size& coded_size,
if (IsVP8(profile_))
config.Initialize(media::kCodecVP8, media::VP8PROFILE_ANY, kInputFormat,
media::COLOR_SPACE_UNSPECIFIED, coded_size, visible_size,
- natural_size, media::EmptyExtraData(), false);
+ natural_size, media::EmptyExtraData(),
+ media::Unencrypted());
else if (IsH264(profile_))
config.Initialize(media::kCodecH264, media::H264PROFILE_MAIN, kInputFormat,
media::COLOR_SPACE_UNSPECIFIED, coded_size, visible_size,
- natural_size, media::EmptyExtraData(), false);
+ natural_size, media::EmptyExtraData(),
+ media::Unencrypted());
else
LOG_ASSERT(0) << "Invalid profile " << profile_;
decoder_->Initialize(
- config, false, media::SetCdmReadyCB(),
+ config, false, nullptr,
base::Bind(&VideoFrameQualityValidator::InitializeCB,
base::Unretained(this)),
base::Bind(&VideoFrameQualityValidator::VerifyOutputFrame,
@@ -704,9 +712,8 @@ void VideoFrameQualityValidator::AddOriginalFrame(
original_frames_.push(frame);
}
-void VideoFrameQualityValidator::DecodeDone(
- media::VideoDecoder::Status status) {
- if (status == media::VideoDecoder::kOk) {
+void VideoFrameQualityValidator::DecodeDone(media::DecodeStatus status) {
+ if (status == media::DecodeStatus::OK) {
decoder_state_ = INITIALIZED;
Decode();
} else {
@@ -716,7 +723,7 @@ void VideoFrameQualityValidator::DecodeDone(
}
}
-void VideoFrameQualityValidator::FlushDone(media::VideoDecoder::Status status) {
+void VideoFrameQualityValidator::FlushDone(media::DecodeStatus status) {
flush_complete_cb_.Run();
}
@@ -810,6 +817,7 @@ class VEAClient : public VideoEncodeAccelerator::Client {
scoped_ptr<media::VideoEncodeAccelerator> CreateFakeVEA();
scoped_ptr<media::VideoEncodeAccelerator> CreateV4L2VEA();
scoped_ptr<media::VideoEncodeAccelerator> CreateVaapiVEA();
+ scoped_ptr<media::VideoEncodeAccelerator> CreateVTVEA();
void SetState(ClientState new_state);
@@ -1071,6 +1079,14 @@ scoped_ptr<media::VideoEncodeAccelerator> VEAClient::CreateVaapiVEA() {
return encoder;
}
+scoped_ptr<media::VideoEncodeAccelerator> VEAClient::CreateVTVEA() {
+ scoped_ptr<media::VideoEncodeAccelerator> encoder;
+#if defined(OS_MACOSX)
+ encoder.reset(new VTVideoEncodeAccelerator());
+#endif
+ return encoder;
+}
+
void VEAClient::CreateEncoder() {
DCHECK(thread_checker_.CalledOnValidThread());
LOG_ASSERT(!has_encoder());
@@ -1078,7 +1094,8 @@ void VEAClient::CreateEncoder() {
scoped_ptr<media::VideoEncodeAccelerator> encoders[] = {
CreateFakeVEA(),
CreateV4L2VEA(),
- CreateVaapiVEA()
+ CreateVaapiVEA(),
+ CreateVTVEA()
};
DVLOG(1) << "Profile: " << test_stream_->requested_profile
@@ -1649,6 +1666,7 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
encoder_thread.Stop();
}
+#if !defined(OS_MACOSX)
INSTANTIATE_TEST_CASE_P(
SimpleEncode,
VideoEncodeAcceleratorTest,
@@ -1693,6 +1711,26 @@ INSTANTIATE_TEST_CASE_P(
base::MakeTuple(3, false, 0, false, false, false, false, false),
base::MakeTuple(3, false, 0, true, false, false, true, false),
base::MakeTuple(3, false, 0, true, false, true, false, false)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ SimpleEncode,
+ VideoEncodeAcceleratorTest,
+ ::testing::Values(
+ base::MakeTuple(1, true, 0, false, false, false, false, false),
+ base::MakeTuple(1, true, 0, false, false, false, false, true)));
+
+INSTANTIATE_TEST_CASE_P(
+ EncoderPerf,
+ VideoEncodeAcceleratorTest,
+ ::testing::Values(
+ base::MakeTuple(1, false, 0, false, true, false, false, false)));
+
+INSTANTIATE_TEST_CASE_P(
+ MultipleEncoders,
+ VideoEncodeAcceleratorTest,
+ ::testing::Values(
+ base::MakeTuple(3, false, 0, false, false, false, false, false)));
+#endif
// TODO(posciak): more tests:
// - async FeedEncoderWithOutput
diff --git a/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.cc b/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.cc
index e74e6f64d55..1571e834620 100644
--- a/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.cc
+++ b/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.cc
@@ -10,7 +10,6 @@
#include <stddef.h>
#include "base/bind.h"
-#include "base/command_line.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "base/macros.h"
@@ -20,7 +19,6 @@
#include "base/thread_task_runner_handle.h"
#include "base/version.h"
#include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
-#include "content/public/common/content_switches.h"
#include "media/base/limits.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image_io_surface.h"
@@ -46,7 +44,11 @@ static const media::VideoCodecProfile kSupportedProfiles[] = {
media::H264PROFILE_MAIN,
media::H264PROFILE_EXTENDED,
media::H264PROFILE_HIGH,
- media::H264PROFILE_HIGH10PROFILE,
+ // TODO(hubbe): Try to re-enable this again somehow. Currently it seems
+ // that some codecs fail to check the profile during initialization and
+ // then fail on the first frame decode, which currently results in a
+ // pipeline failure.
+ // media::H264PROFILE_HIGH10PROFILE,
media::H264PROFILE_SCALABLEBASELINE,
media::H264PROFILE_SCALABLEHIGH,
media::H264PROFILE_STEREOHIGH,
@@ -72,9 +74,9 @@ static base::ScopedCFTypeRef<CFMutableDictionaryRef>
BuildImageConfig(CMVideoDimensions coded_dimensions) {
base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config;
- // 4:2:2 is used over the native 4:2:0 because only 4:2:2 can be directly
- // bound to a texture by CGLTexImageIOSurface2D().
- int32_t pixel_format = kCVPixelFormatType_422YpCbCr8;
+ // Note that 4:2:0 textures cannot be used directly as RGBA in OpenGL, but are
+ // lower power than 4:2:2 when composited directly by CoreAnimation.
+ int32_t pixel_format = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
#define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i)
base::ScopedCFTypeRef<CFNumberRef> cf_pixel_format(CFINT(pixel_format));
base::ScopedCFTypeRef<CFNumberRef> cf_width(CFINT(coded_dimensions.width));
@@ -86,7 +88,7 @@ BuildImageConfig(CMVideoDimensions coded_dimensions) {
image_config.reset(
CFDictionaryCreateMutable(
kCFAllocatorDefault,
- 4, // capacity
+ 3, // capacity
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks));
if (!image_config.get())
@@ -96,8 +98,6 @@ BuildImageConfig(CMVideoDimensions coded_dimensions) {
cf_pixel_format);
CFDictionarySetValue(image_config, kCVPixelBufferWidthKey, cf_width);
CFDictionarySetValue(image_config, kCVPixelBufferHeightKey, cf_height);
- CFDictionarySetValue(image_config, kCVPixelBufferOpenGLCompatibilityKey,
- kCFBooleanTrue);
return image_config;
}
@@ -175,11 +175,6 @@ static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
// session fails, hardware decoding will be disabled (Initialize() will always
// return false).
static bool InitializeVideoToolboxInternal() {
- if (base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableAcceleratedVideoDecode)) {
- return false;
- }
-
if (!IsVtInitialized()) {
// CoreVideo is also required, but the loader stops after the first path is
// loaded. Instead we rely on the transitive dependency from VideoToolbox to
@@ -255,6 +250,8 @@ static void OutputThunk(
VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {
}
+VTVideoDecodeAccelerator::Task::Task(const Task& other) = default;
+
VTVideoDecodeAccelerator::Task::~Task() {
}
@@ -291,11 +288,10 @@ bool VTVideoDecodeAccelerator::FrameOrder::operator()(
}
VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- const base::Callback<void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>&
- bind_image)
- : make_context_current_(make_context_current),
- bind_image_(bind_image),
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb)
+ : make_context_current_cb_(make_context_current_cb),
+ bind_image_cb_(bind_image_cb),
client_(nullptr),
state_(STATE_DECODING),
format_(nullptr),
@@ -307,7 +303,6 @@ VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()),
decoder_thread_("VTDecoderThread"),
weak_this_factory_(this) {
- DCHECK(!make_context_current_.is_null());
callback_.decompressionOutputCallback = OutputThunk;
callback_.decompressionOutputRefCon = this;
weak_this_ = weak_this_factory_.GetWeakPtr();
@@ -321,6 +316,11 @@ bool VTVideoDecodeAccelerator::Initialize(const Config& config,
Client* client) {
DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ if (make_context_current_cb_.is_null() || bind_image_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
if (config.is_encrypted) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
return false;
@@ -622,23 +622,21 @@ void VTVideoDecodeAccelerator::DecodeTask(
config_changed_ = true;
}
if (config_changed_) {
- if (last_sps_.empty()) {
- config_changed_ = false;
- DLOG(ERROR) << "Invalid configuration; no SPS";
- NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
- return;
- }
- if (last_pps_.empty()) {
- config_changed_ = false;
- DLOG(ERROR) << "Invalid configuration; no PPS";
- NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
- return;
- }
-
// Only reconfigure at IDRs to avoid corruption.
if (frame->is_idr) {
config_changed_ = false;
+ if (last_sps_.empty()) {
+ DLOG(ERROR) << "Invalid configuration; no SPS";
+ NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
+ return;
+ }
+ if (last_pps_.empty()) {
+ DLOG(ERROR) << "Invalid configuration; no PPS";
+ NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
+ return;
+ }
+
// ConfigureDecoder() calls NotifyError() on failure.
if (!ConfigureDecoder())
return;
@@ -825,6 +823,13 @@ void VTVideoDecodeAccelerator::FlushDone(TaskType type) {
void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) {
DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ if (bitstream.id() < 0) {
+ DLOG(ERROR) << "Invalid bitstream, id: " << bitstream.id();
+ if (base::SharedMemory::IsHandleValid(bitstream.handle()))
+ base::SharedMemory::CloseHandle(bitstream.handle());
+ NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
+ return;
+ }
DCHECK_EQ(0u, assigned_bitstream_ids_.count(bitstream.id()));
assigned_bitstream_ids_.insert(bitstream.id());
Frame* frame = new Frame(bitstream.id());
@@ -842,10 +847,12 @@ void VTVideoDecodeAccelerator::AssignPictureBuffers(
DCHECK(!picture_info_map_.count(picture.id()));
assigned_picture_ids_.insert(picture.id());
available_picture_ids_.push_back(picture.id());
+ DCHECK_LE(1u, picture.internal_texture_ids().size());
+ DCHECK_LE(1u, picture.texture_ids().size());
picture_info_map_.insert(std::make_pair(
picture.id(),
- make_scoped_ptr(new PictureInfo(picture.internal_texture_id(),
- picture.texture_id()))));
+ make_scoped_ptr(new PictureInfo(picture.internal_texture_ids()[0],
+ picture.texture_ids()[0]))));
}
// Pictures are not marked as uncleared until after this method returns, and
@@ -859,7 +866,7 @@ void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
DCHECK(gpu_thread_checker_.CalledOnValidThread());
DCHECK(picture_info_map_.count(picture_id));
PictureInfo* picture_info = picture_info_map_.find(picture_id)->second.get();
- DCHECK_EQ(CFGetRetainCount(picture_info->cv_image), 1);
+ DCHECK_EQ(CFGetRetainCount(picture_info->cv_image), 2);
picture_info->cv_image.reset();
picture_info->gl_image->Destroy(false);
picture_info->gl_image = nullptr;
@@ -1002,8 +1009,8 @@ bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) {
// Request new pictures.
picture_size_ = frame.coded_size;
- client_->ProvidePictureBuffers(
- kNumPictureBuffers, coded_size_, GL_TEXTURE_RECTANGLE_ARB);
+ client_->ProvidePictureBuffers(kNumPictureBuffers, 1, coded_size_,
+ GL_TEXTURE_RECTANGLE_ARB);
return false;
}
if (!SendFrame(frame))
@@ -1026,47 +1033,27 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
DCHECK(!picture_info->cv_image);
DCHECK(!picture_info->gl_image);
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
DLOG(ERROR) << "Failed to make GL context current";
NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
return false;
}
- IOSurfaceRef surface = CVPixelBufferGetIOSurface(frame.image.get());
- if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGLCoreProfile)
- glEnable(GL_TEXTURE_RECTANGLE_ARB);
- gfx::ScopedTextureBinder texture_binder(GL_TEXTURE_RECTANGLE_ARB,
- picture_info->service_texture_id);
- CGLContextObj cgl_context =
- static_cast<CGLContextObj>(gfx::GLContext::GetCurrent()->GetHandle());
- CGLError status = CGLTexImageIOSurface2D(
- cgl_context, // ctx
- GL_TEXTURE_RECTANGLE_ARB, // target
- GL_RGB, // internal_format
- frame.coded_size.width(), // width
- frame.coded_size.height(), // height
- GL_YCBCR_422_APPLE, // format
- GL_UNSIGNED_SHORT_8_8_APPLE, // type
- surface, // io_surface
- 0); // plane
- if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGLCoreProfile)
- glDisable(GL_TEXTURE_RECTANGLE_ARB);
- if (status != kCGLNoError) {
- NOTIFY_STATUS("CGLTexImageIOSurface2D()", status, SFT_PLATFORM_ERROR);
- return false;
- }
-
- bool allow_overlay = false;
scoped_refptr<gl::GLImageIOSurface> gl_image(
new gl::GLImageIOSurface(frame.coded_size, GL_BGRA_EXT));
- if (gl_image->Initialize(surface, gfx::GenericSharedMemoryId(),
- gfx::BufferFormat::BGRA_8888)) {
- allow_overlay = true;
- } else {
- gl_image = nullptr;
+ if (!gl_image->InitializeWithCVPixelBuffer(
+ frame.image.get(), gfx::GenericSharedMemoryId(),
+ gfx::BufferFormat::YUV_420_BIPLANAR)) {
+ NOTIFY_STATUS("Failed to initialize GLImageIOSurface", PLATFORM_FAILURE,
+ SFT_PLATFORM_ERROR);
+ }
+
+ if (!bind_image_cb_.Run(picture_info->client_texture_id,
+ GL_TEXTURE_RECTANGLE_ARB, gl_image, false)) {
+ DLOG(ERROR) << "Failed to bind image";
+ NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
+ return false;
}
- bind_image_.Run(picture_info->client_texture_id, GL_TEXTURE_RECTANGLE_ARB,
- gl_image);
  // Assign the new image(s) to the picture info.
picture_info->gl_image = gl_image;
@@ -1080,7 +1067,7 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
// coded size and fix it.
client_->PictureReady(media::Picture(picture_id, frame.bitstream_id,
gfx::Rect(frame.coded_size),
- allow_overlay));
+ true));
return true;
}
@@ -1143,7 +1130,9 @@ void VTVideoDecodeAccelerator::Destroy() {
QueueFlush(TASK_DESTROY);
}
-bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() {
+bool VTVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
return false;
}
diff --git a/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.h b/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.h
index 2d222163823..22fc8b1d6ad 100644
--- a/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.h
+++ b/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.h
@@ -17,6 +17,7 @@
#include "base/message_loop/message_loop.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "content/common/gpu/media/vt_mac.h"
#include "media/filters/h264_parser.h"
#include "media/video/h264_poc.h"
@@ -35,9 +36,9 @@ bool InitializeVideoToolbox();
class VTVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
public:
explicit VTVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- const base::Callback<
- void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>& bind_image);
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb);
+
~VTVideoDecodeAccelerator() override;
// VideoDecodeAccelerator implementation.
@@ -49,7 +50,10 @@ class VTVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
// Called by OutputThunk() when VideoToolbox finishes decoding a frame.
void Output(
@@ -114,6 +118,7 @@ class VTVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
struct Task {
Task(TaskType type);
+ Task(const Task& other);
~Task();
TaskType type;
@@ -189,9 +194,9 @@ class VTVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
//
// GPU thread state.
//
- base::Callback<bool(void)> make_context_current_;
- base::Callback<void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>
- bind_image_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
+ BindGLImageCallback bind_image_cb_;
+
media::VideoDecodeAccelerator::Client* client_;
State state_;
diff --git a/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.cc b/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.cc
new file mode 100644
index 00000000000..71c80ef3a9f
--- /dev/null
+++ b/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.cc
@@ -0,0 +1,552 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
+
+#include "base/thread_task_runner_handle.h"
+#include "media/base/mac/coremedia_glue.h"
+#include "media/base/mac/corevideo_glue.h"
+#include "media/base/mac/video_frame_mac.h"
+
+namespace content {
+
+namespace {
+
+// TODO(emircan): Check if we can find the actual system capabilities via
+// creating VTCompressionSessions with varying requirements.
+// See crbug.com/584784.
+const size_t kBitsPerByte = 8;
+const size_t kDefaultResolutionWidth = 640;
+const size_t kDefaultResolutionHeight = 480;
+const size_t kMaxFrameRateNumerator = 30;
+const size_t kMaxFrameRateDenominator = 1;
+const size_t kMaxResolutionWidth = 4096;
+const size_t kMaxResolutionHeight = 2160;
+const size_t kNumInputBuffers = 3;
+
+} // namespace
+
+struct VTVideoEncodeAccelerator::InProgressFrameEncode {
+ InProgressFrameEncode(base::TimeDelta rtp_timestamp,
+ base::TimeTicks ref_time)
+ : timestamp(rtp_timestamp), reference_time(ref_time) {}
+ const base::TimeDelta timestamp;
+ const base::TimeTicks reference_time;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(InProgressFrameEncode);
+};
+
+struct VTVideoEncodeAccelerator::EncodeOutput {
+ EncodeOutput(VTEncodeInfoFlags info_flags, CMSampleBufferRef sbuf)
+ : info(info_flags), sample_buffer(sbuf, base::scoped_policy::RETAIN) {}
+ const VTEncodeInfoFlags info;
+ const base::ScopedCFTypeRef<CMSampleBufferRef> sample_buffer;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(EncodeOutput);
+};
+
+struct VTVideoEncodeAccelerator::BitstreamBufferRef {
+ BitstreamBufferRef(int32_t id,
+ scoped_ptr<base::SharedMemory> shm,
+ size_t size)
+ : id(id), shm(std::move(shm)), size(size) {}
+ const int32_t id;
+ const scoped_ptr<base::SharedMemory> shm;
+ const size_t size;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BitstreamBufferRef);
+};
+
+VTVideoEncodeAccelerator::VTVideoEncodeAccelerator()
+ : client_task_runner_(base::ThreadTaskRunnerHandle::Get()),
+ encoder_thread_("VTEncoderThread"),
+ encoder_task_weak_factory_(this) {
+ encoder_weak_ptr_ = encoder_task_weak_factory_.GetWeakPtr();
+}
+
+VTVideoEncodeAccelerator::~VTVideoEncodeAccelerator() {
+ DVLOG(3) << __FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ Destroy();
+ DCHECK(!encoder_thread_.IsRunning());
+ DCHECK(!encoder_task_weak_factory_.HasWeakPtrs());
+}
+
+media::VideoEncodeAccelerator::SupportedProfiles
+VTVideoEncodeAccelerator::GetSupportedProfiles() {
+ DVLOG(3) << __FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ SupportedProfiles profiles;
+ // Check if HW encoder is supported initially.
+ videotoolbox_glue_ = VideoToolboxGlue::Get();
+ if (!videotoolbox_glue_) {
+ DLOG(ERROR) << "Failed creating VideoToolbox glue.";
+ return profiles;
+ }
+ const bool rv = CreateCompressionSession(
+ media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0),
+ gfx::Size(kDefaultResolutionWidth, kDefaultResolutionHeight), true);
+ DestroyCompressionSession();
+ if (!rv) {
+ VLOG(1)
+ << "Hardware encode acceleration is not available on this platform.";
+ return profiles;
+ }
+
+ SupportedProfile profile;
+ profile.profile = media::H264PROFILE_BASELINE;
+ profile.max_framerate_numerator = kMaxFrameRateNumerator;
+ profile.max_framerate_denominator = kMaxFrameRateDenominator;
+ profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
+ profiles.push_back(profile);
+ return profiles;
+}
+
+bool VTVideoEncodeAccelerator::Initialize(
+ media::VideoPixelFormat format,
+ const gfx::Size& input_visible_size,
+ media::VideoCodecProfile output_profile,
+ uint32_t initial_bitrate,
+ Client* client) {
+ DVLOG(3) << __FUNCTION__
+ << ": input_format=" << media::VideoPixelFormatToString(format)
+ << ", input_visible_size=" << input_visible_size.ToString()
+ << ", output_profile=" << output_profile
+ << ", initial_bitrate=" << initial_bitrate;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(client);
+
+ if (media::PIXEL_FORMAT_I420 != format) {
+ DLOG(ERROR) << "Input format not supported= "
+ << media::VideoPixelFormatToString(format);
+ return false;
+ }
+ if (media::H264PROFILE_BASELINE != output_profile) {
+ DLOG(ERROR) << "Output profile not supported= "
+ << output_profile;
+ return false;
+ }
+
+ videotoolbox_glue_ = VideoToolboxGlue::Get();
+ if (!videotoolbox_glue_) {
+ DLOG(ERROR) << "Failed creating VideoToolbox glue.";
+ return false;
+ }
+
+ client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
+ client_ = client_ptr_factory_->GetWeakPtr();
+ input_visible_size_ = input_visible_size;
+ frame_rate_ = kMaxFrameRateNumerator / kMaxFrameRateDenominator;
+ target_bitrate_ = initial_bitrate;
+ bitstream_buffer_size_ = input_visible_size.GetArea();
+
+ if (!encoder_thread_.Start()) {
+ DLOG(ERROR) << "Failed spawning encoder thread.";
+ return false;
+ }
+ encoder_thread_task_runner_ = encoder_thread_.task_runner();
+
+ if (!ResetCompressionSession()) {
+ DLOG(ERROR) << "Failed creating compression session.";
+ return false;
+ }
+
+ client_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&Client::RequireBitstreamBuffers, client_, kNumInputBuffers,
+ input_visible_size_, bitstream_buffer_size_));
+ return true;
+}
+
+void VTVideoEncodeAccelerator::Encode(
+ const scoped_refptr<media::VideoFrame>& frame,
+ bool force_keyframe) {
+ DVLOG(3) << __FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ encoder_thread_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::EncodeTask,
+ base::Unretained(this), frame, force_keyframe));
+}
+
+void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer(
+ const media::BitstreamBuffer& buffer) {
+ DVLOG(3) << __FUNCTION__ << ": buffer size=" << buffer.size();
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (buffer.size() < bitstream_buffer_size_) {
+ DLOG(ERROR) << "Output BitstreamBuffer isn't big enough: " << buffer.size()
+ << " vs. " << bitstream_buffer_size_;
+ client_->NotifyError(kInvalidArgumentError);
+ return;
+ }
+
+ scoped_ptr<base::SharedMemory> shm(
+ new base::SharedMemory(buffer.handle(), false));
+ if (!shm->Map(buffer.size())) {
+ DLOG(ERROR) << "Failed mapping shared memory.";
+ client_->NotifyError(kPlatformFailureError);
+ return;
+ }
+
+ scoped_ptr<BitstreamBufferRef> buffer_ref(
+ new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
+
+ encoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask,
+ base::Unretained(this), base::Passed(&buffer_ref)));
+}
+
+void VTVideoEncodeAccelerator::RequestEncodingParametersChange(
+ uint32_t bitrate,
+ uint32_t framerate) {
+ DVLOG(3) << __FUNCTION__ << ": bitrate=" << bitrate
+ << ": framerate=" << framerate;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ encoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VTVideoEncodeAccelerator::RequestEncodingParametersChangeTask,
+ base::Unretained(this), bitrate, framerate));
+}
+
+void VTVideoEncodeAccelerator::Destroy() {
+ DVLOG(3) << __FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Cancel all callbacks.
+ client_ptr_factory_.reset();
+
+ if (encoder_thread_.IsRunning()) {
+ encoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VTVideoEncodeAccelerator::DestroyTask,
+ base::Unretained(this)));
+ encoder_thread_.Stop();
+ } else {
+ DestroyTask();
+ }
+}
+
+void VTVideoEncodeAccelerator::EncodeTask(
+ const scoped_refptr<media::VideoFrame>& frame,
+ bool force_keyframe) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+ DCHECK(compression_session_);
+ DCHECK(frame);
+
+ // TODO(emircan): See if we can eliminate a copy here by using
+ // CVPixelBufferPool for the allocation of incoming VideoFrames.
+ base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer =
+ media::WrapVideoFrameInCVPixelBuffer(*frame);
+ base::ScopedCFTypeRef<CFDictionaryRef> frame_props =
+ media::video_toolbox::DictionaryWithKeyValue(
+ videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
+ force_keyframe ? kCFBooleanTrue : kCFBooleanFalse);
+
+ base::TimeTicks ref_time;
+ if (!frame->metadata()->GetTimeTicks(
+ media::VideoFrameMetadata::REFERENCE_TIME, &ref_time)) {
+ ref_time = base::TimeTicks::Now();
+ }
+ auto timestamp_cm = CoreMediaGlue::CMTimeMake(
+ frame->timestamp().InMicroseconds(), USEC_PER_SEC);
+ // Wrap information we'll need after the frame is encoded in a heap object.
+ // We'll get the pointer back from the VideoToolbox completion callback.
+ scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode(
+ frame->timestamp(), ref_time));
+
+ // We can pass the ownership of |request| to the encode callback if
+ // successful. Otherwise let it fall out of scope.
+ OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame(
+ compression_session_, pixel_buffer, timestamp_cm,
+ CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props,
+ reinterpret_cast<void*>(request.get()), nullptr);
+ if (status != noErr) {
+ DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
+ NotifyError(kPlatformFailureError);
+ } else {
+ CHECK(request.release());
+ }
+}
+
+void VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask(
+ scoped_ptr<BitstreamBufferRef> buffer_ref) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+
+ // If there is already EncodeOutput waiting, copy its output first.
+ if (!encoder_output_queue_.empty()) {
+ scoped_ptr<VTVideoEncodeAccelerator::EncodeOutput> encode_output =
+ std::move(encoder_output_queue_.front());
+ encoder_output_queue_.pop_front();
+ ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
+ return;
+ }
+
+ bitstream_buffer_queue_.push_back(std::move(buffer_ref));
+}
+
+void VTVideoEncodeAccelerator::RequestEncodingParametersChangeTask(
+ uint32_t bitrate,
+ uint32_t framerate) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+
+ frame_rate_ = framerate > 1 ? framerate : 1;
+ target_bitrate_ = bitrate > 1 ? bitrate : 1;
+
+ if (!compression_session_) {
+ NotifyError(kPlatformFailureError);
+ return;
+ }
+
+ media::video_toolbox::SessionPropertySetter session_property_setter(
+ compression_session_, videotoolbox_glue_);
+ // TODO(emircan): See crbug.com/425352.
+ bool rv = session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
+ target_bitrate_);
+ rv &= session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
+ frame_rate_);
+ rv &= session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_DataRateLimits(),
+ media::video_toolbox::ArrayWithIntegerAndFloat(
+ target_bitrate_ / kBitsPerByte, 1.0f));
+ DLOG_IF(ERROR, !rv) << "Couldn't change session encoding parameters.";
+}
+
+void VTVideoEncodeAccelerator::DestroyTask() {
+ DCHECK(thread_checker_.CalledOnValidThread() ||
+ (encoder_thread_.IsRunning() &&
+ encoder_thread_task_runner_->BelongsToCurrentThread()));
+
+ // Cancel all encoder thread callbacks.
+ encoder_task_weak_factory_.InvalidateWeakPtrs();
+
+ // This call blocks until all pending frames are flushed out.
+ DestroyCompressionSession();
+}
+
+void VTVideoEncodeAccelerator::NotifyError(
+ media::VideoEncodeAccelerator::Error error) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+ client_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&Client::NotifyError, client_, error));
+}
+
+// static
+void VTVideoEncodeAccelerator::CompressionCallback(void* encoder_opaque,
+ void* request_opaque,
+ OSStatus status,
+ VTEncodeInfoFlags info,
+ CMSampleBufferRef sbuf) {
+ // This function may be called asynchronously, on a different thread from the
+ // one that calls VTCompressionSessionEncodeFrame.
+ DVLOG(3) << __FUNCTION__;
+
+ auto encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque);
+ DCHECK(encoder);
+
+ // Release InProgressFrameEncode, since we don't have support to return
+ // timestamps at this point.
+ scoped_ptr<InProgressFrameEncode> request(
+ reinterpret_cast<InProgressFrameEncode*>(request_opaque));
+ request.reset();
+
+ // EncodeOutput holds onto CMSampleBufferRef when posting task between
+ // threads.
+ scoped_ptr<EncodeOutput> encode_output(new EncodeOutput(info, sbuf));
+
+ // This method is NOT called on |encoder_thread_|, so we still need to
+ // post a task back to it to do work.
+ encoder->encoder_thread_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::CompressionCallbackTask,
+ encoder->encoder_weak_ptr_, status,
+ base::Passed(&encode_output)));
+}
+
+void VTVideoEncodeAccelerator::CompressionCallbackTask(
+ OSStatus status,
+ scoped_ptr<EncodeOutput> encode_output) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+
+ if (status != noErr) {
+ DLOG(ERROR) << " encode failed: " << status;
+ NotifyError(kPlatformFailureError);
+ return;
+ }
+
+ // If there isn't any BitstreamBuffer to copy into, add it to a queue for
+ // later use.
+ if (bitstream_buffer_queue_.empty()) {
+ encoder_output_queue_.push_back(std::move(encode_output));
+ return;
+ }
+
+ scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref =
+ std::move(bitstream_buffer_queue_.front());
+ bitstream_buffer_queue_.pop_front();
+ ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
+}
+
+void VTVideoEncodeAccelerator::ReturnBitstreamBuffer(
+ scoped_ptr<EncodeOutput> encode_output,
+ scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref) {
+ DVLOG(3) << __FUNCTION__;
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+
+ if (encode_output->info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped) {
+ DVLOG(2) << " frame dropped";
+ client_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
+ buffer_ref->id, 0, false));
+ return;
+ }
+
+ auto sample_attachments = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
+ CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(
+ encode_output->sample_buffer.get(), true),
+ 0));
+ const bool keyframe =
+ !CFDictionaryContainsKey(sample_attachments,
+ CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
+
+ size_t used_buffer_size = 0;
+ const bool copy_rv = media::video_toolbox::CopySampleBufferToAnnexBBuffer(
+ encode_output->sample_buffer.get(), keyframe, buffer_ref->size,
+ reinterpret_cast<char*>(buffer_ref->shm->memory()), &used_buffer_size);
+ if (!copy_rv) {
+ DLOG(ERROR) << "Cannot copy output from SampleBuffer to AnnexBBuffer.";
+ used_buffer_size = 0;
+ }
+
+ client_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
+ buffer_ref->id, used_buffer_size, keyframe));
+}
+
+bool VTVideoEncodeAccelerator::ResetCompressionSession() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ DestroyCompressionSession();
+
+ CFTypeRef attributes_keys[] = {
+ kCVPixelBufferOpenGLCompatibilityKey,
+ kCVPixelBufferIOSurfacePropertiesKey,
+ kCVPixelBufferPixelFormatTypeKey
+ };
+ const int format[] = {
+ CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
+ CFTypeRef attributes_values[] = {
+ kCFBooleanTrue,
+ media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0)
+ .release(),
+ media::video_toolbox::ArrayWithIntegers(format, arraysize(format))
+ .release()};
+ const base::ScopedCFTypeRef<CFDictionaryRef> attributes =
+ media::video_toolbox::DictionaryWithKeysAndValues(
+ attributes_keys, attributes_values, arraysize(attributes_keys));
+ for (auto& v : attributes_values)
+ CFRelease(v);
+
+ bool session_rv =
+ CreateCompressionSession(attributes, input_visible_size_, false);
+ if (!session_rv) {
+ DestroyCompressionSession();
+ return false;
+ }
+
+ const bool configure_rv = ConfigureCompressionSession();
+ if (configure_rv)
+ RequestEncodingParametersChange(target_bitrate_, frame_rate_);
+ return configure_rv;
+}
+
+bool VTVideoEncodeAccelerator::CreateCompressionSession(
+ base::ScopedCFTypeRef<CFDictionaryRef> attributes,
+ const gfx::Size& input_size,
+ bool require_hw_encoding) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ std::vector<CFTypeRef> encoder_keys;
+ std::vector<CFTypeRef> encoder_values;
+ if (require_hw_encoding) {
+ encoder_keys.push_back(videotoolbox_glue_
+ ->kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder());
+ encoder_values.push_back(kCFBooleanTrue);
+ } else {
+ encoder_keys.push_back(videotoolbox_glue_
+ ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder());
+ encoder_values.push_back(kCFBooleanTrue);
+ }
+ base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec =
+ media::video_toolbox::DictionaryWithKeysAndValues(
+ encoder_keys.data(), encoder_values.data(), encoder_keys.size());
+
+ // Create the compression session.
+ // Note that the encoder object is given to the compression session as the
+ // callback context using a raw pointer. The C API does not allow us to use a
+ // smart pointer, nor is this encoder ref counted. However, this is still
+ // safe, because 1) we own the compression session and 2) we tear it down
+ // safely. When destructing the encoder, the compression session is flushed
+ // and invalidated. Internally, VideoToolbox will join all of its threads
+ // before returning to the client. Therefore, when control returns to us, we
+ // are guaranteed that the output callback will not execute again.
+ OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate(
+ kCFAllocatorDefault,
+ input_size.width(),
+ input_size.height(),
+ CoreMediaGlue::kCMVideoCodecType_H264,
+ encoder_spec,
+ attributes,
+ nullptr /* compressedDataAllocator */,
+ &VTVideoEncodeAccelerator::CompressionCallback,
+ reinterpret_cast<void*>(this),
+ compression_session_.InitializeInto());
+ if (status != noErr) {
+ DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
+ return false;
+ }
+ DVLOG(3) << " VTCompressionSession created with HW encode: "
+ << require_hw_encoding << ", input size=" << input_size.ToString();
+ return true;
+}
+
+bool VTVideoEncodeAccelerator::ConfigureCompressionSession() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(compression_session_);
+
+ media::video_toolbox::SessionPropertySetter session_property_setter(
+ compression_session_, videotoolbox_glue_);
+ bool rv = true;
+ rv &= session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
+ videotoolbox_glue_->kVTProfileLevel_H264_Baseline_AutoLevel());
+ rv &= session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), true);
+ rv &= session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
+ false);
+ DLOG_IF(ERROR, !rv) << " Setting session property failed.";
+ return rv;
+}
+
+void VTVideoEncodeAccelerator::DestroyCompressionSession() {
+ DCHECK(thread_checker_.CalledOnValidThread() ||
+ (encoder_thread_.IsRunning() &&
+ encoder_thread_task_runner_->BelongsToCurrentThread()));
+
+ if (compression_session_) {
+ videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_);
+ compression_session_.reset();
+ }
+}
+
+} // namespace content
diff --git a/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.h b/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.h
new file mode 100644
index 00000000000..aa4b37ed22d
--- /dev/null
+++ b/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.h
@@ -0,0 +1,142 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_
+#define CONTENT_COMMON_GPU_MEDIA_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_
+
+#include "base/mac/scoped_cftyperef.h"
+#include "content/common/content_export.h"
+#include "media/base/mac/videotoolbox_glue.h"
+#include "media/base/mac/videotoolbox_helpers.h"
+#include "media/video/video_encode_accelerator.h"
+
+namespace content {
+
+// VideoToolbox.framework implementation of the VideoEncodeAccelerator
+// interface for MacOSX. VideoToolbox makes no guarantees that it is thread
+// safe, so this object is pinned to the thread on which it is constructed.
+class CONTENT_EXPORT VTVideoEncodeAccelerator
+ : public media::VideoEncodeAccelerator {
+ public:
+ VTVideoEncodeAccelerator();
+ ~VTVideoEncodeAccelerator() override;
+
+ // media::VideoEncodeAccelerator implementation.
+ media::VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles()
+ override;
+ bool Initialize(media::VideoPixelFormat format,
+ const gfx::Size& input_visible_size,
+ media::VideoCodecProfile output_profile,
+ uint32_t initial_bitrate,
+ Client* client) override;
+ void Encode(const scoped_refptr<media::VideoFrame>& frame,
+ bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(const media::BitstreamBuffer& buffer) override;
+ void RequestEncodingParametersChange(uint32_t bitrate,
+ uint32_t framerate) override;
+ void Destroy() override;
+
+ private:
+ using CMSampleBufferRef = CoreMediaGlue::CMSampleBufferRef;
+ using VTCompressionSessionRef = VideoToolboxGlue::VTCompressionSessionRef;
+ using VTEncodeInfoFlags = VideoToolboxGlue::VTEncodeInfoFlags;
+
+ // Holds the associated data of a video frame being processed.
+ struct InProgressFrameEncode;
+
+ // Holds output buffers coming from the encoder.
+ struct EncodeOutput;
+
+ // Holds output buffers coming from the client ready to be filled.
+ struct BitstreamBufferRef;
+
+ // Encoding tasks to be run on |encoder_thread_|.
+ void EncodeTask(const scoped_refptr<media::VideoFrame>& frame,
+ bool force_keyframe);
+ void UseOutputBitstreamBufferTask(scoped_ptr<BitstreamBufferRef> buffer_ref);
+ void RequestEncodingParametersChangeTask(uint32_t bitrate,
+ uint32_t framerate);
+ void DestroyTask();
+
+ // Helper function to notify the client of an error on |client_task_runner_|.
+ void NotifyError(media::VideoEncodeAccelerator::Error error);
+
+ // Compression session callback function to handle compressed frames.
+ static void CompressionCallback(void* encoder_opaque,
+ void* request_opaque,
+ OSStatus status,
+ VTEncodeInfoFlags info,
+ CMSampleBufferRef sbuf);
+ void CompressionCallbackTask(OSStatus status,
+ scoped_ptr<EncodeOutput> encode_output);
+
+ // Copy CMSampleBuffer into a BitstreamBuffer and return it to the |client_|.
+ void ReturnBitstreamBuffer(
+ scoped_ptr<EncodeOutput> encode_output,
+ scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref);
+
+ // Reset the encoder's compression session by destroying the existing one
+ // using DestroyCompressionSession() and creating a new one. The new session
+ // is configured using ConfigureCompressionSession().
+ bool ResetCompressionSession();
+
+ // Create a compression session, with HW encoder enforced if
+ // |require_hw_encoding| is set.
+ bool CreateCompressionSession(
+ base::ScopedCFTypeRef<CFDictionaryRef> attributes,
+ const gfx::Size& input_size,
+ bool require_hw_encoding);
+
+ // Configure the current compression session using current encoder settings.
+ bool ConfigureCompressionSession();
+
+ // Destroy the current compression session if any. Blocks until all pending
+ // frames have been flushed out (similar to EmitFrames without doing any
+ // encoding work).
+ void DestroyCompressionSession();
+
+ // VideoToolboxGlue provides access to VideoToolbox at runtime.
+ const VideoToolboxGlue* videotoolbox_glue_;
+ base::ScopedCFTypeRef<VTCompressionSessionRef> compression_session_;
+
+ gfx::Size input_visible_size_;
+ size_t bitstream_buffer_size_;
+ int32_t frame_rate_;
+ int32_t target_bitrate_;
+
+ // Bitstream buffers ready to be used to return encoded output as a FIFO.
+ std::deque<scoped_ptr<BitstreamBufferRef>> bitstream_buffer_queue_;
+
+ // EncodeOutput needs to be copied into a BitstreamBufferRef as a FIFO.
+ std::deque<scoped_ptr<EncodeOutput>> encoder_output_queue_;
+
+ // Our original calling task runner for the child thread.
+ const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner_;
+
+ // To expose client callbacks from VideoEncodeAccelerator.
+ // NOTE: all calls to this object *MUST* be executed on
+ // |client_task_runner_|.
+ base::WeakPtr<Client> client_;
+ scoped_ptr<base::WeakPtrFactory<Client> > client_ptr_factory_;
+
+ // Thread checker to enforce that this object is used on a specific thread.
+ // It is pinned on |client_task_runner_| thread.
+ base::ThreadChecker thread_checker_;
+
+ // This thread services tasks posted from the VEA API entry points by the
+ // GPU child thread and CompressionCallback() posted from device thread.
+ base::Thread encoder_thread_;
+ scoped_refptr<base::SingleThreadTaskRunner> encoder_thread_task_runner_;
+
+ // Declared last to ensure that all weak pointers are invalidated before
+ // other destructors run.
+ base::WeakPtr<VTVideoEncodeAccelerator> encoder_weak_ptr_;
+ base::WeakPtrFactory<VTVideoEncodeAccelerator> encoder_task_weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(VTVideoEncodeAccelerator);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_
diff --git a/chromium/content/common/gpu/stream_texture_android.cc b/chromium/content/common/gpu/stream_texture_android.cc
deleted file mode 100644
index 17d841d8eb1..00000000000
--- a/chromium/content/common/gpu/stream_texture_android.cc
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/stream_texture_android.h"
-
-#include <string.h>
-
-#include "base/bind.h"
-#include "base/strings/stringize_macros.h"
-#include "content/common/android/surface_texture_peer.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "gpu/command_buffer/service/context_group.h"
-#include "gpu/command_buffer/service/context_state.h"
-#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
-#include "gpu/command_buffer/service/texture_manager.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_helper.h"
-#include "ui/gl/scoped_binders.h"
-#include "ui/gl/scoped_make_current.h"
-
-namespace content {
-
-using gpu::gles2::ContextGroup;
-using gpu::gles2::GLES2Decoder;
-using gpu::gles2::TextureManager;
-using gpu::gles2::TextureRef;
-
-// static
-bool StreamTexture::Create(GpuCommandBufferStub* owner_stub,
- uint32_t client_texture_id,
- int stream_id) {
- GLES2Decoder* decoder = owner_stub->decoder();
- TextureManager* texture_manager =
- decoder->GetContextGroup()->texture_manager();
- TextureRef* texture = texture_manager->GetTexture(client_texture_id);
-
- if (texture && (!texture->texture()->target() ||
- texture->texture()->target() == GL_TEXTURE_EXTERNAL_OES)) {
-
- // TODO: Ideally a valid image id was returned to the client so that
- // it could then call glBindTexImage2D() for doing the following.
- scoped_refptr<gl::GLImage> gl_image(
- new StreamTexture(owner_stub, stream_id, texture->service_id()));
- gfx::Size size = gl_image->GetSize();
- texture_manager->SetTarget(texture, GL_TEXTURE_EXTERNAL_OES);
- texture_manager->SetLevelInfo(texture, GL_TEXTURE_EXTERNAL_OES, 0, GL_RGBA,
- size.width(), size.height(), 1, 0, GL_RGBA,
- GL_UNSIGNED_BYTE, gfx::Rect(size));
- texture_manager->SetLevelImage(texture, GL_TEXTURE_EXTERNAL_OES, 0,
- gl_image.get(),
- gpu::gles2::Texture::UNBOUND);
- return true;
- }
-
- return false;
-}
-
-StreamTexture::StreamTexture(GpuCommandBufferStub* owner_stub,
- int32_t route_id,
- uint32_t texture_id)
- : surface_texture_(gfx::SurfaceTexture::Create(texture_id)),
- size_(0, 0),
- has_valid_frame_(false),
- has_pending_frame_(false),
- owner_stub_(owner_stub),
- route_id_(route_id),
- has_listener_(false),
- texture_id_(texture_id),
- framebuffer_(0),
- vertex_shader_(0),
- fragment_shader_(0),
- program_(0),
- vertex_buffer_(0),
- u_xform_location_(-1),
- weak_factory_(this) {
- owner_stub->AddDestructionObserver(this);
- memset(current_matrix_, 0, sizeof(current_matrix_));
- owner_stub->channel()->AddRoute(route_id, this);
- surface_texture_->SetFrameAvailableCallback(base::Bind(
- &StreamTexture::OnFrameAvailable, weak_factory_.GetWeakPtr()));
-}
-
-StreamTexture::~StreamTexture() {
- if (owner_stub_) {
- owner_stub_->RemoveDestructionObserver(this);
- owner_stub_->channel()->RemoveRoute(route_id_);
- }
-}
-
-void StreamTexture::OnWillDestroyStub() {
- owner_stub_->RemoveDestructionObserver(this);
- owner_stub_->channel()->RemoveRoute(route_id_);
-
- if (framebuffer_) {
- scoped_ptr<ui::ScopedMakeCurrent> scoped_make_current(MakeStubCurrent());
-
- glDeleteProgram(program_);
- glDeleteShader(vertex_shader_);
- glDeleteShader(fragment_shader_);
- glDeleteBuffersARB(1, &vertex_buffer_);
- glDeleteFramebuffersEXT(1, &framebuffer_);
- program_ = 0;
- vertex_shader_ = 0;
- fragment_shader_ = 0;
- vertex_buffer_ = 0;
- framebuffer_ = 0;
- u_xform_location_ = -1;
- }
-
- owner_stub_ = NULL;
-
- // If the owner goes away, there is no need to keep the SurfaceTexture around.
- // The GL texture will keep working regardless with the currently bound frame.
- surface_texture_ = NULL;
-}
-
-void StreamTexture::Destroy(bool have_context) {
- NOTREACHED();
-}
-
-scoped_ptr<ui::ScopedMakeCurrent> StreamTexture::MakeStubCurrent() {
- scoped_ptr<ui::ScopedMakeCurrent> scoped_make_current;
- bool needs_make_current =
- !owner_stub_->decoder()->GetGLContext()->IsCurrent(NULL);
- // On Android we should not have to perform a real context switch here when
- // using virtual contexts.
- DCHECK(!needs_make_current ||
- !owner_stub_->decoder()
- ->GetContextGroup()
- ->feature_info()
- ->workarounds()
- .use_virtualized_gl_contexts);
- if (needs_make_current) {
- scoped_make_current.reset(new ui::ScopedMakeCurrent(
- owner_stub_->decoder()->GetGLContext(), owner_stub_->surface()));
- }
- return scoped_make_current;
-}
-
-void StreamTexture::UpdateTexImage() {
- DCHECK(surface_texture_.get());
- DCHECK(owner_stub_);
-
- if (!has_pending_frame_) return;
-
- scoped_ptr<ui::ScopedMakeCurrent> scoped_make_current(MakeStubCurrent());
-
- surface_texture_->UpdateTexImage();
-
- has_valid_frame_ = true;
- has_pending_frame_ = false;
-
- float mtx[16];
- surface_texture_->GetTransformMatrix(mtx);
-
- if (memcmp(current_matrix_, mtx, sizeof(mtx)) != 0) {
- memcpy(current_matrix_, mtx, sizeof(mtx));
-
- if (has_listener_) {
- GpuStreamTextureMsg_MatrixChanged_Params params;
- memcpy(&params.m00, mtx, sizeof(mtx));
- owner_stub_->channel()->Send(
- new GpuStreamTextureMsg_MatrixChanged(route_id_, params));
- }
- }
-
- if (scoped_make_current.get()) {
- // UpdateTexImage() implies glBindTexture().
- // The cmd decoder takes care of restoring the binding for this GLImage as
- // far as the current context is concerned, but if we temporarily change
- // it, we have to keep the state intact in *that* context also.
- const gpu::gles2::ContextState* state =
- owner_stub_->decoder()->GetContextState();
- const gpu::gles2::TextureUnit& active_unit =
- state->texture_units[state->active_texture_unit];
- glBindTexture(GL_TEXTURE_EXTERNAL_OES,
- active_unit.bound_texture_external_oes.get()
- ? active_unit.bound_texture_external_oes->service_id()
- : 0);
- }
-}
-
-bool StreamTexture::CopyTexImage(unsigned target) {
- if (target == GL_TEXTURE_2D) {
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size_.width(), size_.height(), 0,
- GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
- return CopyTexSubImage(GL_TEXTURE_2D, gfx::Point(), gfx::Rect(size_));
- }
-
- if (target != GL_TEXTURE_EXTERNAL_OES)
- return false;
-
- if (!owner_stub_ || !surface_texture_.get())
- return true;
-
- GLint texture_id;
- glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &texture_id);
- DCHECK(texture_id);
-
- // The following code only works if we're being asked to copy into
- // |texture_id_|. Copying into a different texture is not supported.
- if (static_cast<unsigned>(texture_id) != texture_id_)
- return false;
-
- UpdateTexImage();
-
- TextureManager* texture_manager =
- owner_stub_->decoder()->GetContextGroup()->texture_manager();
- gpu::gles2::Texture* texture =
- texture_manager->GetTextureForServiceId(texture_id_);
- if (texture) {
- // By setting image state to UNBOUND instead of COPIED we ensure that
- // CopyTexImage() is called each time the surface texture is used for
- // drawing.
- texture->SetLevelImage(GL_TEXTURE_EXTERNAL_OES, 0, this,
- gpu::gles2::Texture::UNBOUND);
- }
-
- return true;
-}
-
-void StreamTexture::OnFrameAvailable() {
- has_pending_frame_ = true;
- if (has_listener_ && owner_stub_) {
- owner_stub_->channel()->Send(
- new GpuStreamTextureMsg_FrameAvailable(route_id_));
- }
-}
-
-gfx::Size StreamTexture::GetSize() {
- return size_;
-}
-
-unsigned StreamTexture::GetInternalFormat() {
- return GL_RGBA;
-}
-
-bool StreamTexture::OnMessageReceived(const IPC::Message& message) {
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(StreamTexture, message)
- IPC_MESSAGE_HANDLER(GpuStreamTextureMsg_StartListening, OnStartListening)
- IPC_MESSAGE_HANDLER(GpuStreamTextureMsg_EstablishPeer, OnEstablishPeer)
- IPC_MESSAGE_HANDLER(GpuStreamTextureMsg_SetSize, OnSetSize)
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
-
- DCHECK(handled);
- return handled;
-}
-
-void StreamTexture::OnStartListening() {
- DCHECK(!has_listener_);
- has_listener_ = true;
-}
-
-void StreamTexture::OnEstablishPeer(int32_t primary_id, int32_t secondary_id) {
- if (!owner_stub_)
- return;
-
- base::ProcessHandle process = owner_stub_->channel()->GetClientPID();
-
- SurfaceTexturePeer::GetInstance()->EstablishSurfaceTexturePeer(
- process, surface_texture_, primary_id, secondary_id);
-}
-
-bool StreamTexture::BindTexImage(unsigned target) {
- NOTREACHED();
- return false;
-}
-
-void StreamTexture::ReleaseTexImage(unsigned target) {
- NOTREACHED();
-}
-
-bool StreamTexture::CopyTexSubImage(unsigned target,
- const gfx::Point& offset,
- const gfx::Rect& rect) {
- if (target != GL_TEXTURE_2D)
- return false;
-
- if (!owner_stub_ || !surface_texture_.get())
- return true;
-
- if (!offset.IsOrigin()) {
- LOG(ERROR) << "Non-origin offset is not supported";
- return false;
- }
-
- if (rect != gfx::Rect(size_)) {
- LOG(ERROR) << "Sub-rectangle is not supported";
- return false;
- }
-
- GLint target_texture = 0;
- glGetIntegerv(GL_TEXTURE_BINDING_2D, &target_texture);
- DCHECK(target_texture);
-
- UpdateTexImage();
-
- if (!framebuffer_) {
- glGenFramebuffersEXT(1, &framebuffer_);
-
- // This vertex shader introduces a y flip before applying the stream
- // texture matrix. This is required because the stream texture matrix
- // Android provides is intended to be used in a y-up coordinate system,
- // whereas Chromium expects y-down.
-
- // clang-format off
- const char kVertexShader[] = STRINGIZE(
- attribute vec2 a_position;
- varying vec2 v_texCoord;
- uniform mat4 u_xform;
- void main() {
- gl_Position = vec4(a_position.x, a_position.y, 0.0, 1.0);
- vec2 uv_untransformed = a_position * vec2(0.5, -0.5) + vec2(0.5, 0.5);
- v_texCoord = (u_xform * vec4(uv_untransformed, 0.0, 1.0)).xy;
- }
- );
- const char kFragmentShader[] =
- "#extension GL_OES_EGL_image_external : require\n" STRINGIZE(
- precision mediump float;
- uniform samplerExternalOES a_texture;
- varying vec2 v_texCoord;
- void main() {
- gl_FragColor = texture2D(a_texture, v_texCoord);
- }
- );
- // clang-format on
-
- vertex_buffer_ = gfx::GLHelper::SetupQuadVertexBuffer();
- vertex_shader_ = gfx::GLHelper::LoadShader(GL_VERTEX_SHADER, kVertexShader);
- fragment_shader_ =
- gfx::GLHelper::LoadShader(GL_FRAGMENT_SHADER, kFragmentShader);
- program_ = gfx::GLHelper::SetupProgram(vertex_shader_, fragment_shader_);
- gfx::ScopedUseProgram use_program(program_);
- int sampler_location = glGetUniformLocation(program_, "a_texture");
- DCHECK_NE(-1, sampler_location);
- glUniform1i(sampler_location, 0);
- u_xform_location_ = glGetUniformLocation(program_, "u_xform");
- DCHECK_NE(-1, u_xform_location_);
- }
-
- gfx::ScopedActiveTexture active_texture(GL_TEXTURE0);
- // UpdateTexImage() call below will bind the surface texture to
- // TEXTURE_EXTERNAL_OES. This scoped texture binder will restore the current
- // binding before this function returns.
- gfx::ScopedTextureBinder texture_binder(GL_TEXTURE_EXTERNAL_OES, texture_id_);
-
- {
- gfx::ScopedFrameBufferBinder framebuffer_binder(framebuffer_);
- gfx::ScopedViewport viewport(0, 0, size_.width(), size_.height());
- glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- GL_TEXTURE_2D, target_texture, 0);
- DCHECK_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE),
- glCheckFramebufferStatusEXT(GL_FRAMEBUFFER));
- gfx::ScopedUseProgram use_program(program_);
-
- glUniformMatrix4fv(u_xform_location_, 1, false, current_matrix_);
- gfx::GLHelper::DrawQuad(vertex_buffer_);
-
- // Detach the output texture from the fbo.
- glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- GL_TEXTURE_2D, 0, 0);
- }
- return true;
-}
-
-bool StreamTexture::ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
- int z_order,
- gfx::OverlayTransform transform,
- const gfx::Rect& bounds_rect,
- const gfx::RectF& crop_rect) {
- NOTREACHED();
- return false;
-}
-
-void StreamTexture::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
- uint64_t process_tracing_id,
- const std::string& dump_name) {
- // TODO(ericrk): Add OnMemoryDump for GLImages. crbug.com/514914
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/stream_texture_android.h b/chromium/content/common/gpu/stream_texture_android.h
deleted file mode 100644
index e19fc1b321a..00000000000
--- a/chromium/content/common/gpu/stream_texture_android.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_STREAM_TEXTURE_ANDROID_H_
-#define CONTENT_COMMON_GPU_STREAM_TEXTURE_ANDROID_H_
-
-#include <stdint.h>
-
-#include "base/macros.h"
-#include "base/memory/weak_ptr.h"
-#include "content/common/gpu/gpu_command_buffer_stub.h"
-#include "ipc/ipc_listener.h"
-#include "ui/gl/android/surface_texture.h"
-#include "ui/gl/gl_image.h"
-
-namespace ui {
-class ScopedMakeCurrent;
-}
-
-namespace gfx {
-class Size;
-}
-
-namespace content {
-
-class StreamTexture : public gl::GLImage,
- public IPC::Listener,
- public GpuCommandBufferStub::DestructionObserver {
- public:
- static bool Create(GpuCommandBufferStub* owner_stub,
- uint32_t client_texture_id,
- int stream_id);
-
- private:
- StreamTexture(GpuCommandBufferStub* owner_stub,
- int32_t route_id,
- uint32_t texture_id);
- ~StreamTexture() override;
-
- // gl::GLImage implementation:
- void Destroy(bool have_context) override;
- gfx::Size GetSize() override;
- unsigned GetInternalFormat() override;
- bool BindTexImage(unsigned target) override;
- void ReleaseTexImage(unsigned target) override;
- bool CopyTexImage(unsigned target) override;
- bool CopyTexSubImage(unsigned target,
- const gfx::Point& offset,
- const gfx::Rect& rect) override;
- bool ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
- int z_order,
- gfx::OverlayTransform transform,
- const gfx::Rect& bounds_rect,
- const gfx::RectF& crop_rect) override;
- void OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
- uint64_t process_tracing_id,
- const std::string& dump_name) override;
-
- // GpuCommandBufferStub::DestructionObserver implementation.
- void OnWillDestroyStub() override;
-
- scoped_ptr<ui::ScopedMakeCurrent> MakeStubCurrent();
-
- void UpdateTexImage();
-
- // Called when a new frame is available for the SurfaceTexture.
- void OnFrameAvailable();
-
- // IPC::Listener implementation:
- bool OnMessageReceived(const IPC::Message& message) override;
-
- // IPC message handlers:
- void OnStartListening();
- void OnEstablishPeer(int32_t primary_id, int32_t secondary_id);
- void OnSetSize(const gfx::Size& size) { size_ = size; }
-
- scoped_refptr<gfx::SurfaceTexture> surface_texture_;
-
- // Current transform matrix of the surface texture.
- float current_matrix_[16];
-
- // Current size of the surface texture.
- gfx::Size size_;
-
- // Whether we ever bound a valid frame.
- bool has_valid_frame_;
-
- // Whether a new frame is available that we should update to.
- bool has_pending_frame_;
-
- GpuCommandBufferStub* owner_stub_;
- int32_t route_id_;
- bool has_listener_;
- uint32_t texture_id_;
-
- unsigned framebuffer_;
- unsigned vertex_shader_;
- unsigned fragment_shader_;
- unsigned program_;
- unsigned vertex_buffer_;
- int u_xform_location_;
-
- base::WeakPtrFactory<StreamTexture> weak_factory_;
- DISALLOW_COPY_AND_ASSIGN(StreamTexture);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_STREAM_TEXTURE_ANDROID_H_
diff --git a/chromium/content/common/gpu/x_util.h b/chromium/content/common/gpu/x_util.h
deleted file mode 100644
index 99687566356..00000000000
--- a/chromium/content/common/gpu/x_util.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_X_UTIL_H_
-#define CONTENT_COMMON_GPU_X_UTIL_H_
-
-// Some X-Windows specific stuff. This can be included on any platform, and will
-// be a NOP on non-Linux ones.
-
-#include "build/build_config.h"
-#include "content/common/gpu/gpu_config.h"
-
-#if defined(USE_X11)
-
-namespace content {
-
-// Forward declares ------------------------------------------------------------
-//
-// X Windows headers do a lot of evil stuff, like "#define Status int" which
-// will cause many problems when combined with our other header files (like
-// ones that define a class local enum called "Status."
-//
-// These definitions are not Kosher, but allow us to remove this dependency and
-// actually compile X at all.
-
-typedef unsigned long XID;
-
-extern "C" {
-
-typedef struct _XDisplay Display;
-typedef struct __GLXcontextRec *GLXContext;
-
-} // extern "C"
-
-} // namespace content
-
-#endif // USE_X11
-
-#endif // CONTENT_COMMON_GPU_X_UTIL_H_