Diffstat (limited to 'chromium/content/common/gpu/media')
-rw-r--r-- chromium/content/common/gpu/media/OWNERS | 13
-rw-r--r-- chromium/content/common/gpu/media/android_copying_backing_strategy.cc | 113
-rw-r--r-- chromium/content/common/gpu/media/android_copying_backing_strategy.h | 10
-rw-r--r-- chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc | 329
-rw-r--r-- chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.h | 40
-rw-r--r-- chromium/content/common/gpu/media/android_video_decode_accelerator.cc | 897
-rw-r--r-- chromium/content/common/gpu/media/android_video_decode_accelerator.h | 206
-rw-r--r-- chromium/content/common/gpu/media/android_video_decode_accelerator_unittest.cc | 23
-rw-r--r-- chromium/content/common/gpu/media/android_video_encode_accelerator.cc | 86
-rw-r--r-- chromium/content/common/gpu/media/android_video_encode_accelerator.h | 9
-rw-r--r-- chromium/content/common/gpu/media/avda_codec_image.cc | 154
-rw-r--r-- chromium/content/common/gpu/media/avda_codec_image.h | 60
-rw-r--r-- chromium/content/common/gpu/media/avda_shared_state.cc | 14
-rw-r--r-- chromium/content/common/gpu/media/avda_shared_state.h | 11
-rw-r--r-- chromium/content/common/gpu/media/avda_state_provider.h | 3
-rw-r--r-- chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.cc | 1017
-rw-r--r-- chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.h | 118
-rw-r--r-- chromium/content/common/gpu/media/fake_video_decode_accelerator.cc | 35
-rw-r--r-- chromium/content/common/gpu/media/fake_video_decode_accelerator.h | 14
-rw-r--r-- chromium/content/common/gpu/media/gpu_arc_video_service.cc | 92
-rw-r--r-- chromium/content/common/gpu/media/gpu_arc_video_service.h | 68
-rw-r--r-- chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc | 56
-rw-r--r-- chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.h | 12
-rw-r--r-- chromium/content/common/gpu/media/gpu_video_accelerator_util.cc | 155
-rw-r--r-- chromium/content/common/gpu/media/gpu_video_accelerator_util.h | 63
-rw-r--r-- chromium/content/common/gpu/media/gpu_video_decode_accelerator.cc | 446
-rw-r--r-- chromium/content/common/gpu/media/gpu_video_decode_accelerator.h | 71
-rw-r--r-- chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc | 242
-rw-r--r-- chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h | 123
-rw-r--r-- chromium/content/common/gpu/media/gpu_video_decode_accelerator_helpers.h | 59
-rw-r--r-- chromium/content/common/gpu/media/gpu_video_encode_accelerator.cc | 207
-rw-r--r-- chromium/content/common/gpu/media/gpu_video_encode_accelerator.h | 41
-rw-r--r-- chromium/content/common/gpu/media/media_channel.cc | 145
-rw-r--r-- chromium/content/common/gpu/media/media_channel.h | 57
-rw-r--r-- chromium/content/common/gpu/media/media_service.cc | 40
-rw-r--r-- chromium/content/common/gpu/media/media_service.h | 42
-rw-r--r-- chromium/content/common/gpu/media/rendering_helper.cc | 14
-rw-r--r-- chromium/content/common/gpu/media/rendering_helper.h | 7
-rw-r--r-- chromium/content/common/gpu/media/shared_memory_region.cc | 42
-rw-r--r-- chromium/content/common/gpu/media/shared_memory_region.h | 57
-rw-r--r-- chromium/content/common/gpu/media/v4l2_image_processor.cc | 30
-rw-r--r-- chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc | 45
-rw-r--r-- chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h | 11
-rw-r--r-- chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc | 98
-rw-r--r-- chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h | 29
-rw-r--r-- chromium/content/common/gpu/media/v4l2_video_decode_accelerator.cc | 202
-rw-r--r-- chromium/content/common/gpu/media/v4l2_video_decode_accelerator.h | 31
-rw-r--r-- chromium/content/common/gpu/media/v4l2_video_encode_accelerator.cc | 103
-rw-r--r-- chromium/content/common/gpu/media/vaapi_drm_picture.cc | 8
-rw-r--r-- chromium/content/common/gpu/media/vaapi_drm_picture.h | 5
-rw-r--r-- chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc | 39
-rw-r--r-- chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h | 9
-rw-r--r-- chromium/content/common/gpu/media/vaapi_picture.cc | 6
-rw-r--r-- chromium/content/common/gpu/media/vaapi_picture.h | 6
-rw-r--r-- chromium/content/common/gpu/media/vaapi_tfp_picture.cc | 8
-rw-r--r-- chromium/content/common/gpu/media/vaapi_tfp_picture.h | 5
-rw-r--r-- chromium/content/common/gpu/media/vaapi_video_decode_accelerator.cc | 63
-rw-r--r-- chromium/content/common/gpu/media/vaapi_video_decode_accelerator.h | 30
-rw-r--r-- chromium/content/common/gpu/media/vaapi_video_encode_accelerator.cc | 40
-rw-r--r-- chromium/content/common/gpu/media/vaapi_wrapper.cc | 24
-rw-r--r-- chromium/content/common/gpu/media/vaapi_wrapper.h | 2
-rw-r--r-- chromium/content/common/gpu/media/video_decode_accelerator_unittest.cc | 187
-rw-r--r-- chromium/content/common/gpu/media/video_encode_accelerator_unittest.cc | 58
-rw-r--r-- chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.cc | 135
-rw-r--r-- chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.h | 19
-rw-r--r-- chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.cc | 552
-rw-r--r-- chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.h | 142
67 files changed, 4836 insertions(+), 2242 deletions(-)
diff --git a/chromium/content/common/gpu/media/OWNERS b/chromium/content/common/gpu/media/OWNERS
index 633a1877529..9999d737345 100644
--- a/chromium/content/common/gpu/media/OWNERS
+++ b/chromium/content/common/gpu/media/OWNERS
@@ -2,3 +2,16 @@ dalecurtis@chromium.org
posciak@chromium.org
sandersd@chromium.org
wuchengli@chromium.org
+
+# For security review of IPC message files.
+per-file *_messages*.h=set noparent
+per-file *_messages*.h=dcheng@chromium.org
+per-file *_messages*.h=inferno@chromium.org
+per-file *_messages*.h=jln@chromium.org
+per-file *_messages*.h=jschuh@chromium.org
+per-file *_messages*.h=kenrb@chromium.org
+per-file *_messages*.h=mkwst@chromium.org
+per-file *_messages*.h=nasko@chromium.org
+per-file *_messages*.h=palmer@chromium.org
+per-file *_messages*.h=tsepez@chromium.org
+per-file *_messages*.h=wfh@chromium.org
diff --git a/chromium/content/common/gpu/media/android_copying_backing_strategy.cc b/chromium/content/common/gpu/media/android_copying_backing_strategy.cc
index f80a16f3d72..b5216154829 100644
--- a/chromium/content/common/gpu/media/android_copying_backing_strategy.cc
+++ b/chromium/content/common/gpu/media/android_copying_backing_strategy.cc
@@ -8,6 +8,7 @@
#include "base/logging.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/media/avda_return_on_failure.h"
+#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "media/base/limits.h"
@@ -17,24 +18,47 @@
namespace content {
-const static GLfloat kIdentityMatrix[16] = {1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f,
- 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 1.0f};
-
-AndroidCopyingBackingStrategy::AndroidCopyingBackingStrategy()
- : state_provider_(nullptr), surface_texture_id_(0), media_codec_(nullptr) {}
+AndroidCopyingBackingStrategy::AndroidCopyingBackingStrategy(
+ AVDAStateProvider* state_provider)
+ : state_provider_(state_provider),
+ surface_texture_id_(0),
+ media_codec_(nullptr) {}
AndroidCopyingBackingStrategy::~AndroidCopyingBackingStrategy() {}
-void AndroidCopyingBackingStrategy::Initialize(
- AVDAStateProvider* state_provider) {
- state_provider_ = state_provider;
+gfx::ScopedJavaSurface AndroidCopyingBackingStrategy::Initialize(
+ int surface_view_id) {
+ if (surface_view_id != media::VideoDecodeAccelerator::Config::kNoSurfaceID) {
+ LOG(ERROR) << "The copying strategy should not be initialized with a "
+ "surface id.";
+ return gfx::ScopedJavaSurface();
+ }
+
+ // Create a texture and attach the SurfaceTexture to it.
+ glGenTextures(1, &surface_texture_id_);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, surface_texture_id_);
+
+ // Note that the target will be correctly sized, so nearest filtering is all
+ // that's needed.
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ state_provider_->GetGlDecoder()->RestoreTextureUnitBindings(0);
+ state_provider_->GetGlDecoder()->RestoreActiveTexture();
+
+ surface_texture_ = gfx::SurfaceTexture::Create(surface_texture_id_);
+
+ return gfx::ScopedJavaSurface(surface_texture_.get());
}
void AndroidCopyingBackingStrategy::Cleanup(
bool have_context,
const AndroidVideoDecodeAccelerator::OutputBufferMap&) {
DCHECK(state_provider_->ThreadChecker().CalledOnValidThread());
+
if (copier_)
copier_->Destroy();
@@ -42,26 +66,17 @@ void AndroidCopyingBackingStrategy::Cleanup(
glDeleteTextures(1, &surface_texture_id_);
}
+scoped_refptr<gfx::SurfaceTexture>
+AndroidCopyingBackingStrategy::GetSurfaceTexture() const {
+ return surface_texture_;
+}
+
uint32_t AndroidCopyingBackingStrategy::GetTextureTarget() const {
return GL_TEXTURE_2D;
}
-scoped_refptr<gfx::SurfaceTexture>
-AndroidCopyingBackingStrategy::CreateSurfaceTexture() {
- glGenTextures(1, &surface_texture_id_);
- glActiveTexture(GL_TEXTURE0);
- glBindTexture(GL_TEXTURE_EXTERNAL_OES, surface_texture_id_);
-
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- state_provider_->GetGlDecoder()->RestoreTextureUnitBindings(0);
- state_provider_->GetGlDecoder()->RestoreActiveTexture();
-
- surface_texture_ = gfx::SurfaceTexture::Create(surface_texture_id_);
-
- return surface_texture_;
+gfx::Size AndroidCopyingBackingStrategy::GetPictureBufferSize() const {
+ return state_provider_->GetSize();
}
void AndroidCopyingBackingStrategy::UseCodecBufferForPictureBuffer(
@@ -99,16 +114,19 @@ void AndroidCopyingBackingStrategy::UseCodecBufferForPictureBuffer(
surface_texture_->UpdateTexImage();
}
- float transfrom_matrix[16];
- surface_texture_->GetTransformMatrix(transfrom_matrix);
+ float transform_matrix[16];
+ surface_texture_->GetTransformMatrix(transform_matrix);
- uint32_t picture_buffer_texture_id = picture_buffer.texture_id();
+ DCHECK_LE(1u, picture_buffer.texture_ids().size());
+ uint32_t picture_buffer_texture_id = picture_buffer.texture_ids()[0];
// Defer initializing the CopyTextureCHROMIUMResourceManager until it is
// needed because it takes tens of milliseconds to initialize.
if (!copier_) {
copier_.reset(new gpu::CopyTextureCHROMIUMResourceManager());
- copier_->Initialize(state_provider_->GetGlDecoder().get());
+ copier_->Initialize(state_provider_->GetGlDecoder().get(),
+ state_provider_->GetGlDecoder()->GetContextGroup()->
+ feature_info()->feature_flags());
}
// Here, we copy |surface_texture_id_| to the picture buffer instead of
@@ -118,13 +136,11 @@ void AndroidCopyingBackingStrategy::UseCodecBufferForPictureBuffer(
// attached.
// 2. SurfaceTexture requires us to apply a transform matrix when we show
// the texture.
- // TODO(hkuang): get the StreamTexture transform matrix in GPU process
- // instead of using default matrix crbug.com/226218.
copier_->DoCopyTextureWithTransform(
state_provider_->GetGlDecoder().get(), GL_TEXTURE_EXTERNAL_OES,
surface_texture_id_, GL_TEXTURE_2D, picture_buffer_texture_id,
state_provider_->GetSize().width(), state_provider_->GetSize().height(),
- false, false, false, kIdentityMatrix);
+ true, false, false, transform_matrix);
}
void AndroidCopyingBackingStrategy::CodecChanged(
@@ -140,4 +156,37 @@ void AndroidCopyingBackingStrategy::OnFrameAvailable() {
// instead preserve the old behavior.
}
+bool AndroidCopyingBackingStrategy::ArePicturesOverlayable() {
+ return false;
+}
+
+void AndroidCopyingBackingStrategy::UpdatePictureBufferSize(
+ media::PictureBuffer* picture_buffer,
+ const gfx::Size& new_size) {
+ // This strategy uses 2D textures whose allocated memory is dependent on the
+ // size. To update size in all places, we must:
+ // 1) Update the PictureBuffer meta-data
+ picture_buffer->set_size(new_size);
+
+ // 2) Update the GL texture via glTexImage2D. This step assumes the caller
+ // has made our GL context current.
+ DCHECK_LE(1u, picture_buffer->texture_ids().size());
+ glBindTexture(GL_TEXTURE_2D, picture_buffer->texture_ids()[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, new_size.width(), new_size.height(),
+ 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+ state_provider_->GetGlDecoder()->RestoreActiveTextureUnitBinding(
+ GL_TEXTURE_2D);
+
+ // 3) Update the CHROMIUM Texture's size.
+ gpu::gles2::TextureRef* texture_ref =
+ state_provider_->GetTextureForPicture(*picture_buffer);
+ RETURN_IF_NULL(texture_ref);
+ gpu::gles2::TextureManager* texture_manager =
+ state_provider_->GetGlDecoder()->GetContextGroup()->texture_manager();
+ RETURN_IF_NULL(texture_manager);
+ texture_manager->SetLevelInfo(texture_ref, GetTextureTarget(), 0, GL_RGBA,
+ new_size.width(), new_size.height(), 1, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, gfx::Rect(new_size));
+}
+
} // namespace content
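
The substantive change in this file is that DoCopyTextureWithTransform() now receives the per-frame matrix reported by the SurfaceTexture instead of kIdentityMatrix (and flips the copy), which is why the old crbug.com/226218 TODO could be removed. A minimal sketch of the copy step, assuming a current GL context; src_oes_id and dst_2d_id are hypothetical stand-ins for the decoder's textures:

    // Sketch only. The trailing booleans mirror the patched call above; in
    // this era's signature they are flip_y, premultiply_alpha, and
    // unpremultiply_alpha.
    surface_texture_->UpdateTexImage();  // latch the newest decoded frame
    float transform_matrix[16];
    surface_texture_->GetTransformMatrix(transform_matrix);
    copier_->DoCopyTextureWithTransform(
        gl_decoder, GL_TEXTURE_EXTERNAL_OES, src_oes_id, GL_TEXTURE_2D,
        dst_2d_id, size.width(), size.height(), true /* flip_y */,
        false, false, transform_matrix);

Without the matrix, frames decoded with a crop or rotation would sample the wrong region of the external texture.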
diff --git a/chromium/content/common/gpu/media/android_copying_backing_strategy.h b/chromium/content/common/gpu/media/android_copying_backing_strategy.h
index 17b096aecd5..8980404dfdb 100644
--- a/chromium/content/common/gpu/media/android_copying_backing_strategy.h
+++ b/chromium/content/common/gpu/media/android_copying_backing_strategy.h
@@ -28,21 +28,25 @@ class AVDAStateProvider;
class CONTENT_EXPORT AndroidCopyingBackingStrategy
: public AndroidVideoDecodeAccelerator::BackingStrategy {
public:
- AndroidCopyingBackingStrategy();
+ explicit AndroidCopyingBackingStrategy(AVDAStateProvider* state_provider);
~AndroidCopyingBackingStrategy() override;
// AndroidVideoDecodeAccelerator::BackingStrategy
- void Initialize(AVDAStateProvider*) override;
+ gfx::ScopedJavaSurface Initialize(int surface_view_id) override;
void Cleanup(bool have_context,
const AndroidVideoDecodeAccelerator::OutputBufferMap&) override;
+ scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture() const override;
uint32_t GetTextureTarget() const override;
- scoped_refptr<gfx::SurfaceTexture> CreateSurfaceTexture() override;
+ gfx::Size GetPictureBufferSize() const override;
void UseCodecBufferForPictureBuffer(int32_t codec_buffer_index,
const media::PictureBuffer&) override;
void CodecChanged(
media::VideoCodecBridge*,
const AndroidVideoDecodeAccelerator::OutputBufferMap&) override;
void OnFrameAvailable() override;
+ bool ArePicturesOverlayable() override;
+ void UpdatePictureBufferSize(media::PictureBuffer* picture_buffer,
+ const gfx::Size& new_size) override;
private:
// Used to copy the texture from the surface texture to picture buffers.
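
The interface change above moves surface ownership into the strategy: Initialize() now returns the gfx::ScopedJavaSurface that the MediaCodec is configured with, and CreateSurfaceTexture() disappears. A hedged sketch of how a caller drives the new shape, mirroring the AVDA::Initialize() hunk later in this diff:

    // kNoSurfaceID selects SurfaceTexture-backed rendering.
    strategy_.reset(new AndroidCopyingBackingStrategy(this));
    gfx::ScopedJavaSurface surface = strategy_->Initialize(
        media::VideoDecodeAccelerator::Config::kNoSurfaceID);
    if (surface.IsEmpty())
      return false;  // the strategy could not produce a Java surface
    // Picture buffers are then requested with the strategy's geometry:
    // strategy_->GetPictureBufferSize() and strategy_->GetTextureTarget().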
diff --git a/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc b/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc
index 660785eea90..3e62629745d 100644
--- a/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc
+++ b/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.cc
@@ -4,30 +4,41 @@
#include "content/common/gpu/media/android_deferred_rendering_backing_strategy.h"
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+
+#include "base/android/build_info.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
-#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/media/avda_codec_image.h"
#include "content/common/gpu/media/avda_return_on_failure.h"
#include "content/common/gpu/media/avda_shared_state.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/gl_stream_texture_image.h"
+#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/ipc/common/gpu_surface_lookup.h"
+#include "gpu/ipc/service/gpu_channel.h"
#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/egl_util.h"
#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_surface_egl.h"
+#include "ui/gl/scoped_binders.h"
+#include "ui/gl/scoped_make_current.h"
namespace content {
AndroidDeferredRenderingBackingStrategy::
- AndroidDeferredRenderingBackingStrategy()
- : state_provider_(nullptr), media_codec_(nullptr) {}
+ AndroidDeferredRenderingBackingStrategy(AVDAStateProvider* state_provider)
+ : state_provider_(state_provider), media_codec_(nullptr) {}
AndroidDeferredRenderingBackingStrategy::
~AndroidDeferredRenderingBackingStrategy() {}
-void AndroidDeferredRenderingBackingStrategy::Initialize(
- AVDAStateProvider* state_provider) {
- state_provider_ = state_provider;
+gfx::ScopedJavaSurface AndroidDeferredRenderingBackingStrategy::Initialize(
+ int surface_view_id) {
shared_state_ = new AVDASharedState();
// Create a texture for the SurfaceTexture to use. We don't attach it here
@@ -36,6 +47,27 @@ void AndroidDeferredRenderingBackingStrategy::Initialize(
glGenTextures(1, &service_id);
DCHECK(service_id);
shared_state_->set_surface_texture_service_id(service_id);
+
+ gfx::ScopedJavaSurface surface;
+ if (surface_view_id != media::VideoDecodeAccelerator::Config::kNoSurfaceID) {
+ surface = gpu::GpuSurfaceLookup::GetInstance()->AcquireJavaSurface(
+ surface_view_id);
+ } else {
+ if (DoesSurfaceTextureDetachWork()) {
+ // Create a detached SurfaceTexture. Detaching it will silently fail to
+ // delete texture 0.
+ surface_texture_ = gfx::SurfaceTexture::Create(0);
+ surface_texture_->DetachFromGLContext();
+ } else {
+ // Detach doesn't work so well on all platforms. Just attach the
+ // SurfaceTexture here, and probably context switch later.
+ surface_texture_ = gfx::SurfaceTexture::Create(service_id);
+ shared_state_->DidAttachSurfaceTexture();
+ }
+ surface = gfx::ScopedJavaSurface(surface_texture_.get());
+ }
+
+ return surface;
}
void AndroidDeferredRenderingBackingStrategy::Cleanup(
@@ -50,6 +82,11 @@ void AndroidDeferredRenderingBackingStrategy::Cleanup(
for (const std::pair<int, media::PictureBuffer>& entry : buffers)
SetImageForPicture(entry.second, nullptr);
+ // If we're rendering to a SurfaceTexture we can make a copy of the current
+ // front buffer so that the PictureBuffer textures are still valid.
+ if (surface_texture_ && have_context && ShouldCopyPictures())
+ CopySurfaceTextureToPictures(buffers);
+
// Now that no AVDACodecImages refer to the SurfaceTexture's texture, delete
// the texture name.
GLuint service_id = shared_state_->surface_texture_service_id();
@@ -57,38 +94,35 @@ void AndroidDeferredRenderingBackingStrategy::Cleanup(
glDeleteTextures(1, &service_id);
}
-uint32_t AndroidDeferredRenderingBackingStrategy::GetTextureTarget() const {
- return GL_TEXTURE_EXTERNAL_OES;
-}
-
scoped_refptr<gfx::SurfaceTexture>
-AndroidDeferredRenderingBackingStrategy::CreateSurfaceTexture() {
- // AVDACodecImage will handle attaching this to a texture later.
- surface_texture_ = gfx::SurfaceTexture::Create(0);
- // Detach from our GL context so that the GLImages can attach. It will
- // silently fail to delete texture 0.
- surface_texture_->DetachFromGLContext();
-
+AndroidDeferredRenderingBackingStrategy::GetSurfaceTexture() const {
return surface_texture_;
}
-gpu::gles2::TextureRef*
-AndroidDeferredRenderingBackingStrategy::GetTextureForPicture(
- const media::PictureBuffer& picture_buffer) {
- RETURN_NULL_IF_NULL(state_provider_->GetGlDecoder());
- gpu::gles2::TextureManager* texture_manager =
- state_provider_->GetGlDecoder()->GetContextGroup()->texture_manager();
- RETURN_NULL_IF_NULL(texture_manager);
- gpu::gles2::TextureRef* texture_ref =
- texture_manager->GetTexture(picture_buffer.internal_texture_id());
- RETURN_NULL_IF_NULL(texture_ref);
+uint32_t AndroidDeferredRenderingBackingStrategy::GetTextureTarget() const {
+ // If we're using a surface texture, then we need an external texture target
+ // to sample from it. If not, then we'll use 2D transparent textures to draw
+ // a transparent hole through which to see the SurfaceView. This is normally
+ // needed only for the devtools inspector, since the overlay mechanism handles
+ // it otherwise.
+ return surface_texture_ ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+}
- return texture_ref;
+gfx::Size AndroidDeferredRenderingBackingStrategy::GetPictureBufferSize()
+ const {
+ // For SurfaceView, request a 1x1 2D texture to reduce memory during
+ // initialization. For SurfaceTexture, allocate a picture buffer that is the
+ // actual frame size. Note that it will be an external texture anyway, so it
+ // doesn't allocate an image of that size. However, it's still important to
+ // get the coded size right, so that VideoLayerImpl doesn't try to scale the
+ // texture when building the quad for it.
+ return surface_texture_ ? state_provider_->GetSize() : gfx::Size(1, 1);
}
AVDACodecImage* AndroidDeferredRenderingBackingStrategy::GetImageForPicture(
const media::PictureBuffer& picture_buffer) {
- gpu::gles2::TextureRef* texture_ref = GetTextureForPicture(picture_buffer);
+ gpu::gles2::TextureRef* texture_ref =
+ state_provider_->GetTextureForPicture(picture_buffer);
RETURN_NULL_IF_NULL(texture_ref);
gl::GLImage* image =
texture_ref->texture()->GetLevelImage(GetTextureTarget(), 0);
@@ -97,8 +131,9 @@ AVDACodecImage* AndroidDeferredRenderingBackingStrategy::GetImageForPicture(
void AndroidDeferredRenderingBackingStrategy::SetImageForPicture(
const media::PictureBuffer& picture_buffer,
- const scoped_refptr<gl::GLImage>& image) {
- gpu::gles2::TextureRef* texture_ref = GetTextureForPicture(picture_buffer);
+ const scoped_refptr<gpu::gles2::GLStreamTextureImage>& image) {
+ gpu::gles2::TextureRef* texture_ref =
+ state_provider_->GetTextureForPicture(picture_buffer);
RETURN_IF_NULL(texture_ref);
gpu::gles2::TextureManager* texture_manager =
@@ -120,15 +155,25 @@ void AndroidDeferredRenderingBackingStrategy::SetImageForPicture(
shared_state_->surface_texture_service_id());
static_cast<AVDACodecImage*>(image.get())
- ->setTexture(texture_ref->texture());
+ ->SetTexture(texture_ref->texture());
} else {
// Clear the unowned service_id, so that this texture is no longer going
// to depend on the surface texture at all.
texture_ref->texture()->SetUnownedServiceId(0);
}
- texture_manager->SetLevelImage(texture_ref, GetTextureTarget(), 0,
- image.get(), gpu::gles2::Texture::UNBOUND);
+ // For SurfaceTexture we set the image to UNBOUND so that the implementation
+ // will call CopyTexImage, which is where AVDACodecImage updates the
+ // SurfaceTexture to the right frame.
+ // For SurfaceView we set the image to be BOUND because ScheduleOverlayPlane
+ // expects it. If something tries to sample from this texture it won't work,
+ // but there's no way to sample from a SurfaceView anyway, so it doesn't
+ // matter. The only way to use this texture is to schedule it as an overlay.
+ const gpu::gles2::Texture::ImageState image_state =
+ surface_texture_ ? gpu::gles2::Texture::UNBOUND
+ : gpu::gles2::Texture::BOUND;
+ texture_manager->SetLevelStreamTextureImage(texture_ref, GetTextureTarget(),
+ 0, image.get(), image_state);
}
void AndroidDeferredRenderingBackingStrategy::UseCodecBufferForPictureBuffer(
@@ -139,36 +184,53 @@ void AndroidDeferredRenderingBackingStrategy::UseCodecBufferForPictureBuffer(
// Notify the AVDACodecImage for picture_buffer that it should use the
// decoded buffer codec_buf_index to render this frame.
- AVDACodecImage* avImage = GetImageForPicture(picture_buffer);
- RETURN_IF_NULL(avImage);
- DCHECK_EQ(avImage->GetMediaCodecBufferIndex(), -1);
+ AVDACodecImage* avda_image = GetImageForPicture(picture_buffer);
+ RETURN_IF_NULL(avda_image);
+ DCHECK_EQ(avda_image->GetMediaCodecBufferIndex(), -1);
// Note that this is not a race, since we do not re-use a PictureBuffer
// until after the CC is done drawing it.
- avImage->SetMediaCodecBufferIndex(codec_buf_index);
- avImage->SetSize(state_provider_->GetSize());
+ avda_image->SetMediaCodecBufferIndex(codec_buf_index);
+ avda_image->SetSize(state_provider_->GetSize());
}
void AndroidDeferredRenderingBackingStrategy::AssignOnePictureBuffer(
- const media::PictureBuffer& picture_buffer) {
+ const media::PictureBuffer& picture_buffer,
+ bool have_context) {
// Attach a GLImage to each texture that will use the surface texture.
// We use a refptr here in case SetImageForPicture fails.
- scoped_refptr<gl::GLImage> gl_image(
+ scoped_refptr<gpu::gles2::GLStreamTextureImage> gl_image =
new AVDACodecImage(shared_state_, media_codec_,
- state_provider_->GetGlDecoder(), surface_texture_));
+ state_provider_->GetGlDecoder(), surface_texture_);
SetImageForPicture(picture_buffer, gl_image);
+
+ if (!surface_texture_ && have_context) {
+ // To make devtools work, we're using a 2D texture. Make it transparent,
+ // so that it draws a hole for the SV to show through. This is only
+ // because devtools draws and reads back, which skips overlay processing.
+ // It's unclear why devtools renders twice -- once normally, and once
+ // including a readback layer. The result is that the device screen
+ // flashes as we alternately draw the overlay hole and this texture,
+ // unless we make the texture transparent.
+ static const uint8_t rgba[] = {0, 0, 0, 0};
+ const gfx::Size size(1, 1);
+ DCHECK_LE(1u, picture_buffer.texture_ids().size());
+ glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_ids()[0]);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.width(), size.height(), 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, rgba);
+ }
}
void AndroidDeferredRenderingBackingStrategy::ReleaseCodecBufferForPicture(
const media::PictureBuffer& picture_buffer) {
- AVDACodecImage* avImage = GetImageForPicture(picture_buffer);
+ AVDACodecImage* avda_image = GetImageForPicture(picture_buffer);
// See if there is a media codec buffer still attached to this image.
- const int32_t codec_buffer = avImage->GetMediaCodecBufferIndex();
+ const int32_t codec_buffer = avda_image->GetMediaCodecBufferIndex();
if (codec_buffer >= 0) {
// PictureBuffer wasn't displayed, so release the buffer.
media_codec_->ReleaseOutputBuffer(codec_buffer, false);
- avImage->SetMediaCodecBufferIndex(-1);
+ avda_image->SetMediaCodecBufferIndex(-1);
}
}
@@ -181,18 +243,6 @@ void AndroidDeferredRenderingBackingStrategy::ReuseOnePictureBuffer(
ReleaseCodecBufferForPicture(picture_buffer);
}
-void AndroidDeferredRenderingBackingStrategy::DismissOnePictureBuffer(
- const media::PictureBuffer& picture_buffer) {
- // If there is an outstanding codec buffer attached to this image, then
- // release it.
- ReleaseCodecBufferForPicture(picture_buffer);
-
- // This makes sure that the Texture no longer refers to the codec or to the
- // SurfaceTexture's service_id. That's important, so that it doesn't refer
- // to the texture by name after we've deleted it.
- SetImageForPicture(picture_buffer, nullptr);
-}
-
void AndroidDeferredRenderingBackingStrategy::CodecChanged(
media::VideoCodecBridge* codec,
const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {
@@ -200,9 +250,9 @@ void AndroidDeferredRenderingBackingStrategy::CodecChanged(
// doesn't know about them.
media_codec_ = codec;
for (const std::pair<int, media::PictureBuffer>& entry : buffers) {
- AVDACodecImage* avImage = GetImageForPicture(entry.second);
- avImage->SetMediaCodec(codec);
- avImage->SetMediaCodecBufferIndex(-1);
+ AVDACodecImage* avda_image = GetImageForPicture(entry.second);
+ avda_image->SetMediaCodec(codec);
+ avda_image->SetMediaCodecBufferIndex(-1);
}
}
@@ -210,4 +260,163 @@ void AndroidDeferredRenderingBackingStrategy::OnFrameAvailable() {
shared_state_->SignalFrameAvailable();
}
+bool AndroidDeferredRenderingBackingStrategy::ArePicturesOverlayable() {
+ // SurfaceView frames are always overlayable because that's the only way to
+ // display them.
+ return !surface_texture_;
+}
+
+void AndroidDeferredRenderingBackingStrategy::UpdatePictureBufferSize(
+ media::PictureBuffer* picture_buffer,
+ const gfx::Size& new_size) {
+ // This strategy uses EGL images which manage the texture size for us. We
+ // simply update the PictureBuffer meta-data and leave the texture as-is.
+ picture_buffer->set_size(new_size);
+}
+
+void AndroidDeferredRenderingBackingStrategy::CopySurfaceTextureToPictures(
+ const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {
+ DVLOG(3) << __FUNCTION__;
+
+ // Don't try to copy if the SurfaceTexture was never attached because that
+ // means it was never updated.
+ if (!shared_state_->surface_texture_is_attached())
+ return;
+
+ gpu::gles2::GLES2Decoder* gl_decoder = state_provider_->GetGlDecoder().get();
+ if (!gl_decoder)
+ return;
+
+ const gfx::Size size = state_provider_->GetSize();
+
+ // Create a 2D texture to hold a copy of the SurfaceTexture's front buffer.
+ GLuint tmp_texture_id;
+ glGenTextures(1, &tmp_texture_id);
+ {
+ gfx::ScopedTextureBinder texture_binder(GL_TEXTURE_2D, tmp_texture_id);
+ // The target texture's size will exactly match the source.
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.width(), size.height(), 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+ }
+
+ float transform_matrix[16];
+ surface_texture_->GetTransformMatrix(transform_matrix);
+
+ gpu::CopyTextureCHROMIUMResourceManager copier;
+ copier.Initialize(
+ gl_decoder,
+ gl_decoder->GetContextGroup()->feature_info()->feature_flags());
+ copier.DoCopyTextureWithTransform(gl_decoder, GL_TEXTURE_EXTERNAL_OES,
+ shared_state_->surface_texture_service_id(),
+ GL_TEXTURE_2D, tmp_texture_id, size.width(),
+ size.height(), true, false, false,
+ transform_matrix);
+
+ // Create an EGLImage from the 2D texture we just copied into. By associating
+ // the EGLImage with the PictureBuffer textures they will remain valid even
+ // after we delete the 2D texture and EGLImage.
+ const EGLImageKHR egl_image = eglCreateImageKHR(
+ gfx::GLSurfaceEGL::GetHardwareDisplay(), eglGetCurrentContext(),
+ EGL_GL_TEXTURE_2D_KHR, reinterpret_cast<EGLClientBuffer>(tmp_texture_id),
+ nullptr /* attrs */);
+
+ glDeleteTextures(1, &tmp_texture_id);
+ DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ if (egl_image == EGL_NO_IMAGE_KHR) {
+ DLOG(ERROR) << "Failed creating EGLImage: " << ui::GetLastEGLErrorString();
+ return;
+ }
+
+ for (const std::pair<int, media::PictureBuffer>& entry : buffers) {
+ gpu::gles2::TextureRef* texture_ref =
+ state_provider_->GetTextureForPicture(entry.second);
+ if (!texture_ref)
+ continue;
+ gfx::ScopedTextureBinder texture_binder(
+ GL_TEXTURE_EXTERNAL_OES, texture_ref->texture()->service_id());
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, egl_image);
+ DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ }
+
+ EGLBoolean result =
+ eglDestroyImageKHR(gfx::GLSurfaceEGL::GetHardwareDisplay(), egl_image);
+ if (result == EGL_FALSE) {
+ DLOG(ERROR) << "Error destroying EGLImage: "
+ << ui::GetLastEGLErrorString();
+ }
+}
+
+bool AndroidDeferredRenderingBackingStrategy::DoesSurfaceTextureDetachWork()
+ const {
+ bool surface_texture_detach_works = true;
+ if (gpu::gles2::GLES2Decoder* gl_decoder =
+ state_provider_->GetGlDecoder().get()) {
+ if (gpu::gles2::ContextGroup* group = gl_decoder->GetContextGroup()) {
+ if (gpu::gles2::FeatureInfo* feature_info = group->feature_info()) {
+ surface_texture_detach_works =
+ !feature_info->workarounds().surface_texture_cant_detach;
+ }
+ }
+ }
+
+ // As a special case, the MicroMax A114 doesn't get the workaround, even
+ // though it should. Hardcode it here until we get a device and figure out
+ // why. crbug.com/591600
+ if (base::android::BuildInfo::GetInstance()->sdk_int() <= 18) { // JB
+ const std::string brand(
+ base::ToLowerASCII(base::android::BuildInfo::GetInstance()->brand()));
+ if (brand == "micromax") {
+ const std::string model(
+ base::ToLowerASCII(base::android::BuildInfo::GetInstance()->model()));
+ if (model.find("a114") != std::string::npos)
+ surface_texture_detach_works = false;
+ }
+ }
+
+ return surface_texture_detach_works;
+}
+
+bool AndroidDeferredRenderingBackingStrategy::ShouldCopyPictures() const {
+ // Mali + <= KitKat crashes when we try to do this. We don't know if it's
+ // due to detaching a surface texture, but it's the same set of devices.
+ if (!DoesSurfaceTextureDetachWork())
+ return false;
+
+ // Other devices are unreliable for other reasons (e.g., EGLImage).
+ if (gpu::gles2::GLES2Decoder* gl_decoder =
+ state_provider_->GetGlDecoder().get()) {
+ if (gpu::gles2::ContextGroup* group = gl_decoder->GetContextGroup()) {
+ if (gpu::gles2::FeatureInfo* feature_info = group->feature_info()) {
+ return !feature_info->workarounds().avda_dont_copy_pictures;
+ }
+ }
+ }
+
+ // Samsung Galaxy Tab A, J3, and J1 Mini all like to crash on Lollipop in
+ // glEGLImageTargetTexture2DOES. Exact models were SM-T280, SM-J320F,
+ // and SM-J105H.
+ if (base::android::BuildInfo::GetInstance()->sdk_int() <= 22) { // L MR1
+ const std::string brand(
+ base::ToLowerASCII(base::android::BuildInfo::GetInstance()->brand()));
+ if (brand == "samsung") {
+ const std::string model(
+ base::ToLowerASCII(base::android::BuildInfo::GetInstance()->model()));
+ if (model.find("sm-t280") != std::string::npos ||
+ model.find("sm-j320f") != std::string::npos ||
+ model.find("sm-j105") != std::string::npos)
+ return false;
+ }
+ }
+
+ // Assume so.
+ return true;
+}
+
} // namespace content
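
CopySurfaceTextureToPictures() leans on an EGLImage property: storage wrapped from one GL texture can be attached to many textures, and it stays alive as long as any sibling still references it, even after the source texture and the image handle are destroyed. A condensed sketch of that lifetime trick (error checks elided; picture_texture_ids is a hypothetical list of the PictureBuffer texture names):

    // Wrap the temporary 2D texture's storage in an EGLImage.
    EGLImageKHR egl_image = eglCreateImageKHR(
        gfx::GLSurfaceEGL::GetHardwareDisplay(), eglGetCurrentContext(),
        EGL_GL_TEXTURE_2D_KHR,
        reinterpret_cast<EGLClientBuffer>(tmp_texture_id), nullptr);

    // Re-point every picture buffer texture at the shared storage.
    for (GLuint texture_id : picture_texture_ids) {
      glBindTexture(GL_TEXTURE_EXTERNAL_OES, texture_id);
      glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, egl_image);
    }

    // Safe now: the pixels survive through the sibling textures.
    glDeleteTextures(1, &tmp_texture_id);
    eglDestroyImageKHR(gfx::GLSurfaceEGL::GetHardwareDisplay(), egl_image);

This is what lets Cleanup() tear down the SurfaceTexture and MediaCodec while compositor-held PictureBuffers keep displaying the last frame.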
diff --git a/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.h b/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.h
index 6fc1873cf5a..733b25b0a45 100644
--- a/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.h
+++ b/chromium/content/common/gpu/media/android_deferred_rendering_backing_strategy.h
@@ -17,6 +17,7 @@ class GLImage;
namespace gpu {
namespace gles2 {
+class GLStreamTextureImage;
class TextureRef;
}
}
@@ -33,42 +34,61 @@ class AVDASharedState;
class CONTENT_EXPORT AndroidDeferredRenderingBackingStrategy
: public AndroidVideoDecodeAccelerator::BackingStrategy {
public:
- AndroidDeferredRenderingBackingStrategy();
+ explicit AndroidDeferredRenderingBackingStrategy(
+ AVDAStateProvider* state_provider);
~AndroidDeferredRenderingBackingStrategy() override;
// AndroidVideoDecodeAccelerator::BackingStrategy
- void Initialize(AVDAStateProvider*) override;
+ gfx::ScopedJavaSurface Initialize(int surface_view_id) override;
void Cleanup(bool have_context,
const AndroidVideoDecodeAccelerator::OutputBufferMap&) override;
+ scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture() const override;
uint32_t GetTextureTarget() const override;
- scoped_refptr<gfx::SurfaceTexture> CreateSurfaceTexture() override;
+ gfx::Size GetPictureBufferSize() const override;
void UseCodecBufferForPictureBuffer(int32_t codec_buffer_index,
const media::PictureBuffer&) override;
- void AssignOnePictureBuffer(const media::PictureBuffer&) override;
+ void AssignOnePictureBuffer(const media::PictureBuffer&, bool) override;
void ReuseOnePictureBuffer(const media::PictureBuffer&) override;
- void DismissOnePictureBuffer(const media::PictureBuffer&) override;
void CodecChanged(
media::VideoCodecBridge*,
const AndroidVideoDecodeAccelerator::OutputBufferMap&) override;
void OnFrameAvailable() override;
+ bool ArePicturesOverlayable() override;
+ void UpdatePictureBufferSize(media::PictureBuffer* picture_buffer,
+ const gfx::Size& new_size) override;
private:
// Release any codec buffer that is associated with the given picture buffer
// back to the codec. It is okay if there is no such buffer.
void ReleaseCodecBufferForPicture(const media::PictureBuffer& picture_buffer);
- // Return the TextureRef for a given PictureBuffer's texture.
- gpu::gles2::TextureRef* GetTextureForPicture(const media::PictureBuffer&);
-
// Return the AVDACodecImage for a given PictureBuffer's texture.
AVDACodecImage* GetImageForPicture(const media::PictureBuffer&);
- void SetImageForPicture(const media::PictureBuffer& picture_buffer,
- const scoped_refptr<gl::GLImage>& image);
+ void SetImageForPicture(
+ const media::PictureBuffer& picture_buffer,
+ const scoped_refptr<gpu::gles2::GLStreamTextureImage>& image);
+
+ // Make a copy of the SurfaceTexture's front buffer and associate all given
+ // picture buffer textures with it. The picture buffer textures will not
+ // depend on |this|, the SurfaceTexture, the MediaCodec or the VDA, so it's
+ // used to back the picture buffers when the VDA is being destroyed.
+ void CopySurfaceTextureToPictures(
+ const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers);
+
+ // Return true if and only if the surface_texture_cant_detach workaround is
+ // not set.
+ bool DoesSurfaceTextureDetachWork() const;
+
+ // Return true if and only if CopySurfaceTextureToPictures is expected to work
+ // on this device.
+ bool ShouldCopyPictures() const;
scoped_refptr<AVDASharedState> shared_state_;
AVDAStateProvider* state_provider_;
+ // The SurfaceTexture to render to. Non-null after Initialize() if
+ // we're not rendering to a SurfaceView.
scoped_refptr<gfx::SurfaceTexture> surface_texture_;
media::VideoCodecBridge* media_codec_;
diff --git a/chromium/content/common/gpu/media/android_video_decode_accelerator.cc b/chromium/content/common/gpu/media/android_video_decode_accelerator.cc
index 832d8dca218..e4f45ca73b3 100644
--- a/chromium/content/common/gpu/media/android_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/android_video_decode_accelerator.cc
@@ -7,21 +7,30 @@
#include <stddef.h>
#include "base/android/build_info.h"
+#include "base/auto_reset.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
+#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
+#include "base/task_runner_util.h"
+#include "base/threading/thread_checker.h"
#include "base/trace_event/trace_event.h"
-#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/media/android_copying_backing_strategy.h"
#include "content/common/gpu/media/android_deferred_rendering_backing_strategy.h"
+#include "content/common/gpu/media/avda_return_on_failure.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_codec_util.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/limits.h"
-#include "media/base/media_switches.h"
+#include "media/base/media.h"
#include "media/base/timestamp_constants.h"
#include "media/base/video_decoder_config.h"
#include "media/video/picture.h"
@@ -30,7 +39,6 @@
#include "ui/gl/gl_bindings.h"
#if defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
-#include "media/base/media_keys.h"
#include "media/mojo/services/mojo_cdm_service.h"
#endif
@@ -92,6 +100,23 @@ static inline const base::TimeDelta IdleTimerTimeOut() {
return base::TimeDelta::FromSeconds(1);
}
+// Time between when we notice an error, and when we actually notify somebody.
+// This is to prevent codec errors caused by SurfaceView fullscreen transitions
+// from breaking the pipeline, if we're about to be reset anyway.
+static inline const base::TimeDelta ErrorPostingDelay() {
+ return base::TimeDelta::FromSeconds(2);
+}
+
+// For RecordFormatChangedMetric.
+enum FormatChangedValue {
+ CodecInitialized = false,
+ MissingFormatChanged = true
+};
+
+static inline void RecordFormatChangedMetric(FormatChangedValue value) {
+ UMA_HISTOGRAM_BOOLEAN("Media.AVDA.MissingFormatChanged", !!value);
+}
+
// Handle OnFrameAvailable callbacks safely. Since they occur asynchronously,
// we take care that the AVDA that wants them still exists. A WeakPtr to
// the AVDA would be preferable, except that OnFrameAvailable callbacks can
@@ -143,34 +168,166 @@ class AndroidVideoDecodeAccelerator::OnFrameAvailableHandler
DISALLOW_COPY_AND_ASSIGN(OnFrameAvailableHandler);
};
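
The body of OnFrameAvailableHandler is elided from this hunk. The pattern the comment above describes is a ref-counted listener whose owner pointer is cleared under a lock, since the SurfaceTexture callback may fire on any thread while WeakPtrs must be dereferenced on a single thread. A sketch consistent with that comment (member names are assumptions):

    class OnFrameAvailableHandler
        : public base::RefCountedThreadSafe<OnFrameAvailableHandler> {
     public:
      explicit OnFrameAvailableHandler(AndroidVideoDecodeAccelerator* owner)
          : owner_(owner) {}

      // Called by the AVDA, on its thread, before it is destroyed.
      void ClearOwner() {
        base::AutoLock lock(lock_);
        owner_ = nullptr;
      }

      // May be called on any thread by the SurfaceTexture machinery.
      void OnFrameAvailable() {
        base::AutoLock lock(lock_);
        if (owner_)
          owner_->OnFrameAvailable();
      }

     private:
      friend class base::RefCountedThreadSafe<OnFrameAvailableHandler>;
      ~OnFrameAvailableHandler() {}

      base::Lock lock_;                       // guards |owner_|
      AndroidVideoDecodeAccelerator* owner_;  // cleared at AVDA teardown

      DISALLOW_COPY_AND_ASSIGN(OnFrameAvailableHandler);
    };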
+// Helper class to share an IO timer for DoIOTask() execution; prevents each
+// AVDA instance from starting its own high frequency timer. The intuition
+// behind this is that, if we're waiting for long enough, then either (a)
+// MediaCodec is broken or (b) MediaCodec is waiting on us to change state
+// (e.g., get new demuxed data / get a free picture buffer / return an output
+// buffer to MediaCodec). This is inherently a race, since we don't know if
+// MediaCodec is broken or just slow. Since the MediaCodec API doesn't let
+// us wait on MediaCodec state changes prior to L, we more or less have to
+// time out or keep polling forever in some common cases.
+class AVDATimerManager {
+ public:
+ // Make sure that the construction thread is started for |avda_instance|.
+ bool StartThread(AndroidVideoDecodeAccelerator* avda_instance) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (thread_avda_instances_.empty()) {
+ if (!construction_thread_.Start()) {
+ LOG(ERROR) << "Failed to start construction thread.";
+ return false;
+ }
+ }
+
+ thread_avda_instances_.insert(avda_instance);
+ return true;
+ }
+
+ // |avda_instance| will no longer need the construction thread. Stop the
+ // thread if this is the last instance.
+ void StopThread(AndroidVideoDecodeAccelerator* avda_instance) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ thread_avda_instances_.erase(avda_instance);
+ if (thread_avda_instances_.empty())
+ construction_thread_.Stop();
+ }
+
+ // Request periodic callback of |avda_instance|->DoIOTask(). Does nothing if
+ // the instance is already registered and the timer started. The first request
+ // will start the repeating timer on an interval of DecodePollDelay().
+ void StartTimer(AndroidVideoDecodeAccelerator* avda_instance) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ timer_avda_instances_.insert(avda_instance);
+
+ // If the timer is running, StopTimer() might have been called earlier;
+ // if so, remove the instance from the pending erasures.
+ if (timer_running_)
+ pending_erase_.erase(avda_instance);
+
+ if (io_timer_.IsRunning())
+ return;
+ io_timer_.Start(FROM_HERE, DecodePollDelay(), this,
+ &AVDATimerManager::RunTimer);
+ }
+
+ // Stop callbacks to |avda_instance|->DoIOTask(). Does nothing if the instance
+ // is not registered. If there are no instances left, the repeating timer will
+ // be stopped.
+ void StopTimer(AndroidVideoDecodeAccelerator* avda_instance) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // If the timer is running, defer erasures to avoid iterator invalidation.
+ if (timer_running_) {
+ pending_erase_.insert(avda_instance);
+ return;
+ }
+
+ timer_avda_instances_.erase(avda_instance);
+ if (timer_avda_instances_.empty())
+ io_timer_.Stop();
+ }
+
+ // Eventually, we should run the timer on this thread. For now, we just keep
+ // it as a convenience for construction.
+ scoped_refptr<base::SingleThreadTaskRunner> ConstructionTaskRunner() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return construction_thread_.task_runner();
+ }
+
+ private:
+ friend struct base::DefaultLazyInstanceTraits<AVDATimerManager>;
+
+ AVDATimerManager() : construction_thread_("AVDAThread") {}
+ ~AVDATimerManager() { NOTREACHED(); }
+
+ void RunTimer() {
+ {
+ // Call out to all AVDA instances, some of which may attempt to remove
+ // themselves from the list during this operation; those removals will be
+ // deferred until after all iterations are complete.
+ base::AutoReset<bool> scoper(&timer_running_, true);
+ for (auto* avda : timer_avda_instances_)
+ avda->DoIOTask(false);
+ }
+
+ // Take care of any deferred erasures.
+ for (auto* avda : pending_erase_)
+ StopTimer(avda);
+ pending_erase_.clear();
+
+ // TODO(dalecurtis): We may want to consider chunking this if task execution
+ // takes too long for the combined timer.
+ }
+
+ // All AVDA instances that would like us to poll DoIOTask.
+ std::set<AndroidVideoDecodeAccelerator*> timer_avda_instances_;
+
+ // All AVDA instances that might like to use the construction thread.
+ std::set<AndroidVideoDecodeAccelerator*> thread_avda_instances_;
+
+ // Since we can't delete while iterating when using a set, defer erasure
+ // until after iteration is complete.
+ bool timer_running_ = false;
+ std::set<AndroidVideoDecodeAccelerator*> pending_erase_;
+
+ // Repeating timer responsible for draining pending IO to the codecs.
+ base::RepeatingTimer io_timer_;
+
+ base::Thread construction_thread_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(AVDATimerManager);
+};
+
+static base::LazyInstance<AVDATimerManager>::Leaky g_avda_timer =
+ LAZY_INSTANCE_INITIALIZER;
+
+AndroidVideoDecodeAccelerator::CodecConfig::CodecConfig() {}
+
+AndroidVideoDecodeAccelerator::CodecConfig::~CodecConfig() {}
+
AndroidVideoDecodeAccelerator::AndroidVideoDecodeAccelerator(
- const base::WeakPtr<gpu::gles2::GLES2Decoder> decoder,
- const base::Callback<bool(void)>& make_context_current)
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb)
: client_(NULL),
- make_context_current_(make_context_current),
- codec_(media::kCodecH264),
+ make_context_current_cb_(make_context_current_cb),
+ get_gles2_decoder_cb_(get_gles2_decoder_cb),
is_encrypted_(false),
- needs_protected_surface_(false),
state_(NO_ERROR),
picturebuffers_requested_(false),
- gl_decoder_(decoder),
+ media_drm_bridge_cdm_context_(nullptr),
cdm_registration_id_(0),
- weak_this_factory_(this) {
- if (UseDeferredRenderingStrategy())
- strategy_.reset(new AndroidDeferredRenderingBackingStrategy());
- else
- strategy_.reset(new AndroidCopyingBackingStrategy());
-}
+ pending_input_buf_index_(-1),
+ error_sequence_token_(0),
+ defer_errors_(false),
+ deferred_initialization_pending_(false),
+ weak_this_factory_(this) {}
AndroidVideoDecodeAccelerator::~AndroidVideoDecodeAccelerator() {
DCHECK(thread_checker_.CalledOnValidThread());
+ g_avda_timer.Pointer()->StopTimer(this);
+ g_avda_timer.Pointer()->StopThread(this);
#if defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
- if (cdm_) {
- DCHECK(cdm_registration_id_);
- static_cast<media::MediaDrmBridge*>(cdm_.get())
- ->UnregisterPlayer(cdm_registration_id_);
- }
+ if (!media_drm_bridge_cdm_context_)
+ return;
+
+ DCHECK(cdm_registration_id_);
+ media_drm_bridge_cdm_context_->UnregisterPlayer(cdm_registration_id_);
#endif // defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
}
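
Both the destructor above and the Initialize() hunk below talk to the process-global g_avda_timer. A minimal lifecycle sketch against AVDATimerManager (in the patch the StartTimer() call is actually driven through ManageTimer(); it is inlined here for brevity):

    // Initialize(): join the shared construction thread; it only really
    // starts for the first live instance.
    if (!g_avda_timer.Pointer()->StartThread(this))
      return false;

    // While there is pending IO, ask for DoIOTask() polling. All instances
    // share one RepeatingTimer ticking at DecodePollDelay().
    g_avda_timer.Pointer()->StartTimer(this);

    // ~AndroidVideoDecodeAccelerator(): stop callbacks first, then drop the
    // thread reference; the last instance out stops the thread.
    g_avda_timer.Pointer()->StopTimer(this);
    g_avda_timer.Pointer()->StopThread(this);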
@@ -182,76 +339,130 @@ bool AndroidVideoDecodeAccelerator::Initialize(const Config& config,
DVLOG(1) << __FUNCTION__ << ": " << config.AsHumanReadableString();
+ if (make_context_current_cb_.is_null() || get_gles2_decoder_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
DCHECK(client);
client_ = client;
- codec_ = VideoCodecProfileToVideoCodec(config.profile);
+ codec_config_ = new CodecConfig();
+ codec_config_->codec_ = VideoCodecProfileToVideoCodec(config.profile);
+ codec_config_->initial_expected_coded_size_ =
+ config.initial_expected_coded_size;
is_encrypted_ = config.is_encrypted;
- bool profile_supported = codec_ == media::kCodecVP8 ||
- codec_ == media::kCodecVP9 ||
- codec_ == media::kCodecH264;
+ bool profile_supported = codec_config_->codec_ == media::kCodecVP8 ||
+ codec_config_->codec_ == media::kCodecVP9 ||
+ codec_config_->codec_ == media::kCodecH264;
+
+ // We signalled that we support deferred initialization, so see if the client
+ // does also.
+ deferred_initialization_pending_ = config.is_deferred_initialization_allowed;
if (!profile_supported) {
LOG(ERROR) << "Unsupported profile: " << config.profile;
return false;
}
+ // For encrypted streams we postpone configuration until MediaCrypto is
+ // available.
+ DCHECK(!is_encrypted_ || deferred_initialization_pending_);
+
// Only use MediaCodec for VP8/9 if it's likely backed by hardware
// or if the stream is encrypted.
- if ((codec_ == media::kCodecVP8 || codec_ == media::kCodecVP9) &&
- !is_encrypted_) {
- if (media::VideoCodecBridge::IsKnownUnaccelerated(
- codec_, media::MEDIA_CODEC_DECODER)) {
- DVLOG(1) << "Initialization failed: "
- << (codec_ == media::kCodecVP8 ? "vp8" : "vp9")
- << " is not hardware accelerated";
- return false;
- }
+ if ((codec_config_->codec_ == media::kCodecVP8 ||
+ codec_config_->codec_ == media::kCodecVP9) &&
+ !is_encrypted_ &&
+ media::VideoCodecBridge::IsKnownUnaccelerated(
+ codec_config_->codec_, media::MEDIA_CODEC_DECODER)) {
+ DVLOG(1) << "Initialization failed: "
+ << (codec_config_->codec_ == media::kCodecVP8 ? "vp8" : "vp9")
+ << " is not hardware accelerated";
+ return false;
}
- if (!make_context_current_.Run()) {
+ auto gles_decoder = get_gles2_decoder_cb_.Run();
+ if (!gles_decoder) {
+ LOG(ERROR) << "Failed to get gles2 decoder instance.";
+ return false;
+ }
+
+ const gpu::GpuPreferences& gpu_preferences =
+ gles_decoder->GetContextGroup()->gpu_preferences();
+
+ if (UseDeferredRenderingStrategy(gpu_preferences)) {
+ // TODO(liberato, watk): Figure out what we want to do about zero copy for
+ // fullscreen external SurfaceView in WebView. http://crbug.com/582170.
+ DCHECK(!gles_decoder->GetContextGroup()->mailbox_manager()->UsesSync());
+ DVLOG(1) << __FUNCTION__ << ", using deferred rendering strategy.";
+ strategy_.reset(new AndroidDeferredRenderingBackingStrategy(this));
+ } else {
+ DVLOG(1) << __FUNCTION__ << ", using copy back strategy.";
+ strategy_.reset(new AndroidCopyingBackingStrategy(this));
+ }
+
+ if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "Failed to make this decoder's GL context current.";
return false;
}
- if (!gl_decoder_) {
- LOG(ERROR) << "Failed to get gles2 decoder instance.";
+ codec_config_->surface_ = strategy_->Initialize(config.surface_id);
+ if (codec_config_->surface_.IsEmpty()) {
+ LOG(ERROR) << "Failed to initialize the backing strategy. The returned "
+ "Java surface is empty.";
return false;
}
- strategy_->Initialize(this);
+ // TODO(watk,liberato): move this into the strategy.
+ scoped_refptr<gfx::SurfaceTexture> surface_texture =
+ strategy_->GetSurfaceTexture();
+ if (surface_texture) {
+ on_frame_available_handler_ =
+ new OnFrameAvailableHandler(this, surface_texture);
+ }
- surface_texture_ = strategy_->CreateSurfaceTexture();
- on_frame_available_handler_ =
- new OnFrameAvailableHandler(this, surface_texture_);
+ // Start the thread for async configuration, even if we don't need it now.
+ // ResetCodecState might rebuild the codec later, for example.
+ if (!g_avda_timer.Pointer()->StartThread(this)) {
+ LOG(ERROR) << "Failed to start thread for AVDA timer";
+ return false;
+ }
- // For encrypted streams we postpone configuration until MediaCrypto is
- // available.
+ // If we are encrypted, then we aren't able to create the codec yet.
if (is_encrypted_)
return true;
- return ConfigureMediaCodec();
+ if (deferred_initialization_pending_) {
+ ConfigureMediaCodecAsynchronously();
+ return true;
+ }
+
+ // If the client doesn't support deferred initialization (WebRTC), then we
+ // should complete it now and return a meaningful result.
+ return ConfigureMediaCodecSynchronously();
}
void AndroidVideoDecodeAccelerator::SetCdm(int cdm_id) {
DVLOG(2) << __FUNCTION__ << ": " << cdm_id;
#if defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
- using media::MediaDrmBridge;
-
DCHECK(client_) << "SetCdm() must be called after Initialize().";
- if (cdm_) {
+ if (media_drm_bridge_cdm_context_) {
NOTREACHED() << "We do not support resetting CDM.";
- NotifyCdmAttached(false);
+ NotifyInitializationComplete(false);
return;
}
- cdm_ = media::MojoCdmService::GetCdm(cdm_id);
- DCHECK(cdm_);
+ // Store the CDM to hold a reference to it.
+ cdm_for_reference_holding_only_ = media::MojoCdmService::LegacyGetCdm(cdm_id);
+ DCHECK(cdm_for_reference_holding_only_);
- // On Android platform the MediaKeys will be its subclass MediaDrmBridge.
- MediaDrmBridge* drm_bridge = static_cast<MediaDrmBridge*>(cdm_.get());
+ // On Android platform the CdmContext must be a MediaDrmBridgeCdmContext.
+ media_drm_bridge_cdm_context_ = static_cast<media::MediaDrmBridgeCdmContext*>(
+ cdm_for_reference_holding_only_->GetCdmContext());
+ DCHECK(media_drm_bridge_cdm_context_);
// Register CDM callbacks. The callbacks registered will be posted back to
// this thread via BindToCurrentLoop.
@@ -261,31 +472,30 @@ void AndroidVideoDecodeAccelerator::SetCdm(int cdm_id) {
// destructed as well. So the |cdm_unset_cb| will never have a chance to be
// called.
// TODO(xhwang): Remove |cdm_unset_cb| after it's not used on all platforms.
- cdm_registration_id_ =
- drm_bridge->RegisterPlayer(media::BindToCurrentLoop(base::Bind(
- &AndroidVideoDecodeAccelerator::OnKeyAdded,
- weak_this_factory_.GetWeakPtr())),
- base::Bind(&base::DoNothing));
+ cdm_registration_id_ = media_drm_bridge_cdm_context_->RegisterPlayer(
+ media::BindToCurrentLoop(
+ base::Bind(&AndroidVideoDecodeAccelerator::OnKeyAdded,
+ weak_this_factory_.GetWeakPtr())),
+ base::Bind(&base::DoNothing));
- drm_bridge->SetMediaCryptoReadyCB(media::BindToCurrentLoop(
+ media_drm_bridge_cdm_context_->SetMediaCryptoReadyCB(media::BindToCurrentLoop(
base::Bind(&AndroidVideoDecodeAccelerator::OnMediaCryptoReady,
weak_this_factory_.GetWeakPtr())));
- // Postpone NotifyCdmAttached() call till we create the MediaCodec after
- // OnMediaCryptoReady().
-
+// Postpone the NotifyInitializationComplete() call until we create the
+// MediaCodec after OnMediaCryptoReady().
#else
NOTIMPLEMENTED();
- NotifyCdmAttached(false);
+ NotifyInitializationComplete(false);
#endif // !defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
}
-void AndroidVideoDecodeAccelerator::DoIOTask() {
+void AndroidVideoDecodeAccelerator::DoIOTask(bool start_timer) {
DCHECK(thread_checker_.CalledOnValidThread());
TRACE_EVENT0("media", "AVDA::DoIOTask");
- if (state_ == ERROR) {
+ if (state_ == ERROR || state_ == WAITING_FOR_CODEC) {
return;
}
@@ -293,49 +503,69 @@ void AndroidVideoDecodeAccelerator::DoIOTask() {
while (DequeueOutput())
did_work = true;
- ManageTimer(did_work);
+ ManageTimer(did_work || start_timer);
}
bool AndroidVideoDecodeAccelerator::QueueInput() {
DCHECK(thread_checker_.CalledOnValidThread());
TRACE_EVENT0("media", "AVDA::QueueInput");
+ base::AutoReset<bool> auto_reset(&defer_errors_, true);
if (bitstreams_notified_in_advance_.size() > kMaxBitstreamsNotifiedInAdvance)
return false;
if (pending_bitstream_buffers_.empty())
return false;
+ if (state_ == WAITING_FOR_KEY)
+ return false;
- int input_buf_index = 0;
- media::MediaCodecStatus status =
- media_codec_->DequeueInputBuffer(NoWaitTimeOut(), &input_buf_index);
+ int input_buf_index = pending_input_buf_index_;
- if (status == media::MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER)
- return false;
- if (status == media::MEDIA_CODEC_ERROR) {
- POST_ERROR(PLATFORM_FAILURE, "Failed to DequeueInputBuffer");
- return false;
+ // Do not dequeue a new input buffer if we failed with MEDIA_CODEC_NO_KEY.
+ // That status does not return this buffer back to the pool of
+ // available input buffers. We have to reuse it in QueueSecureInputBuffer().
+ if (input_buf_index == -1) {
+ media::MediaCodecStatus status =
+ media_codec_->DequeueInputBuffer(NoWaitTimeOut(), &input_buf_index);
+ switch (status) {
+ case media::MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER:
+ return false;
+ case media::MEDIA_CODEC_ERROR:
+ POST_ERROR(PLATFORM_FAILURE, "Failed to DequeueInputBuffer");
+ return false;
+ case media::MEDIA_CODEC_OK:
+ break;
+ default:
+ NOTREACHED() << "Unknown DequeueInputBuffer status " << status;
+ return false;
+ }
}
- DCHECK_EQ(status, media::MEDIA_CODEC_OK);
- base::Time queued_time = pending_bitstream_buffers_.front().second;
- UMA_HISTOGRAM_TIMES("Media.AVDA.InputQueueTime",
- base::Time::Now() - queued_time);
- media::BitstreamBuffer bitstream_buffer =
- pending_bitstream_buffers_.front().first;
+ DCHECK_NE(input_buf_index, -1);
+
+ media::BitstreamBuffer bitstream_buffer = pending_bitstream_buffers_.front();
if (bitstream_buffer.id() == -1) {
pending_bitstream_buffers_.pop();
TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
pending_bitstream_buffers_.size());
+ DCHECK_NE(state_, ERROR);
+ state_ = WAITING_FOR_EOS;
media_codec_->QueueEOS(input_buf_index);
return true;
}
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(bitstream_buffer.handle(), true));
- if (!shm->Map(bitstream_buffer.size())) {
- POST_ERROR(UNREADABLE_INPUT, "Failed to SharedMemory::Map()");
- return false;
+ scoped_ptr<SharedMemoryRegion> shm;
+
+ if (pending_input_buf_index_ == -1) {
+ // When |pending_input_buf_index_| is not -1, the buffer has already been
+ // dequeued from MediaCodec, filled with data, and bitstream_buffer.handle()
+ // has been closed.
+ shm.reset(new SharedMemoryRegion(bitstream_buffer, true));
+
+ if (!shm->Map()) {
+ POST_ERROR(UNREADABLE_INPUT, "Failed to SharedMemoryRegion::Map()");
+ return false;
+ }
}
const base::TimeDelta presentation_timestamp =
@@ -351,12 +581,16 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
// result in them finding the right timestamp.
bitstream_buffers_in_decoder_[presentation_timestamp] = bitstream_buffer.id();
- const uint8_t* memory = static_cast<const uint8_t*>(shm->memory());
+ // Notice that |memory| will be null if we repeatedly enqueue the same
+ // buffer; this happens after MEDIA_CODEC_NO_KEY.
+ const uint8_t* memory =
+ shm ? static_cast<const uint8_t*>(shm->memory()) : nullptr;
const std::string& key_id = bitstream_buffer.key_id();
const std::string& iv = bitstream_buffer.iv();
const std::vector<media::SubsampleEntry>& subsamples =
bitstream_buffer.subsamples();
+ media::MediaCodecStatus status;
if (key_id.empty() || iv.empty()) {
status = media_codec_->QueueInputBuffer(input_buf_index, memory,
bitstream_buffer.size(),
@@ -372,24 +606,18 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
<< " status:" << status;
if (status == media::MEDIA_CODEC_NO_KEY) {
- // Keep trying to enqueue the front pending buffer.
- //
- // TODO(timav): Figure out whether stopping the pipeline in response to
- // this error and restarting it in OnKeyAdded() has significant benefits
- // (e.g. saving power).
+ // Keep trying to enqueue the same input buffer.
+ // The buffer is owned by us (not the MediaCodec) and is filled with data.
DVLOG(1) << "QueueSecureInputBuffer failed: NO_KEY";
- return true;
+ pending_input_buf_index_ = input_buf_index;
+ state_ = WAITING_FOR_KEY;
+ return false;
}
+ pending_input_buf_index_ = -1;
pending_bitstream_buffers_.pop();
TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
pending_bitstream_buffers_.size());
-
- if (status != media::MEDIA_CODEC_OK) {
- POST_ERROR(PLATFORM_FAILURE, "Failed to QueueInputBuffer: " << status);
- return false;
- }
-
// We should call NotifyEndOfBitstreamBuffer(), when no more decoded output
// will be returned from the bitstream buffer. However, MediaCodec API is
// not enough to guarantee it.
@@ -403,12 +631,18 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
weak_this_factory_.GetWeakPtr(), bitstream_buffer.id()));
bitstreams_notified_in_advance_.push_back(bitstream_buffer.id());
+ if (status != media::MEDIA_CODEC_OK) {
+ POST_ERROR(PLATFORM_FAILURE, "Failed to QueueInputBuffer: " << status);
+ return false;
+ }
+
return true;
}
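Taken together, the NO_KEY handling above forms a small hold-and-retry loop: a buffer that fails with MEDIA_CODEC_NO_KEY stays owned by the decoder and is re-queued once OnKeyAdded() fires. A self-contained sketch of the pattern follows; all types here are illustrative stand-ins, not the real Chromium classes:

    // Sketch only: hold-and-retry on a missing decryption key.
    enum Status { OK, TRY_AGAIN_LATER, NO_KEY, FAILED };
    enum State { NO_ERROR, WAITING_FOR_KEY };
    struct FakeCodec {
      Status DequeueInputBuffer(int* index) { *index = 0; return OK; }
      Status QueueSecureInputBuffer(int) { return has_key ? OK : NO_KEY; }
      bool has_key = false;
    };
    struct Decoder {
      FakeCodec codec;
      State state = NO_ERROR;
      int pending_input_buf_index = -1;       // buffer we still own, if any
      bool QueueInput() {
        if (state == WAITING_FOR_KEY)
          return false;                       // resumed by OnKeyAdded()
        int index = pending_input_buf_index;
        if (index == -1 && codec.DequeueInputBuffer(&index) != OK)
          return false;
        switch (codec.QueueSecureInputBuffer(index)) {
          case NO_KEY:
            pending_input_buf_index = index;  // keep it; data already copied
            state = WAITING_FOR_KEY;
            return false;
          case OK:
            pending_input_buf_index = -1;     // the codec owns the buffer now
            return true;
          default:
            return false;
        }
      }
      void OnKeyAdded() {
        if (state == WAITING_FOR_KEY)
          state = NO_ERROR;                   // next QueueInput() retries
      }
    };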
bool AndroidVideoDecodeAccelerator::DequeueOutput() {
DCHECK(thread_checker_.CalledOnValidThread());
TRACE_EVENT0("media", "AVDA::DequeueOutput");
+ base::AutoReset<bool> auto_reset(&defer_errors_, true);
if (picturebuffers_requested_ && output_picture_buffers_.empty())
return false;
@@ -432,10 +666,6 @@ bool AndroidVideoDecodeAccelerator::DequeueOutput() {
"presentation_timestamp (ms)",
presentation_timestamp.InMilliseconds());
- DVLOG(3) << "AVDA::DequeueOutput: pts:" << presentation_timestamp
- << " buf_index:" << buf_index << " offset:" << offset
- << " size:" << size << " eos:" << eos;
-
switch (status) {
case media::MEDIA_CODEC_ERROR:
POST_ERROR(PLATFORM_FAILURE, "DequeueOutputBuffer failed.");
@@ -445,23 +675,30 @@ bool AndroidVideoDecodeAccelerator::DequeueOutput() {
return false;
case media::MEDIA_CODEC_OUTPUT_FORMAT_CHANGED: {
- if (!output_picture_buffers_.empty()) {
- // TODO(chcunningham): This will likely dismiss a handful of decoded
- // frames that have not yet been drawn and returned to us for re-use.
- // Consider a more complicated design that would wait for them to be
- // drawn before dismissing.
- DismissPictureBuffers();
+ if (media_codec_->GetOutputSize(&size_) != media::MEDIA_CODEC_OK) {
+ POST_ERROR(PLATFORM_FAILURE, "GetOutputSize failed.");
+ return false;
+ }
+ DVLOG(3) << __FUNCTION__
+ << " OUTPUT_FORMAT_CHANGED, new size: " << size_.ToString();
+
+ // Don't request picture buffers if we already have some. This avoids
+ // having to dismiss the existing buffers which may actively reference
+ // decoded images. Breaking their connection to the decoded image will
+ // cause rendering of black frames. Instead, we let the existing
+ // PictureBuffers live on and we simply update their size the next time
+ // they're attached to an image of the new resolution. See the
+ // size update in |SendDecodedFrameToClient| and https://crbug.com/587994.
+ if (output_picture_buffers_.empty() && !picturebuffers_requested_) {
+ picturebuffers_requested_ = true;
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&AndroidVideoDecodeAccelerator::RequestPictureBuffers,
+ weak_this_factory_.GetWeakPtr()));
+ return false;
}
- picturebuffers_requested_ = true;
- int32_t width, height;
- media_codec_->GetOutputFormat(&width, &height);
- size_ = gfx::Size(width, height);
- base::MessageLoop::current()->PostTask(
- FROM_HERE,
- base::Bind(&AndroidVideoDecodeAccelerator::RequestPictureBuffers,
- weak_this_factory_.GetWeakPtr()));
- return false;
+ return true;
}
case media::MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
@@ -469,6 +706,9 @@ bool AndroidVideoDecodeAccelerator::DequeueOutput() {
case media::MEDIA_CODEC_OK:
DCHECK_GE(buf_index, 0);
+ DVLOG(3) << __FUNCTION__ << ": pts:" << presentation_timestamp
+ << " buf_index:" << buf_index << " offset:" << offset
+ << " size:" << size << " eos:" << eos;
break;
default:
@@ -478,12 +718,36 @@ bool AndroidVideoDecodeAccelerator::DequeueOutput() {
} while (buf_index < 0);
if (eos) {
- DVLOG(3) << "AVDA::DequeueOutput: Resetting codec state after EOS";
+ DVLOG(3) << __FUNCTION__ << ": Resetting codec state after EOS";
+
+ // If we were waiting for an EOS, clear the state and reset the MediaCodec
+ // as normal. Otherwise, enter the ERROR state which will force destruction
+ // of MediaCodec during ResetCodecState().
+ //
+ // Some Android platforms seem to send an EOS buffer even when we're not
+ // expecting it. In this case, destroy and reset the codec but don't notify
+ // flush done since it violates the state machine. http://crbug.com/585959.
+ const bool was_waiting_for_eos = state_ == WAITING_FOR_EOS;
+ state_ = was_waiting_for_eos ? NO_ERROR : ERROR;
+
ResetCodecState();
+ // |media_codec_| might still be null.
+ if (was_waiting_for_eos) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyFlushDone,
+ weak_this_factory_.GetWeakPtr()));
+ }
+ return false;
+ }
- base::MessageLoop::current()->PostTask(
- FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyFlushDone,
- weak_this_factory_.GetWeakPtr()));
+ if (!picturebuffers_requested_) {
+ // If, somehow, we get a decoded frame back before a FORMAT_CHANGED
+ // message, then we might not have any picture buffers to use. This
+ // isn't supposed to happen (see EncodeDecodeTest.java#617).
+ // Log a metric to see how common this is.
+ RecordFormatChangedMetric(FormatChangedValue::MissingFormatChanged);
+ media_codec_->ReleaseOutputBuffer(buf_index, false);
+ POST_ERROR(PLATFORM_FAILURE, "Dequeued buffers before FORMAT_CHANGED.");
return false;
}
@@ -515,7 +779,7 @@ bool AndroidVideoDecodeAccelerator::DequeueOutput() {
// correction and provides a non-decreasing timestamp sequence, which might
// result in timestamp duplicates. Discard the frame if we cannot get the
// corresponding buffer id.
- DVLOG(3) << "AVDA::DequeueOutput: Releasing buffer with unexpected PTS: "
+ DVLOG(3) << __FUNCTION__ << ": Releasing buffer with unexpected PTS: "
<< presentation_timestamp;
media_codec_->ReleaseOutputBuffer(buf_index, false);
}
@@ -532,7 +796,7 @@ void AndroidVideoDecodeAccelerator::SendDecodedFrameToClient(
DCHECK(!free_picture_ids_.empty());
TRACE_EVENT0("media", "AVDA::SendDecodedFrameToClient");
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
POST_ERROR(PLATFORM_FAILURE, "Failed to make the GL context current.");
return;
}
@@ -541,46 +805,71 @@ void AndroidVideoDecodeAccelerator::SendDecodedFrameToClient(
free_picture_ids_.pop();
TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
- OutputBufferMap::const_iterator i =
- output_picture_buffers_.find(picture_buffer_id);
+ const auto& i = output_picture_buffers_.find(picture_buffer_id);
if (i == output_picture_buffers_.end()) {
POST_ERROR(PLATFORM_FAILURE,
"Can't find PictureBuffer id: " << picture_buffer_id);
return;
}
+ bool size_changed = false;
+ if (i->second.size() != size_) {
+ // Size may have changed due to resolution change since the last time this
+ // PictureBuffer was used.
+ strategy_->UpdatePictureBufferSize(&i->second, size_);
+ size_changed = true;
+ }
+
// Connect the PictureBuffer to the decoded frame, via whatever
// mechanism the strategy likes.
strategy_->UseCodecBufferForPictureBuffer(codec_buffer_index, i->second);
+ const bool allow_overlay = strategy_->ArePicturesOverlayable();
+
+ media::Picture picture(picture_buffer_id, bitstream_id, gfx::Rect(size_),
+ allow_overlay);
+ picture.set_size_changed(size_changed);
+
base::MessageLoop::current()->PostTask(
FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyPictureReady,
- weak_this_factory_.GetWeakPtr(),
- media::Picture(picture_buffer_id, bitstream_id,
- gfx::Rect(size_), false)));
+ weak_this_factory_.GetWeakPtr(), picture));
}
void AndroidVideoDecodeAccelerator::Decode(
const media::BitstreamBuffer& bitstream_buffer) {
DCHECK(thread_checker_.CalledOnValidThread());
- if (bitstream_buffer.id() != -1 && bitstream_buffer.size() == 0) {
+
+ if (bitstream_buffer.id() >= 0 && bitstream_buffer.size() > 0) {
+ DecodeBuffer(bitstream_buffer);
+ return;
+ }
+
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+
+ if (bitstream_buffer.id() < 0) {
+ POST_ERROR(INVALID_ARGUMENT,
+ "Invalid bistream_buffer, id: " << bitstream_buffer.id());
+ } else {
base::MessageLoop::current()->PostTask(
FROM_HERE,
base::Bind(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer,
weak_this_factory_.GetWeakPtr(), bitstream_buffer.id()));
- return;
}
+}
- pending_bitstream_buffers_.push(
- std::make_pair(bitstream_buffer, base::Time::Now()));
+void AndroidVideoDecodeAccelerator::DecodeBuffer(
+ const media::BitstreamBuffer& bitstream_buffer) {
+ pending_bitstream_buffers_.push(bitstream_buffer);
TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
pending_bitstream_buffers_.size());
- DoIOTask();
+ DoIOTask(true);
}
void AndroidVideoDecodeAccelerator::RequestPictureBuffers() {
- client_->ProvidePictureBuffers(kNumPictureBuffers, size_,
+ client_->ProvidePictureBuffers(kNumPictureBuffers, 1,
+ strategy_->GetPictureBufferSize(),
strategy_->GetTextureTarget());
}
@@ -595,8 +884,12 @@ void AndroidVideoDecodeAccelerator::AssignPictureBuffers(
return;
}
+ const bool have_context = make_context_current_cb_.Run();
+ LOG_IF(WARNING, !have_context)
+ << "Failed to make GL context current for Assign, continuing.";
+
for (size_t i = 0; i < buffers.size(); ++i) {
- if (buffers[i].size() != size_) {
+ if (buffers[i].size() != strategy_->GetPictureBufferSize()) {
POST_ERROR(INVALID_ARGUMENT,
"Invalid picture buffer size assigned. Wanted "
<< size_.ToString() << ", but got "
@@ -606,29 +899,17 @@ void AndroidVideoDecodeAccelerator::AssignPictureBuffers(
int32_t id = buffers[i].id();
output_picture_buffers_.insert(std::make_pair(id, buffers[i]));
free_picture_ids_.push(id);
- // Since the client might be re-using |picture_buffer_id| values, forget
- // about previously-dismissed IDs now. See ReusePictureBuffer() comment
- // about "zombies" for why we maintain this set in the first place.
- dismissed_picture_ids_.erase(id);
- strategy_->AssignOnePictureBuffer(buffers[i]);
+ strategy_->AssignOnePictureBuffer(buffers[i], have_context);
}
TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
-
- DoIOTask();
+ DoIOTask(true);
}
void AndroidVideoDecodeAccelerator::ReusePictureBuffer(
int32_t picture_buffer_id) {
DCHECK(thread_checker_.CalledOnValidThread());
- // This ReusePictureBuffer() might have been in a pipe somewhere (queued in
- // IPC, or in a PostTask either at the sender or receiver) when we sent a
- // DismissPictureBuffer() for this |picture_buffer_id|. Account for such
- // potential "zombie" IDs here.
- if (dismissed_picture_ids_.erase(picture_buffer_id))
- return;
-
free_picture_ids_.push(picture_buffer_id);
TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
@@ -641,58 +922,139 @@ void AndroidVideoDecodeAccelerator::ReusePictureBuffer(
}
strategy_->ReuseOnePictureBuffer(i->second);
-
- DoIOTask();
+ DoIOTask(true);
}
void AndroidVideoDecodeAccelerator::Flush() {
DCHECK(thread_checker_.CalledOnValidThread());
- Decode(media::BitstreamBuffer(-1, base::SharedMemoryHandle(), 0));
+ DecodeBuffer(media::BitstreamBuffer(-1, base::SharedMemoryHandle(), 0));
}
-bool AndroidVideoDecodeAccelerator::ConfigureMediaCodec() {
+void AndroidVideoDecodeAccelerator::ConfigureMediaCodecAsynchronously() {
DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(surface_texture_.get());
- TRACE_EVENT0("media", "AVDA::ConfigureMediaCodec");
- gfx::ScopedJavaSurface surface(surface_texture_.get());
+ // It's probably okay just to return here, since the codec will be configured
+ // asynchronously. It's unclear that any state for the new request could be
+ // different, unless somebody modifies |codec_config_| while we're already
+ // waiting for a codec, which would not be thread-safe anyway.
+ DCHECK_NE(state_, WAITING_FOR_CODEC);
+
+ state_ = WAITING_FOR_CODEC;
+
+ // Tell the strategy that we're changing codecs. The codec itself could be
+ // used normally, since we don't replace it until we're back on the main
+ // thread. However, if we're using an output surface, then the incoming codec
+ // might access that surface while the main thread is drawing. Telling the
+ // strategy to forget the codec avoids this.
+ if (media_codec_) {
+ media_codec_.reset();
+ strategy_->CodecChanged(nullptr, output_picture_buffers_);
+ }
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner =
+ g_avda_timer.Pointer()->ConstructionTaskRunner();
+ CHECK(task_runner);
+
+ base::PostTaskAndReplyWithResult(
+ task_runner.get(), FROM_HERE,
+ base::Bind(&AndroidVideoDecodeAccelerator::ConfigureMediaCodecOnAnyThread,
+ codec_config_),
+ base::Bind(&AndroidVideoDecodeAccelerator::OnCodecConfigured,
+ weak_this_factory_.GetWeakPtr()));
+}
+
+bool AndroidVideoDecodeAccelerator::ConfigureMediaCodecSynchronously() {
+ state_ = WAITING_FOR_CODEC;
+ scoped_ptr<media::VideoCodecBridge> media_codec =
+ ConfigureMediaCodecOnAnyThread(codec_config_);
+ OnCodecConfigured(std::move(media_codec));
+ return !!media_codec_;
+}
+
+scoped_ptr<media::VideoCodecBridge>
+AndroidVideoDecodeAccelerator::ConfigureMediaCodecOnAnyThread(
+ scoped_refptr<CodecConfig> codec_config) {
+ TRACE_EVENT0("media", "AVDA::ConfigureMediaCodec");
- jobject media_crypto = media_crypto_ ? media_crypto_->obj() : nullptr;
+ jobject media_crypto = codec_config->media_crypto_
+ ? codec_config->media_crypto_->obj()
+ : nullptr;
// |needs_protected_surface_| implies encrypted stream.
- DCHECK(!needs_protected_surface_ || media_crypto);
+ DCHECK(!codec_config->needs_protected_surface_ || media_crypto);
+
+ return scoped_ptr<media::VideoCodecBridge>(
+ media::VideoCodecBridge::CreateDecoder(
+ codec_config->codec_, codec_config->needs_protected_surface_,
+ codec_config->initial_expected_coded_size_,
+ codec_config->surface_.j_surface().obj(), media_crypto, true));
+}
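The split above (a static builder taking a ref-counted config, with the reply delivered on the main thread) is what makes construction safe to run off-thread: nothing in ConfigureMediaCodecOnAnyThread touches the AVDA instance. A stand-alone sketch of the same shape, using std::shared_ptr/std::unique_ptr as stand-ins for Chromium's scoped_refptr/scoped_ptr:

    // Sketch only: off-thread construction with a main-thread reply.
    #include <memory>
    struct CodecConfig { int codec_type = 0; };   // shared, read-only here
    struct Codec { explicit Codec(int) {} };
    // May run on any thread: depends only on the ref-counted config.
    std::unique_ptr<Codec> BuildCodec(std::shared_ptr<const CodecConfig> cfg) {
      return std::unique_ptr<Codec>(new Codec(cfg->codec_type));
    }
    // Runs back on the thread that requested construction.
    void OnCodecBuilt(std::unique_ptr<Codec> codec) {
      // Adopt |codec|; a null pointer means construction failed.
    }

In the patch itself, base::PostTaskAndReplyWithResult provides the thread hop, and the weak pointer bound into the reply means a destroyed AVDA simply drops the finished codec.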
+
+void AndroidVideoDecodeAccelerator::OnCodecConfigured(
+ scoped_ptr<media::VideoCodecBridge> media_codec) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(state_, WAITING_FOR_CODEC);
+
+ media_codec_ = std::move(media_codec);
+
+ // Record one instance of the codec being initialized.
+ RecordFormatChangedMetric(FormatChangedValue::CodecInitialized);
- // Pass a dummy 320x240 canvas size and let the codec signal the real size
- // when it's known from the bitstream.
- media_codec_.reset(media::VideoCodecBridge::CreateDecoder(
- codec_, needs_protected_surface_, gfx::Size(320, 240),
- surface.j_surface().obj(), media_crypto));
strategy_->CodecChanged(media_codec_.get(), output_picture_buffers_);
+
+ // If we are supposed to notify that initialization is complete, then do so
+ // now. Otherwise, this is a reconfiguration.
+ if (deferred_initialization_pending_) {
+ NotifyInitializationComplete(!!media_codec_);
+ deferred_initialization_pending_ = false;
+ }
+
if (!media_codec_) {
- LOG(ERROR) << "Failed to create MediaCodec instance.";
- return false;
+ POST_ERROR(PLATFORM_FAILURE, "Failed to create MediaCodec.");
+ return;
}
+ state_ = NO_ERROR;
+
ManageTimer(true);
- return true;
}
void AndroidVideoDecodeAccelerator::ResetCodecState() {
DCHECK(thread_checker_.CalledOnValidThread());
+
+ // If a codec reconfiguration is already in flight, the new codec will start
+ // from a clean state, so there is nothing to reset here. This can really
+ // only happen if somebody calls Reset().
+ if (state_ == WAITING_FOR_CODEC)
+ return;
+
bitstream_buffers_in_decoder_.clear();
- // We don't dismiss picture buffers here since we might not get a format
- // changed message to re-request them, such as during a seek. In that case,
- // we want to reuse the existing buffers. However, we're about to invalidate
- // all the output buffers, so we must be sure that the strategy no longer
- // refers to them.
+ if (pending_input_buf_index_ != -1) {
+ // The data for that index exists in the input buffer, but the corresponding
+ // shm block has been deleted. Check that it is safe to flush the codec,
+ // i.e. that |pending_bitstream_buffers_| is empty.
+ // TODO(timav): keep shm block for that buffer and remove this restriction.
+ DCHECK(pending_bitstream_buffers_.empty());
+ pending_input_buf_index_ = -1;
+ }
+
+ if (state_ == WAITING_FOR_KEY)
+ state_ = NO_ERROR;
+
+ // We might increment error_sequence_token here to cancel any delayed errors,
+ // but right now it's unclear that it's safe to do so. If we are in an error
+ // state because of a codec error, then it would be okay. Otherwise, it's
+ // less obvious that we are exiting the error state. Since deferred errors
+ // are only intended for fullscreen transitions right now, we take the more
+ // conservative approach and let the errors post.
+ // TODO(liberato): revisit this once we sort out the error state a bit more.
// When codec is not in error state we can quickly reset (internally calls
// flush()) for JB-MR2 and beyond. Prior to JB-MR2, flush() had several bugs
- // (b/8125974, b/8347958) so we must stop() and reconfigure MediaCodec. The
- // full reconfigure is much slower and may cause visible freezing if done
- // mid-stream.
+ // (b/8125974, b/8347958) so we must delete the MediaCodec and create a new
+ // one. The full reconfigure is much slower and may cause visible freezing if
+ // done mid-stream.
if (state_ == NO_ERROR &&
base::android::BuildInfo::GetInstance()->sdk_int() >= 18) {
DVLOG(3) << __FUNCTION__ << " Doing fast MediaCodec reset (flush).";
@@ -702,37 +1064,21 @@ void AndroidVideoDecodeAccelerator::ResetCodecState() {
strategy_->CodecChanged(media_codec_.get(), output_picture_buffers_);
} else {
DVLOG(3) << __FUNCTION__
- << " Doing slow MediaCodec reset (stop/re-configure).";
- io_timer_.Stop();
- media_codec_->Stop();
+ << " Deleting the MediaCodec and creating a new one.";
+ g_avda_timer.Pointer()->StopTimer(this);
// Changing the codec will also notify the strategy to forget about any
// output buffers it has currently.
- ConfigureMediaCodec();
state_ = NO_ERROR;
+ ConfigureMediaCodecAsynchronously();
}
}
-void AndroidVideoDecodeAccelerator::DismissPictureBuffers() {
- DCHECK(thread_checker_.CalledOnValidThread());
- DVLOG(3) << __FUNCTION__;
-
- for (const auto& pb : output_picture_buffers_) {
- strategy_->DismissOnePictureBuffer(pb.second);
- client_->DismissPictureBuffer(pb.first);
- dismissed_picture_ids_.insert(pb.first);
- }
- output_picture_buffers_.clear();
- std::queue<int32_t> empty;
- std::swap(free_picture_ids_, empty);
- picturebuffers_requested_ = false;
-}
-
void AndroidVideoDecodeAccelerator::Reset() {
DCHECK(thread_checker_.CalledOnValidThread());
TRACE_EVENT0("media", "AVDA::Reset");
while (!pending_bitstream_buffers_.empty()) {
- int32_t bitstream_buffer_id = pending_bitstream_buffers_.front().first.id();
+ int32_t bitstream_buffer_id = pending_bitstream_buffers_.front().id();
pending_bitstream_buffers_.pop();
if (bitstream_buffer_id != -1) {
@@ -745,8 +1091,13 @@ void AndroidVideoDecodeAccelerator::Reset() {
TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount", 0);
bitstreams_notified_in_advance_.clear();
+ // Any error that is waiting to post can be ignored.
+ error_sequence_token_++;
+
ResetCodecState();
+ // Note that |media_codec_| might not yet be ready, but we can still post
+ // this anyway.
base::MessageLoop::current()->PostTask(
FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyResetDone,
weak_this_factory_.GetWeakPtr()));
@@ -755,11 +1106,12 @@ void AndroidVideoDecodeAccelerator::Reset() {
void AndroidVideoDecodeAccelerator::Destroy() {
DCHECK(thread_checker_.CalledOnValidThread());
- bool have_context = make_context_current_.Run();
+ bool have_context = make_context_current_cb_.Run();
if (!have_context)
LOG(WARNING) << "Failed make GL context current for Destroy, continuing.";
- strategy_->Cleanup(have_context, output_picture_buffers_);
+ if (strategy_)
+ strategy_->Cleanup(have_context, output_picture_buffers_);
// If we have an OnFrameAvailable handler, tell it that we're going away.
if (on_frame_available_handler_) {
@@ -767,15 +1119,20 @@ void AndroidVideoDecodeAccelerator::Destroy() {
on_frame_available_handler_ = nullptr;
}
+ // Note that async codec construction might still be in progress. In that
+ // case, invalidating our weak refs below ensures that the codec is deleted
+ // as soon as construction completes.
weak_this_factory_.InvalidateWeakPtrs();
if (media_codec_) {
- io_timer_.Stop();
- media_codec_->Stop();
+ g_avda_timer.Pointer()->StopTimer(this);
+ media_codec_.reset();
}
delete this;
}
-bool AndroidVideoDecodeAccelerator::CanDecodeOnIOThread() {
+bool AndroidVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
return false;
}
@@ -790,7 +1147,29 @@ const base::ThreadChecker& AndroidVideoDecodeAccelerator::ThreadChecker()
base::WeakPtr<gpu::gles2::GLES2Decoder>
AndroidVideoDecodeAccelerator::GetGlDecoder() const {
- return gl_decoder_;
+ return get_gles2_decoder_cb_.Run();
+}
+
+gpu::gles2::TextureRef* AndroidVideoDecodeAccelerator::GetTextureForPicture(
+ const media::PictureBuffer& picture_buffer) {
+ auto gles_decoder = GetGlDecoder();
+ RETURN_ON_FAILURE(this, gles_decoder, "Failed to get GL decoder",
+ ILLEGAL_STATE, nullptr);
+ RETURN_ON_FAILURE(this, gles_decoder->GetContextGroup(),
+ "Null gles_decoder->GetContextGroup()", ILLEGAL_STATE,
+ nullptr);
+ gpu::gles2::TextureManager* texture_manager =
+ gles_decoder->GetContextGroup()->texture_manager();
+ RETURN_ON_FAILURE(this, texture_manager, "Null texture_manager",
+ ILLEGAL_STATE, nullptr);
+
+ DCHECK_LE(1u, picture_buffer.internal_texture_ids().size());
+ gpu::gles2::TextureRef* texture_ref =
+ texture_manager->GetTexture(picture_buffer.internal_texture_ids()[0]);
+ RETURN_ON_FAILURE(this, texture_manager, "Null texture_ref", ILLEGAL_STATE,
+ nullptr);
+
+ return texture_ref;
}
void AndroidVideoDecodeAccelerator::OnFrameAvailable() {
@@ -802,20 +1181,24 @@ void AndroidVideoDecodeAccelerator::OnFrameAvailable() {
void AndroidVideoDecodeAccelerator::PostError(
const ::tracked_objects::Location& from_here,
media::VideoDecodeAccelerator::Error error) {
- base::MessageLoop::current()->PostTask(
- from_here, base::Bind(&AndroidVideoDecodeAccelerator::NotifyError,
- weak_this_factory_.GetWeakPtr(), error));
+ base::MessageLoop::current()->PostDelayedTask(
+ from_here,
+ base::Bind(&AndroidVideoDecodeAccelerator::NotifyError,
+ weak_this_factory_.GetWeakPtr(), error, error_sequence_token_),
+ (defer_errors_ ? ErrorPostingDelay() : base::TimeDelta()));
state_ = ERROR;
}
void AndroidVideoDecodeAccelerator::OnMediaCryptoReady(
- media::MediaDrmBridge::JavaObjectPtr media_crypto,
+ media::MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto,
bool needs_protected_surface) {
DVLOG(1) << __FUNCTION__;
if (!media_crypto) {
LOG(ERROR) << "MediaCrypto is not available, can't play encrypted stream.";
- NotifyCdmAttached(false);
+ cdm_for_reference_holding_only_ = nullptr;
+ media_drm_bridge_cdm_context_ = nullptr;
+ NotifyInitializationComplete(false);
return;
}
@@ -825,23 +1208,24 @@ void AndroidVideoDecodeAccelerator::OnMediaCryptoReady(
// is not created yet.
DCHECK(!media_codec_);
- media_crypto_ = std::move(media_crypto);
- needs_protected_surface_ = needs_protected_surface;
+ codec_config_->media_crypto_ = std::move(media_crypto);
+ codec_config_->needs_protected_surface_ = needs_protected_surface;
// After receiving |media_crypto_| we can configure MediaCodec.
- const bool success = ConfigureMediaCodec();
- NotifyCdmAttached(success);
+ ConfigureMediaCodecAsynchronously();
}
void AndroidVideoDecodeAccelerator::OnKeyAdded() {
DVLOG(1) << __FUNCTION__;
- // TODO(timav): Figure out whether stopping the pipeline in response to
- // NO_KEY error and restarting it here has significant benefits (e.g. saving
- // power). Right now do nothing here.
+
+ if (state_ == WAITING_FOR_KEY)
+ state_ = NO_ERROR;
+
+ DoIOTask(true);
}
-void AndroidVideoDecodeAccelerator::NotifyCdmAttached(bool success) {
- client_->NotifyCdmAttached(success);
+void AndroidVideoDecodeAccelerator::NotifyInitializationComplete(bool success) {
+ client_->NotifyInitializationComplete(success);
}
void AndroidVideoDecodeAccelerator::NotifyPictureReady(
@@ -863,7 +1247,13 @@ void AndroidVideoDecodeAccelerator::NotifyResetDone() {
}
void AndroidVideoDecodeAccelerator::NotifyError(
- media::VideoDecodeAccelerator::Error error) {
+ media::VideoDecodeAccelerator::Error error,
+ int token) {
+ DVLOG(1) << __FUNCTION__ << ": error: " << error << " token: " << token
+ << " current: " << error_sequence_token_;
+ if (token != error_sequence_token_)
+ return;
+
client_->NotifyError(error);
}
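The pieces of the deferred-error mechanism are spread across PostError(), NotifyError(), and Reset(); read together they form a token-cancellation scheme. A condensed sketch, with the task posting reduced to returning a callable (names follow the patch, scaffolding is invented):

    // Sketch only: stale-error cancellation via a sequence token.
    #include <functional>
    struct ErrorFunnel {
      int error_sequence_token = 0;
      int last_delivered = 0;
      void Reset() { ++error_sequence_token; }    // invalidates pending posts
      std::function<void()> PostError(int error) {
        const int token = error_sequence_token;   // captured at post time
        return [this, error, token] { NotifyError(error, token); };
      }
      void NotifyError(int error, int token) {
        if (token != error_sequence_token)
          return;                                 // posted before a Reset()
        last_delivered = error;  // stands in for client_->NotifyError()
      }
    };

A task posted before Reset() carries a stale token and is dropped on delivery, which mirrors how a delayed error from a fullscreen transition is suppressed here.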
@@ -871,45 +1261,72 @@ void AndroidVideoDecodeAccelerator::ManageTimer(bool did_work) {
bool should_be_running = true;
base::TimeTicks now = base::TimeTicks::Now();
- if (!did_work) {
+ if (!did_work && !most_recent_work_.is_null()) {
// Make sure that we have done work recently enough, else stop the timer.
- if (now - most_recent_work_ > IdleTimerTimeOut())
+ if (now - most_recent_work_ > IdleTimerTimeOut()) {
+ most_recent_work_ = base::TimeTicks();
should_be_running = false;
+ }
} else {
most_recent_work_ = now;
}
- if (should_be_running && !io_timer_.IsRunning()) {
- io_timer_.Start(FROM_HERE, DecodePollDelay(), this,
- &AndroidVideoDecodeAccelerator::DoIOTask);
- } else if (!should_be_running && io_timer_.IsRunning()) {
- io_timer_.Stop();
- }
+ if (should_be_running)
+ g_avda_timer.Pointer()->StartTimer(this);
+ else
+ g_avda_timer.Pointer()->StopTimer(this);
}
// static
-bool AndroidVideoDecodeAccelerator::UseDeferredRenderingStrategy() {
- return base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableUnifiedMediaPipeline);
+bool AndroidVideoDecodeAccelerator::UseDeferredRenderingStrategy(
+ const gpu::GpuPreferences& gpu_preferences) {
+ // TODO(liberato, watk): Figure out what we want to do about zero copy for
+ // fullscreen external SurfaceView in WebView. http://crbug.com/582170.
+ return !gpu_preferences.enable_threaded_texture_mailboxes;
}
// static
media::VideoDecodeAccelerator::Capabilities
-AndroidVideoDecodeAccelerator::GetCapabilities() {
+AndroidVideoDecodeAccelerator::GetCapabilities(
+ const gpu::GpuPreferences& gpu_preferences) {
Capabilities capabilities;
SupportedProfiles& profiles = capabilities.supported_profiles;
- SupportedProfile profile;
-
- profile.profile = media::VP8PROFILE_ANY;
- profile.min_resolution.SetSize(0, 0);
- profile.max_resolution.SetSize(1920, 1088);
- profiles.push_back(profile);
+ if (media::MediaCodecUtil::IsVp8DecoderAvailable()) {
+ SupportedProfile profile;
+ profile.profile = media::VP8PROFILE_ANY;
+ profile.min_resolution.SetSize(0, 0);
+ profile.max_resolution.SetSize(1920, 1088);
+ // If we know MediaCodec will just create a software codec, prefer our
+ // internal software decoder instead. It's more up to date and secured
+ // within the renderer sandbox. However if the content is encrypted, we
+ // must use MediaCodec anyway, since MediaDrm offers no way to decrypt
+ // the buffers and let us use our internal software decoders.
+ profile.encrypted_only = media::VideoCodecBridge::IsKnownUnaccelerated(
+ media::kCodecVP8, media::MEDIA_CODEC_DECODER);
+ profiles.push_back(profile);
+ }
- profile.profile = media::VP9PROFILE_ANY;
- profile.min_resolution.SetSize(0, 0);
- profile.max_resolution.SetSize(1920, 1088);
- profiles.push_back(profile);
+ if (media::MediaCodecUtil::IsVp9DecoderAvailable()) {
+ SupportedProfile profile;
+ profile.min_resolution.SetSize(0, 0);
+ profile.max_resolution.SetSize(1920, 1088);
+ // If we know MediaCodec will just create a software codec, prefer our
+ // internal software decoder instead. It's more up to date and secured
+ // within the renderer sandbox. However if the content is encrypted, we
+ // must use MediaCodec anyway, since MediaDrm offers no way to decrypt
+ // the buffers and let us use our internal software decoders.
+ profile.encrypted_only = media::VideoCodecBridge::IsKnownUnaccelerated(
+ media::kCodecVP9, media::MEDIA_CODEC_DECODER);
+ profile.profile = media::VP9PROFILE_PROFILE0;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE1;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE2;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE3;
+ profiles.push_back(profile);
+ }
for (const auto& supported_profile : kSupportedH264Profiles) {
SupportedProfile profile;
@@ -922,9 +1339,15 @@ AndroidVideoDecodeAccelerator::GetCapabilities() {
profiles.push_back(profile);
}
- if (UseDeferredRenderingStrategy()) {
- capabilities.flags = media::VideoDecodeAccelerator::Capabilities::
+ capabilities.flags = media::VideoDecodeAccelerator::Capabilities::
+ SUPPORTS_DEFERRED_INITIALIZATION;
+ if (UseDeferredRenderingStrategy(gpu_preferences)) {
+ capabilities.flags |= media::VideoDecodeAccelerator::Capabilities::
NEEDS_ALL_PICTURE_BUFFERS_TO_DECODE;
+ if (media::MediaCodecUtil::IsSurfaceViewOutputSupported()) {
+ capabilities.flags |= media::VideoDecodeAccelerator::Capabilities::
+ SUPPORTS_EXTERNAL_OUTPUT_SURFACE;
+ }
}
return capabilities;
diff --git a/chromium/content/common/gpu/media/android_video_decode_accelerator.h b/chromium/content/common/gpu/media/android_video_decode_accelerator.h
index 1dd6816a72d..1e0543d3fc5 100644
--- a/chromium/content/common/gpu/media/android_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/android_video_decode_accelerator.h
@@ -18,11 +18,14 @@
#include "base/timer/timer.h"
#include "content/common/content_export.h"
#include "content/common/gpu/media/avda_state_provider.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
-#include "media/base/android/media_drm_bridge.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "media/base/android/media_drm_bridge_cdm_context.h"
#include "media/base/android/sdk_media_codec_bridge.h"
#include "media/base/media_keys.h"
#include "media/video/video_decode_accelerator.h"
+#include "ui/gl/android/scoped_java_surface.h"
namespace gfx {
class SurfaceTexture;
@@ -39,7 +42,7 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
: public media::VideoDecodeAccelerator,
public AVDAStateProvider {
public:
- typedef std::map<int32_t, media::PictureBuffer> OutputBufferMap;
+ using OutputBufferMap = std::map<int32_t, media::PictureBuffer>;
// A BackingStrategy is responsible for making a PictureBuffer's texture
// contain the image that a MediaCodec decoder buffer tells it to.
@@ -47,20 +50,26 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
public:
virtual ~BackingStrategy() {}
- // Called after the state provider is given, but before any other
- // calls to the BackingStrategy.
- virtual void Initialize(AVDAStateProvider* provider) = 0;
+ // Must be called before anything else. If |surface_view_id| is not equal to
+ // |kNoSurfaceID|, it refers to a SurfaceView that the strategy must render
+ // to.
+ // Returns the Java surface to configure MediaCodec with.
+ virtual gfx::ScopedJavaSurface Initialize(int surface_view_id) = 0;
// Called before the AVDA does any Destroy() work. This will be
// the last call that the BackingStrategy receives.
virtual void Cleanup(bool have_context,
const OutputBufferMap& buffer_map) = 0;
+ // This returns the SurfaceTexture created by Initialize, or nullptr if
+ // the strategy was initialized with a SurfaceView.
+ virtual scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture() const = 0;
+
// Return the GL texture target that the PictureBuffer textures use.
virtual uint32_t GetTextureTarget() const = 0;
- // Create and return a surface texture for the MediaCodec to use.
- virtual scoped_refptr<gfx::SurfaceTexture> CreateSurfaceTexture() = 0;
+ // Return the size to use when requesting picture buffers.
+ virtual gfx::Size GetPictureBufferSize() const = 0;
// Make the provided PictureBuffer draw the image that is represented by
// the decoded output buffer at codec_buffer_index.
@@ -70,16 +79,13 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// Notify strategy that a picture buffer has been assigned.
virtual void AssignOnePictureBuffer(
- const media::PictureBuffer& picture_buffer) {}
+ const media::PictureBuffer& picture_buffer,
+ bool have_context) {}
// Notify strategy that a picture buffer has been reused.
virtual void ReuseOnePictureBuffer(
const media::PictureBuffer& picture_buffer) {}
- // Notify strategy that we are about to dismiss a picture buffer.
- virtual void DismissOnePictureBuffer(
- const media::PictureBuffer& picture_buffer) {}
-
// Notify strategy that we have a new android MediaCodec instance. This
// happens when we're starting up or re-configuring mid-stream. Any
// previously provided codec should no longer be referenced.
@@ -91,11 +97,22 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// Notify the strategy that a frame is available. This callback can happen
// on any thread at any time.
virtual void OnFrameAvailable() = 0;
+
+ // Whether the pictures produced by this backing strategy are overlayable.
+ virtual bool ArePicturesOverlayable() = 0;
+
+ // Size may have changed due to resolution change since the last time this
+ // PictureBuffer was used. Update the size of the picture buffer to
+ // |new_size| and also update any size-dependent state (e.g. size of
+ // associated texture). Callers should set the correct GL context prior to
+ // calling.
+ virtual void UpdatePictureBufferSize(media::PictureBuffer* picture_buffer,
+ const gfx::Size& new_size) = 0;
};
AndroidVideoDecodeAccelerator(
- const base::WeakPtr<gpu::gles2::GLES2Decoder> decoder,
- const base::Callback<bool(void)>& make_context_current);
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb);
~AndroidVideoDecodeAccelerator() override;
@@ -109,31 +126,98 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
// AVDAStateProvider implementation:
const gfx::Size& GetSize() const override;
const base::ThreadChecker& ThreadChecker() const override;
base::WeakPtr<gpu::gles2::GLES2Decoder> GetGlDecoder() const override;
+ gpu::gles2::TextureRef* GetTextureForPicture(
+ const media::PictureBuffer& picture_buffer) override;
void PostError(const ::tracked_objects::Location& from_here,
media::VideoDecodeAccelerator::Error error) override;
- static media::VideoDecodeAccelerator::Capabilities GetCapabilities();
+ static media::VideoDecodeAccelerator::Capabilities GetCapabilities(
+ const gpu::GpuPreferences& gpu_preferences);
// Notifies about SurfaceTexture::OnFrameAvailable. This can happen on any
// thread at any time!
void OnFrameAvailable();
private:
+ friend class AVDATimerManager;
+
+ // TODO(timav): evaluate the need for more states in the AVDA state machine.
enum State {
NO_ERROR,
ERROR,
+ // Set when we are asynchronously constructing the codec. Will transition
+ // to NO_ERROR or ERROR depending on success.
+ WAITING_FOR_CODEC,
+ // Set when we have a codec, but it doesn't yet have a key.
+ WAITING_FOR_KEY,
+ WAITING_FOR_EOS,
};
- static const base::TimeDelta kDecodePollDelay;
+ // Configuration info for MediaCodec.
+ // This is used to shuttle configuration info between threads without needing
+ // to worry about the lifetime of the AVDA instance. None of these fields
+ // should be modified while |state_| is WAITING_FOR_CODEC.
+ class CodecConfig : public base::RefCountedThreadSafe<CodecConfig> {
+ public:
+ CodecConfig();
+
+ // Codec type. Used when we configure media codec.
+ media::VideoCodec codec_ = media::kUnknownVideoCodec;
+
+ // Whether the encryption scheme requires the use of a protected surface.
+ bool needs_protected_surface_ = false;
+
+ // The surface that MediaCodec is configured to output to. It's created by
+ // the backing strategy.
+ gfx::ScopedJavaSurface surface_;
+
+ // The MediaCrypto object is used in the MediaCodec.configure() in case of
+ // an encrypted stream.
+ media::MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto_;
+
+ // Initial coded size. The actual size might change at any time, so this
+ // is only a hint.
+ gfx::Size initial_expected_coded_size_;
+
+ protected:
+ friend class base::RefCountedThreadSafe<CodecConfig>;
+ virtual ~CodecConfig();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CodecConfig);
+ };
// Configures |media_codec_| with the given codec parameters from the client.
- bool ConfigureMediaCodec();
+ // This configuration will (probably) not be complete before this call
+ // returns. Multiple calls before completion will be ignored. |state_|
+ // must be NO_ERROR or WAITING_FOR_CODEC. Note that, once you call this,
+ // you should be careful to avoid modifying members of |codec_config_| until
+ // |state_| is no longer WAITING_FOR_CODEC.
+ void ConfigureMediaCodecAsynchronously();
+
+ // Like ConfigureMediaCodecAsynchronously, but synchronous. Returns true if
+ // and only if |media_codec_| is non-null. Since all configuration is done
+ // synchronously, there is no concern with modifying |codec_config_| after
+ // this returns.
+ bool ConfigureMediaCodecSynchronously();
+
+ // Instantiate a media codec using |codec_config|.
+ // This may be called on any thread.
+ static scoped_ptr<media::VideoCodecBridge> ConfigureMediaCodecOnAnyThread(
+ scoped_refptr<CodecConfig> codec_config);
+
+ // Called on the main thread to update |media_codec_| and complete codec
+ // configuration. |media_codec| will be null if configuration failed.
+ void OnCodecConfigured(scoped_ptr<media::VideoCodecBridge> media_codec);
// Sends the decoded frame specified by |codec_buffer_index| to the client.
void SendDecodedFrameToClient(int32_t codec_buffer_index,
@@ -142,7 +226,7 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// Does pending IO tasks if any. Once this is called, it polls |media_codec_|
// until it finishes pending tasks. For the polling, |kDecodePollDelay| is
// used.
- void DoIOTask();
+ void DoIOTask(bool start_timer);
// Feeds input data to |media_codec_|. This checks
// |pending_bitstream_buffers_| and queues a buffer to |media_codec_|.
@@ -157,15 +241,20 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// Requests picture buffers from the client.
void RequestPictureBuffers();
+ // Decode the content in |bitstream_buffer|. Note that a |bitstream_buffer|
+ // with an id of -1 indicates a flush command.
+ void DecodeBuffer(const media::BitstreamBuffer& bitstream_buffer);
+
// This callback is called after CDM obtained a MediaCrypto object.
- void OnMediaCryptoReady(media::MediaDrmBridge::JavaObjectPtr media_crypto,
- bool needs_protected_surface);
+ void OnMediaCryptoReady(
+ media::MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto,
+ bool needs_protected_surface);
// This callback is called when a new key is added to CDM.
void OnKeyAdded();
- // Notifies the client of the CDM setting result.
- void NotifyCdmAttached(bool success);
+ // Notifies the client of the result of deferred initialization.
+ void NotifyInitializationComplete(bool success);
// Notifies the client about the availability of a picture.
void NotifyPictureReady(const media::Picture& picture);
@@ -181,7 +270,12 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
void NotifyResetDone();
// Notifies about decoding errors.
- void NotifyError(media::VideoDecodeAccelerator::Error error);
+ // Note: you probably don't want to call this directly. Use PostError or
+ // RETURN_ON_FAILURE, since we can defer error reporting to keep the pipeline
+ // from breaking. NotifyError will do so immediately; PostError may wait.
+ // |token| has to match |error_sequence_token_|, or else it's assumed to be
+ // from a post that's prior to a previous reset, and ignored.
+ void NotifyError(media::VideoDecodeAccelerator::Error error, int token);
// Start or stop our work-polling timer based on whether we did any work, and
// how long it has been since we've done work. Calling this with true will
@@ -194,12 +288,9 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// is still valid and should be processed.
void ResetCodecState();
- // Dismiss all |output_picture_buffers_| in preparation for requesting new
- // ones.
- void DismissPictureBuffers();
-
// Return true if and only if we should use deferred rendering.
- static bool UseDeferredRenderingStrategy();
+ static bool UseDeferredRenderingStrategy(
+ const gpu::GpuPreferences& gpu_preferences);
// Used to DCHECK that we are called on the correct thread.
base::ThreadChecker thread_checker_;
@@ -208,17 +299,14 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
Client* client_;
// Callback to set the correct gl context.
- base::Callback<bool(void)> make_context_current_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
- // Codec type. Used when we configure media codec.
- media::VideoCodec codec_;
+ // Callback to get the GLES2Decoder instance.
+ GetGLES2DecoderCallback get_gles2_decoder_cb_;
// Whether the stream is encrypted.
bool is_encrypted_;
- // Whether encryption scheme requires to use protected surface.
- bool needs_protected_surface_;
-
// The current state of this class. For now, this is used only for setting
// error state.
State state_;
@@ -231,17 +319,9 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// decoded frames to the client.
std::queue<int32_t> free_picture_ids_;
- // Picture buffer ids which have been dismissed and not yet re-assigned. Used
- // to ignore ReusePictureBuffer calls that were in flight when the
- // DismissPictureBuffer call was made.
- std::set<int32_t> dismissed_picture_ids_;
-
// The low-level decoder which Android SDK provides.
scoped_ptr<media::VideoCodecBridge> media_codec_;
- // A container of texture. Used to set a texture to |media_codec_|.
- scoped_refptr<gfx::SurfaceTexture> surface_texture_;
-
// Set to true after requesting picture buffers to the client.
bool picturebuffers_requested_;
@@ -249,11 +329,8 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
gfx::Size size_;
// Encoded bitstream buffers to be passed to media codec, queued until an
- // input buffer is available, along with the time when they were first
- // enqueued.
- typedef std::queue<std::pair<media::BitstreamBuffer, base::Time> >
- PendingBitstreamBuffers;
- PendingBitstreamBuffers pending_bitstream_buffers_;
+ // input buffer is available.
+ std::queue<media::BitstreamBuffer> pending_bitstream_buffers_;
// A map of presentation timestamp to bitstream buffer id for the bitstream
// buffers that have been submitted to the decoder but haven't yet produced an
@@ -265,12 +342,6 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// NotifyEndOfBitstreamBuffer() before getting output from the bitstream.
std::list<int32_t> bitstreams_notified_in_advance_;
- // Owner of the GL context. Used to restore the context state.
- base::WeakPtr<gpu::gles2::GLES2Decoder> gl_decoder_;
-
- // Repeating timer responsible for draining pending IO to the codec.
- base::RepeatingTimer io_timer_;
-
// Backing strategy that we'll use to connect PictureBuffers to frames.
scoped_ptr<BackingStrategy> strategy_;
@@ -283,16 +354,33 @@ class CONTENT_EXPORT AndroidVideoDecodeAccelerator
// CDM related stuff.
- // Holds a ref-count to the CDM.
- scoped_refptr<media::MediaKeys> cdm_;
+ // Holds a ref-count to the CDM to avoid using the CDM after it's destroyed.
+ scoped_refptr<media::MediaKeys> cdm_for_reference_holding_only_;
+
+ media::MediaDrmBridgeCdmContext* media_drm_bridge_cdm_context_;
// MediaDrmBridge requires registration/unregistration of the player, this
// registration id is used for this.
int cdm_registration_id_;
- // The MediaCrypto object is used in the MediaCodec.configure() in case of
- // an encrypted stream.
- media::MediaDrmBridge::JavaObjectPtr media_crypto_;
+ // Configuration that we use for MediaCodec.
+ // Do not update any of its members while |state_| is WAITING_FOR_CODEC.
+ scoped_refptr<CodecConfig> codec_config_;
+
+ // Index of the dequeued and filled buffer that we keep trying to enqueue.
+ // Such a buffer arises during MEDIA_CODEC_NO_KEY processing.
+ int pending_input_buf_index_;
+
+ // Monotonically increasing value that is used to prevent old, delayed errors
+ // from being sent after a reset.
+ int error_sequence_token_;
+
+ // PostError will defer sending an error if and only if this is true.
+ bool defer_errors_;
+
+ // True if and only if VDA initialization is deferred, and we have not yet
+ // called NotifyInitializationComplete.
+ bool deferred_initialization_pending_;
// WeakPtrFactory for posting tasks back to |this|.
base::WeakPtrFactory<AndroidVideoDecodeAccelerator> weak_this_factory_;
diff --git a/chromium/content/common/gpu/media/android_video_decode_accelerator_unittest.cc b/chromium/content/common/gpu/media/android_video_decode_accelerator_unittest.cc
index 3cd79157162..d21ad9e58a8 100644
--- a/chromium/content/common/gpu/media/android_video_decode_accelerator_unittest.cc
+++ b/chromium/content/common/gpu/media/android_video_decode_accelerator_unittest.cc
@@ -27,13 +27,15 @@ bool MockMakeContextCurrent() {
return true;
}
+static base::WeakPtr<gpu::gles2::GLES2Decoder> MockGetGLES2Decoder(
+ const base::WeakPtr<gpu::gles2::GLES2Decoder>& decoder) {
+ return decoder;
+}
+
} // namespace
namespace content {
-// TODO(felipeg): Add more unit tests to test the ordinary behavior of
-// AndroidVideoDecodeAccelerator.
-// http://crbug.com/178647
class MockVideoDecodeAcceleratorClient
: public media::VideoDecodeAccelerator::Client {
public:
@@ -42,6 +44,7 @@ class MockVideoDecodeAcceleratorClient
// VideoDecodeAccelerator::Client implementation.
void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
+ uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) override {}
void DismissPictureBuffer(int32_t picture_buffer_id) override {}
@@ -60,8 +63,6 @@ class AndroidVideoDecodeAcceleratorTest : public testing::Test {
void SetUp() override {
JNIEnv* env = base::android::AttachCurrentThread();
media::RegisterJni(env);
- // TODO(felipeg): fix GL bindings, so that the decoder can perform GL
- // calls.
// Start message loop because
// AndroidVideoDecodeAccelerator::ConfigureMediaCodec() starts a timer task.
@@ -72,15 +73,19 @@ class AndroidVideoDecodeAcceleratorTest : public testing::Test {
scoped_ptr<MockVideoDecodeAcceleratorClient> client(
new MockVideoDecodeAcceleratorClient());
accelerator_.reset(new AndroidVideoDecodeAccelerator(
- decoder->AsWeakPtr(), base::Bind(&MockMakeContextCurrent)));
+ base::Bind(&MockMakeContextCurrent),
+ base::Bind(&MockGetGLES2Decoder, decoder->AsWeakPtr())));
}
bool Configure(media::VideoCodec codec) {
AndroidVideoDecodeAccelerator* accelerator =
static_cast<AndroidVideoDecodeAccelerator*>(accelerator_.get());
- accelerator->surface_texture_ = gfx::SurfaceTexture::Create(0);
- accelerator->codec_ = codec;
- return accelerator->ConfigureMediaCodec();
+ scoped_refptr<gfx::SurfaceTexture> surface_texture =
+ gfx::SurfaceTexture::Create(0);
+ accelerator->codec_config_->surface_ =
+ gfx::ScopedJavaSurface(surface_texture.get());
+ accelerator->codec_config_->codec_ = codec;
+ return accelerator->ConfigureMediaCodecSynchronously();
}
private:
diff --git a/chromium/content/common/gpu/media/android_video_encode_accelerator.cc b/chromium/content/common/gpu/media/android_video_encode_accelerator.cc
index eb383081d7f..ac2ff39e9b7 100644
--- a/chromium/content/common/gpu/media/android_video_encode_accelerator.cc
+++ b/chromium/content/common/gpu/media/android_video_encode_accelerator.cc
@@ -7,13 +7,12 @@
#include <set>
#include "base/bind.h"
-#include "base/command_line.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/public/common/content_switches.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/android/media_codec_util.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/limits.h"
@@ -95,8 +94,6 @@ static bool GetSupportedColorFormatForMime(const std::string& mime,
AndroidVideoEncodeAccelerator::AndroidVideoEncodeAccelerator()
: num_buffers_at_codec_(0),
- num_output_buffers_(-1),
- output_buffers_capacity_(0),
last_set_bitrate_(0) {}
AndroidVideoEncodeAccelerator::~AndroidVideoEncodeAccelerator() {
@@ -107,12 +104,6 @@ media::VideoEncodeAccelerator::SupportedProfiles
AndroidVideoEncodeAccelerator::GetSupportedProfiles() {
SupportedProfiles profiles;
-#if defined(ENABLE_WEBRTC)
- const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kDisableWebRtcHWEncoding))
- return profiles;
-#endif
-
const struct {
const media::VideoCodec codec;
const media::VideoCodecProfile profile;
@@ -123,6 +114,11 @@ AndroidVideoEncodeAccelerator::GetSupportedProfiles() {
};
for (const auto& supported_codec : kSupportedCodecs) {
+ if (supported_codec.codec == media::kCodecVP8 &&
+ !media::MediaCodecUtil::IsVp8EncoderAvailable()) {
+ continue;
+ }
+
if (VideoCodecBridge::IsKnownUnaccelerated(supported_codec.codec,
media::MEDIA_CODEC_ENCODER)) {
continue;
@@ -164,17 +160,24 @@ bool AndroidVideoEncodeAccelerator::Initialize(
std::string mime_type;
media::VideoCodec codec;
+ // The client should be prepared to feed at least this many frames into the
+ // encoder before any output frames are returned, since the encoder may
+ // need to hold onto some subset of inputs as reference pictures.
+ uint32_t frame_input_count;
if (output_profile == media::VP8PROFILE_ANY) {
codec = media::kCodecVP8;
mime_type = "video/x-vnd.on2.vp8";
+ frame_input_count = 1;
} else if (output_profile == media::H264PROFILE_BASELINE ||
output_profile == media::H264PROFILE_MAIN) {
codec = media::kCodecH264;
mime_type = "video/avc";
+ frame_input_count = 30;
} else {
return false;
}
+ frame_size_ = input_visible_size;
last_set_bitrate_ = initial_bitrate;
// Only consider using MediaCodec if it's likely backed by hardware.
@@ -202,15 +205,16 @@ bool AndroidVideoEncodeAccelerator::Initialize(
return false;
}
- num_output_buffers_ = media_codec_->GetOutputBuffersCount();
- output_buffers_capacity_ = media_codec_->GetOutputBuffersCapacity();
+ // Conservative upper bound for output buffer size: decoded size + 2KB.
+ const size_t output_buffer_capacity =
+ VideoFrame::AllocationSize(format, input_visible_size) + 2048;
base::MessageLoop::current()->PostTask(
FROM_HERE,
base::Bind(&VideoEncodeAccelerator::Client::RequireBitstreamBuffers,
client_ptr_factory_->GetWeakPtr(),
- num_output_buffers_,
+ frame_input_count,
input_visible_size,
- output_buffers_capacity_));
+ output_buffer_capacity));
return true;
}
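As a concrete check on the capacity bound above (assuming I420's usual 3/2 bytes per pixel): for a 1280x720 input, VideoFrame::AllocationSize is 1280 * 720 * 3 / 2 = 1,382,400 bytes, so each bitstream buffer is requested at 1,384,448 bytes. The 2 KB of slack presumably covers codec headers and the rare access unit that compresses to slightly more than the raw frame.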
@@ -238,7 +242,8 @@ void AndroidVideoEncodeAccelerator::Encode(
DCHECK(thread_checker_.CalledOnValidThread());
RETURN_ON_FAILURE(frame->format() == media::PIXEL_FORMAT_I420,
"Unexpected format", kInvalidArgumentError);
-
+ RETURN_ON_FAILURE(frame->visible_rect().size() == frame_size_,
+ "Unexpected resolution", kInvalidArgumentError);
// MediaCodec doesn't have a way to specify stride for non-Packed formats, so
// we insist on being called with packed frames and no cropping :(
RETURN_ON_FAILURE(frame->row_bytes(VideoFrame::kYPlane) ==
@@ -260,9 +265,6 @@ void AndroidVideoEncodeAccelerator::UseOutputBitstreamBuffer(
const media::BitstreamBuffer& buffer) {
DVLOG(3) << __PRETTY_FUNCTION__ << ": bitstream_buffer_id=" << buffer.id();
DCHECK(thread_checker_.CalledOnValidThread());
- RETURN_ON_FAILURE(buffer.size() >= media_codec_->GetOutputBuffersCapacity(),
- "Output buffers too small!",
- kInvalidArgumentError);
available_bitstream_buffers_.push_back(buffer);
DoIOTask();
}
@@ -331,7 +333,9 @@ void AndroidVideoEncodeAccelerator::QueueInput() {
uint8_t* buffer = NULL;
size_t capacity = 0;
- media_codec_->GetInputBuffer(input_buf_index, &buffer, &capacity);
+ status = media_codec_->GetInputBuffer(input_buf_index, &buffer, &capacity);
+ RETURN_ON_FAILURE(status == media::MEDIA_CODEC_OK, "GetInputBuffer failed.",
+ kPlatformFailureError);
size_t queued_size =
VideoFrame::AllocationSize(media::PIXEL_FORMAT_I420, frame->coded_size());
@@ -373,21 +377,6 @@ void AndroidVideoEncodeAccelerator::QueueInput() {
pending_frames_.pop();
}
-bool AndroidVideoEncodeAccelerator::DoOutputBuffersSuffice() {
- // If this returns false ever, then the VEA::Client interface will need to
- // grow a DismissBitstreamBuffer() call, and VEA::Client impls will have to be
- // prepared to field multiple requests to RequireBitstreamBuffers().
- int count = media_codec_->GetOutputBuffersCount();
- size_t capacity = media_codec_->GetOutputBuffersCapacity();
- bool ret = count <= num_output_buffers_ &&
- capacity <= output_buffers_capacity_;
- LOG_IF(ERROR, !ret) << "Need more/bigger buffers; before: "
- << num_output_buffers_ << "x" << output_buffers_capacity_
- << ", now: " << count << "x" << capacity;
- UMA_HISTOGRAM_BOOLEAN("Media.AVEA.OutputBuffersSuffice", ret);
- return ret;
-}
-
void AndroidVideoEncodeAccelerator::DequeueOutput() {
if (!client_ptr_factory_->GetWeakPtr() ||
available_bitstream_buffers_.empty() || num_buffers_at_codec_ == 0) {
@@ -410,13 +399,14 @@ void AndroidVideoEncodeAccelerator::DequeueOutput() {
// Unreachable because of previous statement, but included for clarity.
return;
- case media::MEDIA_CODEC_OUTPUT_FORMAT_CHANGED: // Fall-through.
- case media::MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
- RETURN_ON_FAILURE(DoOutputBuffersSuffice(),
- "Bitstream now requires more/larger buffers",
+ case media::MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
+ RETURN_ON_FAILURE(false, "Unexpected output format change",
kPlatformFailureError);
break;
+ case media::MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
+ break;
+
case media::MEDIA_CODEC_OK:
DCHECK_GE(buf_index, 0);
break;
@@ -429,17 +419,17 @@ void AndroidVideoEncodeAccelerator::DequeueOutput() {
media::BitstreamBuffer bitstream_buffer = available_bitstream_buffers_.back();
available_bitstream_buffers_.pop_back();
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(bitstream_buffer.handle(), false));
- RETURN_ON_FAILURE(shm->Map(bitstream_buffer.size()),
- "Failed to map SHM",
- kPlatformFailureError);
- RETURN_ON_FAILURE(size <= shm->mapped_size(),
- "Encoded buffer too large: " << size << ">"
- << shm->mapped_size(),
+ scoped_ptr<SharedMemoryRegion> shm(
+ new SharedMemoryRegion(bitstream_buffer, false));
+ RETURN_ON_FAILURE(shm->Map(), "Failed to map SHM", kPlatformFailureError);
+ RETURN_ON_FAILURE(size <= shm->size(),
+ "Encoded buffer too large: " << size << ">" << shm->size(),
kPlatformFailureError);
- media_codec_->CopyFromOutputBuffer(buf_index, offset, shm->memory(), size);
+ media::MediaCodecStatus status = media_codec_->CopyFromOutputBuffer(
+ buf_index, offset, shm->memory(), size);
+ RETURN_ON_FAILURE(status == media::MEDIA_CODEC_OK,
+ "CopyFromOutputBuffer failed", kPlatformFailureError);
media_codec_->ReleaseOutputBuffer(buf_index, false);
--num_buffers_at_codec_;
diff --git a/chromium/content/common/gpu/media/android_video_encode_accelerator.h b/chromium/content/common/gpu/media/android_video_encode_accelerator.h
index 426360dca7c..0de3d1866b1 100644
--- a/chromium/content/common/gpu/media/android_video_encode_accelerator.h
+++ b/chromium/content/common/gpu/media/android_video_encode_accelerator.h
@@ -67,9 +67,6 @@ class CONTENT_EXPORT AndroidVideoEncodeAccelerator
void QueueInput();
void DequeueOutput();
- // Returns true if we don't need more or bigger output buffers.
- bool DoOutputBuffersSuffice();
-
// Start & stop |io_timer_| if the time seems right.
void MaybeStartIOTimer();
void MaybeStopIOTimer();
@@ -103,9 +100,9 @@ class CONTENT_EXPORT AndroidVideoEncodeAccelerator
// appearing to move forward.
base::TimeDelta fake_input_timestamp_;
- // Number of requested output buffers and their capacity.
- int num_output_buffers_; // -1 until RequireBitstreamBuffers.
- size_t output_buffers_capacity_; // 0 until RequireBitstreamBuffers.
+  // Resolution of the input stream. Set once during initialization and not
+  // allowed to change afterwards.
+ gfx::Size frame_size_;
uint32_t last_set_bitrate_; // In bps.
diff --git a/chromium/content/common/gpu/media/avda_codec_image.cc b/chromium/content/common/gpu/media/avda_codec_image.cc
index 1df753d167e..5830433cdf2 100644
--- a/chromium/content/common/gpu/media/avda_codec_image.cc
+++ b/chromium/content/common/gpu/media/avda_codec_image.cc
@@ -24,16 +24,17 @@ AVDACodecImage::AVDACodecImage(
const base::WeakPtr<gpu::gles2::GLES2Decoder>& decoder,
const scoped_refptr<gfx::SurfaceTexture>& surface_texture)
: shared_state_(shared_state),
- codec_buffer_index_(-1),
+ codec_buffer_index_(kInvalidCodecBufferIndex),
media_codec_(codec),
decoder_(decoder),
surface_texture_(surface_texture),
detach_surface_texture_on_destruction_(false),
- texture_(0),
- need_shader_info_(true),
- texmatrix_uniform_location_(-1) {
+ texture_(0) {
+ // Default to a sane guess of "flip Y", just in case we can't get
+ // the matrix on the first call.
memset(gl_matrix_, 0, sizeof(gl_matrix_));
- gl_matrix_[0] = gl_matrix_[5] = gl_matrix_[10] = gl_matrix_[15] = 1.0f;
+ gl_matrix_[0] = gl_matrix_[10] = gl_matrix_[15] = 1.0f;
+ gl_matrix_[5] = -1.0f;
}
AVDACodecImage::~AVDACodecImage() {}
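For reference on the default set in the constructor above: a full flip-Y texture transform pairs the -1 Y scale with a +1 Y translation, so coordinates map v to 1 - v and stay in [0, 1]. A sketch of that matrix in the same flat column-major layout (illustrative; the change itself only sets the scale):

// Column-major 4x4 mapping (u, v) -> (u, 1 - v):
// m[5] is the Y scale, m[13] the Y translation.
const float kFlipYMatrix[16] = {
    1.f, 0.f,  0.f, 0.f,  // column 0
    0.f, -1.f, 0.f, 0.f,  // column 1
    0.f, 0.f,  1.f, 0.f,  // column 2
    0.f, 1.f,  0.f, 1.f,  // column 3 (translation)
};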
@@ -55,38 +56,35 @@ bool AVDACodecImage::BindTexImage(unsigned target) {
void AVDACodecImage::ReleaseTexImage(unsigned target) {}
bool AVDACodecImage::CopyTexImage(unsigned target) {
+ if (!surface_texture_)
+ return false;
+
if (target != GL_TEXTURE_EXTERNAL_OES)
return false;
- // Verify that the currently bound texture is the right one. If we're not
- // copying to a Texture that shares our service_id, then we can't do much.
- // This will force a copy.
- // TODO(liberato): Fall back to a copy that uses the texture matrix.
GLint bound_service_id = 0;
glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &bound_service_id);
+  // We insist that the currently bound texture is the right one. We could
+  // make a new GLImage from a 2D image.
if (bound_service_id != shared_state_->surface_texture_service_id())
return false;
- // Attach the surface texture to our GL context if needed.
+ // If the surface texture isn't attached yet, then attach it. Note that this
+ // will be to the texture in |shared_state_|, because of the checks above.
if (!shared_state_->surface_texture_is_attached())
AttachSurfaceTextureToContext();
- // Make sure that we have the right image in the front buffer.
- UpdateSurfaceTexture();
-
- InstallTextureMatrix();
-
- // TODO(liberato): Handle the texture matrix properly.
- // Either we can update the shader with it or we can move all of the logic
- // to updateTexImage() to the right place in the cc to send it to the shader.
- // For now, we just skip it. crbug.com/530681
+ // Make sure that we have the right image in the front buffer. Note that the
+ // bound_service_id is guaranteed to be equal to the surface texture's client
+ // texture id, so we can skip preserving it if the right context is current.
+ UpdateSurfaceTexture(kDontRestoreBindings);
// By setting image state to UNBOUND instead of COPIED we ensure that
// CopyTexImage() is called each time the surface texture is used for drawing.
// It would be nice if we could do this via asking for the currently bound
// Texture, but the active unit never seems to change.
- texture_->SetLevelImage(GL_TEXTURE_EXTERNAL_OES, 0, this,
- gpu::gles2::Texture::UNBOUND);
+ texture_->SetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES, 0, this,
+ gpu::gles2::Texture::UNBOUND);
return true;
}
@@ -102,16 +100,29 @@ bool AVDACodecImage::ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
gfx::OverlayTransform transform,
const gfx::Rect& bounds_rect,
const gfx::RectF& crop_rect) {
- return false;
+ // This should only be called when we're rendering to a SurfaceView.
+ if (surface_texture_) {
+ DVLOG(1) << "Invalid call to ScheduleOverlayPlane; this image is "
+ "SurfaceTexture backed.";
+ return false;
+ }
+
+ if (codec_buffer_index_ != kInvalidCodecBufferIndex) {
+ media_codec_->ReleaseOutputBuffer(codec_buffer_index_, true);
+ codec_buffer_index_ = kInvalidCodecBufferIndex;
+ }
+ return true;
}
void AVDACodecImage::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
uint64_t process_tracing_id,
const std::string& dump_name) {}
-void AVDACodecImage::UpdateSurfaceTexture() {
+void AVDACodecImage::UpdateSurfaceTexture(RestoreBindingsMode mode) {
+ DCHECK(surface_texture_);
+
// Render via the media codec if needed.
- if (codec_buffer_index_ <= -1 || !media_codec_)
+ if (!IsCodecBufferOutstanding())
return;
// The decoder buffer is still pending.
@@ -123,15 +134,24 @@ void AVDACodecImage::UpdateSurfaceTexture() {
}
// Don't bother to check if we're rendered again.
- codec_buffer_index_ = -1;
+ codec_buffer_index_ = kInvalidCodecBufferIndex;
// Swap the rendered image to the front.
- scoped_ptr<ui::ScopedMakeCurrent> scoped_make_current;
- if (!shared_state_->context()->IsCurrent(NULL)) {
- scoped_make_current.reset(new ui::ScopedMakeCurrent(
- shared_state_->context(), shared_state_->surface()));
- }
+ scoped_ptr<ui::ScopedMakeCurrent> scoped_make_current = MakeCurrentIfNeeded();
+
+  // If we changed contexts, then we always want to restore the bindings,
+  // since the caller doesn't know that we're switching contexts.
+ if (scoped_make_current)
+ mode = kDoRestoreBindings;
+
+ // Save the current binding if requested.
+ GLint bound_service_id = 0;
+ if (mode == kDoRestoreBindings)
+ glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &bound_service_id);
+
surface_texture_->UpdateTexImage();
+ if (mode == kDoRestoreBindings)
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, bound_service_id);
// Helpfully, this is already column major.
surface_texture_->GetTransformMatrix(gl_matrix_);
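The explicit save/restore around UpdateTexImage() above is the usual pattern for touching GL state a caller may depend on; wrapped in a scoped helper, the intent reads directly. A sketch under that assumption (the helper is illustrative, not part of this change):

// Sketch: restores the external-OES texture binding on scope exit. Callers
// that know the right texture is already bound (kDontRestoreBindings above)
// simply don't instantiate it.
class ScopedExternalTextureBinding {
 public:
  ScopedExternalTextureBinding() {
    glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &saved_id_);
  }
  ~ScopedExternalTextureBinding() {
    glBindTexture(GL_TEXTURE_EXTERNAL_OES, saved_id_);
  }

 private:
  GLint saved_id_ = 0;
};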
@@ -153,16 +173,19 @@ void AVDACodecImage::SetMediaCodec(media::MediaCodecBridge* codec) {
media_codec_ = codec;
}
-void AVDACodecImage::setTexture(gpu::gles2::Texture* texture) {
+void AVDACodecImage::SetTexture(gpu::gles2::Texture* texture) {
texture_ = texture;
}
void AVDACodecImage::AttachSurfaceTextureToContext() {
+ DCHECK(surface_texture_);
+
+ // We assume that the currently bound texture is the intended one.
+
// Attach the surface texture to the first context we're bound on, so that
// no context switch is needed later.
-
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
@@ -170,35 +193,48 @@ void AVDACodecImage::AttachSurfaceTextureToContext() {
// We could do this earlier, but SurfaceTexture has context affinity, and we
// don't want to require a context switch.
surface_texture_->AttachToGLContext();
- shared_state_->did_attach_surface_texture();
-}
-
-void AVDACodecImage::InstallTextureMatrix() {
- // glUseProgram() has been run already -- just modify the uniform.
- // Updating this via VideoFrameProvider::Client::DidUpdateMatrix() would
- // be a better solution, except that we'd definitely miss a frame at this
- // point in drawing.
- // Our current method assumes that we'll end up being a stream resource,
- // and that the program has a texMatrix uniform that does what we want.
- if (need_shader_info_) {
- GLint program_id = -1;
- glGetIntegerv(GL_CURRENT_PROGRAM, &program_id);
-
- if (program_id >= 0) {
- // This is memorized from cc/output/shader.cc .
- const char* uniformName = "texMatrix";
- texmatrix_uniform_location_ =
- glGetUniformLocation(program_id, uniformName);
- DCHECK(texmatrix_uniform_location_ != -1);
- }
+ shared_state_->DidAttachSurfaceTexture();
+}
- // Only try once.
- need_shader_info_ = false;
+scoped_ptr<ui::ScopedMakeCurrent> AVDACodecImage::MakeCurrentIfNeeded() {
+ DCHECK(shared_state_->context());
+ scoped_ptr<ui::ScopedMakeCurrent> scoped_make_current;
+ if (!shared_state_->context()->IsCurrent(NULL)) {
+ scoped_make_current.reset(new ui::ScopedMakeCurrent(
+ shared_state_->context(), shared_state_->surface()));
}
- if (texmatrix_uniform_location_ >= 0) {
- glUniformMatrix4fv(texmatrix_uniform_location_, 1, false, gl_matrix_);
+ return scoped_make_current;
+}
+
+void AVDACodecImage::GetTextureMatrix(float matrix[16]) {
+ if (IsCodecBufferOutstanding() && shared_state_ && surface_texture_) {
+ // Our current matrix may be stale. Update it if possible.
+ if (!shared_state_->surface_texture_is_attached()) {
+      // Don't attach the surface texture permanently. Perhaps we should
+      // just attach the surface texture in AVDA and be done with it.
+ GLuint service_id = 0;
+ glGenTextures(1, &service_id);
+ GLint bound_service_id = 0;
+ glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &bound_service_id);
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, service_id);
+ AttachSurfaceTextureToContext();
+ UpdateSurfaceTexture(kDontRestoreBindings);
+ // Detach the surface texture, which deletes the generated texture.
+ surface_texture_->DetachFromGLContext();
+ shared_state_->DidDetachSurfaceTexture();
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, bound_service_id);
+ } else {
+ // Surface texture is already attached, so just update it.
+ UpdateSurfaceTexture(kDoRestoreBindings);
+ }
}
+
+ memcpy(matrix, gl_matrix_, sizeof(gl_matrix_));
+}
+
+bool AVDACodecImage::IsCodecBufferOutstanding() const {
+ return codec_buffer_index_ != kInvalidCodecBufferIndex && media_codec_;
}
} // namespace content
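On the consumer side, the matrix returned by GetTextureMatrix() above is applied to raw texture coordinates before sampling, exactly as a texMatrix uniform would be. A CPU-side sketch of that application (function name illustrative):

// Sketch: transform a UV pair by a column-major stream-texture matrix,
// treating (u, v, 0, 1) as a column vector.
void TransformUV(const float m[16], float u, float v,
                 float* out_u, float* out_v) {
  *out_u = m[0] * u + m[4] * v + m[12];
  *out_v = m[1] * u + m[5] * v + m[13];
}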
diff --git a/chromium/content/common/gpu/media/avda_codec_image.h b/chromium/content/common/gpu/media/avda_codec_image.h
index ef0456a9fba..46547e478c8 100644
--- a/chromium/content/common/gpu/media/avda_codec_image.h
+++ b/chromium/content/common/gpu/media/avda_codec_image.h
@@ -9,13 +9,17 @@
#include "base/macros.h"
#include "content/common/gpu/media/avda_shared_state.h"
-#include "ui/gl/gl_image.h"
+#include "gpu/command_buffer/service/gl_stream_texture_image.h"
+
+namespace ui {
+class ScopedMakeCurrent;
+}
namespace content {
-// GLImage that renders MediaCodec buffers to a SurfaceTexture as needed
-// in order to draw them.
-class AVDACodecImage : public gl::GLImage {
+// GLImage that renders MediaCodec buffers to a SurfaceTexture or SurfaceView as
+// needed in order to draw them.
+class AVDACodecImage : public gpu::gles2::GLStreamTextureImage {
public:
AVDACodecImage(const scoped_refptr<AVDASharedState>&,
media::VideoCodecBridge* codec,
@@ -44,6 +48,8 @@ class AVDACodecImage : public gl::GLImage {
void OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
uint64_t process_tracing_id,
const std::string& dump_name) override;
+  // gpu::gles2::GLStreamTextureImage implementation
+ void GetTextureMatrix(float xform[16]) override;
public:
// Decoded buffer index that has the image for us to display.
@@ -58,24 +64,42 @@ class AVDACodecImage : public gl::GLImage {
void SetMediaCodec(media::MediaCodecBridge* codec);
- void setTexture(gpu::gles2::Texture* texture);
+ void SetTexture(gpu::gles2::Texture* texture);
private:
- // Make sure that the surface texture's front buffer is current.
- void UpdateSurfaceTexture();
-
- // Attach the surface texture to our GL context, with a texture that we
- // create for it.
+ enum { kInvalidCodecBufferIndex = -1 };
+
+ // Make sure that the surface texture's front buffer is current. This will
+ // save / restore the current context. It will optionally restore the texture
+ // bindings in the surface texture's context, based on |mode|. This is
+ // intended as a hint if we don't need to change contexts. If we do need to
+ // change contexts, then we'll always preserve the texture bindings in the
+  // change contexts, then we'll always preserve the texture bindings in
+ // okay to change the binding in the current context.
+ enum RestoreBindingsMode { kDontRestoreBindings, kDoRestoreBindings };
+ void UpdateSurfaceTexture(RestoreBindingsMode mode);
+
+  // Attach the surface texture to our GL context, using whatever texture is
+  // bound on the active unit.
void AttachSurfaceTextureToContext();
- // Install the current texture matrix into the shader.
- void InstallTextureMatrix();
+ // Make shared_state_->context() current if it isn't already.
+ scoped_ptr<ui::ScopedMakeCurrent> MakeCurrentIfNeeded();
+
+ // Return whether or not the current context is in the same share group as
+ // |surface_texture_|'s client texture.
+ // TODO(liberato): is this needed?
+ bool IsCorrectShareGroup() const;
+
+ // Return whether there is a codec buffer that we haven't rendered yet. Will
+ // return false also if there's no codec or we otherwise can't update.
+ bool IsCodecBufferOutstanding() const;
// Shared state between the AVDA and all AVDACodecImages.
scoped_refptr<AVDASharedState> shared_state_;
- // Codec's buffer index that we should render to the surface texture,
- // or <0 if none.
+ // The MediaCodec buffer index that we should render. Only valid if not equal
+ // to |kInvalidCodecBufferIndex|.
int codec_buffer_index_;
// Our image size.
@@ -86,6 +110,8 @@ class AVDACodecImage : public gl::GLImage {
const base::WeakPtr<gpu::gles2::GLES2Decoder> decoder_;
+ // The SurfaceTexture to render to. This is null when rendering to a
+ // SurfaceView.
const scoped_refptr<gfx::SurfaceTexture> surface_texture_;
// Should we detach |surface_texture_| from its GL context when we are
@@ -95,12 +121,6 @@ class AVDACodecImage : public gl::GLImage {
// The texture that we're attached to.
gpu::gles2::Texture* texture_;
- // Have we cached |texmatrix_uniform_location_| yet?
- bool need_shader_info_;
-
- // Uniform ID of the texture matrix in the shader.
- GLint texmatrix_uniform_location_;
-
// Texture matrix of the front buffer of the surface texture.
float gl_matrix_[16];
diff --git a/chromium/content/common/gpu/media/avda_shared_state.cc b/chromium/content/common/gpu/media/avda_shared_state.cc
index c182bf05385..7746254fee9 100644
--- a/chromium/content/common/gpu/media/avda_shared_state.cc
+++ b/chromium/content/common/gpu/media/avda_shared_state.cc
@@ -4,6 +4,7 @@
#include "content/common/gpu/media/avda_shared_state.h"
+#include "base/time/time.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/scoped_make_current.h"
@@ -21,10 +22,13 @@ void AVDASharedState::SignalFrameAvailable() {
}
void AVDASharedState::WaitForFrameAvailable() {
- frame_available_event_.Wait();
+ // 10msec covers >99.9% of cases, so just wait for up to that much before
+ // giving up. If an error occurs, we might not ever get a notification.
+ const base::TimeDelta max_wait_time(base::TimeDelta::FromMilliseconds(10));
+ frame_available_event_.TimedWait(max_wait_time);
}
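The timed wait trades a potential hang for a rare early wake-up. A possible variant that also makes missed deadlines visible in logs (a sketch, not what this change does; TimedWait() returns false on timeout):

void AVDASharedState::WaitForFrameAvailable() {
  const base::TimeDelta max_wait_time(base::TimeDelta::FromMilliseconds(10));
  if (!frame_available_event_.TimedWait(max_wait_time))
    DVLOG(1) << "Timed out waiting for the next frame; continuing anyway.";
}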
-void AVDASharedState::did_attach_surface_texture() {
+void AVDASharedState::DidAttachSurfaceTexture() {
context_ = gfx::GLContext::GetCurrent();
surface_ = gfx::GLSurface::GetCurrent();
DCHECK(context_);
@@ -33,4 +37,10 @@ void AVDASharedState::did_attach_surface_texture() {
surface_texture_is_attached_ = true;
}
+void AVDASharedState::DidDetachSurfaceTexture() {
+ context_ = nullptr;
+ surface_ = nullptr;
+ surface_texture_is_attached_ = false;
+}
+
} // namespace content
diff --git a/chromium/content/common/gpu/media/avda_shared_state.h b/chromium/content/common/gpu/media/avda_shared_state.h
index eb62681fcd5..5f80c44d729 100644
--- a/chromium/content/common/gpu/media/avda_shared_state.h
+++ b/chromium/content/common/gpu/media/avda_shared_state.h
@@ -50,10 +50,19 @@ class AVDASharedState : public base::RefCounted<AVDASharedState> {
return surface_texture_is_attached_;
}
+ // TODO(liberato): move the surface texture here and make these calls
+ // attach / detach it also. There are several changes going on in avda
+ // concurrently, so I don't want to change that until the dust settles.
+ // AVDACodecImage would no longer hold the surface texture.
+
// Call this when the SurfaceTexture is attached to a GL context. This will
// update surface_texture_is_attached(), and set the context() and surface()
// to match.
- void did_attach_surface_texture();
+ void DidAttachSurfaceTexture();
+
+ // Call this when the SurfaceTexture is detached from its GL context. This
+ // will cause us to forget the last binding.
+ void DidDetachSurfaceTexture();
private:
// Platform gl texture Id for |surface_texture_|. This will be zero if
diff --git a/chromium/content/common/gpu/media/avda_state_provider.h b/chromium/content/common/gpu/media/avda_state_provider.h
index 2c84f2ed04a..e7dfac62ded 100644
--- a/chromium/content/common/gpu/media/avda_state_provider.h
+++ b/chromium/content/common/gpu/media/avda_state_provider.h
@@ -8,6 +8,7 @@
#include "base/compiler_specific.h"
#include "base/threading/thread_checker.h"
#include "content/common/content_export.h"
+#include "gpu/command_buffer/service/texture_manager.h"
#include "media/video/video_decode_accelerator.h"
namespace gfx {
@@ -36,6 +37,8 @@ class AVDAStateProvider {
virtual const gfx::Size& GetSize() const = 0;
virtual const base::ThreadChecker& ThreadChecker() const = 0;
virtual base::WeakPtr<gpu::gles2::GLES2Decoder> GetGlDecoder() const = 0;
+ virtual gpu::gles2::TextureRef* GetTextureForPicture(
+ const media::PictureBuffer& picture_buffer) = 0;
// Helper function to report an error condition and stop decoding.
// This will post NotifyError(), and transition to the error state.
diff --git a/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.cc b/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.cc
index 40a3239cb25..e55c9009720 100644
--- a/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.cc
+++ b/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.cc
@@ -21,7 +21,6 @@
#include "base/base_paths_win.h"
#include "base/bind.h"
#include "base/callback.h"
-#include "base/command_line.h"
#include "base/debug/alias.h"
#include "base/file_version_info.h"
#include "base/files/file_path.h"
@@ -34,15 +33,14 @@
#include "base/trace_event/trace_event.h"
#include "base/win/windows_version.h"
#include "build/build_config.h"
-#include "content/public/common/content_switches.h"
#include "media/base/win/mf_initializer.h"
#include "media/video/video_decode_accelerator.h"
#include "third_party/angle/include/EGL/egl.h"
#include "third_party/angle/include/EGL/eglext.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_surface_egl.h"
-#include "ui/gl/gl_switches.h"
namespace {
@@ -113,6 +111,91 @@ DEFINE_GUID(CLSID_VideoProcessorMFT,
DEFINE_GUID(MF_XVP_PLAYBACK_MODE, 0x3c5d293f, 0xad67, 0x4e29, 0xaf, 0x12,
0xcf, 0x3e, 0x23, 0x8a, 0xcc, 0xe9);
+// Defines the GUID for the Intel H264 DXVA device.
+static const GUID DXVA2_Intel_ModeH264_E = {
+ 0x604F8E68, 0x4951, 0x4c54,{ 0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6}
+};
+
+// R600, R700, Evergreen and Cayman AMD cards. These support DXVA via UVD3
+// or earlier, and don't handle resolutions higher than 1920 x 1088 well.
+static const DWORD g_AMDUVD3GPUList[] = {
+ 0x9400, 0x9401, 0x9402, 0x9403, 0x9405, 0x940a, 0x940b, 0x940f, 0x94c0,
+ 0x94c1, 0x94c3, 0x94c4, 0x94c5, 0x94c6, 0x94c7, 0x94c8, 0x94c9, 0x94cb,
+ 0x94cc, 0x94cd, 0x9580, 0x9581, 0x9583, 0x9586, 0x9587, 0x9588, 0x9589,
+ 0x958a, 0x958b, 0x958c, 0x958d, 0x958e, 0x958f, 0x9500, 0x9501, 0x9504,
+ 0x9505, 0x9506, 0x9507, 0x9508, 0x9509, 0x950f, 0x9511, 0x9515, 0x9517,
+ 0x9519, 0x95c0, 0x95c2, 0x95c4, 0x95c5, 0x95c6, 0x95c7, 0x95c9, 0x95cc,
+ 0x95cd, 0x95ce, 0x95cf, 0x9590, 0x9591, 0x9593, 0x9595, 0x9596, 0x9597,
+ 0x9598, 0x9599, 0x959b, 0x9610, 0x9611, 0x9612, 0x9613, 0x9614, 0x9615,
+ 0x9616, 0x9710, 0x9711, 0x9712, 0x9713, 0x9714, 0x9715, 0x9440, 0x9441,
+ 0x9442, 0x9443, 0x9444, 0x9446, 0x944a, 0x944b, 0x944c, 0x944e, 0x9450,
+ 0x9452, 0x9456, 0x945a, 0x945b, 0x945e, 0x9460, 0x9462, 0x946a, 0x946b,
+ 0x947a, 0x947b, 0x9480, 0x9487, 0x9488, 0x9489, 0x948a, 0x948f, 0x9490,
+ 0x9491, 0x9495, 0x9498, 0x949c, 0x949e, 0x949f, 0x9540, 0x9541, 0x9542,
+ 0x954e, 0x954f, 0x9552, 0x9553, 0x9555, 0x9557, 0x955f, 0x94a0, 0x94a1,
+ 0x94a3, 0x94b1, 0x94b3, 0x94b4, 0x94b5, 0x94b9, 0x68e0, 0x68e1, 0x68e4,
+ 0x68e5, 0x68e8, 0x68e9, 0x68f1, 0x68f2, 0x68f8, 0x68f9, 0x68fa, 0x68fe,
+ 0x68c0, 0x68c1, 0x68c7, 0x68c8, 0x68c9, 0x68d8, 0x68d9, 0x68da, 0x68de,
+ 0x68a0, 0x68a1, 0x68a8, 0x68a9, 0x68b0, 0x68b8, 0x68b9, 0x68ba, 0x68be,
+ 0x68bf, 0x6880, 0x6888, 0x6889, 0x688a, 0x688c, 0x688d, 0x6898, 0x6899,
+ 0x689b, 0x689e, 0x689c, 0x689d, 0x9802, 0x9803, 0x9804, 0x9805, 0x9806,
+ 0x9807, 0x9808, 0x9809, 0x980a, 0x9640, 0x9641, 0x9647, 0x9648, 0x964a,
+ 0x964b, 0x964c, 0x964e, 0x964f, 0x9642, 0x9643, 0x9644, 0x9645, 0x9649,
+ 0x6720, 0x6721, 0x6722, 0x6723, 0x6724, 0x6725, 0x6726, 0x6727, 0x6728,
+ 0x6729, 0x6738, 0x6739, 0x673e, 0x6740, 0x6741, 0x6742, 0x6743, 0x6744,
+ 0x6745, 0x6746, 0x6747, 0x6748, 0x6749, 0x674a, 0x6750, 0x6751, 0x6758,
+ 0x6759, 0x675b, 0x675d, 0x675f, 0x6840, 0x6841, 0x6842, 0x6843, 0x6849,
+ 0x6850, 0x6858, 0x6859, 0x6760, 0x6761, 0x6762, 0x6763, 0x6764, 0x6765,
+ 0x6766, 0x6767, 0x6768, 0x6770, 0x6771, 0x6772, 0x6778, 0x6779, 0x677b,
+ 0x6700, 0x6701, 0x6702, 0x6703, 0x6704, 0x6705, 0x6706, 0x6707, 0x6708,
+ 0x6709, 0x6718, 0x6719, 0x671c, 0x671d, 0x671f, 0x683D, 0x9900, 0x9901,
+ 0x9903, 0x9904, 0x9905, 0x9906, 0x9907, 0x9908, 0x9909, 0x990a, 0x990b,
+ 0x990c, 0x990d, 0x990e, 0x990f, 0x9910, 0x9913, 0x9917, 0x9918, 0x9919,
+ 0x9990, 0x9991, 0x9992, 0x9993, 0x9994, 0x9995, 0x9996, 0x9997, 0x9998,
+ 0x9999, 0x999a, 0x999b, 0x999c, 0x999d, 0x99a0, 0x99a2, 0x99a4,
+};
+
+// Legacy Intel GPUs (second generation) which have trouble with resolutions
+// higher than 1920 x 1088.
+static const DWORD g_IntelLegacyGPUList[] = {
+ 0x102, 0x106, 0x116, 0x126,
+};
+
+// Provides scoped access to the underlying buffer in an IMFMediaBuffer
+// instance.
+class MediaBufferScopedPointer {
+ public:
+ MediaBufferScopedPointer(IMFMediaBuffer* media_buffer)
+ : media_buffer_(media_buffer),
+ buffer_(nullptr),
+ max_length_(0),
+ current_length_(0) {
+ HRESULT hr = media_buffer_->Lock(&buffer_, &max_length_, &current_length_);
+ CHECK(SUCCEEDED(hr));
+ }
+
+ ~MediaBufferScopedPointer() {
+ HRESULT hr = media_buffer_->Unlock();
+ CHECK(SUCCEEDED(hr));
+ }
+
+ uint8_t* get() {
+ return buffer_;
+ }
+
+ DWORD current_length() const {
+ return current_length_;
+ }
+
+ private:
+ base::win::ScopedComPtr<IMFMediaBuffer> media_buffer_;
+ uint8_t* buffer_;
+ DWORD max_length_;
+ DWORD current_length_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaBufferScopedPointer);
+};
+
} // namespace
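A typical caller of MediaBufferScopedPointer above, sketched for illustration (the function is hypothetical; requires <vector>). Lock()/Unlock() stay balanced by construction, even on early return:

// Sketch: copy an IMFSample's payload out through MediaBufferScopedPointer.
HRESULT ReadSamplePayload(IMFSample* sample, std::vector<uint8_t>* out) {
  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = sample->ConvertToContiguousBuffer(buffer.Receive());
  if (FAILED(hr))
    return hr;
  MediaBufferScopedPointer scoped_buffer(buffer.get());
  out->assign(scoped_buffer.get(),
              scoped_buffer.get() + scoped_buffer.current_length());
  return S_OK;
}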
namespace content {
@@ -122,7 +205,10 @@ static const media::VideoCodecProfile kSupportedProfiles[] = {
media::H264PROFILE_MAIN,
media::H264PROFILE_HIGH,
media::VP8PROFILE_ANY,
- media::VP9PROFILE_ANY
+ media::VP9PROFILE_PROFILE0,
+ media::VP9PROFILE_PROFILE1,
+ media::VP9PROFILE_PROFILE2,
+ media::VP9PROFILE_PROFILE3
};
CreateDXGIDeviceManager DXVAVideoDecodeAccelerator::create_dxgi_device_manager_
@@ -162,10 +248,16 @@ enum {
kFlushDecoderSurfaceTimeoutMs = 1,
// Maximum iterations where we try to flush the d3d device.
kMaxIterationsForD3DFlush = 4,
+ // Maximum iterations where we try to flush the ANGLE device before reusing
+ // the texture.
+ kMaxIterationsForANGLEReuseFlush = 16,
// We only request 5 picture buffers from the client which are used to hold
// the decoded samples. These buffers are then reused when the client tells
// us that it is done with the buffer.
kNumPictureBuffers = 5,
+ // The keyed mutex should always be released before the other thread
+ // attempts to acquire it, so AcquireSync should always return immediately.
+ kAcquireSyncWaitMs = 0,
};
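The zero wait is justified by the keyed-mutex handshake used later in this file: both sides advance the key in lock step, so by the time one side calls AcquireSync the other has already released with that key. A condensed sketch of one round trip (names illustrative):

// Sketch: producer publishes a texture under key N+1; consumer acquires it
// under the same key. With the protocol followed, a zero timeout never blocks.
bool HandOffTexture(IDXGIKeyedMutex* producer_mutex,
                    IDXGIKeyedMutex* consumer_mutex,
                    uint64_t* keyed_mutex_value) {
  if (FAILED(producer_mutex->ReleaseSync(++(*keyed_mutex_value))))
    return false;
  return SUCCEEDED(
      consumer_mutex->AcquireSync(*keyed_mutex_value, kAcquireSyncWaitMs));
}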
static IMFSample* CreateEmptySample() {
@@ -177,8 +269,9 @@ static IMFSample* CreateEmptySample() {
// Creates a Media Foundation sample with one buffer of length |buffer_length|
// on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0.
-static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
- CHECK_GT(buffer_length, 0);
+static IMFSample* CreateEmptySampleWithBuffer(uint32_t buffer_length,
+ int align) {
+ CHECK_GT(buffer_length, 0U);
base::win::ScopedComPtr<IMFSample> sample;
sample.Attach(CreateEmptySample());
@@ -209,11 +302,11 @@ static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
// |min_size| specifies the minimum size of the buffer (might be required by
// the decoder for input). If no alignment is required, provide 0.
static IMFSample* CreateInputSample(const uint8_t* stream,
- int size,
- int min_size,
+ uint32_t size,
+ uint32_t min_size,
int alignment) {
CHECK(stream);
- CHECK_GT(size, 0);
+ CHECK_GT(size, 0U);
base::win::ScopedComPtr<IMFSample> sample;
sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
alignment));
@@ -230,28 +323,16 @@ static IMFSample* CreateInputSample(const uint8_t* stream,
RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL);
CHECK_EQ(current_length, 0u);
- CHECK_GE(static_cast<int>(max_length), size);
+ CHECK_GE(max_length, size);
memcpy(destination, stream, size);
- hr = buffer->Unlock();
- RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL);
-
hr = buffer->SetCurrentLength(size);
RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", NULL);
- return sample.Detach();
-}
-
-static IMFSample* CreateSampleFromInputBuffer(
- const media::BitstreamBuffer& bitstream_buffer,
- DWORD stream_size,
- DWORD alignment) {
- base::SharedMemory shm(bitstream_buffer.handle(), true);
- RETURN_ON_FAILURE(shm.Map(bitstream_buffer.size()),
- "Failed in base::SharedMemory::Map", NULL);
+ hr = buffer->Unlock();
+ RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL);
- return CreateInputSample(reinterpret_cast<const uint8_t*>(shm.memory()),
- bitstream_buffer.size(), stream_size, alignment);
+ return sample.Detach();
}
// Helper function to create a COM object instance from a DLL. The alternative
@@ -289,55 +370,188 @@ template<class T>
base::win::ScopedComPtr<T> QueryDeviceObjectFromANGLE(int object_type) {
base::win::ScopedComPtr<T> device_object;
- EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display = nullptr;
intptr_t egl_device = 0;
intptr_t device = 0;
+ {
+ TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. GetHardwareDisplay");
+ egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
+ }
+
RETURN_ON_FAILURE(
gfx::GLSurfaceEGL::HasEGLExtension("EGL_EXT_device_query"),
"EGL_EXT_device_query missing",
device_object);
- PFNEGLQUERYDISPLAYATTRIBEXTPROC QueryDisplayAttribEXT =
- reinterpret_cast<PFNEGLQUERYDISPLAYATTRIBEXTPROC>(eglGetProcAddress(
- "eglQueryDisplayAttribEXT"));
+ PFNEGLQUERYDISPLAYATTRIBEXTPROC QueryDisplayAttribEXT = nullptr;
- RETURN_ON_FAILURE(
- QueryDisplayAttribEXT,
- "Failed to get the eglQueryDisplayAttribEXT function from ANGLE",
- device_object);
+ {
+ TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress");
- PFNEGLQUERYDEVICEATTRIBEXTPROC QueryDeviceAttribEXT =
- reinterpret_cast<PFNEGLQUERYDEVICEATTRIBEXTPROC>(eglGetProcAddress(
- "eglQueryDeviceAttribEXT"));
+ QueryDisplayAttribEXT =
+ reinterpret_cast<PFNEGLQUERYDISPLAYATTRIBEXTPROC>(eglGetProcAddress(
+ "eglQueryDisplayAttribEXT"));
- RETURN_ON_FAILURE(
- QueryDeviceAttribEXT,
- "Failed to get the eglQueryDeviceAttribEXT function from ANGLE",
- device_object);
+ RETURN_ON_FAILURE(
+ QueryDisplayAttribEXT,
+ "Failed to get the eglQueryDisplayAttribEXT function from ANGLE",
+ device_object);
+ }
- RETURN_ON_FAILURE(
- QueryDisplayAttribEXT(egl_display, EGL_DEVICE_EXT, &egl_device),
- "The eglQueryDisplayAttribEXT function failed to get the EGL device",
- device_object);
+ PFNEGLQUERYDEVICEATTRIBEXTPROC QueryDeviceAttribEXT = nullptr;
+
+ {
+ TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress");
+
+ QueryDeviceAttribEXT =
+ reinterpret_cast<PFNEGLQUERYDEVICEATTRIBEXTPROC>(eglGetProcAddress(
+ "eglQueryDeviceAttribEXT"));
+
+ RETURN_ON_FAILURE(
+ QueryDeviceAttribEXT,
+ "Failed to get the eglQueryDeviceAttribEXT function from ANGLE",
+ device_object);
+ }
+
+ {
+ TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. QueryDisplayAttribEXT");
+
+ RETURN_ON_FAILURE(
+ QueryDisplayAttribEXT(egl_display, EGL_DEVICE_EXT, &egl_device),
+ "The eglQueryDisplayAttribEXT function failed to get the EGL device",
+ device_object);
+ }
RETURN_ON_FAILURE(
egl_device,
"Failed to get the EGL device",
device_object);
- RETURN_ON_FAILURE(
- QueryDeviceAttribEXT(
- reinterpret_cast<EGLDeviceEXT>(egl_device), object_type, &device),
- "The eglQueryDeviceAttribEXT function failed to get the device",
- device_object);
+ {
+ TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. QueryDisplayAttribEXT");
- RETURN_ON_FAILURE(device, "Failed to get the ANGLE device", device_object);
+ RETURN_ON_FAILURE(
+ QueryDeviceAttribEXT(
+ reinterpret_cast<EGLDeviceEXT>(egl_device), object_type, &device),
+ "The eglQueryDeviceAttribEXT function failed to get the device",
+ device_object);
+
+ RETURN_ON_FAILURE(device, "Failed to get the ANGLE device", device_object);
+ }
device_object = reinterpret_cast<T*>(device);
return device_object;
}
+H264ConfigChangeDetector::H264ConfigChangeDetector()
+ : last_sps_id_(0),
+ last_pps_id_(0),
+ config_changed_(false),
+ pending_config_changed_(false) {
+}
+
+H264ConfigChangeDetector::~H264ConfigChangeDetector() {
+}
+
+bool H264ConfigChangeDetector::DetectConfig(const uint8_t* stream,
+ unsigned int size) {
+ std::vector<uint8_t> sps;
+ std::vector<uint8_t> pps;
+ media::H264NALU nalu;
+ bool idr_seen = false;
+
+ if (!parser_.get())
+ parser_.reset(new media::H264Parser);
+
+ parser_->SetStream(stream, size);
+ config_changed_ = false;
+
+ while (true) {
+ media::H264Parser::Result result = parser_->AdvanceToNextNALU(&nalu);
+
+ if (result == media::H264Parser::kEOStream)
+ break;
+
+ if (result == media::H264Parser::kUnsupportedStream) {
+ DLOG(ERROR) << "Unsupported H.264 stream";
+ return false;
+ }
+
+ if (result != media::H264Parser::kOk) {
+ DLOG(ERROR) << "Failed to parse H.264 stream";
+ return false;
+ }
+
+ switch (nalu.nal_unit_type) {
+ case media::H264NALU::kSPS:
+ result = parser_->ParseSPS(&last_sps_id_);
+ if (result == media::H264Parser::kUnsupportedStream) {
+ DLOG(ERROR) << "Unsupported SPS";
+ return false;
+ }
+
+ if (result != media::H264Parser::kOk) {
+ DLOG(ERROR) << "Could not parse SPS";
+ return false;
+ }
+
+ sps.assign(nalu.data, nalu.data + nalu.size);
+ break;
+
+ case media::H264NALU::kPPS:
+ result = parser_->ParsePPS(&last_pps_id_);
+ if (result == media::H264Parser::kUnsupportedStream) {
+ DLOG(ERROR) << "Unsupported PPS";
+ return false;
+ }
+ if (result != media::H264Parser::kOk) {
+ DLOG(ERROR) << "Could not parse PPS";
+ return false;
+ }
+ pps.assign(nalu.data, nalu.data + nalu.size);
+ break;
+
+ case media::H264NALU::kIDRSlice:
+ idr_seen = true;
+ // If we previously detected a configuration change, and see an IDR
+ // slice next time around, we need to flag a configuration change.
+ if (pending_config_changed_) {
+ config_changed_ = true;
+ pending_config_changed_ = false;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!sps.empty() && sps != last_sps_) {
+ if (!last_sps_.empty()) {
+ // Flag configuration changes after we see an IDR slice.
+ if (idr_seen) {
+ config_changed_ = true;
+ } else {
+ pending_config_changed_ = true;
+ }
+ }
+ last_sps_.swap(sps);
+ }
+
+ if (!pps.empty() && pps != last_pps_) {
+ if (!last_pps_.empty()) {
+ // Flag configuration changes after we see an IDR slice.
+ if (idr_seen) {
+ config_changed_ = true;
+ } else {
+ pending_config_changed_ = true;
+ }
+ }
+ last_pps_.swap(pps);
+ }
+ return true;
+}
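A sketch of how a decode path might consult the detector before submitting each access unit (the caller is hypothetical, and a config_changed() accessor on the detector is assumed):

bool MaybeHandleConfigChange(H264ConfigChangeDetector* detector,
                             const uint8_t* stream, unsigned int size) {
  if (!detector->DetectConfig(stream, size))
    return false;  // Parse failure; report as a platform error.
  if (detector->config_changed()) {
    // Drain the decoder, then recreate it with the new SPS/PPS before
    // feeding this access unit.
  }
  return true;
}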
// Maintains information about a DXVA picture buffer, i.e. whether it is
// available for rendering, the texture information, etc.
@@ -349,7 +563,11 @@ struct DXVAVideoDecodeAccelerator::DXVAPictureBuffer {
EGLConfig egl_config);
~DXVAPictureBuffer();
- void ReusePictureBuffer();
+ bool InitializeTexture(const DXVAVideoDecodeAccelerator& decoder,
+ bool use_rgb);
+
+ bool ReusePictureBuffer();
+ void ResetReuseFence();
// Copies the output sample data to the picture buffer provided by the
// client.
// The dest_surface parameter contains the decoded bits.
@@ -375,20 +593,37 @@ struct DXVAVideoDecodeAccelerator::DXVAPictureBuffer {
return picture_buffer_.size();
}
+ bool waiting_to_reuse() const { return waiting_to_reuse_; }
+
+ gfx::GLFence* reuse_fence() { return reuse_fence_.get(); }
+
// Called when the source surface |src_surface| is copied to the destination
// |dest_surface|
- void CopySurfaceComplete(IDirect3DSurface9* src_surface,
+ bool CopySurfaceComplete(IDirect3DSurface9* src_surface,
IDirect3DSurface9* dest_surface);
private:
explicit DXVAPictureBuffer(const media::PictureBuffer& buffer);
bool available_;
+
+ // This is true if the decoder is currently waiting on the fence before
+ // reusing the buffer.
+ bool waiting_to_reuse_;
media::PictureBuffer picture_buffer_;
EGLSurface decoding_surface_;
+ scoped_ptr<gfx::GLFence> reuse_fence_;
+
+ HANDLE texture_share_handle_;
base::win::ScopedComPtr<IDirect3DTexture9> decoding_texture_;
base::win::ScopedComPtr<ID3D11Texture2D> dx11_decoding_texture_;
+ base::win::ScopedComPtr<IDXGIKeyedMutex> egl_keyed_mutex_;
+ base::win::ScopedComPtr<IDXGIKeyedMutex> dx11_keyed_mutex_;
+
+ // This is the last value that was used to release the keyed mutex.
+ uint64_t keyed_mutex_value_;
+
// The following |IDirect3DSurface9| interface pointers are used to hold
// references on the surfaces during the course of a StretchRect operation
// to copy the source surface to the target. The references are released
@@ -422,6 +657,9 @@ DXVAVideoDecodeAccelerator::DXVAPictureBuffer::Create(
eglGetConfigAttrib(egl_display, egl_config, EGL_BIND_TO_TEXTURE_RGB,
&use_rgb);
+ if (!picture_buffer->InitializeTexture(decoder, !!use_rgb))
+ return linked_ptr<DXVAPictureBuffer>(nullptr);
+
EGLint attrib_list[] = {
EGL_WIDTH, buffer.size().width(),
EGL_HEIGHT, buffer.size().height(),
@@ -430,59 +668,84 @@ DXVAVideoDecodeAccelerator::DXVAPictureBuffer::Create(
EGL_NONE
};
- picture_buffer->decoding_surface_ = eglCreatePbufferSurface(
- egl_display,
- egl_config,
- attrib_list);
+ picture_buffer->decoding_surface_ = eglCreatePbufferFromClientBuffer(
+ egl_display, EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE,
+ picture_buffer->texture_share_handle_, egl_config, attrib_list);
RETURN_ON_FAILURE(picture_buffer->decoding_surface_,
"Failed to create surface",
linked_ptr<DXVAPictureBuffer>(NULL));
+ if (decoder.d3d11_device_ && decoder.use_keyed_mutex_) {
+ void* keyed_mutex = nullptr;
+ EGLBoolean ret = eglQuerySurfacePointerANGLE(
+ egl_display, picture_buffer->decoding_surface_,
+ EGL_DXGI_KEYED_MUTEX_ANGLE, &keyed_mutex);
+ RETURN_ON_FAILURE(keyed_mutex && ret == EGL_TRUE,
+ "Failed to query ANGLE keyed mutex",
+ linked_ptr<DXVAPictureBuffer>(nullptr));
+ picture_buffer->egl_keyed_mutex_ = base::win::ScopedComPtr<IDXGIKeyedMutex>(
+ static_cast<IDXGIKeyedMutex*>(keyed_mutex));
+ }
+ picture_buffer->use_rgb_ = !!use_rgb;
+ return picture_buffer;
+}
- HANDLE share_handle = NULL;
- EGLBoolean ret = eglQuerySurfacePointerANGLE(
- egl_display,
- picture_buffer->decoding_surface_,
- EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE,
- &share_handle);
+bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::InitializeTexture(
+ const DXVAVideoDecodeAccelerator& decoder,
+ bool use_rgb) {
+ DCHECK(!texture_share_handle_);
+ if (decoder.d3d11_device_) {
+ D3D11_TEXTURE2D_DESC desc;
+ desc.Width = picture_buffer_.size().width();
+ desc.Height = picture_buffer_.size().height();
+ desc.MipLevels = 1;
+ desc.ArraySize = 1;
+ desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
+ desc.SampleDesc.Count = 1;
+ desc.SampleDesc.Quality = 0;
+ desc.Usage = D3D11_USAGE_DEFAULT;
+ desc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
+ desc.CPUAccessFlags = 0;
+ desc.MiscFlags = decoder.use_keyed_mutex_
+ ? D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX
+ : D3D11_RESOURCE_MISC_SHARED;
+
+ HRESULT hr = decoder.d3d11_device_->CreateTexture2D(
+ &desc, nullptr, dx11_decoding_texture_.Receive());
+ RETURN_ON_HR_FAILURE(hr, "Failed to create texture", false);
+ if (decoder.use_keyed_mutex_) {
+ hr = dx11_keyed_mutex_.QueryFrom(dx11_decoding_texture_.get());
+ RETURN_ON_HR_FAILURE(hr, "Failed to get keyed mutex", false);
+ }
- RETURN_ON_FAILURE(share_handle && ret == EGL_TRUE,
- "Failed to query ANGLE surface pointer",
- linked_ptr<DXVAPictureBuffer>(NULL));
+ base::win::ScopedComPtr<IDXGIResource> resource;
+ hr = resource.QueryFrom(dx11_decoding_texture_.get());
+ DCHECK(SUCCEEDED(hr));
+ hr = resource->GetSharedHandle(&texture_share_handle_);
+ RETURN_ON_FAILURE(SUCCEEDED(hr) && texture_share_handle_,
+ "Failed to query shared handle", false);
- HRESULT hr = E_FAIL;
- if (decoder.d3d11_device_) {
- base::win::ScopedComPtr<ID3D11Resource> resource;
- hr = decoder.d3d11_device_->OpenSharedResource(
- share_handle,
- __uuidof(ID3D11Resource),
- reinterpret_cast<void**>(resource.Receive()));
- RETURN_ON_HR_FAILURE(hr, "Failed to open shared resource",
- linked_ptr<DXVAPictureBuffer>(NULL));
- hr = picture_buffer->dx11_decoding_texture_.QueryFrom(resource.get());
} else {
+ HRESULT hr = E_FAIL;
hr = decoder.d3d9_device_ex_->CreateTexture(
- buffer.size().width(),
- buffer.size().height(),
- 1,
- D3DUSAGE_RENDERTARGET,
- use_rgb ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8,
- D3DPOOL_DEFAULT,
- picture_buffer->decoding_texture_.Receive(),
- &share_handle);
- }
- RETURN_ON_HR_FAILURE(hr, "Failed to create texture",
- linked_ptr<DXVAPictureBuffer>(NULL));
- picture_buffer->use_rgb_ = !!use_rgb;
- return picture_buffer;
+ picture_buffer_.size().width(), picture_buffer_.size().height(), 1,
+ D3DUSAGE_RENDERTARGET, use_rgb ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8,
+ D3DPOOL_DEFAULT, decoding_texture_.Receive(), &texture_share_handle_);
+ RETURN_ON_HR_FAILURE(hr, "Failed to create texture", false);
+ RETURN_ON_FAILURE(texture_share_handle_, "Failed to query shared handle",
+ false);
+ }
+ return true;
}
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
const media::PictureBuffer& buffer)
: available_(true),
+ waiting_to_reuse_(false),
picture_buffer_(buffer),
decoding_surface_(NULL),
- use_rgb_(true) {
-}
+ texture_share_handle_(nullptr),
+ keyed_mutex_value_(0),
+ use_rgb_(true) {}
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::~DXVAPictureBuffer() {
if (decoding_surface_) {
@@ -500,7 +763,7 @@ DXVAVideoDecodeAccelerator::DXVAPictureBuffer::~DXVAPictureBuffer() {
}
}
-void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() {
+bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() {
DCHECK(decoding_surface_);
EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
eglReleaseTexImage(
@@ -510,7 +773,21 @@ void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() {
decoder_surface_.Release();
target_surface_.Release();
decoder_dx11_texture_.Release();
+ waiting_to_reuse_ = false;
set_available(true);
+ if (egl_keyed_mutex_) {
+ HRESULT hr = egl_keyed_mutex_->ReleaseSync(++keyed_mutex_value_);
+ RETURN_ON_FAILURE(hr == S_OK, "Could not release sync mutex", false);
+ }
+ return true;
+}
+
+void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ResetReuseFence() {
+ if (!reuse_fence_ || !reuse_fence_->ResetSupported())
+ reuse_fence_.reset(gfx::GLFence::Create());
+ else
+ reuse_fence_->ResetState();
+ waiting_to_reuse_ = true;
}
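ResetReuseFence() pairs with the WaitForOutputBuffer() retry loop added below: insert a fence once the client is done with the buffer, poll it from delayed tasks a bounded number of times, and only then recycle. The polling idea, condensed into a free-standing sketch (names illustrative; assumes base/bind.h and base/thread_task_runner_handle.h):

// Sketch: bounded polling of a GL fence before recycling a picture buffer.
void PollFenceThenRecycle(gfx::GLFence* fence, int attempt,
                          const base::Closure& recycle) {
  if (attempt <= kMaxIterationsForANGLEReuseFlush && !fence->HasCompleted()) {
    base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
        FROM_HERE,
        base::Bind(&PollFenceThenRecycle, fence, attempt + 1, recycle),
        base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
    return;
  }
  recycle.Run();
}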
bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::
@@ -525,8 +802,9 @@ bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::
// when we receive a notification that the copy was completed or when the
// DXVAPictureBuffer instance is destroyed.
decoder_dx11_texture_ = dx11_texture;
- decoder->CopyTexture(dx11_texture, dx11_decoding_texture_.get(), NULL,
- id(), input_buffer_id);
+ decoder->CopyTexture(dx11_texture, dx11_decoding_texture_.get(),
+ dx11_keyed_mutex_, keyed_mutex_value_, NULL, id(),
+ input_buffer_id);
return true;
}
D3DSURFACE_DESC surface_desc;
@@ -566,7 +844,7 @@ bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::
return true;
}
-void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
+bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
IDirect3DSurface9* src_surface,
IDirect3DSurface9* dest_surface) {
DCHECK(!available());
@@ -574,7 +852,7 @@ void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
GLint current_texture = 0;
glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);
- glBindTexture(GL_TEXTURE_2D, picture_buffer_.texture_id());
+ glBindTexture(GL_TEXTURE_2D, picture_buffer_.texture_ids()[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
@@ -587,6 +865,12 @@ void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
DCHECK(decoder_dx11_texture_.get());
decoder_dx11_texture_.Release();
}
+ if (egl_keyed_mutex_) {
+ keyed_mutex_value_++;
+ HRESULT result =
+ egl_keyed_mutex_->AcquireSync(keyed_mutex_value_, kAcquireSyncWaitMs);
+ RETURN_ON_FAILURE(result == S_OK, "Could not acquire sync mutex", false);
+ }
EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
eglBindTexImage(
@@ -596,6 +880,7 @@ void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, current_texture);
+ return true;
}
DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
@@ -608,8 +893,9 @@ DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}
DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- gfx::GLContext* gl_context)
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ bool enable_accelerated_vpx_decode)
: client_(NULL),
dev_manager_reset_token_(0),
dx11_dev_manager_reset_token_(0),
@@ -618,14 +904,16 @@ DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
pictures_requested_(false),
inputs_before_decode_(0),
sent_drain_message_(false),
- make_context_current_(make_context_current),
+ get_gl_context_cb_(get_gl_context_cb),
+ make_context_current_cb_(make_context_current_cb),
codec_(media::kUnknownVideoCodec),
decoder_thread_("DXVAVideoDecoderThread"),
pending_flush_(false),
use_dx11_(false),
+ use_keyed_mutex_(false),
dx11_video_format_converter_media_type_needs_init_(true),
- gl_context_(gl_context),
using_angle_device_(false),
+ enable_accelerated_vpx_decode_(enable_accelerated_vpx_decode),
weak_this_factory_(this) {
weak_ptr_ = weak_this_factory_.GetWeakPtr();
memset(&input_stream_info_, 0, sizeof(input_stream_info_));
@@ -638,6 +926,11 @@ DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
Client* client) {
+ if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
if (config.is_encrypted) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
return false;
@@ -695,6 +988,10 @@ bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
PLATFORM_FAILURE,
false);
+ RETURN_AND_NOTIFY_ON_FAILURE(gfx::GLFence::IsSupported(),
+ "GL fences are unsupported", PLATFORM_FAILURE,
+ false);
+
State state = GetState();
RETURN_AND_NOTIFY_ON_FAILURE((state == kUninitialized),
"Initialize: invalid state: " << state, ILLEGAL_STATE, false);
@@ -717,6 +1014,10 @@ bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
"Send MFT_MESSAGE_NOTIFY_START_OF_STREAM notification failed",
PLATFORM_FAILURE, false);
+ config_ = config;
+
+ config_change_detector_.reset(new H264ConfigChangeDetector);
+
SetState(kNormal);
StartDecoderThread();
@@ -883,15 +1184,28 @@ void DXVAVideoDecodeAccelerator::Decode(
const media::BitstreamBuffer& bitstream_buffer) {
DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
+  // SharedMemory will take over ownership of the handle.
+ base::SharedMemory shm(bitstream_buffer.handle(), true);
+
State state = GetState();
RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped ||
state == kFlushing),
"Invalid state: " << state, ILLEGAL_STATE,);
+ if (bitstream_buffer.id() < 0) {
+ RETURN_AND_NOTIFY_ON_FAILURE(
+ false, "Invalid bitstream_buffer, id: " << bitstream_buffer.id(),
+ INVALID_ARGUMENT, );
+ }
base::win::ScopedComPtr<IMFSample> sample;
- sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer,
- input_stream_info_.cbSize,
- input_stream_info_.cbAlignment));
+ RETURN_AND_NOTIFY_ON_FAILURE(shm.Map(bitstream_buffer.size()),
+ "Failed in base::SharedMemory::Map",
+ PLATFORM_FAILURE, );
+
+ sample.Attach(CreateInputSample(
+ reinterpret_cast<const uint8_t*>(shm.memory()), bitstream_buffer.size(),
+ std::min<uint32_t>(bitstream_buffer.size(), input_stream_info_.cbSize),
+ input_stream_info_.cbAlignment));
RETURN_AND_NOTIFY_ON_FAILURE(sample.get(), "Failed to create input sample",
PLATFORM_FAILURE, );
@@ -919,6 +1233,7 @@ void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
// and mark these buffers as available for use.
for (size_t buffer_index = 0; buffer_index < buffers.size();
++buffer_index) {
+ DCHECK_LE(1u, buffers[buffer_index].texture_ids().size());
linked_ptr<DXVAPictureBuffer> picture_buffer =
DXVAPictureBuffer::Create(*this, buffers[buffer_index], egl_config_);
RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer.get(),
@@ -956,17 +1271,70 @@ void DXVAVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
// us that we can now recycle this picture buffer, so if we were waiting to
// dispose of it we now can.
if (it == output_picture_buffers_.end()) {
- it = stale_output_picture_buffers_.find(picture_buffer_id);
- RETURN_AND_NOTIFY_ON_FAILURE(it != stale_output_picture_buffers_.end(),
- "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);
- main_thread_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer,
- weak_this_factory_.GetWeakPtr(), picture_buffer_id));
+ if (!stale_output_picture_buffers_.empty()) {
+ it = stale_output_picture_buffers_.find(picture_buffer_id);
+ RETURN_AND_NOTIFY_ON_FAILURE(it != stale_output_picture_buffers_.end(),
+ "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);
+ main_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer,
+ weak_this_factory_.GetWeakPtr(), picture_buffer_id));
+ }
+ return;
+ }
+
+ if (it->second->available() || it->second->waiting_to_reuse())
+ return;
+
+ if (use_keyed_mutex_ || using_angle_device_) {
+ RETURN_AND_NOTIFY_ON_FAILURE(it->second->ReusePictureBuffer(),
+ "Failed to reuse picture buffer",
+ PLATFORM_FAILURE, );
+
+ ProcessPendingSamples();
+ if (pending_flush_) {
+ decoder_thread_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
+ base::Unretained(this)));
+ }
+ } else {
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
+ it->second->ResetReuseFence();
+
+ WaitForOutputBuffer(picture_buffer_id, 0);
+ }
+}
+
+void DXVAVideoDecodeAccelerator::WaitForOutputBuffer(int32_t picture_buffer_id,
+ int count) {
+ DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
+ OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
+ if (it == output_picture_buffers_.end())
+ return;
+
+ DXVAPictureBuffer* picture_buffer = it->second.get();
+
+ DCHECK(!picture_buffer->available());
+ DCHECK(picture_buffer->waiting_to_reuse());
+
+ gfx::GLFence* fence = picture_buffer->reuse_fence();
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
+ if (count <= kMaxIterationsForANGLEReuseFlush && !fence->HasCompleted()) {
+ main_thread_task_runner_->PostDelayedTask(
+ FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::WaitForOutputBuffer,
+ weak_this_factory_.GetWeakPtr(),
+ picture_buffer_id, count + 1),
+ base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
return;
}
+ RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer->ReusePictureBuffer(),
+ "Failed to reuse picture buffer",
+ PLATFORM_FAILURE, );
- it->second->ReusePictureBuffer();
ProcessPendingSamples();
if (pending_flush_) {
decoder_thread_task_runner_->PostTask(
@@ -1046,7 +1414,9 @@ void DXVAVideoDecodeAccelerator::Destroy() {
delete this;
}
-bool DXVAVideoDecodeAccelerator::CanDecodeOnIOThread() {
+bool DXVAVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
return false;
}
@@ -1057,17 +1427,19 @@ GLenum DXVAVideoDecodeAccelerator::GetSurfaceInternalFormat() const {
// static
media::VideoDecodeAccelerator::SupportedProfiles
DXVAVideoDecodeAccelerator::GetSupportedProfiles() {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::GetSupportedProfiles");
+
// TODO(henryhsu): Need to ensure the profiles are actually supported.
SupportedProfiles profiles;
for (const auto& supported_profile : kSupportedProfiles) {
+ std::pair<int, int> min_resolution = GetMinResolution(supported_profile);
+ std::pair<int, int> max_resolution = GetMaxResolution(supported_profile);
+
SupportedProfile profile;
profile.profile = supported_profile;
- // Windows Media Foundation H.264 decoding does not support decoding videos
- // with any dimension smaller than 48 pixels:
- // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815
- profile.min_resolution.SetSize(48, 48);
- // Use 1088 to account for 16x16 macroblocks.
- profile.max_resolution.SetSize(1920, 1088);
+ profile.min_resolution.SetSize(min_resolution.first, min_resolution.second);
+ profile.max_resolution.SetSize(max_resolution.first, max_resolution.second);
profiles.push_back(profile);
}
return profiles;
@@ -1077,17 +1449,224 @@ DXVAVideoDecodeAccelerator::GetSupportedProfiles() {
void DXVAVideoDecodeAccelerator::PreSandboxInitialization() {
::LoadLibrary(L"MFPlat.dll");
::LoadLibrary(L"msmpeg2vdec.dll");
+ ::LoadLibrary(L"mf.dll");
+ ::LoadLibrary(L"dxva2.dll");
if (base::win::GetVersion() > base::win::VERSION_WIN7) {
LoadLibrary(L"msvproc.dll");
} else {
- LoadLibrary(L"dxva2.dll");
#if defined(ENABLE_DX11_FOR_WIN7)
LoadLibrary(L"mshtmlmedia.dll");
#endif
}
}
+// static
+std::pair<int, int> DXVAVideoDecodeAccelerator::GetMinResolution(
+ media::VideoCodecProfile profile) {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::GetMinResolution");
+ std::pair<int, int> min_resolution;
+ if (profile >= media::H264PROFILE_BASELINE &&
+ profile <= media::H264PROFILE_HIGH) {
+ // Windows Media Foundation H.264 decoding does not support decoding videos
+ // with any dimension smaller than 48 pixels:
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815
+ min_resolution = std::make_pair(48, 48);
+ } else {
+ // TODO(ananta)
+ // Detect this properly for VP8/VP9 profiles.
+ min_resolution = std::make_pair(16, 16);
+ }
+ return min_resolution;
+}
+
+// static
+std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxResolution(
+ const media::VideoCodecProfile profile) {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::GetMaxResolution");
+ std::pair<int, int> max_resolution;
+ if (profile >= media::H264PROFILE_BASELINE &&
+ profile <= media::H264PROFILE_HIGH) {
+ max_resolution = GetMaxH264Resolution();
+ } else {
+ // TODO(ananta)
+ // Detect this properly for VP8/VP9 profiles.
+ max_resolution = std::make_pair(4096, 2160);
+ }
+ return max_resolution;
+}
+
+std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxH264Resolution() {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::GetMaxH264Resolution");
+ // The H.264 resolution detection operation is expensive. This static flag
+ // allows us to run the detection once.
+ static bool resolution_detected = false;
+ // Use 1088 to account for 16x16 macroblocks.
+ static std::pair<int, int> max_resolution = std::make_pair(1920, 1088);
+ if (resolution_detected)
+ return max_resolution;
+
+ resolution_detected = true;
+
+ // On Windows 7 the maximum resolution supported by media foundation is
+ // 1920 x 1088.
+ if (base::win::GetVersion() == base::win::VERSION_WIN7)
+ return max_resolution;
+
+  // To detect whether a driver supports the desired resolutions, we try to
+  // create a DXVA decoder instance for that resolution and profile. If that
+  // succeeds, we assume that the driver supports H/W H.264 decoding for that
+  // resolution.
+ HRESULT hr = E_FAIL;
+ base::win::ScopedComPtr<ID3D11Device> device;
+
+ {
+ TRACE_EVENT0("gpu,startup",
+ "GetMaxH264Resolution. QueryDeviceObjectFromANGLE");
+
+ device = QueryDeviceObjectFromANGLE<ID3D11Device>(EGL_D3D11_DEVICE_ANGLE);
+ if (!device.get())
+ return max_resolution;
+ }
+
+ base::win::ScopedComPtr<ID3D11VideoDevice> video_device;
+ hr = device.QueryInterface(IID_ID3D11VideoDevice,
+ video_device.ReceiveVoid());
+ if (FAILED(hr))
+ return max_resolution;
+
+ GUID decoder_guid = {};
+
+ {
+ TRACE_EVENT0("gpu,startup",
+ "GetMaxH264Resolution. H.264 guid search begin");
+ // Enumerate supported video profiles and look for the H264 profile.
+ bool found = false;
+ UINT profile_count = video_device->GetVideoDecoderProfileCount();
+ for (UINT profile_idx = 0; profile_idx < profile_count; profile_idx++) {
+ GUID profile_id = {};
+ hr = video_device->GetVideoDecoderProfile(profile_idx, &profile_id);
+ if (SUCCEEDED(hr) &&
+ (profile_id == DXVA2_ModeH264_E ||
+ profile_id == DXVA2_Intel_ModeH264_E)) {
+ decoder_guid = profile_id;
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return max_resolution;
+ }
+
+  // Legacy AMD drivers with UVD3 or earlier and some Intel GPUs crash while
+ // creating surfaces larger than 1920 x 1088.
+ if (IsLegacyGPU(device.get()))
+ return max_resolution;
+
+ // We look for the following resolutions in the driver.
+ // TODO(ananta)
+ // Look into whether this list needs to be expanded.
+ static std::pair<int, int> resolution_array[] = {
+ // Use 1088 to account for 16x16 macroblocks.
+ std::make_pair(1920, 1088),
+ std::make_pair(2560, 1440),
+ std::make_pair(3840, 2160),
+ std::make_pair(4096, 2160),
+ std::make_pair(4096, 2304),
+ };
+
+ {
+ TRACE_EVENT0("gpu,startup",
+ "GetMaxH264Resolution. Resolution search begin");
+
+ for (size_t res_idx = 0; res_idx < arraysize(resolution_array);
+ res_idx++) {
+ D3D11_VIDEO_DECODER_DESC desc = {};
+ desc.Guid = decoder_guid;
+ desc.SampleWidth = resolution_array[res_idx].first;
+ desc.SampleHeight = resolution_array[res_idx].second;
+ desc.OutputFormat = DXGI_FORMAT_NV12;
+ UINT config_count = 0;
+ hr = video_device->GetVideoDecoderConfigCount(&desc, &config_count);
+ if (FAILED(hr) || config_count == 0)
+ return max_resolution;
+
+ D3D11_VIDEO_DECODER_CONFIG config = {};
+ hr = video_device->GetVideoDecoderConfig(&desc, 0, &config);
+ if (FAILED(hr))
+ return max_resolution;
+
+ base::win::ScopedComPtr<ID3D11VideoDecoder> video_decoder;
+ hr = video_device->CreateVideoDecoder(&desc, &config,
+ video_decoder.Receive());
+ if (!video_decoder.get())
+ return max_resolution;
+
+ max_resolution = resolution_array[res_idx];
+ }
+ }
+ return max_resolution;
+}
+
+// static
+bool DXVAVideoDecodeAccelerator::IsLegacyGPU(ID3D11Device* device) {
+ static const int kAMDGPUId1 = 0x1002;
+ static const int kAMDGPUId2 = 0x1022;
+ static const int kIntelGPU = 0x8086;
+
+ static bool legacy_gpu = true;
+ // This flag ensures that we determine the GPU type once.
+ static bool legacy_gpu_determined = false;
+
+ if (legacy_gpu_determined)
+ return legacy_gpu;
+
+ legacy_gpu_determined = true;
+
+ base::win::ScopedComPtr<IDXGIDevice> dxgi_device;
+ HRESULT hr = dxgi_device.QueryFrom(device);
+ if (FAILED(hr))
+ return legacy_gpu;
+
+ base::win::ScopedComPtr<IDXGIAdapter> adapter;
+ hr = dxgi_device->GetAdapter(adapter.Receive());
+ if (FAILED(hr))
+ return legacy_gpu;
+
+ DXGI_ADAPTER_DESC adapter_desc = {};
+ hr = adapter->GetDesc(&adapter_desc);
+ if (FAILED(hr))
+ return legacy_gpu;
+
+ // We check whether the device is an Intel or an AMD device and whether it
+ // appears in the g_AMDUVD3GPUList or g_IntelLegacyGPUList arrays defined
+ // above. If so, the device is treated as a legacy device.
+ if ((adapter_desc.VendorId == kAMDGPUId1) ||
+ adapter_desc.VendorId == kAMDGPUId2) {
+ {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::IsLegacyGPU. AMD check");
+ for (size_t i = 0; i < arraysize(g_AMDUVD3GPUList); i++) {
+ if (adapter_desc.DeviceId == g_AMDUVD3GPUList[i])
+ return legacy_gpu;
+ }
+ }
+ } else if (adapter_desc.VendorId == kIntelGPU) {
+ {
+ TRACE_EVENT0("gpu,startup",
+ "DXVAVideoDecodeAccelerator::IsLegacyGPU. Intel check");
+ for (size_t i = 0; i < arraysize(g_IntelLegacyGPUList); i++) {
+ if (adapter_desc.DeviceId == g_IntelLegacyGPUList[i])
+ return legacy_gpu;
+ }
+ }
+ }
+ legacy_gpu = false;
+ return legacy_gpu;
+}
+
bool DXVAVideoDecodeAccelerator::InitDecoder(media::VideoCodecProfile profile) {
HMODULE decoder_dll = NULL;
@@ -1104,24 +1683,26 @@ bool DXVAVideoDecodeAccelerator::InitDecoder(media::VideoCodecProfile profile) {
"msmpeg2vdec.dll required for decoding is not loaded",
false);
- // Check version of DLL, version 6.7.7140 is blacklisted due to high crash
+ // Check version of DLL, version 6.1.7140 is blacklisted due to high crash
// rates in browsers loading that DLL. If that is the version installed we
// fall back to software decoding. See crbug/403440.
- FileVersionInfo* version_info =
- FileVersionInfo::CreateFileVersionInfoForModule(decoder_dll);
+ scoped_ptr<FileVersionInfo> version_info(
+ FileVersionInfo::CreateFileVersionInfoForModule(decoder_dll));
RETURN_ON_FAILURE(version_info,
"unable to get version of msmpeg2vdec.dll",
false);
base::string16 file_version = version_info->file_version();
RETURN_ON_FAILURE(file_version.find(L"6.1.7140") == base::string16::npos,
- "blacklisted version of msmpeg2vdec.dll 6.7.7140",
+ "blacklisted version of msmpeg2vdec.dll 6.1.7140",
false);
codec_ = media::kCodecH264;
clsid = __uuidof(CMSH264DecoderMFT);
- } else if ((profile == media::VP8PROFILE_ANY ||
- profile == media::VP9PROFILE_ANY) &&
- base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableAcceleratedVpxDecode)) {
+ } else if (enable_accelerated_vpx_decode_ &&
+ (profile == media::VP8PROFILE_ANY ||
+ profile == media::VP9PROFILE_PROFILE0 ||
+ profile == media::VP9PROFILE_PROFILE1 ||
+ profile == media::VP9PROFILE_PROFILE2 ||
+ profile == media::VP9PROFILE_PROFILE3)) {
int program_files_key = base::DIR_PROGRAM_FILES;
if (base::win::OSInfo::GetInstance()->wow64_status() ==
base::win::OSInfo::WOW64_ENABLED) {
@@ -1230,19 +1811,24 @@ bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
DVLOG(1) << "Failed to set Low latency mode on decoder. Error: " << hr;
}
+ auto gl_context = get_gl_context_cb_.Run();
+ RETURN_ON_FAILURE(gl_context, "Couldn't get GL context", false);
+
// The decoder should use DX11 iff
// 1. The underlying H/W decoder supports it.
// 2. We have a pointer to the MFCreateDXGIDeviceManager function needed for
// this. This should always be true for Windows 8+.
// 3. ANGLE is using DX11.
- DCHECK(gl_context_);
if (create_dxgi_device_manager_ &&
- (gl_context_->GetGLRenderer().find("Direct3D11") !=
- std::string::npos)) {
+ (gl_context->GetGLRenderer().find("Direct3D11") != std::string::npos)) {
UINT32 dx11_aware = 0;
attributes->GetUINT32(MF_SA_D3D11_AWARE, &dx11_aware);
use_dx11_ = !!dx11_aware;
}
+
+ use_keyed_mutex_ =
+ use_dx11_ && gfx::GLSurfaceEGL::HasEGLExtension("EGL_ANGLE_keyed_mutex");
+
return true;
}
@@ -1436,8 +2022,9 @@ void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
if (!output_picture_buffers_.size())
return;
- RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_.Run(),
- "Failed to make context current", PLATFORM_FAILURE,);
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
OutputBuffers::iterator index;
@@ -1449,7 +2036,6 @@ void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
PendingSampleInfo* pending_sample = NULL;
{
base::AutoLock lock(decoder_lock_);
-
PendingSampleInfo& sample_info = pending_output_samples_.front();
if (sample_info.picture_buffer_id != -1)
continue;
@@ -1533,13 +2119,22 @@ void DXVAVideoDecodeAccelerator::Invalidate() {
if (GetState() == kUninitialized)
return;
+ // Best effort to make the GL context current.
+ make_context_current_cb_.Run();
+
decoder_thread_.Stop();
weak_this_factory_.InvalidateWeakPtrs();
output_picture_buffers_.clear();
stale_output_picture_buffers_.clear();
pending_output_samples_.clear();
- pending_input_buffers_.clear();
+ // We want to continue processing pending input after detecting a config
+ // change.
+ if (GetState() != kConfigChange)
+ pending_input_buffers_.clear();
decoder_.Release();
+ pictures_requested_ = false;
+
+ config_change_detector_.reset();
if (use_dx11_) {
if (video_format_converter_mft_.get()) {
@@ -1552,6 +2147,7 @@ void DXVAVideoDecodeAccelerator::Invalidate() {
d3d11_device_manager_.Release();
d3d11_query_.Release();
dx11_video_format_converter_media_type_needs_init_ = true;
+ multi_threaded_.Release();
} else {
d3d9_.Release();
d3d9_device_ex_.Release();
@@ -1591,10 +2187,8 @@ void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) {
DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
// This task could execute after the decoder has been torn down.
if (GetState() != kUninitialized && client_) {
- client_->ProvidePictureBuffers(
- kNumPictureBuffers,
- gfx::Size(width, height),
- GL_TEXTURE_2D);
+ client_->ProvidePictureBuffers(kNumPictureBuffers, 1,
+ gfx::Size(width, height), GL_TEXTURE_2D);
}
}
@@ -1706,13 +2300,31 @@ void DXVAVideoDecodeAccelerator::DecodeInternal(
return;
}
+ // Check whether the resolution, bit rate, etc. of the stream changed. If
+ // so, we reinitialize the decoder to ensure that the stream decodes
+ // correctly.
+ bool config_changed = false;
+
+ HRESULT hr = CheckConfigChanged(sample.get(), &config_changed);
+ RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to check video stream config",
+ PLATFORM_FAILURE,);
+
+ if (config_changed) {
+ pending_input_buffers_.push_back(sample);
+ main_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&DXVAVideoDecodeAccelerator::ConfigChanged,
+ weak_this_factory_.GetWeakPtr(),
+ config_));
+ return;
+ }
+
if (!inputs_before_decode_) {
TRACE_EVENT_ASYNC_BEGIN0("gpu", "DXVAVideoDecodeAccelerator.Decoding",
this);
}
inputs_before_decode_++;
- HRESULT hr = decoder_->ProcessInput(0, sample.get(), 0);
+ hr = decoder_->ProcessInput(0, sample.get(), 0);
// Per MSDN, if the decoder returns MF_E_NOTACCEPTING, it means that it
// has enough data to produce one or more output samples. In this case the
// recommended options are to
@@ -1790,7 +2402,7 @@ void DXVAVideoDecodeAccelerator::HandleResolutionChanged(int width,
main_thread_task_runner_->PostTask(
FROM_HERE,
base::Bind(&DXVAVideoDecodeAccelerator::DismissStaleBuffers,
- weak_this_factory_.GetWeakPtr()));
+ weak_this_factory_.GetWeakPtr(), false));
main_thread_task_runner_->PostTask(
FROM_HERE,
@@ -1800,13 +2412,17 @@ void DXVAVideoDecodeAccelerator::HandleResolutionChanged(int width,
height));
}
-void DXVAVideoDecodeAccelerator::DismissStaleBuffers() {
+void DXVAVideoDecodeAccelerator::DismissStaleBuffers(bool force) {
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
+
OutputBuffers::iterator index;
for (index = output_picture_buffers_.begin();
index != output_picture_buffers_.end();
++index) {
- if (index->second->available()) {
+ if (force || index->second->available()) {
DVLOG(1) << "Dismissing picture id: " << index->second->id();
client_->DismissPictureBuffer(index->second->id());
} else {
@@ -1821,6 +2437,10 @@ void DXVAVideoDecodeAccelerator::DismissStaleBuffers() {
void DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer(
int32_t picture_buffer_id) {
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
+
OutputBuffers::iterator it = stale_output_picture_buffers_.find(
picture_buffer_id);
DCHECK(it != stale_output_picture_buffers_.end());
@@ -1935,13 +2555,15 @@ void DXVAVideoDecodeAccelerator::CopySurfaceComplete(
if (picture_buffer->available())
return;
- RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_.Run(),
- "Failed to make context current", PLATFORM_FAILURE,);
+ RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
+ "Failed to make context current",
+ PLATFORM_FAILURE, );
DCHECK(!output_picture_buffers_.empty());
- picture_buffer->CopySurfaceComplete(src_surface,
- dest_surface);
+ bool result = picture_buffer->CopySurfaceComplete(src_surface, dest_surface);
+ RETURN_AND_NOTIFY_ON_FAILURE(result, "Failed to complete copying surface",
+ PLATFORM_FAILURE, );
NotifyPictureReady(picture_buffer->id(), input_buffer_id);
@@ -1964,11 +2586,14 @@ void DXVAVideoDecodeAccelerator::CopySurfaceComplete(
base::Unretained(this)));
}
-void DXVAVideoDecodeAccelerator::CopyTexture(ID3D11Texture2D* src_texture,
- ID3D11Texture2D* dest_texture,
- IMFSample* video_frame,
- int picture_buffer_id,
- int input_buffer_id) {
+void DXVAVideoDecodeAccelerator::CopyTexture(
+ ID3D11Texture2D* src_texture,
+ ID3D11Texture2D* dest_texture,
+ base::win::ScopedComPtr<IDXGIKeyedMutex> dest_keyed_mutex,
+ uint64_t keyed_mutex_value,
+ IMFSample* video_frame,
+ int picture_buffer_id,
+ int input_buffer_id) {
HRESULT hr = E_FAIL;
DCHECK(use_dx11_);
@@ -2005,14 +2630,11 @@ void DXVAVideoDecodeAccelerator::CopyTexture(ID3D11Texture2D* src_texture,
}
decoder_thread_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&DXVAVideoDecodeAccelerator::CopyTexture,
- base::Unretained(this),
- src_texture,
- dest_texture,
- input_sample_for_conversion.Detach(),
- picture_buffer_id,
- input_buffer_id));
+ FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopyTexture,
+ base::Unretained(this), src_texture, dest_texture,
+ dest_keyed_mutex, keyed_mutex_value,
+ input_sample_for_conversion.Detach(),
+ picture_buffer_id, input_buffer_id));
return;
}
@@ -2023,6 +2645,13 @@ void DXVAVideoDecodeAccelerator::CopyTexture(ID3D11Texture2D* src_texture,
DCHECK(video_format_converter_mft_.get());
+ if (dest_keyed_mutex) {
+ HRESULT hr =
+ dest_keyed_mutex->AcquireSync(keyed_mutex_value, kAcquireSyncWaitMs);
+ RETURN_AND_NOTIFY_ON_FAILURE(
+ hr == S_OK, "D3D11 failed to acquire keyed mutex for texture.",
+ PLATFORM_FAILURE, );
+ }
// The video processor MFT requires output samples to be allocated by the
// caller. We create a sample with a buffer backed by the ID3D11Texture2D
// interface exposed by ANGLE. This works nicely, as it ensures that the
@@ -2077,18 +2706,27 @@ void DXVAVideoDecodeAccelerator::CopyTexture(ID3D11Texture2D* src_texture,
"Failed to convert output sample format.", PLATFORM_FAILURE,);
}
- d3d11_device_context_->Flush();
- d3d11_device_context_->End(d3d11_query_.get());
+ if (dest_keyed_mutex) {
+ HRESULT hr = dest_keyed_mutex->ReleaseSync(keyed_mutex_value + 1);
+ RETURN_AND_NOTIFY_ON_FAILURE(hr == S_OK, "Failed to release keyed mutex.",
+ PLATFORM_FAILURE, );
- decoder_thread_task_runner_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
- base::Unretained(this), 0,
- reinterpret_cast<IDirect3DSurface9*>(NULL),
- reinterpret_cast<IDirect3DSurface9*>(NULL),
- picture_buffer_id, input_buffer_id),
- base::TimeDelta::FromMilliseconds(
- kFlushDecoderSurfaceTimeoutMs));
+ main_thread_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete,
+ weak_this_factory_.GetWeakPtr(), nullptr, nullptr,
+ picture_buffer_id, input_buffer_id));
+ } else {
+ d3d11_device_context_->Flush();
+ d3d11_device_context_->End(d3d11_query_.get());
+
+ decoder_thread_task_runner_->PostDelayedTask(
+ FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
+ base::Unretained(this), 0,
+ reinterpret_cast<IDirect3DSurface9*>(NULL),
+ reinterpret_cast<IDirect3DSurface9*>(NULL),
+ picture_buffer_id, input_buffer_id),
+ base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
+ }
}
void DXVAVideoDecodeAccelerator::FlushDecoder(
@@ -2290,12 +2928,6 @@ bool DXVAVideoDecodeAccelerator::SetTransformOutputType(
RETURN_ON_HR_FAILURE(hr, "Failed to set media type attributes", false);
}
hr = transform->SetOutputType(0, media_type.get(), 0); // No flags
- if (FAILED(hr)) {
- base::debug::Alias(&hr);
- // TODO(ananta)
- // Remove this CHECK when this stabilizes in the field.
- CHECK(false);
- }
RETURN_ON_HR_FAILURE(hr, "Failed to set output type", false);
return true;
}
@@ -2304,4 +2936,39 @@ bool DXVAVideoDecodeAccelerator::SetTransformOutputType(
return false;
}
+HRESULT DXVAVideoDecodeAccelerator::CheckConfigChanged(
+ IMFSample* sample, bool* config_changed) {
+ if (codec_ != media::kCodecH264)
+ return S_FALSE;
+
+ base::win::ScopedComPtr<IMFMediaBuffer> buffer;
+ HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
+ RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from input sample", hr);
+
+ MediaBufferScopedPointer scoped_media_buffer(buffer.get());
+
+ if (!config_change_detector_->DetectConfig(
+ scoped_media_buffer.get(),
+ scoped_media_buffer.current_length())) {
+ RETURN_ON_HR_FAILURE(E_FAIL, "Failed to detect H.264 stream config",
+ E_FAIL);
+ }
+ *config_changed = config_change_detector_->config_changed();
+ return S_OK;
+}
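MediaBufferScopedPointer is used above but defined outside this hunk. A sketch of its assumed shape, a RAII wrapper over IMFMediaBuffer::Lock()/Unlock() that exposes the mapped bytes and their length:

    // Assumed shape of MediaBufferScopedPointer: locks the buffer for the
    // object's lifetime and exposes the mapped bytes and their length.
    class MediaBufferScopedPointer {
     public:
      explicit MediaBufferScopedPointer(IMFMediaBuffer* media_buffer)
          : media_buffer_(media_buffer), buffer_(nullptr), current_length_(0) {
        DWORD max_length = 0;
        HRESULT hr = media_buffer_->Lock(&buffer_, &max_length,
                                         &current_length_);
        CHECK(SUCCEEDED(hr));
      }

      ~MediaBufferScopedPointer() {
        HRESULT hr = media_buffer_->Unlock();
        CHECK(SUCCEEDED(hr));
      }

      uint8_t* get() { return buffer_; }
      DWORD current_length() const { return current_length_; }

     private:
      base::win::ScopedComPtr<IMFMediaBuffer> media_buffer_;
      BYTE* buffer_;
      DWORD current_length_;

      DISALLOW_COPY_AND_ASSIGN(MediaBufferScopedPointer);
    };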
+
+void DXVAVideoDecodeAccelerator::ConfigChanged(
+ const Config& config) {
+ DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
+
+ SetState(kConfigChange);
+ DismissStaleBuffers(true);
+ Invalidate();
+ Initialize(config_, client_);
+ decoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
+ base::Unretained(this)));
+}
+
} // namespace content
diff --git a/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.h b/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.h
index d3aeda62c9b..01c15e62430 100644
--- a/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.h
+++ b/chromium/content/common/gpu/media/dxva_video_decode_accelerator_win.h
@@ -7,6 +7,7 @@
#include <d3d11.h>
#include <d3d9.h>
+#include <initguid.h>
#include <stdint.h>
// Work around bug in this header by disabling the relevant warning for it.
// https://connect.microsoft.com/VisualStudio/feedback/details/911260/dxva2api-h-in-win8-sdk-triggers-c4201-with-w4
@@ -29,6 +30,8 @@
#include "base/threading/thread.h"
#include "base/win/scoped_comptr.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
+#include "media/filters/h264_parser.h"
#include "media/video/video_decode_accelerator.h"
interface IMFSample;
@@ -44,6 +47,43 @@ typedef HRESULT (WINAPI* CreateDXGIDeviceManager)(
namespace content {
+// Provides functionality to detect H.264 stream configuration changes.
+// TODO(ananta)
+// Move this to a common place so that all VDA's can use this.
+class H264ConfigChangeDetector {
+ public:
+ H264ConfigChangeDetector();
+ ~H264ConfigChangeDetector();
+
+ // Detects stream configuration changes.
+ // Returns false on failure.
+ bool DetectConfig(const uint8_t* stream, unsigned int size);
+
+ bool config_changed() const {
+ return config_changed_;
+ }
+
+ private:
+ // These fields track the most recently seen SPS/PPS in the H.264 bitstream;
+ // incoming SPS/PPS NALUs are compared against them to detect a
+ // configuration change.
+ int last_sps_id_;
+ std::vector<uint8_t> last_sps_;
+ int last_pps_id_;
+ std::vector<uint8_t> last_pps_;
+ // Set to true if we detect a stream configuration change.
+ bool config_changed_;
+ // We want to report configuration changes only after we see an IDR slice.
+ // This flag records that a potential configuration change was seen and
+ // should be honored at the next IDR slice.
+ bool pending_config_changed_;
+
+ scoped_ptr<media::H264Parser> parser_;
+
+ DISALLOW_COPY_AND_ASSIGN(H264ConfigChangeDetector);
+};
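DetectConfig() is only declared here; its definition lives in the .cc file. A sketch of the comparison it presumably performs with the media::H264Parser API included above (SPS/PPS id bookkeeping and parser error handling are elided, so this is illustrative rather than the shipped implementation):

    bool H264ConfigChangeDetector::DetectConfig(const uint8_t* stream,
                                                unsigned int size) {
      if (!parser_)
        parser_.reset(new media::H264Parser());
      parser_->SetStream(stream, size);
      config_changed_ = false;

      media::H264NALU nalu;
      while (parser_->AdvanceToNextNALU(&nalu) == media::H264Parser::kOk) {
        std::vector<uint8_t> nalu_bytes(nalu.data, nalu.data + nalu.size);
        switch (nalu.nal_unit_type) {
          case media::H264NALU::kSPS:
            if (!last_sps_.empty() && last_sps_ != nalu_bytes)
              pending_config_changed_ = true;
            last_sps_.swap(nalu_bytes);
            break;
          case media::H264NALU::kPPS:
            if (!last_pps_.empty() && last_pps_ != nalu_bytes)
              pending_config_changed_ = true;
            last_pps_.swap(nalu_bytes);
            break;
          case media::H264NALU::kIDRSlice:
            // Honor a pending change only at an IDR, where reinitializing
            // the decoder is safe.
            config_changed_ = pending_config_changed_;
            pending_config_changed_ = false;
            break;
          default:
            break;
        }
      }
      return true;
    }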
+
+
// Class to provide a DXVA 2.0 based accelerator using the Microsoft Media
// foundation APIs via the VideoDecodeAccelerator interface.
// This class lives on a single thread and DCHECKs that it is never accessed
@@ -57,12 +97,14 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
kResetting, // upon received Reset(), before ResetDone()
kStopped, // upon output EOS received.
kFlushing, // upon flush request received.
+ kConfigChange, // stream configuration change detected.
};
// Does not take ownership of |client| which must outlive |*this|.
- explicit DXVAVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- gfx::GLContext* gl_context);
+ DXVAVideoDecodeAccelerator(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ bool enable_accelerated_vpx_decode);
~DXVAVideoDecodeAccelerator() override;
// media::VideoDecodeAccelerator implementation.
@@ -74,7 +116,10 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
GLenum GetSurfaceInternalFormat() const override;
static media::VideoDecodeAccelerator::SupportedProfiles
@@ -87,6 +132,23 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
typedef void* EGLConfig;
typedef void* EGLSurface;
+ // Returns the minimum resolution for the |profile| passed in.
+ static std::pair<int, int> GetMinResolution(
+ const media::VideoCodecProfile profile);
+
+ // Returns the maximum resolution for the |profile| passed in.
+ static std::pair<int, int> GetMaxResolution(
+ const media::VideoCodecProfile profile);
+
+ // Returns the maximum resolution for H264 video.
+ static std::pair<int, int> GetMaxH264Resolution();
+
+ // Certain AMD GPU drivers like R600, R700, Evergreen and Cayman and
+ // some second generation Intel GPU drivers crash if we create a video
+ // device with a resolution higher than 1920 x 1088. This function
+ // returns true if the GPU is in that list.
+ static bool IsLegacyGPU(ID3D11Device* device);
+
// Creates and initializes an instance of the D3D device and the
// corresponding device manager. The device manager instance is eventually
// passed to the IMFTransform interface implemented by the decoder.
@@ -178,7 +240,7 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
typedef std::map<int32_t, linked_ptr<DXVAPictureBuffer>> OutputBuffers;
// Tells the client to dismiss the stale picture buffers passed in.
- void DismissStaleBuffers();
+ void DismissStaleBuffers(bool force);
// Called after the client indicates we can recycle a stale picture buffer.
void DeferredDismissStaleBuffer(int32_t picture_buffer_id);
@@ -191,10 +253,6 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
// the decoder thread. Thread safe.
State GetState();
- // Worker function for the Decoder Reset functionality. Executes on the
- // decoder thread and queues tasks on the main thread as needed.
- void ResetHelper();
-
// Starts the thread used for decoding.
void StartDecoderThread();
@@ -222,6 +280,8 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
// is the sample containing the frame to be copied.
void CopyTexture(ID3D11Texture2D* src_texture,
ID3D11Texture2D* dest_texture,
+ base::win::ScopedComPtr<IDXGIKeyedMutex> dest_keyed_mutex,
+ uint64_t keyed_mutex_value,
IMFSample* video_frame,
int picture_buffer_id,
int input_buffer_id);
@@ -235,6 +295,10 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
int picture_buffer_id,
int input_buffer_id);
+ // Polls to wait for GPU commands to be finished on the picture buffer
+ // before reusing it.
+ void WaitForOutputBuffer(int32_t picture_buffer_id, int count);
+
// Initializes the DX11 Video format converter media types.
// Returns true on success.
bool InitializeDX11VideoFormatConverterMediaType(int width, int height);
@@ -257,6 +321,18 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
int width,
int height);
+ // Checks whether the resolution, bitrate, etc. of the stream changed. We do
+ // this by keeping track of the SPS/PPS NALUs; if they change, we assume
+ // that the configuration changed.
+ // Returns S_OK or S_FALSE on success.
+ // The |config_changed| parameter is set to true if we detect a change in
+ // the stream.
+ HRESULT CheckConfigChanged(IMFSample* sample, bool* config_changed);
+
+ // Called when we detect a stream configuration change. We reinitialize the
+ // decoder here.
+ void ConfigChanged(const Config& config);
+
// To expose client callbacks from VideoDecodeAccelerator.
media::VideoDecodeAccelerator::Client* client_;
@@ -340,8 +416,10 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
typedef std::list<base::win::ScopedComPtr<IMFSample>> PendingInputs;
PendingInputs pending_input_buffers_;
+ // Callback to get current GLContext.
+ GetGLContextCallback get_gl_context_cb_;
// Callback to set the correct gl context.
- base::Callback<bool(void)> make_context_current_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
// Which codec we are decoding with hardware acceleration.
media::VideoCodec codec_;
@@ -373,16 +451,30 @@ class CONTENT_EXPORT DXVAVideoDecodeAccelerator
// H/W decoding.
bool use_dx11_;
+ // True if we should use DXGI keyed mutexes to synchronize between the two
+ // contexts.
+ bool use_keyed_mutex_;
+
// Set to true if the DX11 video format converter input media types need to
// be initialized. Defaults to true.
bool dx11_video_format_converter_media_type_needs_init_;
- // The GLContext to be used by the decoder.
- scoped_refptr<gfx::GLContext> gl_context_;
-
// Set to true if we are sharing ANGLE's device.
bool using_angle_device_;
+ // Enables experimental hardware acceleration for VP8/VP9 video decoding.
+ const bool enable_accelerated_vpx_decode_;
+
+ // The Media Foundation H.264 decoder has problems handling changes such as
+ // resolution and bitrate changes. Reinitializing the decoder when these
+ // changes occur makes it work correctly. The H264ConfigChangeDetector
+ // class provides functionality to check whether the stream configuration
+ // changed.
+ scoped_ptr<H264ConfigChangeDetector> config_change_detector_;
+
+ // Contains the initialization parameters for the video.
+ Config config_;
+
// WeakPtrFactory for posting tasks back to |this|.
base::WeakPtrFactory<DXVAVideoDecodeAccelerator> weak_this_factory_;
diff --git a/chromium/content/common/gpu/media/fake_video_decode_accelerator.cc b/chromium/content/common/gpu/media/fake_video_decode_accelerator.cc
index 7524dd18ebf..01ac07dcd3b 100644
--- a/chromium/content/common/gpu/media/fake_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/fake_video_decode_accelerator.cc
@@ -29,17 +29,14 @@ static const unsigned int kNumBuffers = media::limits::kMaxVideoFrames +
(media::limits::kMaxVideoFrames & 1u);
FakeVideoDecodeAccelerator::FakeVideoDecodeAccelerator(
- gfx::GLContext* gl,
- gfx::Size size,
- const base::Callback<bool(void)>& make_context_current)
+ const gfx::Size& size,
+ const MakeGLContextCurrentCallback& make_context_current_cb)
: child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
client_(NULL),
- make_context_current_(make_context_current),
- gl_(gl),
+ make_context_current_cb_(make_context_current_cb),
frame_buffer_size_(size),
flushing_(false),
- weak_this_factory_(this) {
-}
+ weak_this_factory_(this) {}
FakeVideoDecodeAccelerator::~FakeVideoDecodeAccelerator() {
}
@@ -59,14 +56,23 @@ bool FakeVideoDecodeAccelerator::Initialize(const Config& config,
// V4L2VideoDecodeAccelerator waits until the first decode call to ask for
// buffers. This class asks for them at initialization instead.
client_ = client;
- client_->ProvidePictureBuffers(kNumBuffers,
- frame_buffer_size_,
+ client_->ProvidePictureBuffers(kNumBuffers, 1, frame_buffer_size_,
kDefaultTextureTarget);
return true;
}
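ProvidePictureBuffers() now carries a textures-per-buffer count (1 here), so each media::PictureBuffer on the client side holds a vector of texture ids, which AssignPictureBuffers() below relies on via texture_ids(). A hypothetical client-side allocation follows; |count|, |textures_per_buffer|, |dimensions| and |next_picture_buffer_id| are stand-ins for the client's own state, and the PictureBuffer constructor shape is assumed from the usage in this file:

    // Hypothetical client response to ProvidePictureBuffers().
    std::vector<media::PictureBuffer> buffers;
    for (uint32_t i = 0; i < count; ++i) {
      std::vector<uint32_t> texture_ids(textures_per_buffer);
      glGenTextures(textures_per_buffer, texture_ids.data());
      buffers.push_back(
          media::PictureBuffer(next_picture_buffer_id++, dimensions,
                               texture_ids));
    }
    vda->AssignPictureBuffers(buffers);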
void FakeVideoDecodeAccelerator::Decode(
const media::BitstreamBuffer& bitstream_buffer) {
+ // We won't really read from the bitstream_buffer, so close the handle.
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+
+ if (bitstream_buffer.id() < 0) {
+ LOG(ERROR) << "Invalid bitstream: id=" << bitstream_buffer.id();
+ client_->NotifyError(INVALID_ARGUMENT);
+ return;
+ }
+
int bitstream_buffer_id = bitstream_buffer.id();
queued_bitstream_ids_.push(bitstream_buffer_id);
child_task_runner_->PostTask(
@@ -93,12 +99,13 @@ void FakeVideoDecodeAccelerator::AssignPictureBuffers(
memset(black_data.get(),
0,
frame_buffer_size_.width() * frame_buffer_size_.height() * 4);
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "ReusePictureBuffer(): could not make context current";
return;
}
for (size_t index = 0; index < buffers.size(); ++index) {
- glBindTexture(GL_TEXTURE_2D, buffers[index].texture_id());
+ DCHECK_LE(1u, buffers[index].texture_ids().size());
+ glBindTexture(GL_TEXTURE_2D, buffers[index].texture_ids()[0]);
// Every other frame white and the rest black.
uint8_t* data = index % 2 ? white_data.get() : black_data.get();
glTexImage2D(GL_TEXTURE_2D,
@@ -152,8 +159,10 @@ void FakeVideoDecodeAccelerator::Destroy() {
delete this;
}
-bool FakeVideoDecodeAccelerator::CanDecodeOnIOThread() {
- return true;
+bool FakeVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+ return false;
}
void FakeVideoDecodeAccelerator::DoPictureReady() {
diff --git a/chromium/content/common/gpu/media/fake_video_decode_accelerator.h b/chromium/content/common/gpu/media/fake_video_decode_accelerator.h
index 7dcbfda2e77..10d47822b45 100644
--- a/chromium/content/common/gpu/media/fake_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/fake_video_decode_accelerator.h
@@ -13,6 +13,7 @@
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "media/video/video_decode_accelerator.h"
#include "ui/gfx/geometry/size_f.h"
#include "ui/gl/gl_context.h"
@@ -23,9 +24,8 @@ class CONTENT_EXPORT FakeVideoDecodeAccelerator
: public media::VideoDecodeAccelerator {
public:
FakeVideoDecodeAccelerator(
- gfx::GLContext* gl,
- gfx::Size size,
- const base::Callback<bool(void)>& make_context_current);
+ const gfx::Size& size,
+ const MakeGLContextCurrentCallback& make_context_current_cb);
~FakeVideoDecodeAccelerator() override;
bool Initialize(const Config& config, Client* client) override;
@@ -36,7 +36,10 @@ class CONTENT_EXPORT FakeVideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
private:
void DoPictureReady();
@@ -49,8 +52,7 @@ class CONTENT_EXPORT FakeVideoDecodeAccelerator
Client* client_;
// Make our context current before running any GL entry points.
- base::Callback<bool(void)> make_context_current_;
- gfx::GLContext* gl_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
// Output picture size.
gfx::Size frame_buffer_size_;
diff --git a/chromium/content/common/gpu/media/gpu_arc_video_service.cc b/chromium/content/common/gpu/media/gpu_arc_video_service.cc
deleted file mode 100644
index 91d36980ad1..00000000000
--- a/chromium/content/common/gpu/media/gpu_arc_video_service.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/media/gpu_arc_video_service.h"
-
-#include "base/logging.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "ipc/ipc_listener.h"
-#include "ipc/ipc_message_macros.h"
-#include "ipc/ipc_sync_channel.h"
-
-namespace content {
-
-// TODO(kcwu) implement ArcVideoAccelerator::Client.
-class GpuArcVideoService::AcceleratorStub : public IPC::Listener,
- public IPC::Sender {
- public:
- // |owner| outlives AcceleratorStub.
- explicit AcceleratorStub(GpuArcVideoService* owner) : owner_(owner) {}
-
- ~AcceleratorStub() override {
- DCHECK(thread_checker_.CalledOnValidThread());
- channel_->Close();
- }
-
- IPC::ChannelHandle CreateChannel(
- base::WaitableEvent* shutdown_event,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner) {
- IPC::ChannelHandle handle =
- IPC::Channel::GenerateVerifiedChannelID("arc-video");
- channel_ = IPC::SyncChannel::Create(handle, IPC::Channel::MODE_SERVER, this,
- io_task_runner, false, shutdown_event);
- base::ScopedFD client_fd = channel_->TakeClientFileDescriptor();
- DCHECK(client_fd.is_valid());
- handle.socket = base::FileDescriptor(std::move(client_fd));
- return handle;
- }
-
- // IPC::Sender implementation:
- bool Send(IPC::Message* msg) override {
- DCHECK(msg);
- return channel_->Send(msg);
- }
-
- // IPC::Listener implementation:
- void OnChannelError() override {
- DCHECK(thread_checker_.CalledOnValidThread());
- // RemoveClient will delete |this|.
- owner_->RemoveClient(this);
- }
-
- // IPC::Listener implementation:
- bool OnMessageReceived(const IPC::Message& msg) override {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- // TODO(kcwu) Add handlers here.
- return false;
- }
-
- private:
- base::ThreadChecker thread_checker_;
- GpuArcVideoService* const owner_;
- scoped_ptr<IPC::SyncChannel> channel_;
-};
-
-GpuArcVideoService::GpuArcVideoService(
- base::WaitableEvent* shutdown_event,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
- : shutdown_event_(shutdown_event), io_task_runner_(io_task_runner) {}
-
-GpuArcVideoService::~GpuArcVideoService() {}
-
-void GpuArcVideoService::CreateChannel(const CreateChannelCallback& callback) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- scoped_ptr<AcceleratorStub> stub(new AcceleratorStub(this));
-
- IPC::ChannelHandle handle =
- stub->CreateChannel(shutdown_event_, io_task_runner_);
- accelerator_stubs_[stub.get()] = std::move(stub);
-
- callback.Run(handle);
-}
-
-void GpuArcVideoService::RemoveClient(AcceleratorStub* stub) {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- accelerator_stubs_.erase(stub);
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/media/gpu_arc_video_service.h b/chromium/content/common/gpu/media/gpu_arc_video_service.h
deleted file mode 100644
index 131150c9f94..00000000000
--- a/chromium/content/common/gpu/media/gpu_arc_video_service.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_ARC_VIDEO_SERVICE_H_
-#define CONTENT_COMMON_GPU_MEDIA_GPU_ARC_VIDEO_SERVICE_H_
-
-#include <map>
-
-#include "base/callback.h"
-#include "base/threading/thread_checker.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-class WaitableEvent;
-}
-
-namespace IPC {
-struct ChannelHandle;
-}
-
-namespace content {
-
-// GpuArcVideoService manages life-cycle and IPC message translation for
-// ArcVideoAccelerator.
-//
-// For each creation request from GpuChannelManager, GpuArcVideoService will
-// create a new IPC channel.
-class GpuArcVideoService {
- public:
- class AcceleratorStub;
- using CreateChannelCallback = base::Callback<void(const IPC::ChannelHandle&)>;
-
- // |shutdown_event| should signal an event when this process is about to be
- // shut down in order to notify our new IPC channel to terminate.
- GpuArcVideoService(
- base::WaitableEvent* shutdown_event,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
-
- // Upon deletion, all ArcVideoAccelerator will be deleted and the associated
- // IPC channels are closed.
- ~GpuArcVideoService();
-
- // Creates a new accelerator stub. The creation result will be sent back via
- // |callback|.
- void CreateChannel(const CreateChannelCallback& callback);
-
- // Removes the reference of |stub| (and trigger deletion) from this class.
- void RemoveClient(AcceleratorStub* stub);
-
- private:
- base::ThreadChecker thread_checker_;
-
- // Shutdown event of GPU process.
- base::WaitableEvent* shutdown_event_;
-
- // GPU io thread task runner.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
-
- // Bookkeeping all accelerator stubs.
- std::map<AcceleratorStub*, scoped_ptr<AcceleratorStub>> accelerator_stubs_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuArcVideoService);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_MEDIA_GPU_ARC_VIDEO_SERVICE_H_
diff --git a/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc b/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
index 7408e46d927..3e256073e84 100644
--- a/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
@@ -13,13 +13,14 @@
#include "base/memory/shared_memory.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
+#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_messages.h"
+#include "gpu/ipc/service/gpu_channel.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/message_filter.h"
#include "media/filters/jpeg_parser.h"
+#include "media/gpu/ipc/common/media_messages.h"
#include "ui/gfx/geometry/size.h"
#if defined(OS_CHROMEOS)
@@ -41,12 +42,6 @@ void DecodeFinished(scoped_ptr<base::SharedMemory> shm) {
}
bool VerifyDecodeParams(const AcceleratedJpegDecoderMsg_Decode_Params& params) {
- if (params.input_buffer_id < 0) {
- LOG(ERROR) << "BitstreamBuffer id " << params.input_buffer_id
- << " out of range";
- return false;
- }
-
const int kJpegMaxDimension = UINT16_MAX;
if (params.coded_size.IsEmpty() ||
params.coded_size.width() > kJpegMaxDimension ||
@@ -55,11 +50,6 @@ bool VerifyDecodeParams(const AcceleratedJpegDecoderMsg_Decode_Params& params) {
return false;
}
- if (!base::SharedMemory::IsHandleValid(params.input_buffer_handle)) {
- LOG(ERROR) << "invalid input_buffer_handle";
- return false;
- }
-
if (!base::SharedMemory::IsHandleValid(params.output_video_frame_handle)) {
LOG(ERROR) << "invalid output_video_frame_handle";
return false;
@@ -163,13 +153,12 @@ class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
void AddClientOnIOThread(int32_t route_id,
Client* client,
- IPC::Message* reply_msg) {
+ base::Callback<void(bool)> response) {
DCHECK(io_task_runner_->BelongsToCurrentThread());
DCHECK(client_map_.count(route_id) == 0);
client_map_[route_id] = client;
- GpuMsg_CreateJpegDecoder::WriteReplyParams(reply_msg, true);
- SendOnIOThread(reply_msg);
+ response.Run(true);
}
void OnDestroyOnIOThread(const int32_t* route_id) {
@@ -208,34 +197,28 @@ class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
if (!VerifyDecodeParams(params)) {
NotifyDecodeStatusOnIOThread(
- *route_id, params.input_buffer_id,
+ *route_id, params.input_buffer.id(),
media::JpegDecodeAccelerator::INVALID_ARGUMENT);
- if (base::SharedMemory::IsHandleValid(params.input_buffer_handle))
- base::SharedMemory::CloseHandle(params.input_buffer_handle);
if (base::SharedMemory::IsHandleValid(params.output_video_frame_handle))
base::SharedMemory::CloseHandle(params.output_video_frame_handle);
return;
}
// For handles in |params|, from now on, |params.output_video_frame_handle|
- // is taken cared by scoper. |params.input_buffer_handle| need to be closed
- // manually for early exits.
+ // is taken care of by the scoper. |params.input_buffer.handle()| needs to
+ // be closed manually for early exits.
scoped_ptr<base::SharedMemory> output_shm(
new base::SharedMemory(params.output_video_frame_handle, false));
if (!output_shm->Map(params.output_buffer_size)) {
LOG(ERROR) << "Could not map output shared memory for input buffer id "
- << params.input_buffer_id;
+ << params.input_buffer.id();
NotifyDecodeStatusOnIOThread(
- *route_id, params.input_buffer_id,
+ *route_id, params.input_buffer.id(),
media::JpegDecodeAccelerator::PLATFORM_FAILURE);
- base::SharedMemory::CloseHandle(params.input_buffer_handle);
+ base::SharedMemory::CloseHandle(params.input_buffer.handle());
return;
}
- media::BitstreamBuffer input_buffer(params.input_buffer_id,
- params.input_buffer_handle,
- params.input_buffer_size);
-
uint8_t* shm_memory = static_cast<uint8_t*>(output_shm->memory());
scoped_refptr<media::VideoFrame> frame =
media::VideoFrame::WrapExternalSharedMemory(
@@ -250,11 +233,11 @@ class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
base::TimeDelta()); // timestamp
if (!frame.get()) {
LOG(ERROR) << "Could not create VideoFrame for input buffer id "
- << params.input_buffer_id;
+ << params.input_buffer.id();
NotifyDecodeStatusOnIOThread(
- *route_id, params.input_buffer_id,
+ *route_id, params.input_buffer.id(),
media::JpegDecodeAccelerator::PLATFORM_FAILURE);
- base::SharedMemory::CloseHandle(params.input_buffer_handle);
+ base::SharedMemory::CloseHandle(params.input_buffer.handle());
return;
}
frame->AddDestructionObserver(
@@ -262,7 +245,7 @@ class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
DCHECK_GT(client_map_.count(*route_id), 0u);
Client* client = client_map_[*route_id];
- client->Decode(input_buffer, frame);
+ client->Decode(params.input_buffer, frame);
}
protected:
@@ -309,7 +292,7 @@ class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
};
GpuJpegDecodeAccelerator::GpuJpegDecodeAccelerator(
- GpuChannel* channel,
+ gpu::GpuChannel* channel,
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
: channel_(channel),
child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
@@ -325,7 +308,7 @@ GpuJpegDecodeAccelerator::~GpuJpegDecodeAccelerator() {
}
void GpuJpegDecodeAccelerator::AddClient(int32_t route_id,
- IPC::Message* reply_msg) {
+ base::Callback<void(bool)> response) {
DCHECK(CalledOnValidThread());
// When adding non-chromeos platforms, VideoCaptureGpuJpegDecoder::Initialize
@@ -350,8 +333,7 @@ void GpuJpegDecodeAccelerator::AddClient(int32_t route_id,
if (!accelerator) {
DLOG(ERROR) << "JPEG accelerator Initialize failed";
- GpuMsg_CreateJpegDecoder::WriteReplyParams(reply_msg, false);
- Send(reply_msg);
+ response.Run(false);
return;
}
client->set_accelerator(std::move(accelerator));
@@ -372,7 +354,7 @@ void GpuJpegDecodeAccelerator::AddClient(int32_t route_id,
// here instead of making the code unnecessarily complicated.
io_task_runner_->PostTask(
FROM_HERE, base::Bind(&MessageFilter::AddClientOnIOThread, filter_,
- route_id, client.release(), reply_msg));
+ route_id, client.release(), response));
}
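AddClient() now reports success through a base::Callback<void(bool)> rather than writing the sync-IPC reply itself. Below is a hypothetical caller-side helper showing how such a callback could wrap the reply message that the deleted lines above used to complete inline; the wiring is an assumption, and only GpuMsg_CreateJpegDecoder comes from the removed code:

    namespace {
    // Hypothetical helper: completes the create-decoder reply once
    // AddClient() knows whether initialization succeeded.
    void SendCreateJpegDecoderReply(IPC::Sender* sender,
                                    IPC::Message* reply_msg,
                                    bool succeeded) {
      GpuMsg_CreateJpegDecoder::WriteReplyParams(reply_msg, succeeded);
      sender->Send(reply_msg);
    }
    }  // namespace

    // Assumed call site:
    //   jpeg_decoder->AddClient(
    //       route_id,
    //       base::Bind(&SendCreateJpegDecoderReply, channel, reply_msg));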
void GpuJpegDecodeAccelerator::NotifyDecodeStatus(
diff --git a/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.h b/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.h
index 0fc316e026f..680dac578e0 100644
--- a/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/gpu_jpeg_decode_accelerator.h
@@ -20,9 +20,11 @@ namespace base {
class SingleThreadTaskRunner;
}
-namespace content {
+namespace gpu {
class GpuChannel;
+}
+namespace content {
class GpuJpegDecodeAccelerator
: public IPC::Sender,
public base::NonThreadSafe,
@@ -30,11 +32,11 @@ class GpuJpegDecodeAccelerator
public:
// |channel| must outlive this object.
GpuJpegDecodeAccelerator(
- GpuChannel* channel,
+ gpu::GpuChannel* channel,
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
~GpuJpegDecodeAccelerator() override;
- void AddClient(int32_t route_id, IPC::Message* reply_msg);
+ void AddClient(int32_t route_id, base::Callback<void(bool)> response);
void NotifyDecodeStatus(int32_t route_id,
int32_t bitstream_buffer_id,
@@ -61,10 +63,10 @@ class GpuJpegDecodeAccelerator
static scoped_ptr<media::JpegDecodeAccelerator> CreateVaapiJDA(
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
- // The lifetime of objects of this class is managed by a GpuChannel. The
+ // The lifetime of objects of this class is managed by a gpu::GpuChannel. The
// GpuChannels destroy all the GpuJpegDecodeAccelerator that they own when
// they are destroyed. So a raw pointer is safe.
- GpuChannel* channel_;
+ gpu::GpuChannel* channel_;
// The message filter to run JpegDecodeAccelerator::Decode on IO thread.
scoped_refptr<MessageFilter> filter_;
diff --git a/chromium/content/common/gpu/media/gpu_video_accelerator_util.cc b/chromium/content/common/gpu/media/gpu_video_accelerator_util.cc
deleted file mode 100644
index 7692fddc40a..00000000000
--- a/chromium/content/common/gpu/media/gpu_video_accelerator_util.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/media/gpu_video_accelerator_util.h"
-
-namespace content {
-
-// Make sure the enum values of media::VideoCodecProfile and
-// gpu::VideoCodecProfile match.
-#define STATIC_ASSERT_ENUM_MATCH(name) \
- static_assert( \
- media::name == static_cast<media::VideoCodecProfile>(gpu::name), \
- #name " value must match in media and gpu.")
-
-STATIC_ASSERT_ENUM_MATCH(VIDEO_CODEC_PROFILE_UNKNOWN);
-STATIC_ASSERT_ENUM_MATCH(VIDEO_CODEC_PROFILE_MIN);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_BASELINE);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_MAIN);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_EXTENDED);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_HIGH);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_HIGH10PROFILE);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_HIGH422PROFILE);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_HIGH444PREDICTIVEPROFILE);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_SCALABLEBASELINE);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_SCALABLEHIGH);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_STEREOHIGH);
-STATIC_ASSERT_ENUM_MATCH(H264PROFILE_MULTIVIEWHIGH);
-STATIC_ASSERT_ENUM_MATCH(VP8PROFILE_ANY);
-STATIC_ASSERT_ENUM_MATCH(VP9PROFILE_ANY);
-STATIC_ASSERT_ENUM_MATCH(VIDEO_CODEC_PROFILE_MAX);
-
-// static
-media::VideoDecodeAccelerator::Capabilities
-GpuVideoAcceleratorUtil::ConvertGpuToMediaDecodeCapabilities(
- const gpu::VideoDecodeAcceleratorCapabilities& gpu_capabilities) {
- media::VideoDecodeAccelerator::Capabilities capabilities;
- capabilities.supported_profiles =
- ConvertGpuToMediaDecodeProfiles(gpu_capabilities.supported_profiles);
- capabilities.flags = gpu_capabilities.flags;
- return capabilities;
-}
-
-// static
-media::VideoDecodeAccelerator::SupportedProfiles
-GpuVideoAcceleratorUtil::ConvertGpuToMediaDecodeProfiles(const
- gpu::VideoDecodeAcceleratorSupportedProfiles& gpu_profiles) {
- media::VideoDecodeAccelerator::SupportedProfiles profiles;
- for (const auto& gpu_profile : gpu_profiles) {
- media::VideoDecodeAccelerator::SupportedProfile profile;
- profile.profile =
- static_cast<media::VideoCodecProfile>(gpu_profile.profile);
- profile.max_resolution = gpu_profile.max_resolution;
- profile.min_resolution = gpu_profile.min_resolution;
- profiles.push_back(profile);
- }
- return profiles;
-}
-
-// static
-gpu::VideoDecodeAcceleratorCapabilities
-GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeCapabilities(
- const media::VideoDecodeAccelerator::Capabilities& media_capabilities) {
- gpu::VideoDecodeAcceleratorCapabilities capabilities;
- capabilities.supported_profiles =
- ConvertMediaToGpuDecodeProfiles(media_capabilities.supported_profiles);
- capabilities.flags = media_capabilities.flags;
- return capabilities;
-}
-
-// static
-gpu::VideoDecodeAcceleratorSupportedProfiles
-GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeProfiles(const
- media::VideoDecodeAccelerator::SupportedProfiles& media_profiles) {
- gpu::VideoDecodeAcceleratorSupportedProfiles profiles;
- for (const auto& media_profile : media_profiles) {
- gpu::VideoDecodeAcceleratorSupportedProfile profile;
- profile.profile =
- static_cast<gpu::VideoCodecProfile>(media_profile.profile);
- profile.max_resolution = media_profile.max_resolution;
- profile.min_resolution = media_profile.min_resolution;
- profiles.push_back(profile);
- }
- return profiles;
-}
-
-// static
-media::VideoEncodeAccelerator::SupportedProfiles
-GpuVideoAcceleratorUtil::ConvertGpuToMediaEncodeProfiles(const
- gpu::VideoEncodeAcceleratorSupportedProfiles& gpu_profiles) {
- media::VideoEncodeAccelerator::SupportedProfiles profiles;
- for (const auto& gpu_profile : gpu_profiles) {
- media::VideoEncodeAccelerator::SupportedProfile profile;
- profile.profile =
- static_cast<media::VideoCodecProfile>(gpu_profile.profile);
- profile.max_resolution = gpu_profile.max_resolution;
- profile.max_framerate_numerator = gpu_profile.max_framerate_numerator;
- profile.max_framerate_denominator = gpu_profile.max_framerate_denominator;
- profiles.push_back(profile);
- }
- return profiles;
-}
-
-// static
-gpu::VideoEncodeAcceleratorSupportedProfiles
-GpuVideoAcceleratorUtil::ConvertMediaToGpuEncodeProfiles(const
- media::VideoEncodeAccelerator::SupportedProfiles& media_profiles) {
- gpu::VideoEncodeAcceleratorSupportedProfiles profiles;
- for (const auto& media_profile : media_profiles) {
- gpu::VideoEncodeAcceleratorSupportedProfile profile;
- profile.profile =
- static_cast<gpu::VideoCodecProfile>(media_profile.profile);
- profile.max_resolution = media_profile.max_resolution;
- profile.max_framerate_numerator = media_profile.max_framerate_numerator;
- profile.max_framerate_denominator = media_profile.max_framerate_denominator;
- profiles.push_back(profile);
- }
- return profiles;
-}
-
-// static
-void GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
- const media::VideoDecodeAccelerator::SupportedProfiles& new_profiles,
- media::VideoDecodeAccelerator::SupportedProfiles* media_profiles) {
- for (const auto& profile : new_profiles) {
- bool duplicate = false;
- for (const auto& media_profile : *media_profiles) {
- if (media_profile.profile == profile.profile) {
- duplicate = true;
- break;
- }
- }
- if (!duplicate)
- media_profiles->push_back(profile);
- }
-}
-
-// static
-void GpuVideoAcceleratorUtil::InsertUniqueEncodeProfiles(
- const media::VideoEncodeAccelerator::SupportedProfiles& new_profiles,
- media::VideoEncodeAccelerator::SupportedProfiles* media_profiles) {
- for (const auto& profile : new_profiles) {
- bool duplicate = false;
- for (const auto& media_profile : *media_profiles) {
- if (media_profile.profile == profile.profile) {
- duplicate = true;
- break;
- }
- }
- if (!duplicate)
- media_profiles->push_back(profile);
- }
-}
-
-} // namespace content
diff --git a/chromium/content/common/gpu/media/gpu_video_accelerator_util.h b/chromium/content/common/gpu/media/gpu_video_accelerator_util.h
deleted file mode 100644
index e39034e191e..00000000000
--- a/chromium/content/common/gpu/media/gpu_video_accelerator_util.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ACCELERATOR_UTIL_H_
-#define CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ACCELERATOR_UTIL_H_
-
-#include <vector>
-
-#include "gpu/config/gpu_info.h"
-#include "media/video/video_decode_accelerator.h"
-#include "media/video/video_encode_accelerator.h"
-
-namespace content {
-
-class GpuVideoAcceleratorUtil {
- public:
- // Convert decoder gpu capabilities to media capabilities.
- static media::VideoDecodeAccelerator::Capabilities
- ConvertGpuToMediaDecodeCapabilities(
- const gpu::VideoDecodeAcceleratorCapabilities& gpu_capabilities);
-
- // Convert decoder gpu profiles to media profiles.
- static media::VideoDecodeAccelerator::SupportedProfiles
- ConvertGpuToMediaDecodeProfiles(const
- gpu::VideoDecodeAcceleratorSupportedProfiles& gpu_profiles);
-
- // Convert decoder media capabilities to gpu capabilities.
- static gpu::VideoDecodeAcceleratorCapabilities
- ConvertMediaToGpuDecodeCapabilities(
- const media::VideoDecodeAccelerator::Capabilities& media_capabilities);
-
- // Convert decoder media profiles to gpu profiles.
- static gpu::VideoDecodeAcceleratorSupportedProfiles
- ConvertMediaToGpuDecodeProfiles(const
- media::VideoDecodeAccelerator::SupportedProfiles& media_profiles);
-
- // Convert encoder gpu profiles to media profiles.
- static media::VideoEncodeAccelerator::SupportedProfiles
- ConvertGpuToMediaEncodeProfiles(const
- gpu::VideoEncodeAcceleratorSupportedProfiles& gpu_profiles);
-
- // Convert encoder media profiles to gpu profiles.
- static gpu::VideoEncodeAcceleratorSupportedProfiles
- ConvertMediaToGpuEncodeProfiles(const
- media::VideoEncodeAccelerator::SupportedProfiles& media_profiles);
-
- // Insert |new_profiles| into |media_profiles|, ensuring no duplicates are
- // inserted.
- static void InsertUniqueDecodeProfiles(
- const media::VideoDecodeAccelerator::SupportedProfiles& new_profiles,
- media::VideoDecodeAccelerator::SupportedProfiles* media_profiles);
-
- // Insert |new_profiles| into |media_profiles|, ensuring no duplicates are
- // inserted.
- static void InsertUniqueEncodeProfiles(
- const media::VideoEncodeAccelerator::SupportedProfiles& new_profiles,
- media::VideoEncodeAccelerator::SupportedProfiles* media_profiles);
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ACCELERATOR_UTIL_H_
diff --git a/chromium/content/common/gpu/media/gpu_video_decode_accelerator.cc b/chromium/content/common/gpu/media/gpu_video_decode_accelerator.cc
index 5424a5ff32d..3d30266f05a 100644
--- a/chromium/content/common/gpu/media/gpu_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/gpu_video_decode_accelerator.cc
@@ -7,7 +7,6 @@
#include <vector>
#include "base/bind.h"
-#include "base/command_line.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
@@ -15,47 +14,36 @@
#include "base/stl_util.h"
#include "base/thread_task_runner_handle.h"
#include "build/build_config.h"
-
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/gpu/media/gpu_video_accelerator_util.h"
-#include "content/public/common/content_switches.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_message_utils.h"
#include "ipc/message_filter.h"
#include "media/base/limits.h"
+#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
+#include "media/gpu/ipc/common/media_messages.h"
+#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
-#include "ui/gl/gl_surface_egl.h"
-
-#if defined(OS_WIN)
-#include "base/win/windows_version.h"
-#include "content/common/gpu/media/dxva_video_decode_accelerator_win.h"
-#elif defined(OS_MACOSX)
-#include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
-#elif defined(OS_CHROMEOS)
-#if defined(USE_V4L2_CODEC)
-#include "content/common/gpu/media/v4l2_device.h"
-#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
-#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
-#endif
-#if defined(ARCH_CPU_X86_FAMILY)
-#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
-#include "ui/gl/gl_implementation.h"
-#endif
-#elif defined(USE_OZONE)
-#include "media/ozone/media_ozone_platform.h"
-#elif defined(OS_ANDROID)
-#include "content/common/gpu/media/android_video_decode_accelerator.h"
-#endif
-
-#include "ui/gfx/geometry/size.h"
namespace content {
+namespace {
+static gfx::GLContext* GetGLContext(
+ const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
+ if (!stub) {
+ DLOG(ERROR) << "Stub is gone; no GLContext.";
+ return nullptr;
+ }
+
+ return stub->decoder()->GetGLContext();
+}
+
static bool MakeDecoderContextCurrent(
- const base::WeakPtr<GpuCommandBufferStub> stub) {
+ const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
if (!stub) {
DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
return false;
@@ -69,6 +57,43 @@ static bool MakeDecoderContextCurrent(
return true;
}
+#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)) || defined(OS_MACOSX)
+static bool BindImage(const base::WeakPtr<gpu::GpuCommandBufferStub>& stub,
+ uint32_t client_texture_id,
+ uint32_t texture_target,
+ const scoped_refptr<gl::GLImage>& image,
+ bool can_bind_to_sampler) {
+ if (!stub) {
+ DLOG(ERROR) << "Stub is gone; won't BindImage().";
+ return false;
+ }
+
+ gpu::gles2::GLES2Decoder* command_decoder = stub->decoder();
+ gpu::gles2::TextureManager* texture_manager =
+ command_decoder->GetContextGroup()->texture_manager();
+ gpu::gles2::TextureRef* ref = texture_manager->GetTexture(client_texture_id);
+ if (ref) {
+ texture_manager->SetLevelImage(ref, texture_target, 0, image.get(),
+ can_bind_to_sampler
+ ? gpu::gles2::Texture::BOUND
+ : gpu::gles2::Texture::UNBOUND);
+ }
+
+ return true;
+}
+#endif
+
+static base::WeakPtr<gpu::gles2::GLES2Decoder> GetGLES2Decoder(
+ const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
+ if (!stub) {
+ DLOG(ERROR) << "Stub is gone; no GLES2Decoder.";
+ return base::WeakPtr<gpu::gles2::GLES2Decoder>();
+ }
+
+ return stub->decoder()->AsWeakPtr();
+}
+} // anonymous namespace
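These helpers are bound into per-stub callbacks in the constructor below. A sketch of how a VDA that received them might invoke the bind-image callback; the method and member names here are assumptions:

    // Hypothetical call site inside a VDA: attach a GLImage to the client's
    // texture once a picture is ready. |bind_image_cb_| is the callback bound
    // to BindImage() above.
    void SomeVideoDecodeAccelerator::AttachImage(
        uint32_t client_texture_id,
        const scoped_refptr<gl::GLImage>& image) {
      if (!bind_image_cb_.Run(client_texture_id, GL_TEXTURE_2D, image,
                              false /* can_bind_to_sampler */)) {
        DLOG(ERROR) << "Failed to bind image to the client texture.";
      }
    }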
+
// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on.
#if DCHECK_IS_ON()
@@ -103,7 +128,7 @@ class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
GpuVideoDecodeAccelerator::OnDecode)
- IPC_MESSAGE_UNHANDLED(return false;)
+ IPC_MESSAGE_UNHANDLED(return false)
IPC_END_MESSAGE_MAP()
return true;
}
@@ -129,19 +154,25 @@ class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
int32_t host_route_id,
- GpuCommandBufferStub* stub,
+ gpu::GpuCommandBufferStub* stub,
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
: host_route_id_(host_route_id),
stub_(stub),
texture_target_(0),
+ textures_per_buffer_(0),
filter_removed_(true, false),
child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
io_task_runner_(io_task_runner),
weak_factory_for_io_(this) {
DCHECK(stub_);
stub_->AddDestructionObserver(this);
- make_context_current_ =
+ get_gl_context_cb_ = base::Bind(&GetGLContext, stub_->AsWeakPtr());
+ make_context_current_cb_ =
base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
+#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)) || defined(OS_MACOSX)
+ bind_image_cb_ = base::Bind(&BindImage, stub_->AsWeakPtr());
+#endif
+ get_gles2_decoder_cb_ = base::Bind(&GetGLES2Decoder, stub_->AsWeakPtr());
}
GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
@@ -152,41 +183,10 @@ GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
// static
gpu::VideoDecodeAcceleratorCapabilities
-GpuVideoDecodeAccelerator::GetCapabilities() {
- media::VideoDecodeAccelerator::Capabilities capabilities;
- const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kDisableAcceleratedVideoDecode))
- return gpu::VideoDecodeAcceleratorCapabilities();
-
- // Query supported profiles for each VDA. The order of querying VDAs should
- // be the same as the order of initializing VDAs. Then the returned profile
- // can be initialized by corresponding VDA successfully.
-#if defined(OS_WIN)
- capabilities.supported_profiles =
- DXVAVideoDecodeAccelerator::GetSupportedProfiles();
-#elif defined(OS_CHROMEOS)
- media::VideoDecodeAccelerator::SupportedProfiles vda_profiles;
-#if defined(USE_V4L2_CODEC)
- vda_profiles = V4L2VideoDecodeAccelerator::GetSupportedProfiles();
- GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
- vda_profiles, &capabilities.supported_profiles);
- vda_profiles = V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles();
- GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
- vda_profiles, &capabilities.supported_profiles);
-#endif
-#if defined(ARCH_CPU_X86_FAMILY)
- vda_profiles = VaapiVideoDecodeAccelerator::GetSupportedProfiles();
- GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
- vda_profiles, &capabilities.supported_profiles);
-#endif
-#elif defined(OS_MACOSX)
- capabilities.supported_profiles =
- VTVideoDecodeAccelerator::GetSupportedProfiles();
-#elif defined(OS_ANDROID)
- capabilities = AndroidVideoDecodeAccelerator::GetCapabilities();
-#endif
- return GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeCapabilities(
- capabilities);
+GpuVideoDecodeAccelerator::GetCapabilities(
+ const gpu::GpuPreferences& gpu_preferences) {
+ return GpuVideoDecodeAcceleratorFactoryImpl::GetDecoderCapabilities(
+ gpu_preferences);
}
bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
@@ -209,14 +209,16 @@ bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
return handled;
}
-void GpuVideoDecodeAccelerator::NotifyCdmAttached(bool success) {
- if (!Send(new AcceleratedVideoDecoderHostMsg_CdmAttached(host_route_id_,
- success)))
- DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_CdmAttached) failed";
+void GpuVideoDecodeAccelerator::NotifyInitializationComplete(bool success) {
+ if (!Send(new AcceleratedVideoDecoderHostMsg_InitializationComplete(
+ host_route_id_, success)))
+ DLOG(ERROR)
+ << "Send(AcceleratedVideoDecoderHostMsg_InitializationComplete) failed";
}
void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
uint32_t requested_num_of_buffers,
+ uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) {
if (dimensions.width() > media::limits::kMaxDimension ||
@@ -226,14 +228,13 @@ void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
return;
}
if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
- host_route_id_,
- requested_num_of_buffers,
- dimensions,
- texture_target))) {
+ host_route_id_, requested_num_of_buffers, textures_per_buffer,
+ dimensions, texture_target))) {
DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
<< "failed";
}
texture_dimensions_ = dimensions;
+ textures_per_buffer_ = textures_per_buffer;
texture_target_ = texture_target;
}
@@ -265,7 +266,7 @@ void GpuVideoDecodeAccelerator::PictureReady(
if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
host_route_id_, picture.picture_buffer_id(),
picture.bitstream_buffer_id(), picture.visible_rect(),
- picture.allow_overlay()))) {
+ picture.allow_overlay(), picture.size_changed()))) {
DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
}
}
@@ -327,161 +328,51 @@ bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
return stub_->channel()->Send(message);
}
-void GpuVideoDecodeAccelerator::Initialize(
- const media::VideoDecodeAccelerator::Config& config,
- IPC::Message* init_done_msg) {
+bool GpuVideoDecodeAccelerator::Initialize(
+ const media::VideoDecodeAccelerator::Config& config) {
DCHECK(!video_decode_accelerator_);
- if (!stub_->channel()->AddRoute(host_route_id_, this)) {
+ if (!stub_->channel()->AddRoute(host_route_id_, stub_->stream_id(), this)) {
DLOG(ERROR) << "Initialize(): failed to add route";
- SendCreateDecoderReply(init_done_msg, false);
+ return false;
}
#if !defined(OS_WIN)
// Ensure we will be able to get a GL context at all before initializing
// non-Windows VDAs.
- if (!make_context_current_.Run()) {
- SendCreateDecoderReply(init_done_msg, false);
- return;
- }
+ if (!make_context_current_cb_.Run())
+ return false;
#endif
- // Array of Create..VDA() function pointers, maybe applicable to the current
- // platform. This list is ordered by priority of use and it should be the
- // same as the order of querying supported profiles of VDAs.
- const GpuVideoDecodeAccelerator::CreateVDAFp create_vda_fps[] = {
- &GpuVideoDecodeAccelerator::CreateDXVAVDA,
- &GpuVideoDecodeAccelerator::CreateV4L2VDA,
- &GpuVideoDecodeAccelerator::CreateV4L2SliceVDA,
- &GpuVideoDecodeAccelerator::CreateVaapiVDA,
- &GpuVideoDecodeAccelerator::CreateVTVDA,
- &GpuVideoDecodeAccelerator::CreateOzoneVDA,
- &GpuVideoDecodeAccelerator::CreateAndroidVDA};
-
- for (const auto& create_vda_function : create_vda_fps) {
- video_decode_accelerator_ = (this->*create_vda_function)();
- if (!video_decode_accelerator_ ||
- !video_decode_accelerator_->Initialize(config, this))
- continue;
-
- if (video_decode_accelerator_->CanDecodeOnIOThread()) {
- filter_ = new MessageFilter(this, host_route_id_);
- stub_->channel()->AddFilter(filter_.get());
- }
- SendCreateDecoderReply(init_done_msg, true);
- return;
- }
- video_decode_accelerator_.reset();
- LOG(ERROR) << "HW video decode not available for profile " << config.profile
- << (config.is_encrypted ? " with encryption" : "");
- SendCreateDecoderReply(init_done_msg, false);
-}
+ scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl> vda_factory =
+ GpuVideoDecodeAcceleratorFactoryImpl::CreateWithGLES2Decoder(
+ get_gl_context_cb_, make_context_current_cb_, bind_image_cb_,
+ get_gles2_decoder_cb_);
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateDXVAVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_WIN)
- if (base::win::GetVersion() >= base::win::VERSION_WIN7) {
- DVLOG(0) << "Initializing DXVA HW decoder for windows.";
- decoder.reset(new DXVAVideoDecodeAccelerator(make_context_current_,
- stub_->decoder()->GetGLContext()));
- } else {
- NOTIMPLEMENTED() << "HW video decode acceleration not available.";
+ if (!vda_factory) {
+ LOG(ERROR) << "Failed creating the VDA factory";
+ return false;
}
-#endif
- return decoder;
-}
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateV4L2VDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
- scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
- if (device.get()) {
- decoder.reset(new V4L2VideoDecodeAccelerator(
- gfx::GLSurfaceEGL::GetHardwareDisplay(),
- stub_->decoder()->GetGLContext()->GetHandle(),
- weak_factory_for_io_.GetWeakPtr(),
- make_context_current_,
- device,
- io_task_runner_));
+ const gpu::GpuPreferences& gpu_preferences =
+ stub_->channel()->gpu_channel_manager()->gpu_preferences();
+ video_decode_accelerator_ =
+ vda_factory->CreateVDA(this, config, gpu_preferences);
+ if (!video_decode_accelerator_) {
+ LOG(ERROR) << "HW video decode not available for profile " << config.profile
+ << (config.is_encrypted ? " with encryption" : "");
+ return false;
}
-#endif
- return decoder;
-}
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateV4L2SliceVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
- scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
- if (device.get()) {
- decoder.reset(new V4L2SliceVideoDecodeAccelerator(
- device,
- gfx::GLSurfaceEGL::GetHardwareDisplay(),
- stub_->decoder()->GetGLContext()->GetHandle(),
- weak_factory_for_io_.GetWeakPtr(),
- make_context_current_,
- io_task_runner_));
+ // Attempt to set up performing decoding tasks on IO thread, if supported by
+ // the VDA.
+ if (video_decode_accelerator_->TryToSetupDecodeOnSeparateThread(
+ weak_factory_for_io_.GetWeakPtr(), io_task_runner_)) {
+ filter_ = new MessageFilter(this, host_route_id_);
+ stub_->channel()->AddFilter(filter_.get());
}
-#endif
- return decoder;
-}
-void GpuVideoDecodeAccelerator::BindImage(uint32_t client_texture_id,
- uint32_t texture_target,
- scoped_refptr<gl::GLImage> image) {
- gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
- gpu::gles2::TextureManager* texture_manager =
- command_decoder->GetContextGroup()->texture_manager();
- gpu::gles2::TextureRef* ref = texture_manager->GetTexture(client_texture_id);
- if (ref) {
- texture_manager->SetLevelImage(ref, texture_target, 0, image.get(),
- gpu::gles2::Texture::BOUND);
- }
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateVaapiVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
- decoder.reset(new VaapiVideoDecodeAccelerator(
- make_context_current_, base::Bind(&GpuVideoDecodeAccelerator::BindImage,
- base::Unretained(this))));
-#endif
- return decoder;
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateVTVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_MACOSX)
- decoder.reset(new VTVideoDecodeAccelerator(
- make_context_current_, base::Bind(&GpuVideoDecodeAccelerator::BindImage,
- base::Unretained(this))));
-#endif
- return decoder;
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateOzoneVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if !defined(OS_CHROMEOS) && defined(USE_OZONE)
- media::MediaOzonePlatform* platform =
- media::MediaOzonePlatform::GetInstance();
- decoder.reset(platform->CreateVideoDecodeAccelerator(make_context_current_));
-#endif
- return decoder;
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GpuVideoDecodeAccelerator::CreateAndroidVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_ANDROID)
- decoder.reset(new AndroidVideoDecodeAccelerator(stub_->decoder()->AsWeakPtr(),
- make_context_current_));
-#endif
- return decoder;
+ return true;
}
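
The rewritten Initialize() now reduces to three steps: build a factory wired with the GL callbacks, ask it for a VDA matching |config|, then opt in to IO-thread decoding only if the chosen VDA supports it. A hedged sketch of that control flow, with hypothetical Vda/Factory types in place of the real classes:

    #include <memory>

    struct Vda {
      bool TryToSetupDecodeOnSeparateThread() { return false; }
    };

    struct Factory {
      std::unique_ptr<Vda> CreateVda() { return std::unique_ptr<Vda>(new Vda); }
    };

    bool InitializeSketch(Factory* factory, bool* io_filter_installed) {
      std::unique_ptr<Vda> vda = factory->CreateVda();
      if (!vda)
        return false;  // Maps to the "HW video decode not available" path.
      // The IO-thread filter is optional: install it only when the VDA can
      // take Decode() calls off the main thread, as the code above does.
      *io_filter_installed = vda->TryToSetupDecodeOnSeparateThread();
      return true;
    }
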
void GpuVideoDecodeAccelerator::OnSetCdm(int cdm_id) {
@@ -489,40 +380,17 @@ void GpuVideoDecodeAccelerator::OnSetCdm(int cdm_id) {
video_decode_accelerator_->SetCdm(cdm_id);
}
-// Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
-// true, otherwise on the main thread.
+// Runs on IO thread if VDA::TryToSetupDecodeOnSeparateThread() succeeded,
+// otherwise on the main thread.
void GpuVideoDecodeAccelerator::OnDecode(
- const AcceleratedVideoDecoderMsg_Decode_Params& params) {
+ const media::BitstreamBuffer& bitstream_buffer) {
DCHECK(video_decode_accelerator_);
- if (params.bitstream_buffer_id < 0) {
- DLOG(ERROR) << "BitstreamBuffer id " << params.bitstream_buffer_id
- << " out of range";
- if (child_task_runner_->BelongsToCurrentThread()) {
- NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
- } else {
- child_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
- base::Unretained(this),
- media::VideoDecodeAccelerator::INVALID_ARGUMENT));
- }
- return;
- }
-
- media::BitstreamBuffer bitstream_buffer(params.bitstream_buffer_id,
- params.buffer_handle, params.size,
- params.presentation_timestamp);
- if (!params.key_id.empty()) {
- bitstream_buffer.SetDecryptConfig(
- media::DecryptConfig(params.key_id, params.iv, params.subsamples));
- }
-
video_decode_accelerator_->Decode(bitstream_buffer);
}
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
const std::vector<int32_t>& buffer_ids,
- const std::vector<uint32_t>& texture_ids) {
+ const std::vector<media::PictureBuffer::TextureIds>& texture_ids) {
if (buffer_ids.size() != texture_ids.size()) {
NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
return;
@@ -540,51 +408,65 @@ void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
return;
}
- gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
- texture_ids[i]);
- if (!texture_ref) {
- DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
- NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
- return;
- }
- gpu::gles2::Texture* info = texture_ref->texture();
- if (info->target() != texture_target_) {
- DLOG(ERROR) << "Texture target mismatch for texture id "
- << texture_ids[i];
+ media::PictureBuffer::TextureIds buffer_texture_ids = texture_ids[i];
+ media::PictureBuffer::TextureIds service_ids;
+ if (buffer_texture_ids.size() != textures_per_buffer_) {
+ DLOG(ERROR) << "Requested " << textures_per_buffer_
+ << " textures per picture buffer, got "
+ << buffer_texture_ids.size();
NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
return;
}
- if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
- texture_target_ == GL_TEXTURE_RECTANGLE_ARB) {
- // These textures have their dimensions defined by the underlying storage.
- // Use |texture_dimensions_| for this size.
- texture_manager->SetLevelInfo(
- texture_ref, texture_target_, 0, GL_RGBA, texture_dimensions_.width(),
- texture_dimensions_.height(), 1, 0, GL_RGBA, 0, gfx::Rect());
- } else {
- // For other targets, texture dimensions should already be defined.
- GLsizei width = 0, height = 0;
- info->GetLevelSize(texture_target_, 0, &width, &height, nullptr);
- if (width != texture_dimensions_.width() ||
- height != texture_dimensions_.height()) {
- DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
+ for (size_t j = 0; j < textures_per_buffer_; j++) {
+ gpu::gles2::TextureRef* texture_ref =
+ texture_manager->GetTexture(buffer_texture_ids[j]);
+ if (!texture_ref) {
+ DLOG(ERROR) << "Failed to find texture id " << buffer_texture_ids[j];
NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
return;
}
-
- // TODO(dshwang): after moving to D3D11, remove this. crbug.com/438691
- GLenum format =
- video_decode_accelerator_.get()->GetSurfaceInternalFormat();
- if (format != GL_RGBA) {
- texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, format,
- width, height, 1, 0, format, 0,
- gfx::Rect());
+ gpu::gles2::Texture* info = texture_ref->texture();
+ if (info->target() != texture_target_) {
+ DLOG(ERROR) << "Texture target mismatch for texture id "
+ << buffer_texture_ids[j];
+ NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
+ return;
+ }
+ if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
+ texture_target_ == GL_TEXTURE_RECTANGLE_ARB) {
+ // These textures have their dimensions defined by the underlying
+ // storage. Use |texture_dimensions_| for this size.
+ texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, GL_RGBA,
+ texture_dimensions_.width(),
+ texture_dimensions_.height(), 1, 0,
+ GL_RGBA, 0, gfx::Rect());
+ } else {
+ // For other targets, texture dimensions should already be defined.
+ GLsizei width = 0, height = 0;
+ info->GetLevelSize(texture_target_, 0, &width, &height, nullptr);
+ if (width != texture_dimensions_.width() ||
+ height != texture_dimensions_.height()) {
+ DLOG(ERROR) << "Size mismatch for texture id "
+ << buffer_texture_ids[j];
+ NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
+ return;
+ }
+
+ // TODO(dshwang): after moving to D3D11, remove this. crbug.com/438691
+ GLenum format =
+ video_decode_accelerator_.get()->GetSurfaceInternalFormat();
+ if (format != GL_RGBA) {
+ texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, format,
+ width, height, 1, 0, format, 0,
+ gfx::Rect());
+ }
}
+ service_ids.push_back(texture_ref->service_id());
+ textures.push_back(texture_ref);
}
buffers.push_back(media::PictureBuffer(buffer_ids[i], texture_dimensions_,
- texture_ref->service_id(),
- texture_ids[i]));
- textures.push_back(texture_ref);
+ service_ids, buffer_texture_ids));
}
video_decode_accelerator_->AssignPictureBuffers(buffers);
DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
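
With multi-texture picture buffers, the loop above rejects any buffer whose texture-id list does not contain exactly |textures_per_buffer_| entries (e.g. one texture per plane). A simplified sketch of just that validation step, using plain STL types instead of the gpu::gles2 classes:

    #include <cstdint>
    #include <vector>

    using TextureIds = std::vector<uint32_t>;

    bool ValidateTextureIds(const std::vector<TextureIds>& per_buffer_ids,
                            size_t textures_per_buffer) {
      for (const TextureIds& ids : per_buffer_ids) {
        if (ids.size() != textures_per_buffer)
          return false;  // Corresponds to the INVALID_ARGUMENT path above.
      }
      return true;
    }
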
@@ -637,10 +519,4 @@ void GpuVideoDecodeAccelerator::SetTextureCleared(
uncleared_textures_.erase(it);
}
-void GpuVideoDecodeAccelerator::SendCreateDecoderReply(IPC::Message* message,
- bool succeeded) {
- GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(message, succeeded);
- Send(message);
-}
-
} // namespace content
diff --git a/chromium/content/common/gpu/media/gpu_video_decode_accelerator.h b/chromium/content/common/gpu/media/gpu_video_decode_accelerator.h
index eb6459b37c3..47859d957f9 100644
--- a/chromium/content/common/gpu/media/gpu_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/gpu_video_decode_accelerator.h
@@ -15,15 +15,19 @@
#include "base/memory/ref_counted.h"
#include "base/memory/shared_memory.h"
#include "base/synchronization/waitable_event.h"
-#include "content/common/gpu/gpu_command_buffer_stub.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/config/gpu_info.h"
+#include "gpu/ipc/service/gpu_command_buffer_stub.h"
+#include "gpu/ipc/service/gpu_command_buffer_stub.h"
#include "ipc/ipc_listener.h"
#include "ipc/ipc_sender.h"
#include "media/video/video_decode_accelerator.h"
#include "ui/gfx/geometry/size.h"
-struct AcceleratedVideoDecoderMsg_Decode_Params;
+namespace gpu {
+struct GpuPreferences;
+} // namespace gpu
namespace content {
@@ -31,27 +35,29 @@ class GpuVideoDecodeAccelerator
: public IPC::Listener,
public IPC::Sender,
public media::VideoDecodeAccelerator::Client,
- public GpuCommandBufferStub::DestructionObserver {
+ public gpu::GpuCommandBufferStub::DestructionObserver {
public:
// Each of the arguments to the constructor must outlive this object.
// |stub->decoder()| will be made current around any operation that touches
// the underlying VDA so that it can make GL calls safely.
GpuVideoDecodeAccelerator(
int32_t host_route_id,
- GpuCommandBufferStub* stub,
+ gpu::GpuCommandBufferStub* stub,
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
// Static query for the capabilities, which includes the supported profiles.
// This query calls the appropriate platform-specific version. The returned
// capabilities will not contain duplicate supported profile entries.
- static gpu::VideoDecodeAcceleratorCapabilities GetCapabilities();
+ static gpu::VideoDecodeAcceleratorCapabilities GetCapabilities(
+ const gpu::GpuPreferences& gpu_preferences);
// IPC::Listener implementation.
bool OnMessageReceived(const IPC::Message& message) override;
// media::VideoDecodeAccelerator::Client implementation.
- void NotifyCdmAttached(bool success) override;
+ void NotifyInitializationComplete(bool success) override;
void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
+ uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) override;
void DismissPictureBuffer(int32_t picture_buffer_id) override;
@@ -69,33 +75,22 @@ class GpuVideoDecodeAccelerator
- // Initialize VDAs from the set of VDAs supported for current platform until
- // one of them succeeds for given |config|. Send the |init_done_msg| when
- // done. filter_ is passed to GpuCommandBufferStub channel only if the chosen
- // VDA can decode on IO thread.
- void Initialize(const media::VideoDecodeAccelerator::Config& config,
- IPC::Message* init_done_msg);
+ // Initialize VDAs from the set supported for the current platform until one
+ // of them succeeds for the given |config|, returning true on success.
+ // filter_ is passed to the gpu::GpuCommandBufferStub channel only if the
+ // chosen VDA can decode on the IO thread.
+ bool Initialize(const media::VideoDecodeAccelerator::Config& config);
private:
- typedef scoped_ptr<media::VideoDecodeAccelerator>(
- GpuVideoDecodeAccelerator::*CreateVDAFp)();
-
class MessageFilter;
- scoped_ptr<media::VideoDecodeAccelerator> CreateDXVAVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2VDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2SliceVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateVaapiVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateVTVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateOzoneVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateAndroidVDA();
-
// We only allow self-delete, from OnWillDestroyStub(), after cleanup there.
~GpuVideoDecodeAccelerator() override;
// Handlers for IPC messages.
void OnSetCdm(int cdm_id);
- void OnDecode(const AcceleratedVideoDecoderMsg_Decode_Params& params);
- void OnAssignPictureBuffers(const std::vector<int32_t>& buffer_ids,
- const std::vector<uint32_t>& texture_ids);
+ void OnDecode(const media::BitstreamBuffer& bitstream_buffer);
+ void OnAssignPictureBuffers(
+ const std::vector<int32_t>& buffer_ids,
+ const std::vector<media::PictureBuffer::TextureIds>& texture_ids);
void OnReusePictureBuffer(int32_t picture_buffer_id);
void OnFlush();
void OnReset();
@@ -107,28 +102,28 @@ class GpuVideoDecodeAccelerator
// Sets the texture to cleared.
void SetTextureCleared(const media::Picture& picture);
- // Helper for replying to the creation request.
- void SendCreateDecoderReply(IPC::Message* message, bool succeeded);
-
- // Helper to bind |image| to the texture specified by |client_texture_id|.
- void BindImage(uint32_t client_texture_id,
- uint32_t texture_target,
- scoped_refptr<gl::GLImage> image);
-
// Route ID to communicate with the host.
const int32_t host_route_id_;
- // Unowned pointer to the underlying GpuCommandBufferStub. |this| is
+ // Unowned pointer to the underlying gpu::GpuCommandBufferStub. |this| is
// registered as a DestructionObserver of |stub_| and will self-delete when
// |stub_| is destroyed.
- GpuCommandBufferStub* const stub_;
+ gpu::GpuCommandBufferStub* const stub_;
// The underlying VideoDecodeAccelerator.
scoped_ptr<media::VideoDecodeAccelerator> video_decode_accelerator_;
+ // Callback to return current GLContext, if available.
+ GetGLContextCallback get_gl_context_cb_;
+
// Callback for making the relevant context current for GL calls.
- // Returns false if failed.
- base::Callback<bool(void)> make_context_current_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
+
+ // Callback to bind a GLImage to a given texture id and target.
+ BindGLImageCallback bind_image_cb_;
+
+ // Callback to return a WeakPtr to GLES2Decoder.
+ GetGLES2DecoderCallback get_gles2_decoder_cb_;
// The texture dimensions as requested by ProvidePictureBuffers().
gfx::Size texture_dimensions_;
@@ -136,6 +131,10 @@ class GpuVideoDecodeAccelerator
// The texture target as requested by ProvidePictureBuffers().
uint32_t texture_target_;
+ // The number of textures per picture buffer, as requested by
+ // ProvidePictureBuffers().
+ uint32_t textures_per_buffer_;
+
// The message filter to run VDA::Decode on IO thread if VDA supports it.
scoped_refptr<MessageFilter> filter_;
diff --git a/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc
new file mode 100644
index 00000000000..048314863d9
--- /dev/null
+++ b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.cc
@@ -0,0 +1,242 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
+
+#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_version.h"
+#include "content/common/gpu/media/dxva_video_decode_accelerator_win.h"
+#elif defined(OS_MACOSX)
+#include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
+#elif defined(OS_CHROMEOS)
+#if defined(USE_V4L2_CODEC)
+#include "content/common/gpu/media/v4l2_device.h"
+#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
+#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
+#include "ui/gl/gl_surface_egl.h"
+#endif
+#if defined(ARCH_CPU_X86_FAMILY)
+#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
+#include "ui/gl/gl_implementation.h"
+#endif
+#elif defined(OS_ANDROID)
+#include "content/common/gpu/media/android_video_decode_accelerator.h"
+#endif
+
+namespace content {
+
+namespace {
+static base::WeakPtr<gpu::gles2::GLES2Decoder> GetEmptyGLES2Decoder() {
+ NOTREACHED() << "VDA requests a GLES2Decoder, but client did not provide it";
+ return base::WeakPtr<gpu::gles2::GLES2Decoder>();
+}
+} // namespace
+
+// static
+scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
+GpuVideoDecodeAcceleratorFactoryImpl::Create(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb) {
+ return make_scoped_ptr(new GpuVideoDecodeAcceleratorFactoryImpl(
+ get_gl_context_cb, make_context_current_cb, bind_image_cb,
+ base::Bind(&GetEmptyGLES2Decoder)));
+}
+
+// static
+scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateWithGLES2Decoder(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb) {
+ return make_scoped_ptr(new GpuVideoDecodeAcceleratorFactoryImpl(
+ get_gl_context_cb, make_context_current_cb, bind_image_cb,
+ get_gles2_decoder_cb));
+}
+
+// static
+gpu::VideoDecodeAcceleratorCapabilities
+GpuVideoDecodeAcceleratorFactoryImpl::GetDecoderCapabilities(
+ const gpu::GpuPreferences& gpu_preferences) {
+ media::VideoDecodeAccelerator::Capabilities capabilities;
+ if (gpu_preferences.disable_accelerated_video_decode)
+ return gpu::VideoDecodeAcceleratorCapabilities();
+
+ // Query VDAs for their capabilities and construct a set of supported
+ // profiles for the current platform. This must be done in the same order as
+ // in CreateVDA(), as we currently preserve additional capabilities (such as
+ // resolutions supported) only for the first VDA supporting the given codec
+ // profile (instead of calculating a superset).
+ // TODO(posciak,henryhsu): improve this so that we choose a superset of
+ // resolutions and other supported profile parameters.
+#if defined(OS_WIN)
+ capabilities.supported_profiles =
+ DXVAVideoDecodeAccelerator::GetSupportedProfiles();
+#elif defined(OS_CHROMEOS)
+ media::VideoDecodeAccelerator::SupportedProfiles vda_profiles;
+#if defined(USE_V4L2_CODEC)
+ vda_profiles = V4L2VideoDecodeAccelerator::GetSupportedProfiles();
+ media::GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
+ vda_profiles, &capabilities.supported_profiles);
+ vda_profiles = V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles();
+ media::GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
+ vda_profiles, &capabilities.supported_profiles);
+#endif
+#if defined(ARCH_CPU_X86_FAMILY)
+ vda_profiles = VaapiVideoDecodeAccelerator::GetSupportedProfiles();
+ media::GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
+ vda_profiles, &capabilities.supported_profiles);
+#endif
+#elif defined(OS_MACOSX)
+ capabilities.supported_profiles =
+ VTVideoDecodeAccelerator::GetSupportedProfiles();
+#elif defined(OS_ANDROID)
+ capabilities =
+ AndroidVideoDecodeAccelerator::GetCapabilities(gpu_preferences);
+#endif
+ return media::GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeCapabilities(
+ capabilities);
+}
+
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateVDA(
+ media::VideoDecodeAccelerator::Client* client,
+ const media::VideoDecodeAccelerator::Config& config,
+ const gpu::GpuPreferences& gpu_preferences) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (gpu_preferences.disable_accelerated_video_decode)
+ return nullptr;
+
+ // Array of Create...VDA() function pointers, potentially usable on the
+ // current platform. The list is ordered by priority, from most to least
+ // preferred where applicable, and must match the querying order in
+ // GetDecoderCapabilities() above.
+ using CreateVDAFp = scoped_ptr<media::VideoDecodeAccelerator> (
+ GpuVideoDecodeAcceleratorFactoryImpl::*)(const gpu::GpuPreferences&)
+ const;
+ const CreateVDAFp create_vda_fps[] = {
+#if defined(OS_WIN)
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateDXVAVDA,
+#endif
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateV4L2VDA,
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateV4L2SVDA,
+#endif
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateVaapiVDA,
+#endif
+#if defined(OS_MACOSX)
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateVTVDA,
+#endif
+#if defined(OS_ANDROID)
+ &GpuVideoDecodeAcceleratorFactoryImpl::CreateAndroidVDA,
+#endif
+ };
+
+ scoped_ptr<media::VideoDecodeAccelerator> vda;
+
+ for (const auto& create_vda_function : create_vda_fps) {
+ vda = (this->*create_vda_function)(gpu_preferences);
+ if (vda && vda->Initialize(config, client))
+ return vda;
+ }
+
+ return nullptr;
+}
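
CreateVDA() walks an array of pointers to member functions, instantiating each candidate until one both constructs and initializes. A self-contained sketch of that pointer-to-member idiom (Factory/MakeA/MakeB are illustrative, and std::unique_ptr stands in for Chromium's scoped_ptr):

    #include <memory>

    class Factory {
     public:
      std::unique_ptr<int> MakeA() const { return nullptr; }  // "Unsupported".
      std::unique_ptr<int> MakeB() const {
        return std::unique_ptr<int>(new int(1));
      }

      std::unique_ptr<int> MakeFirstAvailable() const {
        using MakeFp = std::unique_ptr<int> (Factory::*)() const;
        const MakeFp kMakers[] = {&Factory::MakeA, &Factory::MakeB};
        for (MakeFp maker : kMakers) {
          std::unique_ptr<int> result = (this->*maker)();
          if (result)
            return result;  // First creator that succeeds wins.
        }
        return nullptr;
      }
    };
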
+
+#if defined(OS_WIN)
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateDXVAVDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ if (base::win::GetVersion() >= base::win::VERSION_WIN7) {
+ DVLOG(0) << "Initializing DXVA HW decoder for windows.";
+ decoder.reset(new DXVAVideoDecodeAccelerator(
+ get_gl_context_cb_, make_context_current_cb_,
+ gpu_preferences.enable_accelerated_vpx_decode));
+ }
+ return decoder;
+}
+#endif
+
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateV4L2VDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
+ if (device.get()) {
+ decoder.reset(new V4L2VideoDecodeAccelerator(
+ gfx::GLSurfaceEGL::GetHardwareDisplay(), get_gl_context_cb_,
+ make_context_current_cb_, device));
+ }
+ return decoder;
+}
+
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateV4L2SVDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
+ if (device.get()) {
+ decoder.reset(new V4L2SliceVideoDecodeAccelerator(
+ device, gfx::GLSurfaceEGL::GetHardwareDisplay(), get_gl_context_cb_,
+ make_context_current_cb_));
+ }
+ return decoder;
+}
+#endif
+
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateVaapiVDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ decoder.reset(new VaapiVideoDecodeAccelerator(make_context_current_cb_,
+ bind_image_cb_));
+ return decoder;
+}
+#endif
+
+#if defined(OS_MACOSX)
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateVTVDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ decoder.reset(
+ new VTVideoDecodeAccelerator(make_context_current_cb_, bind_image_cb_));
+ return decoder;
+}
+#endif
+
+#if defined(OS_ANDROID)
+scoped_ptr<media::VideoDecodeAccelerator>
+GpuVideoDecodeAcceleratorFactoryImpl::CreateAndroidVDA(
+ const gpu::GpuPreferences& gpu_preferences) const {
+ scoped_ptr<media::VideoDecodeAccelerator> decoder;
+ decoder.reset(new AndroidVideoDecodeAccelerator(make_context_current_cb_,
+ get_gles2_decoder_cb_));
+ return decoder;
+}
+#endif
+
+GpuVideoDecodeAcceleratorFactoryImpl::GpuVideoDecodeAcceleratorFactoryImpl(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb)
+ : get_gl_context_cb_(get_gl_context_cb),
+ make_context_current_cb_(make_context_current_cb),
+ bind_image_cb_(bind_image_cb),
+ get_gles2_decoder_cb_(get_gles2_decoder_cb) {}
+
+GpuVideoDecodeAcceleratorFactoryImpl::~GpuVideoDecodeAcceleratorFactoryImpl() {}
+
+} // namespace content
diff --git a/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h
new file mode 100644
index 00000000000..2d4c10b8c32
--- /dev/null
+++ b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h
@@ -0,0 +1,123 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
+#define CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
+
+#include "base/callback.h"
+#include "base/threading/thread_checker.h"
+#include "content/common/content_export.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "gpu/config/gpu_info.h"
+#include "media/video/video_decode_accelerator.h"
+
+namespace gfx {
+class GLContext;
+}
+
+namespace gl {
+class GLImage;
+}
+
+namespace gpu {
+struct GpuPreferences;
+
+namespace gles2 {
+class GLES2Decoder;
+}
+}
+
+namespace content {
+
+// TODO(posciak): this class should be an implementation of
+// content::GpuVideoDecodeAcceleratorFactory; however, that can only be
+// achieved once this is moved out of content/common. See crbug.com/597150 and
+// related bugs.
+class CONTENT_EXPORT GpuVideoDecodeAcceleratorFactoryImpl {
+ public:
+ ~GpuVideoDecodeAcceleratorFactoryImpl();
+
+ // Return current GLContext.
+ using GetGLContextCallback = base::Callback<gfx::GLContext*(void)>;
+
+ // Make the applicable GL context current. To be called by VDAs before
+ // executing any GL calls. Return true on success, false otherwise.
+ using MakeGLContextCurrentCallback = base::Callback<bool(void)>;
+
+ // Bind |image| to |client_texture_id| given |texture_target|. If
+ // |can_bind_to_sampler| is true, then the image may be used as a sampler
+ // directly, otherwise a copy to a staging buffer is required.
+ // Return true on success, false otherwise.
+ using BindGLImageCallback =
+ base::Callback<bool(uint32_t client_texture_id,
+ uint32_t texture_target,
+ const scoped_refptr<gl::GLImage>& image,
+ bool can_bind_to_sampler)>;
+
+ // Return a WeakPtr to a GLES2Decoder, if one is available.
+ using GetGLES2DecoderCallback =
+ base::Callback<base::WeakPtr<gpu::gles2::GLES2Decoder>(void)>;
+
+ static scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl> Create(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb);
+
+ static scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl>
+ CreateWithGLES2Decoder(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb);
+
+ static gpu::VideoDecodeAcceleratorCapabilities GetDecoderCapabilities(
+ const gpu::GpuPreferences& gpu_preferences);
+
+ scoped_ptr<media::VideoDecodeAccelerator> CreateVDA(
+ media::VideoDecodeAccelerator::Client* client,
+ const media::VideoDecodeAccelerator::Config& config,
+ const gpu::GpuPreferences& gpu_preferences);
+
+ private:
+ GpuVideoDecodeAcceleratorFactoryImpl(
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb);
+
+#if defined(OS_WIN)
+ scoped_ptr<media::VideoDecodeAccelerator> CreateDXVAVDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+#endif
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
+ scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2VDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+ scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2SVDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+#endif
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+ scoped_ptr<media::VideoDecodeAccelerator> CreateVaapiVDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+#endif
+#if defined(OS_MACOSX)
+ scoped_ptr<media::VideoDecodeAccelerator> CreateVTVDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+#endif
+#if defined(OS_ANDROID)
+ scoped_ptr<media::VideoDecodeAccelerator> CreateAndroidVDA(
+ const gpu::GpuPreferences& gpu_preferences) const;
+#endif
+
+ const GetGLContextCallback get_gl_context_cb_;
+ const MakeGLContextCurrentCallback make_context_current_cb_;
+ const BindGLImageCallback bind_image_cb_;
+ const GetGLES2DecoderCallback get_gles2_decoder_cb_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(GpuVideoDecodeAcceleratorFactoryImpl);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_FACTORY_IMPL_H_
diff --git a/chromium/content/common/gpu/media/gpu_video_decode_accelerator_helpers.h b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_helpers.h
new file mode 100644
index 00000000000..1717f592603
--- /dev/null
+++ b/chromium/content/common/gpu/media/gpu_video_decode_accelerator_helpers.h
@@ -0,0 +1,59 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
+#define CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
+
+#include "base/callback.h"
+#include "base/memory/weak_ptr.h"
+
+namespace gfx {
+class GLContext;
+}
+
+namespace gl {
+class GLImage;
+}
+
+namespace gpu {
+namespace gles2 {
+class GLES2Decoder;
+}
+}
+
+namespace content {
+
+// Helpers/defines for specific VideoDecodeAccelerator implementations in the
+// GPU process. Which callbacks are required depends on the implementation.
+//
+// Note that these callbacks may be called more than once, and so must own/share
+// ownership of any objects bound to them.
+//
+// Unless specified otherwise, these callbacks must be executed on the GPU
+// Child thread (i.e. the thread on which the VDAs are initialized).
+
+// Return current GLContext.
+using GetGLContextCallback = base::Callback<gfx::GLContext*(void)>;
+
+// Make the applicable GL context current. To be called by VDAs before
+// executing any GL calls. Return true on success, false otherwise.
+using MakeGLContextCurrentCallback = base::Callback<bool(void)>;
+
+// Bind |image| to |client_texture_id| given |texture_target|. If
+// |can_bind_to_sampler| is true, then the image may be used as a sampler
+// directly, otherwise a copy to a staging buffer is required.
+// Return true on success, false otherwise.
+using BindGLImageCallback =
+ base::Callback<bool(uint32_t client_texture_id,
+ uint32_t texture_target,
+ const scoped_refptr<gl::GLImage>& image,
+ bool can_bind_to_sampler)>;
+
+// Return a WeakPtr to a GLES2Decoder, if one is available.
+using GetGLES2DecoderCallback =
+ base::Callback<base::WeakPtr<gpu::gles2::GLES2Decoder>(void)>;
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_DECODE_ACCELERATOR_HELPERS_H_
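
From a VDA implementation's point of view, the contract in this header is: run the make-current callback before any GL call and treat a false return (typically a destroyed stub) as a hard error. A hedged usage sketch; HypotheticalVDA is illustrative, not a real decoder:

    #include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"

    class HypotheticalVDA {
     public:
      explicit HypotheticalVDA(
          const content::MakeGLContextCurrentCallback& make_current_cb)
          : make_current_cb_(make_current_cb) {}

      bool DoGLWork() {
        if (!make_current_cb_.Run())
          return false;  // No current context; abort before any GL call.
        // ... GL calls would go here ...
        return true;
      }

     private:
      content::MakeGLContextCurrentCallback make_current_cb_;
    };
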
diff --git a/chromium/content/common/gpu/media/gpu_video_encode_accelerator.cc b/chromium/content/common/gpu/media/gpu_video_encode_accelerator.cc
index 7dd9a082b1d..7b1457e88f1 100644
--- a/chromium/content/common/gpu/media/gpu_video_encode_accelerator.cc
+++ b/chromium/content/common/gpu/media/gpu_video_encode_accelerator.cc
@@ -11,15 +11,15 @@
#include "base/numerics/safe_math.h"
#include "base/sys_info.h"
#include "build/build_config.h"
-#include "content/common/gpu/client/gpu_memory_buffer_impl.h"
-#include "content/common/gpu/gpu_channel.h"
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/gpu/media/gpu_video_accelerator_util.h"
-#include "content/public/common/content_switches.h"
+#include "gpu/ipc/client/gpu_memory_buffer_impl.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
#include "ipc/ipc_message_macros.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/limits.h"
#include "media/base/video_frame.h"
+#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
+#include "media/gpu/ipc/common/media_messages.h"
#if defined(OS_CHROMEOS)
#if defined(USE_V4L2_CODEC)
@@ -30,20 +30,14 @@
#endif
#elif defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
#include "content/common/gpu/media/android_video_encode_accelerator.h"
+#elif defined(OS_MACOSX)
+#include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
#endif
namespace content {
-namespace {
-
-// Allocation and destruction of buffer are done on the Browser process, so we
-// don't need to handle synchronization here.
-void DestroyGpuMemoryBuffer(const gpu::SyncToken& sync_token) {}
-
-} // namespace
-
static bool MakeDecoderContextCurrent(
- const base::WeakPtr<GpuCommandBufferStub> stub) {
+ const base::WeakPtr<gpu::GpuCommandBufferStub> stub) {
if (!stub) {
DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
return false;
@@ -57,8 +51,9 @@ static bool MakeDecoderContextCurrent(
return true;
}
-GpuVideoEncodeAccelerator::GpuVideoEncodeAccelerator(int32_t host_route_id,
- GpuCommandBufferStub* stub)
+GpuVideoEncodeAccelerator::GpuVideoEncodeAccelerator(
+ int32_t host_route_id,
+ gpu::GpuCommandBufferStub* stub)
: host_route_id_(host_route_id),
stub_(stub),
input_format_(media::PIXEL_FORMAT_UNKNOWN),
@@ -75,12 +70,11 @@ GpuVideoEncodeAccelerator::~GpuVideoEncodeAccelerator() {
DCHECK(!encoder_);
}
-void GpuVideoEncodeAccelerator::Initialize(
+bool GpuVideoEncodeAccelerator::Initialize(
media::VideoPixelFormat input_format,
const gfx::Size& input_visible_size,
media::VideoCodecProfile output_profile,
- uint32_t initial_bitrate,
- IPC::Message* init_done_msg) {
+ uint32_t initial_bitrate) {
DVLOG(2) << "GpuVideoEncodeAccelerator::Initialize(): "
"input_format=" << input_format
<< ", input_visible_size=" << input_visible_size.ToString()
@@ -88,11 +82,10 @@ void GpuVideoEncodeAccelerator::Initialize(
<< ", initial_bitrate=" << initial_bitrate;
DCHECK(!encoder_);
- if (!stub_->channel()->AddRoute(host_route_id_, this)) {
+ if (!stub_->channel()->AddRoute(host_route_id_, stub_->stream_id(), this)) {
DLOG(ERROR) << "GpuVideoEncodeAccelerator::Initialize(): "
"failed to add route";
- SendCreateEncoderReply(init_done_msg, false);
- return;
+ return false;
}
if (input_visible_size.width() > media::limits::kMaxDimension ||
@@ -101,12 +94,14 @@ void GpuVideoEncodeAccelerator::Initialize(
DLOG(ERROR) << "GpuVideoEncodeAccelerator::Initialize(): "
"input_visible_size " << input_visible_size.ToString()
<< " too large";
- SendCreateEncoderReply(init_done_msg, false);
- return;
+ return false;
}
- std::vector<GpuVideoEncodeAccelerator::CreateVEAFp>
- create_vea_fps = CreateVEAFps();
+ const gpu::GpuPreferences& gpu_preferences =
+ stub_->channel()->gpu_channel_manager()->gpu_preferences();
+
+ std::vector<GpuVideoEncodeAccelerator::CreateVEAFp> create_vea_fps =
+ CreateVEAFps(gpu_preferences);
// Try all possible encoders and use the first successful encoder.
for (size_t i = 0; i < create_vea_fps.size(); ++i) {
encoder_ = (*create_vea_fps[i])();
@@ -117,14 +112,13 @@ void GpuVideoEncodeAccelerator::Initialize(
this)) {
input_format_ = input_format;
input_visible_size_ = input_visible_size;
- SendCreateEncoderReply(init_done_msg, true);
- return;
+ return true;
}
}
encoder_.reset();
DLOG(ERROR)
<< "GpuVideoEncodeAccelerator::Initialize(): VEA initialization failed";
- SendCreateEncoderReply(init_done_msg, false);
+ return false;
}
bool GpuVideoEncodeAccelerator::OnMessageReceived(const IPC::Message& message) {
@@ -176,10 +170,11 @@ void GpuVideoEncodeAccelerator::OnWillDestroyStub() {
// static
gpu::VideoEncodeAcceleratorSupportedProfiles
-GpuVideoEncodeAccelerator::GetSupportedProfiles() {
+GpuVideoEncodeAccelerator::GetSupportedProfiles(
+ const gpu::GpuPreferences& gpu_preferences) {
media::VideoEncodeAccelerator::SupportedProfiles profiles;
- std::vector<GpuVideoEncodeAccelerator::CreateVEAFp>
- create_vea_fps = CreateVEAFps();
+ std::vector<GpuVideoEncodeAccelerator::CreateVEAFp> create_vea_fps =
+ CreateVEAFps(gpu_preferences);
for (size_t i = 0; i < create_vea_fps.size(); ++i) {
scoped_ptr<media::VideoEncodeAccelerator>
@@ -188,55 +183,73 @@ GpuVideoEncodeAccelerator::GetSupportedProfiles() {
continue;
media::VideoEncodeAccelerator::SupportedProfiles vea_profiles =
encoder->GetSupportedProfiles();
- GpuVideoAcceleratorUtil::InsertUniqueEncodeProfiles(
- vea_profiles, &profiles);
+ media::GpuVideoAcceleratorUtil::InsertUniqueEncodeProfiles(vea_profiles,
+ &profiles);
}
- return GpuVideoAcceleratorUtil::ConvertMediaToGpuEncodeProfiles(profiles);
+ return media::GpuVideoAcceleratorUtil::ConvertMediaToGpuEncodeProfiles(
+ profiles);
}
// static
std::vector<GpuVideoEncodeAccelerator::CreateVEAFp>
-GpuVideoEncodeAccelerator::CreateVEAFps() {
+GpuVideoEncodeAccelerator::CreateVEAFps(
+ const gpu::GpuPreferences& gpu_preferences) {
std::vector<GpuVideoEncodeAccelerator::CreateVEAFp> create_vea_fps;
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateV4L2VEA);
- create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateVaapiVEA);
- create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateAndroidVEA);
+#endif
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+ if (!gpu_preferences.disable_vaapi_accelerated_video_encode)
+ create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateVaapiVEA);
+#endif
+#if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
+ if (!gpu_preferences.disable_web_rtc_hw_encoding)
+ create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateAndroidVEA);
+#endif
+#if defined(OS_MACOSX)
+ create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateVTVEA);
+#endif
return create_vea_fps;
}
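
CreateVEAFps() now builds the creator list from both compile-time platform guards and runtime GPU preferences. A standalone sketch of the runtime gating only (Prefs and the Create* functions are hypothetical stand-ins; the platform #ifdefs are omitted):

    #include <vector>

    struct Prefs {
      bool disable_vaapi_encode;
      bool disable_webrtc_hw_encoding;
    };

    typedef const char* (*CreateFp)();
    const char* CreateV4L2() { return "v4l2"; }
    const char* CreateVaapi() { return "vaapi"; }
    const char* CreateAndroid() { return "android"; }

    std::vector<CreateFp> BuildCreators(const Prefs& prefs) {
      std::vector<CreateFp> fps;
      fps.push_back(&CreateV4L2);
      if (!prefs.disable_vaapi_encode)
        fps.push_back(&CreateVaapi);
      if (!prefs.disable_webrtc_hw_encoding)
        fps.push_back(&CreateAndroid);
      return fps;
    }
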
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
// static
scoped_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateV4L2VEA() {
scoped_ptr<media::VideoEncodeAccelerator> encoder;
-#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kEncoder);
if (device)
encoder.reset(new V4L2VideoEncodeAccelerator(device));
-#endif
return encoder;
}
+#endif
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
// static
scoped_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateVaapiVEA() {
- scoped_ptr<media::VideoEncodeAccelerator> encoder;
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
- const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- if (!cmd_line->HasSwitch(switches::kDisableVaapiAcceleratedVideoEncode))
- encoder.reset(new VaapiVideoEncodeAccelerator());
-#endif
- return encoder;
+ return make_scoped_ptr<media::VideoEncodeAccelerator>(
+ new VaapiVideoEncodeAccelerator());
}
+#endif
+#if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
// static
scoped_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateAndroidVEA() {
- scoped_ptr<media::VideoEncodeAccelerator> encoder;
-#if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
- encoder.reset(new AndroidVideoEncodeAccelerator());
+ return make_scoped_ptr<media::VideoEncodeAccelerator>(
+ new AndroidVideoEncodeAccelerator());
+}
#endif
- return encoder;
+
+#if defined(OS_MACOSX)
+// static
+scoped_ptr<media::VideoEncodeAccelerator>
+GpuVideoEncodeAccelerator::CreateVTVEA() {
+ return make_scoped_ptr<media::VideoEncodeAccelerator>(
+ new VTVideoEncodeAccelerator());
}
+#endif
void GpuVideoEncodeAccelerator::OnEncode(
const AcceleratedVideoEncoderMsg_Encode_Params& params) {
@@ -315,79 +328,8 @@ void GpuVideoEncodeAccelerator::OnEncode2(
<< params.frame_id << ", size=" << params.size.ToString()
<< ", force_keyframe=" << params.force_keyframe << ", handle type="
<< params.gpu_memory_buffer_handles[0].type;
- DCHECK_EQ(media::PIXEL_FORMAT_I420, input_format_);
- DCHECK_EQ(media::VideoFrame::NumPlanes(input_format_),
- params.gpu_memory_buffer_handles.size());
-
- bool map_result = true;
- uint8_t* data[media::VideoFrame::kMaxPlanes];
- int32_t strides[media::VideoFrame::kMaxPlanes];
- ScopedVector<gfx::GpuMemoryBuffer> buffers;
- const auto& handles = params.gpu_memory_buffer_handles;
- for (size_t i = 0; i < handles.size(); ++i) {
- const size_t width =
- media::VideoFrame::Columns(i, input_format_, params.size.width());
- const size_t height =
- media::VideoFrame::Rows(i, input_format_, params.size.height());
- scoped_ptr<gfx::GpuMemoryBuffer> buffer =
- GpuMemoryBufferImpl::CreateFromHandle(
- handles[i], gfx::Size(width, height), gfx::BufferFormat::R_8,
- gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
- media::BindToCurrentLoop(base::Bind(&DestroyGpuMemoryBuffer)));
-
- // TODO(emircan): Refactor such that each frame is mapped once.
- // See http://crbug/536938.
- if (!buffer.get() || !buffer->Map()) {
- map_result = false;
- continue;
- }
-
- data[i] = reinterpret_cast<uint8_t*>(buffer->memory(0));
- strides[i] = buffer->stride(0);
- buffers.push_back(buffer.release());
- }
-
- if (!map_result) {
- DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode2(): "
- << "failed to map buffers";
- NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
- return;
- }
-
- if (!encoder_)
- return;
-
- if (params.frame_id < 0) {
- DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode2(): invalid frame_id="
- << params.frame_id;
- NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
- return;
- }
-
- scoped_refptr<media::VideoFrame> frame =
- media::VideoFrame::WrapExternalYuvData(
- input_format_,
- input_coded_size_,
- gfx::Rect(input_visible_size_),
- input_visible_size_,
- strides[media::VideoFrame::kYPlane],
- strides[media::VideoFrame::kUPlane],
- strides[media::VideoFrame::kVPlane],
- data[media::VideoFrame::kYPlane],
- data[media::VideoFrame::kUPlane],
- data[media::VideoFrame::kVPlane],
- params.timestamp);
- if (!frame.get()) {
- DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode2(): "
- << "could not create a frame";
- NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
- return;
- }
- frame->AddDestructionObserver(media::BindToCurrentLoop(
- base::Bind(&GpuVideoEncodeAccelerator::EncodeFrameFinished2,
- weak_this_factory_.GetWeakPtr(), params.frame_id,
- base::Passed(&buffers))));
- encoder_->Encode(frame, params.force_keyframe);
+ // Encoding GpuMemoryBuffer-backed frames is not supported.
+ NOTREACHED();
}
void GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(
@@ -439,25 +381,8 @@ void GpuVideoEncodeAccelerator::EncodeFrameFinished(
// Just let |shm| fall out of scope.
}
-void GpuVideoEncodeAccelerator::EncodeFrameFinished2(
- int32_t frame_id,
- ScopedVector<gfx::GpuMemoryBuffer> buffers) {
- // TODO(emircan): Consider calling Unmap() in dtor.
- for (const auto& buffer : buffers)
- buffer->Unmap();
- Send(new AcceleratedVideoEncoderHostMsg_NotifyInputDone(host_route_id_,
- frame_id));
- // Just let |buffers| fall out of scope.
-}
-
void GpuVideoEncodeAccelerator::Send(IPC::Message* message) {
stub_->channel()->Send(message);
}
-void GpuVideoEncodeAccelerator::SendCreateEncoderReply(IPC::Message* message,
- bool succeeded) {
- GpuCommandBufferMsg_CreateVideoEncoder::WriteReplyParams(message, succeeded);
- Send(message);
-}
-
} // namespace content
diff --git a/chromium/content/common/gpu/media/gpu_video_encode_accelerator.h b/chromium/content/common/gpu/media/gpu_video_encode_accelerator.h
index ecc14f28e99..2c2db293db3 100644
--- a/chromium/content/common/gpu/media/gpu_video_encode_accelerator.h
+++ b/chromium/content/common/gpu/media/gpu_video_encode_accelerator.h
@@ -13,8 +13,8 @@
#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
-#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "gpu/config/gpu_info.h"
+#include "gpu/ipc/service/gpu_command_buffer_stub.h"
#include "ipc/ipc_listener.h"
#include "media/video/video_encode_accelerator.h"
#include "ui/gfx/geometry/size.h"
@@ -26,6 +26,10 @@ namespace base {
class SharedMemory;
} // namespace base
+namespace gpu {
+struct GpuPreferences;
+} // namespace gpu
+
namespace content {
// This class encapsulates the GPU process view of a VideoEncodeAccelerator,
@@ -34,18 +38,18 @@ namespace content {
class GpuVideoEncodeAccelerator
: public IPC::Listener,
public media::VideoEncodeAccelerator::Client,
- public GpuCommandBufferStub::DestructionObserver {
+ public gpu::GpuCommandBufferStub::DestructionObserver {
public:
- GpuVideoEncodeAccelerator(int32_t host_route_id, GpuCommandBufferStub* stub);
+ GpuVideoEncodeAccelerator(int32_t host_route_id,
+ gpu::GpuCommandBufferStub* stub);
~GpuVideoEncodeAccelerator() override;
- // Initialize this accelerator with the given parameters and send
- // |init_done_msg| when complete.
+ // Initialize this accelerator with the given parameters, returning true on
+ // success.
- void Initialize(media::VideoPixelFormat input_format,
+ bool Initialize(media::VideoPixelFormat input_format,
const gfx::Size& input_visible_size,
media::VideoCodecProfile output_profile,
- uint32_t initial_bitrate,
- IPC::Message* init_done_msg);
+ uint32_t initial_bitrate);
// IPC::Listener implementation
bool OnMessageReceived(const IPC::Message& message) override;
@@ -59,23 +63,34 @@ class GpuVideoEncodeAccelerator
bool key_frame) override;
void NotifyError(media::VideoEncodeAccelerator::Error error) override;
- // GpuCommandBufferStub::DestructionObserver implementation.
+ // gpu::GpuCommandBufferStub::DestructionObserver implementation.
void OnWillDestroyStub() override;
// Static query for supported profiles. This query calls the appropriate
// platform-specific version. The returned supported profiles vector will
// not contain duplicates.
- static gpu::VideoEncodeAcceleratorSupportedProfiles GetSupportedProfiles();
+ static gpu::VideoEncodeAcceleratorSupportedProfiles GetSupportedProfiles(
+ const gpu::GpuPreferences& gpu_preferences);
private:
typedef scoped_ptr<media::VideoEncodeAccelerator>(*CreateVEAFp)();
// Return a set of VEA Create function pointers applicable to the current
// platform.
- static std::vector<CreateVEAFp> CreateVEAFps();
+ static std::vector<CreateVEAFp> CreateVEAFps(
+ const gpu::GpuPreferences& gpu_preferences);
+#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
static scoped_ptr<media::VideoEncodeAccelerator> CreateV4L2VEA();
+#endif
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
static scoped_ptr<media::VideoEncodeAccelerator> CreateVaapiVEA();
+#endif
+#if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
static scoped_ptr<media::VideoEncodeAccelerator> CreateAndroidVEA();
+#endif
+#if defined(OS_MACOSX)
+ static scoped_ptr<media::VideoEncodeAccelerator> CreateVTVEA();
+#endif
// IPC handlers, proxying media::VideoEncodeAccelerator for the renderer
// process.
@@ -90,19 +105,15 @@ class GpuVideoEncodeAccelerator
void EncodeFrameFinished(int32_t frame_id,
scoped_ptr<base::SharedMemory> shm);
- void EncodeFrameFinished2(int32_t frame_id,
- ScopedVector<gfx::GpuMemoryBuffer> buffers);
void Send(IPC::Message* message);
- // Helper for replying to the creation request.
- void SendCreateEncoderReply(IPC::Message* message, bool succeeded);
// Route ID to communicate with the host.
const uint32_t host_route_id_;
- // Unowned pointer to the underlying GpuCommandBufferStub. |this| is
+ // Unowned pointer to the underlying gpu::GpuCommandBufferStub. |this| is
// registered as a DestructionObserver of |stub_| and will self-delete when
// |stub_| is destroyed.
- GpuCommandBufferStub* const stub_;
+ gpu::GpuCommandBufferStub* const stub_;
// Owned pointer to the underlying VideoEncodeAccelerator.
scoped_ptr<media::VideoEncodeAccelerator> encoder_;
diff --git a/chromium/content/common/gpu/media/media_channel.cc b/chromium/content/common/gpu/media/media_channel.cc
new file mode 100644
index 00000000000..7baeba075e4
--- /dev/null
+++ b/chromium/content/common/gpu/media/media_channel.cc
@@ -0,0 +1,145 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/media_channel.h"
+
+#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
+#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "media/gpu/ipc/common/media_messages.h"
+
+namespace content {
+
+namespace {
+
+void SendCreateJpegDecoderResult(
+ scoped_ptr<IPC::Message> reply_message,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
+ base::WeakPtr<gpu::GpuChannel> channel,
+ scoped_refptr<gpu::GpuChannelMessageFilter> filter,
+ bool result) {
+ GpuChannelMsg_CreateJpegDecoder::WriteReplyParams(reply_message.get(),
+ result);
+ if (io_task_runner->BelongsToCurrentThread()) {
+ filter->Send(reply_message.release());
+ } else if (channel) {
+ channel->Send(reply_message.release());
+ }
+}
+
+} // namespace
+
+class MediaChannelDispatchHelper {
+ public:
+ MediaChannelDispatchHelper(MediaChannel* channel, int32_t routing_id)
+ : channel_(channel), routing_id_(routing_id) {}
+
+ bool Send(IPC::Message* msg) { return channel_->Send(msg); }
+
+ void OnCreateVideoDecoder(const media::VideoDecodeAccelerator::Config& config,
+ int32_t decoder_route_id,
+ IPC::Message* reply_message) {
+ channel_->OnCreateVideoDecoder(routing_id_, config, decoder_route_id,
+ reply_message);
+ }
+
+ void OnCreateVideoEncoder(const media::CreateVideoEncoderParams& params,
+ IPC::Message* reply_message) {
+ channel_->OnCreateVideoEncoder(routing_id_, params, reply_message);
+ }
+
+ private:
+ MediaChannel* const channel_;
+ const int32_t routing_id_;
+ DISALLOW_COPY_AND_ASSIGN(MediaChannelDispatchHelper);
+};
+
+MediaChannel::MediaChannel(gpu::GpuChannel* channel) : channel_(channel) {}
+
+MediaChannel::~MediaChannel() {}
+
+bool MediaChannel::Send(IPC::Message* msg) {
+ return channel_->Send(msg);
+}
+
+bool MediaChannel::OnMessageReceived(const IPC::Message& message) {
+ MediaChannelDispatchHelper helper(this, message.routing_id());
+ bool handled = true;
+ IPC_BEGIN_MESSAGE_MAP(MediaChannel, message)
+ IPC_MESSAGE_FORWARD_DELAY_REPLY(
+ GpuCommandBufferMsg_CreateVideoDecoder, &helper,
+ MediaChannelDispatchHelper::OnCreateVideoDecoder)
+ IPC_MESSAGE_FORWARD_DELAY_REPLY(
+ GpuCommandBufferMsg_CreateVideoEncoder, &helper,
+ MediaChannelDispatchHelper::OnCreateVideoEncoder)
+ IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateJpegDecoder,
+ OnCreateJpegDecoder)
+ IPC_MESSAGE_UNHANDLED(handled = false)
+ IPC_END_MESSAGE_MAP()
+ return handled;
+}
+
+void MediaChannel::OnCreateJpegDecoder(int32_t route_id,
+ IPC::Message* reply_msg) {
+ scoped_ptr<IPC::Message> msg(reply_msg);
+ if (!jpeg_decoder_) {
+ jpeg_decoder_.reset(
+ new GpuJpegDecodeAccelerator(channel_, channel_->io_task_runner()));
+ }
+ jpeg_decoder_->AddClient(
+ route_id, base::Bind(&SendCreateJpegDecoderResult, base::Passed(&msg),
+ channel_->io_task_runner(), channel_->AsWeakPtr(),
+ make_scoped_refptr(channel_->filter())));
+}
+
+void MediaChannel::OnCreateVideoDecoder(
+ int32_t command_buffer_route_id,
+ const media::VideoDecodeAccelerator::Config& config,
+ int32_t decoder_route_id,
+ IPC::Message* reply_message) {
+ TRACE_EVENT0("gpu", "MediaChannel::OnCreateVideoDecoder");
+ gpu::GpuCommandBufferStub* stub =
+ channel_->LookupCommandBuffer(command_buffer_route_id);
+ if (!stub) {
+ reply_message->set_reply_error();
+ Send(reply_message);
+ return;
+ }
+ GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
+ decoder_route_id, stub, stub->channel()->io_task_runner());
+ bool succeeded = decoder->Initialize(config);
+ GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(reply_message,
+ succeeded);
+ Send(reply_message);
+
+ // |decoder| is registered as a DestructionObserver of this stub and will
+ // self-delete during destruction of this stub.
+}
+
+void MediaChannel::OnCreateVideoEncoder(
+ int32_t command_buffer_route_id,
+ const media::CreateVideoEncoderParams& params,
+ IPC::Message* reply_message) {
+ TRACE_EVENT0("gpu", "MediaChannel::OnCreateVideoEncoder");
+ gpu::GpuCommandBufferStub* stub =
+ channel_->LookupCommandBuffer(command_buffer_route_id);
+ if (!stub) {
+ reply_message->set_reply_error();
+ Send(reply_message);
+ return;
+ }
+ GpuVideoEncodeAccelerator* encoder =
+ new GpuVideoEncodeAccelerator(params.encoder_route_id, stub);
+ bool succeeded =
+ encoder->Initialize(params.input_format, params.input_visible_size,
+ params.output_profile, params.initial_bitrate);
+ GpuCommandBufferMsg_CreateVideoEncoder::WriteReplyParams(reply_message,
+ succeeded);
+ Send(reply_message);
+
+ // |encoder| is registered as a DestructionObserver of this stub and will
+ // self-delete during destruction of this stub.
+}
+
+} // namespace content
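
The MediaChannelDispatchHelper above exists because the FORWARD_DELAY_REPLY handlers need the routing id of the message currently being dispatched, which the plain handler signatures do not carry. The following minimal sketch models that pattern with plain std types; Message, Dispatcher, and the handler names are illustrative stand-ins, not Chromium's real IPC APIs.

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    struct Message {
      int32_t routing_id;
    };

    class Dispatcher {
     public:
      using Handler = std::function<void(int32_t routing_id, const Message&)>;
      void Register(const std::string& type, Handler handler) {
        handlers_[type] = std::move(handler);
      }
      // Returns false when unhandled, like OnMessageReceived() above, so the
      // caller can fall through to another listener.
      bool Dispatch(const std::string& type, const Message& msg) {
        auto it = handlers_.find(type);
        if (it == handlers_.end())
          return false;
        // The helper's job: hand the routing id to the handler alongside the
        // message itself.
        it->second(msg.routing_id, msg);
        return true;
      }

     private:
      std::map<std::string, Handler> handlers_;
    };

    int main() {
      Dispatcher dispatcher;
      dispatcher.Register("CreateVideoDecoder",
                          [](int32_t routing_id, const Message&) {
                            std::cout << "decoder on route " << routing_id << "\n";
                          });
      dispatcher.Dispatch("CreateVideoDecoder", Message{42});
    }
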
diff --git a/chromium/content/common/gpu/media/media_channel.h b/chromium/content/common/gpu/media/media_channel.h
new file mode 100644
index 00000000000..7cfe0378587
--- /dev/null
+++ b/chromium/content/common/gpu/media/media_channel.h
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_MEDIA_CHANNEL_H_
+#define CONTENT_COMMON_GPU_MEDIA_MEDIA_CHANNEL_H_
+
+#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
+#include "ipc/ipc_listener.h"
+#include "ipc/ipc_sender.h"
+#include "media/video/video_decode_accelerator.h"
+
+namespace media {
+struct CreateVideoEncoderParams;
+}
+
+namespace gpu {
+class GpuChannel;
+class GpuCommandBufferStub;
+}
+
+namespace content {
+
+class MediaChannelDispatchHelper;
+
+class MediaChannel : public IPC::Listener, public IPC::Sender {
+ public:
+ explicit MediaChannel(gpu::GpuChannel* channel);
+ ~MediaChannel() override;
+
+ // IPC::Sender implementation:
+ bool Send(IPC::Message* msg) override;
+
+ private:
+ friend class MediaChannelDispatchHelper;
+
+ // IPC::Listener implementation:
+ bool OnMessageReceived(const IPC::Message& message) override;
+
+ // Message handlers.
+ void OnCreateJpegDecoder(int32_t route_id, IPC::Message* reply_msg);
+ void OnCreateVideoDecoder(int32_t command_buffer_route_id,
+ const media::VideoDecodeAccelerator::Config& config,
+ int32_t route_id,
+ IPC::Message* reply_message);
+ void OnCreateVideoEncoder(int32_t command_buffer_route_id,
+ const media::CreateVideoEncoderParams& params,
+ IPC::Message* reply_message);
+
+ gpu::GpuChannel* const channel_;
+ scoped_ptr<GpuJpegDecodeAccelerator> jpeg_decoder_;
+ DISALLOW_COPY_AND_ASSIGN(MediaChannel);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_MEDIA_CHANNEL_H_
diff --git a/chromium/content/common/gpu/media/media_service.cc b/chromium/content/common/gpu/media/media_service.cc
new file mode 100644
index 00000000000..89ec8b1fe50
--- /dev/null
+++ b/chromium/content/common/gpu/media/media_service.cc
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/media_service.h"
+
+#include <utility>
+
+#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
+#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
+#include "content/common/gpu/media/media_channel.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
+#include "ipc/ipc_message_macros.h"
+#include "ipc/param_traits_macros.h"
+
+namespace content {
+
+MediaService::MediaService(gpu::GpuChannelManager* channel_manager)
+ : channel_manager_(channel_manager) {}
+
+MediaService::~MediaService() {}
+
+void MediaService::AddChannel(int32_t client_id) {
+ gpu::GpuChannel* gpu_channel = channel_manager_->LookupChannel(client_id);
+ DCHECK(gpu_channel);
+ scoped_ptr<MediaChannel> media_channel(new MediaChannel(gpu_channel));
+ gpu_channel->SetUnhandledMessageListener(media_channel.get());
+ media_channels_.set(client_id, std::move(media_channel));
+}
+
+void MediaService::RemoveChannel(int32_t client_id) {
+ media_channels_.erase(client_id);
+}
+
+void MediaService::DestroyAllChannels() {
+ media_channels_.clear();
+}
+
+} // namespace content
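
MediaService owns one MediaChannel per client id and tears it down on RemoveChannel() or DestroyAllChannels(). The sketch below models that ownership with std containers in place of base::ScopedPtrHashMap; all names are stand-ins.

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <unordered_map>

    struct FakeMediaChannel {
      explicit FakeMediaChannel(int32_t id) : id(id) {}
      ~FakeMediaChannel() { std::cout << "destroying channel " << id << "\n"; }
      int32_t id;
    };

    class FakeMediaService {
     public:
      void AddChannel(int32_t client_id) {
        channels_[client_id] = std::make_unique<FakeMediaChannel>(client_id);
      }
      void RemoveChannel(int32_t client_id) { channels_.erase(client_id); }
      void DestroyAllChannels() { channels_.clear(); }

     private:
      std::unordered_map<int32_t, std::unique_ptr<FakeMediaChannel>> channels_;
    };

    int main() {
      FakeMediaService service;
      service.AddChannel(1);
      service.AddChannel(2);
      service.RemoveChannel(1);      // Destroys channel 1.
      service.DestroyAllChannels();  // Destroys channel 2.
    }
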
diff --git a/chromium/content/common/gpu/media/media_service.h b/chromium/content/common/gpu/media/media_service.h
new file mode 100644
index 00000000000..15dca82260a
--- /dev/null
+++ b/chromium/content/common/gpu/media/media_service.h
@@ -0,0 +1,42 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_MEDIA_SERVICE_H_
+#define CONTENT_COMMON_GPU_MEDIA_MEDIA_SERVICE_H_
+
+#include <stdint.h>
+
+#include "base/containers/scoped_ptr_hash_map.h"
+#include "base/macros.h"
+#include "ipc/ipc_listener.h"
+#include "ipc/ipc_sender.h"
+#include "media/video/video_decode_accelerator.h"
+
+namespace gpu {
+class GpuChannel;
+class GpuChannelManager;
+}
+
+namespace content {
+
+class MediaChannel;
+
+class MediaService {
+ public:
+ explicit MediaService(gpu::GpuChannelManager* channel_manager);
+ ~MediaService();
+
+ void AddChannel(int32_t client_id);
+ void RemoveChannel(int32_t client_id);
+ void DestroyAllChannels();
+
+ private:
+ gpu::GpuChannelManager* const channel_manager_;
+ base::ScopedPtrHashMap<int32_t, scoped_ptr<MediaChannel>> media_channels_;
+ DISALLOW_COPY_AND_ASSIGN(MediaService);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_MEDIA_SERVICE_H_
diff --git a/chromium/content/common/gpu/media/rendering_helper.cc b/chromium/content/common/gpu/media/rendering_helper.cc
index 85bfe0a840d..2a19428b2d0 100644
--- a/chromium/content/common/gpu/media/rendering_helper.cc
+++ b/chromium/content/common/gpu/media/rendering_helper.cc
@@ -160,6 +160,9 @@ RenderingHelperParams::RenderingHelperParams()
: rendering_fps(0), warm_up_iterations(0), render_as_thumbnails(false) {
}
+RenderingHelperParams::RenderingHelperParams(
+ const RenderingHelperParams& other) = default;
+
RenderingHelperParams::~RenderingHelperParams() {}
VideoFrameTexture::VideoFrameTexture(uint32_t texture_target,
@@ -179,6 +182,9 @@ RenderingHelper::RenderedVideo::RenderedVideo()
: is_flushing(false), frames_to_drop(0) {
}
+RenderingHelper::RenderedVideo::RenderedVideo(const RenderedVideo& other) =
+ default;
+
RenderingHelper::RenderedVideo::~RenderedVideo() {
}
@@ -665,12 +671,8 @@ void RenderingHelper::DeleteTexture(uint32_t texture_id) {
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
}
-scoped_refptr<gfx::GLContext> RenderingHelper::GetGLContext() {
- return gl_context_;
-}
-
-void* RenderingHelper::GetGLContextHandle() {
- return gl_context_->GetHandle();
+gfx::GLContext* RenderingHelper::GetGLContext() {
+ return gl_context_.get();
}
void* RenderingHelper::GetGLDisplay() {
diff --git a/chromium/content/common/gpu/media/rendering_helper.h b/chromium/content/common/gpu/media/rendering_helper.h
index 8a6c28bd3f7..250d382ac61 100644
--- a/chromium/content/common/gpu/media/rendering_helper.h
+++ b/chromium/content/common/gpu/media/rendering_helper.h
@@ -54,6 +54,7 @@ class VideoFrameTexture : public base::RefCounted<VideoFrameTexture> {
struct RenderingHelperParams {
RenderingHelperParams();
+ RenderingHelperParams(const RenderingHelperParams& other);
~RenderingHelperParams();
// The rendering FPS.
@@ -135,10 +136,7 @@ class RenderingHelper {
void* GetGLDisplay();
// Get the GL context.
- scoped_refptr<gfx::GLContext> GetGLContext();
-
- // Get the platform specific handle to the OpenGL context.
- void* GetGLContextHandle();
+ gfx::GLContext* GetGLContext();
// Get rendered thumbnails as RGB.
// Sets alpha_solid to true if the alpha channel is entirely 0xff.
@@ -165,6 +163,7 @@ class RenderingHelper {
std::queue<scoped_refptr<VideoFrameTexture> > pending_frames;
RenderedVideo();
+ RenderedVideo(const RenderedVideo& other);
~RenderedVideo();
};
diff --git a/chromium/content/common/gpu/media/shared_memory_region.cc b/chromium/content/common/gpu/media/shared_memory_region.cc
new file mode 100644
index 00000000000..4ee6a242578
--- /dev/null
+++ b/chromium/content/common/gpu/media/shared_memory_region.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+#include "content/common/gpu/media/shared_memory_region.h"
+
+namespace content {
+
+SharedMemoryRegion::SharedMemoryRegion(const base::SharedMemoryHandle& handle,
+ off_t offset,
+ size_t size,
+ bool read_only)
+ : shm_(handle, read_only),
+ offset_(offset),
+ size_(size),
+ alignment_size_(offset % base::SysInfo::VMAllocationGranularity()) {
+ DCHECK_GE(offset_, 0) << "Invalid offset: " << offset_;
+}
+
+SharedMemoryRegion::SharedMemoryRegion(
+ const media::BitstreamBuffer& bitstream_buffer,
+ bool read_only)
+ : SharedMemoryRegion(bitstream_buffer.handle(),
+ bitstream_buffer.offset(),
+ bitstream_buffer.size(),
+ read_only) {}
+
+bool SharedMemoryRegion::Map() {
+ if (offset_ < 0) {
+ DVLOG(1) << "Invalid offset: " << offset_;
+ return false;
+ }
+ return shm_.MapAt(offset_ - alignment_size_, size_ + alignment_size_);
+}
+
+void* SharedMemoryRegion::memory() {
+ int8_t* addr = reinterpret_cast<int8_t*>(shm_.memory());
+ return addr ? addr + alignment_size_ : nullptr;
+}
+
+} // namespace content
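
The constructor and Map() above implement the alignment trick the class exists for: MapAt() only accepts granularity-aligned offsets, so the region maps from the aligned address at or below |offset| and re-adds the remainder when handing out memory(). The arithmetic, extracted into a self-contained sketch (the 4096-byte granularity is an example value, not a guaranteed one):

    #include <cassert>
    #include <cstddef>

    struct MapRequest {
      size_t map_offset;  // Aligned offset actually passed to MapAt().
      size_t map_size;    // Requested size grown by the alignment slack.
      size_t delta;       // Bytes to add back to the mapped base address.
    };

    MapRequest ComputeMapRequest(size_t offset, size_t size, size_t granularity) {
      size_t delta = offset % granularity;
      return {offset - delta, size + delta, delta};
    }

    int main() {
      // E.g. a 100-byte region at offset 5000 with 4096-byte granularity:
      MapRequest r = ComputeMapRequest(5000, 100, 4096);
      assert(r.map_offset == 4096);  // 5000 - (5000 % 4096)
      assert(r.map_size == 1004);    // 100 + 904
      assert(r.delta == 904);        // memory() returns base + 904
    }
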
diff --git a/chromium/content/common/gpu/media/shared_memory_region.h b/chromium/content/common/gpu/media/shared_memory_region.h
new file mode 100644
index 00000000000..f7c5db29982
--- /dev/null
+++ b/chromium/content/common/gpu/media/shared_memory_region.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_SHARED_MEMORY_REGION_H_
+#define CONTENT_COMMON_GPU_MEDIA_SHARED_MEMORY_REGION_H_
+
+#include "base/memory/shared_memory.h"
+#include "media/base/bitstream_buffer.h"
+
+namespace content {
+
+// Helper class to access a region of a SharedMemory. Unlike SharedMemory,
+// where the |offset| passed to MapAt() must be aligned to
+// |SysInfo::VMAllocationGranularity()|, the |offset| of a SharedMemoryRegion
+// does not need to be aligned; this class hides the alignment details and
+// returns the mapped address at the given offset.
+class SharedMemoryRegion {
+ public:
+ // Creates a SharedMemoryRegion.
+ // The mapped memory region begins at |offset| bytes from the start of the
+ // shared memory and is |size| bytes long. Takes ownership of |handle| and
+ // releases the resource on destruction. Unlike SharedMemory, the |offset|
+ // does not need to be aligned to |SysInfo::VMAllocationGranularity()|.
+ SharedMemoryRegion(const base::SharedMemoryHandle& handle,
+ off_t offset,
+ size_t size,
+ bool read_only);
+
+ // Creates a SharedMemoryRegion from the given |bitstream_buffer|.
+ SharedMemoryRegion(const media::BitstreamBuffer& bitstream_buffer,
+ bool read_only);
+
+ // Maps the shared memory into the caller's address space.
+ // Returns true on success, false otherwise.
+ bool Map();
+
+ // Gets a pointer to the mapped region if it has been mapped via Map().
+ // Returns |nullptr| if it is not mapped. The returned pointer points
+ // to the memory at the offset previously passed to the constructor.
+ void* memory();
+
+ size_t size() const { return size_; }
+
+ private:
+ base::SharedMemory shm_;
+ off_t offset_;
+ size_t size_;
+ size_t alignment_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedMemoryRegion);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_SHARED_MEMORY_REGION_H_
diff --git a/chromium/content/common/gpu/media/v4l2_image_processor.cc b/chromium/content/common/gpu/media/v4l2_image_processor.cc
index f0cf3977774..340a1484335 100644
--- a/chromium/content/common/gpu/media/v4l2_image_processor.cc
+++ b/chromium/content/common/gpu/media/v4l2_image_processor.cc
@@ -468,24 +468,22 @@ void V4L2ImageProcessor::Enqueue() {
}
}
- // TODO(posciak): Fix this to be non-Exynos specific.
- // Exynos GSC is liable to race conditions if more than one output buffer is
- // simultaneously enqueued, so enqueue just one.
- if (output_buffer_queued_count_ == 0 && !free_output_buffers_.empty()) {
- const int old_outputs_queued = output_buffer_queued_count_;
+ const int old_outputs_queued = output_buffer_queued_count_;
+ while (!free_output_buffers_.empty()) {
if (!EnqueueOutputRecord())
return;
- if (old_outputs_queued == 0 && output_buffer_queued_count_ != 0) {
- // We just started up a previously empty queue.
- // Queue state changed; signal interrupt.
- if (!device_->SetDevicePollInterrupt())
- return;
- // Start VIDIOC_STREAMON if we haven't yet.
- if (!output_streamon_) {
- __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
- output_streamon_ = true;
- }
+ }
+
+ if (old_outputs_queued == 0 && output_buffer_queued_count_ != 0) {
+ // We just started up a previously empty queue.
+ // Queue state changed; signal interrupt.
+ if (!device_->SetDevicePollInterrupt())
+ return;
+ // Start VIDIOC_STREAMON if we haven't yet.
+ if (!output_streamon_) {
+ __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
+ output_streamon_ = true;
}
}
DCHECK_LE(output_buffer_queued_count_, 1);
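
This hunk replaces the Exynos-specific single-buffer restriction with a loop that drains the free output buffer list, keeping the existing rule that the poll interrupt and VIDIOC_STREAMON fire only on the empty-to-non-empty transition. A compact stand-alone model of that control flow, with stand-ins for the device calls:

    #include <iostream>
    #include <vector>

    struct FakeDevice {
      bool SetPollInterrupt() { std::cout << "poll interrupt\n"; return true; }
      bool StreamOn() { std::cout << "VIDIOC_STREAMON\n"; return true; }
    };

    int main() {
      FakeDevice device;
      std::vector<int> free_output_buffers = {0, 1, 2};
      int queued = 0;
      bool streamon = false;

      const int old_queued = queued;
      while (!free_output_buffers.empty()) {  // Was: enqueue at most one.
        free_output_buffers.pop_back();       // Stand-in for EnqueueOutputRecord().
        ++queued;
      }
      if (old_queued == 0 && queued != 0) {
        // Queue just became non-empty: wake the device poll thread and make
        // sure streaming is on.
        if (!device.SetPollInterrupt())
          return 1;
        if (!streamon) {
          device.StreamOn();
          streamon = true;
        }
      }
    }
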
diff --git a/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc b/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc
index 06091a36b4d..0121eadbc09 100644
--- a/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.cc
@@ -112,10 +112,11 @@ V4L2JpegDecodeAccelerator::BufferRecord::~BufferRecord() {
}
V4L2JpegDecodeAccelerator::JobRecord::JobRecord(
- media::BitstreamBuffer bitstream_buffer,
+ const media::BitstreamBuffer& bitstream_buffer,
scoped_refptr<media::VideoFrame> video_frame)
- : bitstream_buffer(bitstream_buffer), out_frame(video_frame) {
-}
+ : bitstream_buffer_id(bitstream_buffer.id()),
+ shm(bitstream_buffer, true),
+ out_frame(video_frame) {}
V4L2JpegDecodeAccelerator::JobRecord::~JobRecord() {
}
@@ -233,6 +234,14 @@ void V4L2JpegDecodeAccelerator::Decode(
<< ", size=" << bitstream_buffer.size();
DCHECK(io_task_runner_->BelongsToCurrentThread());
+ if (bitstream_buffer.id() < 0) {
+ LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+ PostNotifyError(bitstream_buffer.id(), INVALID_ARGUMENT);
+ return;
+ }
+
if (video_frame->format() != media::PIXEL_FORMAT_I420) {
PostNotifyError(bitstream_buffer.id(), UNSUPPORTED_JPEG);
return;
@@ -260,11 +269,9 @@ bool V4L2JpegDecodeAccelerator::IsSupported() {
void V4L2JpegDecodeAccelerator::DecodeTask(scoped_ptr<JobRecord> job_record) {
DCHECK(decoder_task_runner_->BelongsToCurrentThread());
- job_record->shm.reset(
- new base::SharedMemory(job_record->bitstream_buffer.handle(), true));
- if (!job_record->shm->Map(job_record->bitstream_buffer.size())) {
+ if (!job_record->shm.Map()) {
PLOG(ERROR) << __func__ << ": could not map bitstream_buffer";
- PostNotifyError(job_record->bitstream_buffer.id(), UNREADABLE_INPUT);
+ PostNotifyError(job_record->bitstream_buffer_id, UNREADABLE_INPUT);
return;
}
input_jobs_.push(make_linked_ptr(job_record.release()));
@@ -288,7 +295,7 @@ bool V4L2JpegDecodeAccelerator::ShouldRecreateInputBuffers() {
linked_ptr<JobRecord> job_record = input_jobs_.front();
// Check whether the input buffer size is large enough.
return (input_buffer_map_.empty() ||
- (job_record->bitstream_buffer.size() + sizeof(kDefaultDhtSeg)) >
+ (job_record->shm.size() + sizeof(kDefaultDhtSeg)) >
input_buffer_map_.front().length);
}
@@ -333,8 +340,7 @@ bool V4L2JpegDecodeAccelerator::CreateInputBuffers() {
// The input image may be missing the Huffman table. Since we don't parse
// the image beforehand, allocate extra space to avoid running out of memory.
// Reserve twice the size to avoid recreating the input buffer frequently.
- size_t reserve_size =
- (job_record->bitstream_buffer.size() + sizeof(kDefaultDhtSeg)) * 2;
+ size_t reserve_size = (job_record->shm.size() + sizeof(kDefaultDhtSeg)) * 2;
struct v4l2_format format;
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
@@ -711,17 +717,16 @@ void V4L2JpegDecodeAccelerator::Dequeue() {
// V4L2_PIX_FMT_YUV420.
if (!CopyOutputImage(output_buffer_pixelformat_, output_record.address,
output_buffer_coded_size_, job_record->out_frame)) {
- PostNotifyError(job_record->bitstream_buffer.id(), PLATFORM_FAILURE);
+ PostNotifyError(job_record->bitstream_buffer_id, PLATFORM_FAILURE);
return;
}
DVLOG(3) << "Decoding finished, returning bitstream buffer, id="
- << job_record->bitstream_buffer.id();
+ << job_record->bitstream_buffer_id;
child_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&V4L2JpegDecodeAccelerator::VideoFrameReady, weak_ptr_,
- job_record->bitstream_buffer.id()));
+ FROM_HERE, base::Bind(&V4L2JpegDecodeAccelerator::VideoFrameReady,
+ weak_ptr_, job_record->bitstream_buffer_id));
}
}
}
@@ -819,10 +824,9 @@ bool V4L2JpegDecodeAccelerator::EnqueueInputRecord() {
DCHECK(!input_record.at_device);
// It will add default huffman segment if it's missing.
- if (!AddHuffmanTable(job_record->shm->memory(),
- job_record->bitstream_buffer.size(),
+ if (!AddHuffmanTable(job_record->shm.memory(), job_record->shm.size(),
input_record.address, input_record.length)) {
- PostNotifyError(job_record->bitstream_buffer.id(), PARSE_JPEG_FAILED);
+ PostNotifyError(job_record->bitstream_buffer_id, PARSE_JPEG_FAILED);
return false;
}
@@ -836,8 +840,9 @@ bool V4L2JpegDecodeAccelerator::EnqueueInputRecord() {
running_jobs_.push(job_record);
free_input_buffers_.pop_back();
- DVLOG(3) << __func__ << ": enqueued frame id="
- << job_record->bitstream_buffer.id() << " to device.";
+ DVLOG(3) << __func__
+ << ": enqueued frame id=" << job_record->bitstream_buffer_id
+ << " to device.";
return true;
}
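
A recurring piece of this patch set is the up-front validation of incoming bitstream buffers: a negative id is rejected, and any shared-memory handle attached to the rejected buffer is closed so the descriptor does not leak. A stand-alone sketch of that pattern; FakeBitstreamBuffer, IsHandleValid, and CloseHandle are stand-ins for the base::SharedMemory API.

    #include <cstdint>
    #include <iostream>

    struct FakeBitstreamBuffer {
      int32_t id;
      int handle;  // Stand-in for a SharedMemoryHandle; -1 means invalid.
    };

    bool IsHandleValid(int fd) { return fd >= 0; }
    void CloseHandle(int fd) { std::cout << "close(" << fd << ")\n"; }

    bool ValidateForDecode(const FakeBitstreamBuffer& buffer) {
      if (buffer.id < 0) {
        std::cerr << "Invalid bitstream_buffer, id: " << buffer.id << "\n";
        if (IsHandleValid(buffer.handle))
          CloseHandle(buffer.handle);  // Don't leak the descriptor.
        return false;  // Caller then reports INVALID_ARGUMENT.
      }
      return true;
    }

    int main() {
      ValidateForDecode({-1, 7});  // Rejected: handle closed, error reported.
      ValidateForDecode({3, 8});   // Accepted; decode proceeds.
    }
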
diff --git a/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h b/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h
index 435808012ec..bef33b22c10 100644
--- a/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/v4l2_jpeg_decode_accelerator.h
@@ -18,6 +18,7 @@
#include "base/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/v4l2_device.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/video_frame.h"
@@ -58,16 +59,16 @@ class CONTENT_EXPORT V4L2JpegDecodeAccelerator
// the time of submission we may not have one available (and don't need one
// to submit input to the device).
struct JobRecord {
- JobRecord(media::BitstreamBuffer bitstream_buffer,
+ JobRecord(const media::BitstreamBuffer& bitstream_buffer,
scoped_refptr<media::VideoFrame> video_frame);
~JobRecord();
- // Input image buffer.
- media::BitstreamBuffer bitstream_buffer;
+ // Input image buffer ID.
+ int32_t bitstream_buffer_id;
+ // Memory mapped from |bitstream_buffer|.
+ SharedMemoryRegion shm;
// Output frame buffer.
scoped_refptr<media::VideoFrame> out_frame;
- // Memory mapped from |bitstream_buffer|.
- scoped_ptr<base::SharedMemory> shm;
};
void EnqueueInput();
diff --git a/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc b/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
index 4c3b724daa5..80087232b65 100644
--- a/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
@@ -19,9 +19,11 @@
#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/stringprintf.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/media_switches.h"
+#include "ui/gl/gl_context.h"
#include "ui/gl/scoped_binders.h"
#define LOGF(level) LOG(level) << __FUNCTION__ << "(): "
@@ -169,14 +171,12 @@ struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
BitstreamBufferRef(
base::WeakPtr<VideoDecodeAccelerator::Client>& client,
const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
- base::SharedMemory* shm,
- size_t size,
+ SharedMemoryRegion* shm,
int32_t input_id);
~BitstreamBufferRef();
const base::WeakPtr<VideoDecodeAccelerator::Client> client;
const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
- const scoped_ptr<base::SharedMemory> shm;
- const size_t size;
+ const scoped_ptr<SharedMemoryRegion> shm;
off_t bytes_used;
const int32_t input_id;
};
@@ -184,13 +184,11 @@ struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
base::WeakPtr<VideoDecodeAccelerator::Client>& client,
const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
- base::SharedMemory* shm,
- size_t size,
+ SharedMemoryRegion* shm,
int32_t input_id)
: client(client),
client_task_runner(client_task_runner),
shm(shm),
- size(size),
bytes_used(0),
input_id(input_id) {}
@@ -382,15 +380,11 @@ V4L2VP8Picture::~V4L2VP8Picture() {
V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
const scoped_refptr<V4L2Device>& device,
EGLDisplay egl_display,
- EGLContext egl_context,
- const base::WeakPtr<Client>& io_client,
- const base::Callback<bool(void)>& make_context_current,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb)
: input_planes_count_(0),
output_planes_count_(0),
child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
- io_task_runner_(io_task_runner),
- io_client_(io_client),
device_(device),
decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"),
device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"),
@@ -406,9 +400,9 @@ V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
surface_set_change_pending_(false),
picture_clearing_count_(0),
pictures_assigned_(false, false),
- make_context_current_(make_context_current),
egl_display_(egl_display),
- egl_context_(egl_context),
+ get_gl_context_cb_(get_gl_context_cb),
+ make_context_current_cb_(make_context_current_cb),
weak_this_factory_(this) {
weak_this_ = weak_this_factory_.GetWeakPtr();
}
@@ -444,6 +438,11 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
DCHECK(child_task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kUninitialized);
+ if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
if (config.is_encrypted) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
return false;
@@ -459,6 +458,14 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
client_ptr_factory_.reset(
new base::WeakPtrFactory<VideoDecodeAccelerator::Client>(client));
client_ = client_ptr_factory_->GetWeakPtr();
+ // If we haven't been set up to decode on a separate thread via
+ // TryToSetupDecodeOnSeparateThread(), use the main thread/client for
+ // decode tasks.
+ if (!decode_task_runner_) {
+ decode_task_runner_ = child_task_runner_;
+ DCHECK(!decode_client_);
+ decode_client_ = client_;
+ }
video_profile_ = config.profile;
@@ -485,7 +492,7 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
}
// We need the context to be initialized to query extensions.
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "Initialize(): could not make context current";
return false;
}
@@ -750,7 +757,7 @@ bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() {
child_task_runner_->PostTask(
FROM_HERE,
base::Bind(&VideoDecodeAccelerator::Client::ProvidePictureBuffers,
- client_, num_pictures, coded_size_,
+ client_, num_pictures, 1, coded_size_,
device_->GetTextureTarget()));
// Wait for the client to call AssignPictureBuffers() on the Child thread.
@@ -1182,7 +1189,15 @@ void V4L2SliceVideoDecodeAccelerator::Decode(
const media::BitstreamBuffer& bitstream_buffer) {
DVLOGF(3) << "input_id=" << bitstream_buffer.id()
<< ", size=" << bitstream_buffer.size();
- DCHECK(io_task_runner_->BelongsToCurrentThread());
+ DCHECK(decode_task_runner_->BelongsToCurrentThread());
+
+ if (bitstream_buffer.id() < 0) {
+ LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
decoder_thread_task_runner_->PostTask(
FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeTask,
@@ -1196,10 +1211,9 @@ void V4L2SliceVideoDecodeAccelerator::DecodeTask(
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
scoped_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
- io_client_, io_task_runner_,
- new base::SharedMemory(bitstream_buffer.handle(), true),
- bitstream_buffer.size(), bitstream_buffer.id()));
- if (!bitstream_record->shm->Map(bitstream_buffer.size())) {
+ decode_client_, decode_task_runner_,
+ new SharedMemoryRegion(bitstream_buffer, true), bitstream_buffer.id()));
+ if (!bitstream_record->shm->Map()) {
LOGF(ERROR) << "Could not map bitstream_buffer";
NOTIFY_ERROR(UNREADABLE_INPUT);
return;
@@ -1231,7 +1245,7 @@ bool V4L2SliceVideoDecodeAccelerator::TrySetNewBistreamBuffer() {
const uint8_t* const data = reinterpret_cast<const uint8_t*>(
decoder_current_bitstream_buffer_->shm->memory());
- const size_t data_size = decoder_current_bitstream_buffer_->size;
+ const size_t data_size = decoder_current_bitstream_buffer_->shm->size();
decoder_->SetStream(data, data_size);
return true;
@@ -1442,8 +1456,9 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
return;
}
- if (!make_context_current_.Run()) {
- DLOG(ERROR) << "could not make context current";
+ gfx::GLContext* gl_context = get_gl_context_cb_.Run();
+ if (!gl_context || !make_context_current_cb_.Run()) {
+ DLOG(ERROR) << "No GL context";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
}
@@ -1481,13 +1496,10 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
DCHECK_EQ(output_record.picture_id, -1);
DCHECK_EQ(output_record.cleared, false);
- EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
- egl_context_,
- buffers[i].texture_id(),
- coded_size_,
- i,
- output_format_fourcc_,
- output_planes_count_);
+ DCHECK_LE(1u, buffers[i].texture_ids().size());
+ EGLImageKHR egl_image = device_->CreateEGLImage(
+ egl_display_, gl_context->GetHandle(), buffers[i].texture_ids()[0],
+ buffers[i].size(), i, output_format_fourcc_, output_planes_count_);
if (egl_image == EGL_NO_IMAGE_KHR) {
LOGF(ERROR) << "Could not create EGLImageKHR";
// Ownership of EGLImages allocated in previous iterations of this loop
@@ -1511,7 +1523,7 @@ void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
DCHECK(child_task_runner_->BelongsToCurrentThread());
DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
LOGF(ERROR) << "could not make context current";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
@@ -1587,7 +1599,7 @@ void V4L2SliceVideoDecodeAccelerator::FlushTask() {
// which - when reached - will trigger flush sequence.
decoder_input_queue_.push(
linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
- io_client_, io_task_runner_, nullptr, 0, kFlushBufferId)));
+ decode_client_, decode_task_runner_, nullptr, kFlushBufferId)));
return;
}
@@ -2501,12 +2513,14 @@ void V4L2SliceVideoDecodeAccelerator::SendPictureReady() {
bool cleared = pending_picture_ready_.front().cleared;
const media::Picture& picture = pending_picture_ready_.front().picture;
if (cleared && picture_clearing_count_ == 0) {
- DVLOGF(4) << "Posting picture ready to IO for: "
+ DVLOGF(4) << "Posting picture ready to decode task runner for: "
<< picture.picture_buffer_id();
- // This picture is cleared. Post it to IO thread to reduce latency. This
- // should be the case after all pictures are cleared at the beginning.
- io_task_runner_->PostTask(
- FROM_HERE, base::Bind(&Client::PictureReady, io_client_, picture));
+ // This picture is cleared. It can be posted to a thread different than
+ // the main GPU thread to reduce latency. This should be the case after
+ // all pictures are cleared at the beginning.
+ decode_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&Client::PictureReady, decode_client_, picture));
pending_picture_ready_.pop();
} else if (!cleared || resetting_or_flushing) {
DVLOGF(3) << "cleared=" << pending_picture_ready_.front().cleared
@@ -2544,7 +2558,11 @@ void V4L2SliceVideoDecodeAccelerator::PictureCleared() {
SendPictureReady();
}
-bool V4L2SliceVideoDecodeAccelerator::CanDecodeOnIOThread() {
+bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+ decode_client_ = decode_client;
+ decode_task_runner_ = decode_task_runner;
return true;
}
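
TryToSetupDecodeOnSeparateThread() replaces CanDecodeOnIOThread(): the client may hand the VDA a dedicated decode task runner, and Initialize() falls back to the child (main) thread when it never does. The following sketch models just that plumbing; TaskRunner is a stand-in type and shared_ptr stands in for scoped_refptr.

    #include <cassert>
    #include <memory>

    struct TaskRunner {};  // Stand-in for base::SingleThreadTaskRunner.

    class FakeVDA {
     public:
      // Returning true means this VDA supports decoding off the main thread.
      bool TryToSetupDecodeOnSeparateThread(std::shared_ptr<TaskRunner> runner) {
        decode_task_runner_ = std::move(runner);
        return true;
      }

      void Initialize(std::shared_ptr<TaskRunner> child_runner) {
        // Not set up for a separate thread: decode on the child (main) thread.
        if (!decode_task_runner_)
          decode_task_runner_ = std::move(child_runner);
      }

      std::shared_ptr<TaskRunner> decode_task_runner_;
    };

    int main() {
      auto child = std::make_shared<TaskRunner>();
      auto io = std::make_shared<TaskRunner>();

      FakeVDA defaulted;
      defaulted.Initialize(child);
      assert(defaulted.decode_task_runner_ == child);

      FakeVDA threaded;
      threaded.TryToSetupDecodeOnSeparateThread(io);
      threaded.Initialize(child);
      assert(threaded.decode_task_runner_ == io);
    }
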
diff --git a/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h b/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h
index dd72eb7a6dd..cc11da302a1 100644
--- a/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/v4l2_slice_video_decode_accelerator.h
@@ -19,6 +19,7 @@
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "content/common/gpu/media/h264_decoder.h"
#include "content/common/gpu/media/v4l2_device.h"
#include "content/common/gpu/media/vp8_decoder.h"
@@ -38,10 +39,8 @@ class CONTENT_EXPORT V4L2SliceVideoDecodeAccelerator
V4L2SliceVideoDecodeAccelerator(
const scoped_refptr<V4L2Device>& device,
EGLDisplay egl_display,
- EGLContext egl_context,
- const base::WeakPtr<Client>& io_client_,
- const base::Callback<bool(void)>& make_context_current,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb);
~V4L2SliceVideoDecodeAccelerator() override;
// media::VideoDecodeAccelerator implementation.
@@ -53,7 +52,10 @@ class CONTENT_EXPORT V4L2SliceVideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
static media::VideoDecodeAccelerator::SupportedProfiles
GetSupportedProfiles();
@@ -282,8 +284,8 @@ class CONTENT_EXPORT V4L2SliceVideoDecodeAccelerator
// GPU Child thread task runner.
const scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
- // IO thread task runner.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+ // Task runner Decode() and PictureReady() run on.
+ scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner_;
// WeakPtr<> pointing to |this| for use in posting tasks from the decoder or
// device worker threads back to the child thread.
@@ -295,8 +297,8 @@ class CONTENT_EXPORT V4L2SliceVideoDecodeAccelerator
scoped_ptr<base::WeakPtrFactory<VideoDecodeAccelerator::Client>>
client_ptr_factory_;
base::WeakPtr<VideoDecodeAccelerator::Client> client_;
- // Callbacks to |io_client_| must be executed on |io_task_runner_|.
- base::WeakPtr<Client> io_client_;
+ // Callbacks to |decode_client_| must be executed on |decode_task_runner_|.
+ base::WeakPtr<Client> decode_client_;
// V4L2 device in use.
scoped_refptr<V4L2Device> device_;
@@ -381,12 +383,13 @@ class CONTENT_EXPORT V4L2SliceVideoDecodeAccelerator
// to avoid races with potential Reset requests.
base::WaitableEvent pictures_assigned_;
- // Make the GL context current callback.
- base::Callback<bool(void)> make_context_current_;
-
// EGL state
EGLDisplay egl_display_;
- EGLContext egl_context_;
+
+ // Callback to get the current GLContext.
+ GetGLContextCallback get_gl_context_cb_;
+ // Callback to make the GL context current.
+ MakeGLContextCurrentCallback make_context_current_cb_;
// The WeakPtrFactory for |weak_this_|.
base::WeakPtrFactory<V4L2SliceVideoDecodeAccelerator> weak_this_factory_;
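
Across these files, the stored EGLContext and bare make-context-current callback are replaced by a pair of callbacks (declared in gpu_video_decode_accelerator_helpers.h) so the VDA queries the GL context lazily, e.g. in AssignPictureBuffers(). A minimal model of that callback style, with std::function and a stand-in context type:

    #include <functional>
    #include <iostream>

    struct FakeGLContext {
      void* GetHandle() { return this; }  // Stand-in for gfx::GLContext.
    };

    using GetGLContextCb = std::function<FakeGLContext*()>;
    using MakeGLContextCurrentCb = std::function<bool()>;

    // Mirrors the AssignPictureBuffers() checks: fetch the context lazily,
    // make it current, then use its native handle (for CreateEGLImage()).
    bool BindPictureBuffers(const GetGLContextCb& get_gl_context,
                            const MakeGLContextCurrentCb& make_current) {
      FakeGLContext* context = get_gl_context ? get_gl_context() : nullptr;
      if (!context || !make_current || !make_current()) {
        std::cerr << "No GL context\n";  // The VDA would NOTIFY_ERROR here.
        return false;
      }
      void* native_handle = context->GetHandle();
      return native_handle != nullptr;
    }

    int main() {
      FakeGLContext context;
      bool ok = BindPictureBuffers([&] { return &context; }, [] { return true; });
      std::cout << (ok ? "bound" : "failed") << "\n";
    }
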
diff --git a/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.cc b/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.cc
index f9311257ed7..719dbf7a80f 100644
--- a/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.cc
@@ -15,16 +15,17 @@
#include "base/bind.h"
#include "base/command_line.h"
#include "base/macros.h"
-#include "base/memory/shared_memory.h"
#include "base/message_loop/message_loop.h"
#include "base/numerics/safe_conversions.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
#include "media/base/media_switches.h"
#include "media/filters/h264_parser.h"
#include "ui/gfx/geometry/rect.h"
+#include "ui/gl/gl_context.h"
#include "ui/gl/scoped_binders.h"
#define NOTIFY_ERROR(x) \
@@ -65,14 +66,12 @@ struct V4L2VideoDecodeAccelerator::BitstreamBufferRef {
BitstreamBufferRef(
base::WeakPtr<Client>& client,
scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
- base::SharedMemory* shm,
- size_t size,
+ scoped_ptr<SharedMemoryRegion> shm,
int32_t input_id);
~BitstreamBufferRef();
const base::WeakPtr<Client> client;
const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
- const scoped_ptr<base::SharedMemory> shm;
- const size_t size;
+ const scoped_ptr<SharedMemoryRegion> shm;
size_t bytes_used;
const int32_t input_id;
};
@@ -94,13 +93,11 @@ struct V4L2VideoDecodeAccelerator::PictureRecord {
V4L2VideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
base::WeakPtr<Client>& client,
scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
- base::SharedMemory* shm,
- size_t size,
+ scoped_ptr<SharedMemoryRegion> shm,
int32_t input_id)
: client(client),
client_task_runner(client_task_runner),
- shm(shm),
- size(size),
+ shm(std::move(shm)),
bytes_used(0),
input_id(input_id) {}
@@ -157,14 +154,10 @@ V4L2VideoDecodeAccelerator::PictureRecord::~PictureRecord() {}
V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
EGLDisplay egl_display,
- EGLContext egl_context,
- const base::WeakPtr<Client>& io_client,
- const base::Callback<bool(void)>& make_context_current,
- const scoped_refptr<V4L2Device>& device,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const scoped_refptr<V4L2Device>& device)
: child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
- io_task_runner_(io_task_runner),
- io_client_(io_client),
decoder_thread_("V4L2DecoderThread"),
decoder_state_(kUninitialized),
device_(device),
@@ -184,9 +177,9 @@ V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
picture_clearing_count_(0),
pictures_assigned_(false, false),
device_poll_thread_("V4L2DevicePollThread"),
- make_context_current_(make_context_current),
egl_display_(egl_display),
- egl_context_(egl_context),
+ get_gl_context_cb_(get_gl_context_cb),
+ make_context_current_cb_(make_context_current_cb),
video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
output_format_fourcc_(0),
weak_this_factory_(this) {
@@ -212,6 +205,11 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
DCHECK(child_task_runner_->BelongsToCurrentThread());
DCHECK_EQ(decoder_state_, kUninitialized);
+ if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
if (config.is_encrypted) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
return false;
@@ -226,6 +224,14 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
client_ = client_ptr_factory_->GetWeakPtr();
+ // If we haven't been set up to decode on a separate thread via
+ // TryToSetupDecodeOnSeparateThread(), use the main thread/client for
+ // decode tasks.
+ if (!decode_task_runner_) {
+ decode_task_runner_ = child_task_runner_;
+ DCHECK(!decode_client_);
+ decode_client_ = client_;
+ }
video_profile_ = config.profile;
@@ -235,7 +241,7 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
}
// We need the context to be initialized to query extensions.
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "Initialize(): could not make context current";
return false;
}
@@ -253,16 +259,9 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
- // This cap combination is deprecated, but some older drivers may still be
- // returning it.
- const __u32 kCapsRequiredCompat = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE |
- V4L2_CAP_STREAMING;
- if ((caps.capabilities & kCapsRequiredCompat) != kCapsRequiredCompat) {
- LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
- ", caps check failed: 0x" << std::hex << caps.capabilities;
- return false;
- }
+ LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
+ ", caps check failed: 0x" << std::hex << caps.capabilities;
+ return false;
}
if (!SetupFormats())
@@ -303,7 +302,15 @@ void V4L2VideoDecodeAccelerator::Decode(
const media::BitstreamBuffer& bitstream_buffer) {
DVLOG(1) << "Decode(): input_id=" << bitstream_buffer.id()
<< ", size=" << bitstream_buffer.size();
- DCHECK(io_task_runner_->BelongsToCurrentThread());
+ DCHECK(decode_task_runner_->BelongsToCurrentThread());
+
+ if (bitstream_buffer.id() < 0) {
+ LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
// DecodeTask() will take care of running a DecodeBufferTask().
decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
@@ -327,7 +334,8 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffers(
return;
}
- if (!make_context_current_.Run()) {
+ gfx::GLContext* gl_context = get_gl_context_cb_.Run();
+ if (!gl_context || !make_context_current_cb_.Run()) {
LOG(ERROR) << "AssignPictureBuffers(): could not make context current";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
@@ -365,14 +373,11 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffers(
DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
DCHECK_EQ(output_record.picture_id, -1);
DCHECK_EQ(output_record.cleared, false);
+ DCHECK_LE(1u, buffers[i].texture_ids().size());
- EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
- egl_context_,
- buffers[i].texture_id(),
- coded_size_,
- i,
- output_format_fourcc_,
- output_planes_count_);
+ EGLImageKHR egl_image = device_->CreateEGLImage(
+ egl_display_, gl_context->GetHandle(), buffers[i].texture_ids()[0],
+ coded_size_, i, output_format_fourcc_, output_planes_count_);
if (egl_image == EGL_NO_IMAGE_KHR) {
LOG(ERROR) << "AssignPictureBuffers(): could not create EGLImageKHR";
// Ownership of EGLImages allocated in previous iterations of this loop
@@ -397,7 +402,7 @@ void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
// Must be run on child thread, as we'll insert a sync in the EGL context.
DCHECK(child_task_runner_->BelongsToCurrentThread());
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "ReusePictureBuffer(): could not make context current";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
@@ -458,7 +463,13 @@ void V4L2VideoDecodeAccelerator::Destroy() {
delete this;
}
-bool V4L2VideoDecodeAccelerator::CanDecodeOnIOThread() { return true; }
+bool V4L2VideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+ decode_client_ = decode_client;
+ decode_task_runner_ = decode_task_runner;
+ return true;
+}
// static
media::VideoDecodeAccelerator::SupportedProfiles
@@ -480,10 +491,11 @@ void V4L2VideoDecodeAccelerator::DecodeTask(
bitstream_buffer.id());
scoped_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
- io_client_, io_task_runner_,
- new base::SharedMemory(bitstream_buffer.handle(), true),
- bitstream_buffer.size(), bitstream_buffer.id()));
- if (!bitstream_record->shm->Map(bitstream_buffer.size())) {
+ decode_client_, decode_task_runner_,
+ scoped_ptr<SharedMemoryRegion>(
+ new SharedMemoryRegion(bitstream_buffer, true)),
+ bitstream_buffer.id()));
+ if (!bitstream_record->shm->Map()) {
LOG(ERROR) << "Decode(): could not map bitstream_buffer";
NOTIFY_ERROR(UNREADABLE_INPUT);
return;
@@ -542,54 +554,51 @@ void V4L2VideoDecodeAccelerator::DecodeBufferTask() {
// Setup to use the next buffer.
decoder_current_bitstream_buffer_.reset(buffer_ref.release());
decoder_input_queue_.pop();
- DVLOG(3) << "DecodeBufferTask(): reading input_id="
- << decoder_current_bitstream_buffer_->input_id
- << ", addr=" << (decoder_current_bitstream_buffer_->shm ?
- decoder_current_bitstream_buffer_->shm->memory() :
- NULL)
- << ", size=" << decoder_current_bitstream_buffer_->size;
+ const auto& shm = decoder_current_bitstream_buffer_->shm;
+ if (shm) {
+ DVLOG(3) << "DecodeBufferTask(): reading input_id="
+ << decoder_current_bitstream_buffer_->input_id
+ << ", addr=" << shm->memory() << ", size=" << shm->size();
+ } else {
+ DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
+ DVLOG(3) << "DecodeBufferTask(): reading input_id=kFlushBufferId";
+ }
}
bool schedule_task = false;
- const size_t size = decoder_current_bitstream_buffer_->size;
size_t decoded_size = 0;
- if (size == 0) {
- const int32_t input_id = decoder_current_bitstream_buffer_->input_id;
- if (input_id >= 0) {
- // This is a buffer queued from the client that has zero size. Skip.
+ const auto& shm = decoder_current_bitstream_buffer_->shm;
+ if (!shm) {
+ // This is a dummy buffer, queued to flush the pipe. Flush.
+ DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
+ // Enqueue a buffer guaranteed to be empty. To do that, we flush the
+ // current input, enqueue no data to the next frame, then flush that down.
+ schedule_task = true;
+ if (decoder_current_input_buffer_ != -1 &&
+ input_buffer_map_[decoder_current_input_buffer_].input_id !=
+ kFlushBufferId)
+ schedule_task = FlushInputFrame();
+
+ if (schedule_task && AppendToInputFrame(NULL, 0) && FlushInputFrame()) {
+ DVLOG(2) << "DecodeBufferTask(): enqueued flush buffer";
+ decoder_partial_frame_pending_ = false;
schedule_task = true;
} else {
- // This is a buffer of zero size, queued to flush the pipe. Flush.
- DCHECK_EQ(decoder_current_bitstream_buffer_->shm.get(),
- static_cast<base::SharedMemory*>(NULL));
- // Enqueue a buffer guaranteed to be empty. To do that, we flush the
- // current input, enqueue no data to the next frame, then flush that down.
- schedule_task = true;
- if (decoder_current_input_buffer_ != -1 &&
- input_buffer_map_[decoder_current_input_buffer_].input_id !=
- kFlushBufferId)
- schedule_task = FlushInputFrame();
-
- if (schedule_task && AppendToInputFrame(NULL, 0) && FlushInputFrame()) {
- DVLOG(2) << "DecodeBufferTask(): enqueued flush buffer";
- decoder_partial_frame_pending_ = false;
- schedule_task = true;
- } else {
- // If we failed to enqueue the empty buffer (due to pipeline
- // backpressure), don't advance the bitstream buffer queue, and don't
- // schedule the next task. This bitstream buffer queue entry will get
- // reprocessed when the pipeline frees up.
- schedule_task = false;
- }
+ // If we failed to enqueue the empty buffer (due to pipeline
+ // backpressure), don't advance the bitstream buffer queue, and don't
+ // schedule the next task. This bitstream buffer queue entry will get
+ // reprocessed when the pipeline frees up.
+ schedule_task = false;
}
+ } else if (shm->size() == 0) {
+ // This is a buffer queued from the client that has zero size. Skip.
+ schedule_task = true;
} else {
// This is a buffer queued from the client, with actual contents. Decode.
const uint8_t* const data =
- reinterpret_cast<const uint8_t*>(
- decoder_current_bitstream_buffer_->shm->memory()) +
+ reinterpret_cast<const uint8_t*>(shm->memory()) +
decoder_current_bitstream_buffer_->bytes_used;
const size_t data_size =
- decoder_current_bitstream_buffer_->size -
- decoder_current_bitstream_buffer_->bytes_used;
+ shm->size() - decoder_current_bitstream_buffer_->bytes_used;
if (!AdvanceFrameFragment(data, data_size, &decoded_size)) {
NOTIFY_ERROR(UNREADABLE_INPUT);
return;
@@ -618,8 +627,8 @@ void V4L2VideoDecodeAccelerator::DecodeBufferTask() {
if (schedule_task) {
decoder_current_bitstream_buffer_->bytes_used += decoded_size;
- if (decoder_current_bitstream_buffer_->bytes_used ==
- decoder_current_bitstream_buffer_->size) {
+ if ((shm ? shm->size() : 0) ==
+ decoder_current_bitstream_buffer_->bytes_used) {
// Our current bitstream buffer is done; return it.
int32_t input_id = decoder_current_bitstream_buffer_->input_id;
DVLOG(3) << "DecodeBufferTask(): finished input_id=" << input_id;
@@ -1023,14 +1032,7 @@ bool V4L2VideoDecodeAccelerator::DequeueResolutionChangeEvent() {
while (device_->Ioctl(VIDIOC_DQEVENT, &ev) == 0) {
if (ev.type == V4L2_EVENT_SOURCE_CHANGE) {
- uint32_t changes = ev.u.src_change.changes;
- // We used to define source change was always resolution change. The union
- // |ev.u| is not used and it is zero by default. When using the upstream
- // version of the resolution event change, we also need to check
- // |ev.u.src_change.changes| to know what is changed. For API backward
- // compatibility, event is treated as resolution change when all bits in
- // |ev.u.src_change.changes| are cleared.
- if (changes == 0 || (changes & V4L2_EVENT_SRC_CH_RESOLUTION)) {
+ if (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
DVLOG(3)
<< "DequeueResolutionChangeEvent(): got resolution change event.";
return true;
@@ -1282,7 +1284,7 @@ void V4L2VideoDecodeAccelerator::FlushTask() {
// Queue up an empty buffer -- this triggers the flush.
decoder_input_queue_.push(
linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
- io_client_, io_task_runner_, NULL, 0, kFlushBufferId)));
+ decode_client_, decode_task_runner_, nullptr, kFlushBufferId)));
decoder_flushing_ = true;
SendPictureReady(); // Send all pending PictureReady.
@@ -1886,9 +1888,9 @@ bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() {
<< "buffer_count=" << buffer_count
<< ", coded_size=" << coded_size_.ToString();
child_task_runner_->PostTask(
- FROM_HERE, base::Bind(&Client::ProvidePictureBuffers, client_,
- buffer_count, coded_size_,
- device_->GetTextureTarget()));
+ FROM_HERE,
+ base::Bind(&Client::ProvidePictureBuffers, client_, buffer_count, 1,
+ coded_size_, device_->GetTextureTarget()));
// Wait for the client to call AssignPictureBuffers() on the Child thread.
// We do this, because if we continue decoding without finishing buffer
@@ -2005,10 +2007,12 @@ void V4L2VideoDecodeAccelerator::SendPictureReady() {
bool cleared = pending_picture_ready_.front().cleared;
const media::Picture& picture = pending_picture_ready_.front().picture;
if (cleared && picture_clearing_count_ == 0) {
- // This picture is cleared. Post it to IO thread to reduce latency. This
- // should be the case after all pictures are cleared at the beginning.
- io_task_runner_->PostTask(
- FROM_HERE, base::Bind(&Client::PictureReady, io_client_, picture));
+ // This picture is cleared. It can be posted to a thread different than
+ // the main GPU thread to reduce latency. This should be the case after
+ // all pictures are cleared at the beginning.
+ decode_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&Client::PictureReady, decode_client_, picture));
pending_picture_ready_.pop();
} else if (!cleared || resetting_or_flushing) {
DVLOG(3) << "SendPictureReady()"
diff --git a/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.h b/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.h
index 3d06665e344..cb749569241 100644
--- a/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/v4l2_video_decode_accelerator.h
@@ -23,6 +23,7 @@
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "content/common/gpu/media/v4l2_device.h"
#include "media/base/limits.h"
#include "media/base/video_decoder_config.h"
@@ -78,11 +79,9 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
public:
V4L2VideoDecodeAccelerator(
EGLDisplay egl_display,
- EGLContext egl_context,
- const base::WeakPtr<Client>& io_client_,
- const base::Callback<bool(void)>& make_context_current,
- const scoped_refptr<V4L2Device>& device,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
+ const GetGLContextCallback& get_gl_context_cb,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const scoped_refptr<V4L2Device>& device);
~V4L2VideoDecodeAccelerator() override;
// media::VideoDecodeAccelerator implementation.
@@ -95,7 +94,10 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
static media::VideoDecodeAccelerator::SupportedProfiles
GetSupportedProfiles();
@@ -316,8 +318,8 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
// Our original calling task runner for the child thread.
scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
- // Task runner of the IO thread.
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+ // Task runner Decode() and PictureReady() run on.
+ scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner_;
// WeakPtr<> pointing to |this| for use in posting tasks from the decoder or
// device worker threads back to the child thread. Because the worker threads
@@ -332,8 +334,8 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
// child_task_runner_.
scoped_ptr<base::WeakPtrFactory<Client> > client_ptr_factory_;
base::WeakPtr<Client> client_;
- // Callbacks to |io_client_| must be executed on |io_task_runner_|.
- base::WeakPtr<Client> io_client_;
+ // Callbacks to |decode_client_| must be executed on |decode_task_runner_|.
+ base::WeakPtr<Client> decode_client_;
//
// Decoder state, owned and operated by decoder_thread_.
@@ -438,12 +440,13 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
// Other state, held by the child (main) thread.
//
- // Make our context current before running any EGL entry points.
- base::Callback<bool(void)> make_context_current_;
-
// EGL state
EGLDisplay egl_display_;
- EGLContext egl_context_;
+
+ // Callback to get the current GLContext.
+ GetGLContextCallback get_gl_context_cb_;
+ // Callback to make the GL context current.
+ MakeGLContextCurrentCallback make_context_current_cb_;
// The codec we'll be decoding for.
media::VideoCodecProfile video_profile_;
diff --git a/chromium/content/common/gpu/media/v4l2_video_encode_accelerator.cc b/chromium/content/common/gpu/media/v4l2_video_encode_accelerator.cc
index 98f4e48db35..d724d8dea40 100644
--- a/chromium/content/common/gpu/media/v4l2_video_encode_accelerator.cc
+++ b/chromium/content/common/gpu/media/v4l2_video_encode_accelerator.cc
@@ -17,8 +17,8 @@
#include "base/numerics/safe_conversions.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/v4l2_video_encode_accelerator.h"
-#include "content/public/common/content_switches.h"
#include "media/base/bitstream_buffer.h"
#define NOTIFY_ERROR(x) \
@@ -51,13 +51,10 @@
namespace content {
struct V4L2VideoEncodeAccelerator::BitstreamBufferRef {
- BitstreamBufferRef(int32_t id,
- scoped_ptr<base::SharedMemory> shm,
- size_t size)
- : id(id), shm(std::move(shm)), size(size) {}
+ BitstreamBufferRef(int32_t id, scoped_ptr<SharedMemoryRegion> shm)
+ : id(id), shm(std::move(shm)) {}
const int32_t id;
- const scoped_ptr<base::SharedMemory> shm;
- const size_t size;
+ const scoped_ptr<SharedMemoryRegion> shm;
};
V4L2VideoEncodeAccelerator::InputRecord::InputRecord() : at_device(false) {
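
Editor's note: the BitstreamBufferRef above loses its separate |size| member because SharedMemoryRegion tracks the size of the region it maps. A minimal standalone sketch of the idea; Region is a stand-in for the real content::SharedMemoryRegion, and std::unique_ptr stands in for scoped_ptr:

    // Standalone sketch; Region stands in for content::SharedMemoryRegion,
    // which remembers its mapped size so callers no longer pass it around.
    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <utility>

    class Region {
     public:
      explicit Region(size_t size) : size_(size) {}
      size_t size() const { return size_; }  // Size travels with the mapping.
     private:
      size_t size_;
    };

    struct BitstreamBufferRef {
      BitstreamBufferRef(int32_t id, std::unique_ptr<Region> shm)
          : id(id), shm(std::move(shm)) {}
      const int32_t id;
      const std::unique_ptr<Region> shm;  // No separate |size| member needed.
    };

    int main() {
      BitstreamBufferRef ref(1, std::make_unique<Region>(4096));
      return ref.shm->size() == 4096 ? 0 : 1;
    }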
@@ -128,20 +125,13 @@ bool V4L2VideoEncodeAccelerator::Initialize(
const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
- // This cap combination is deprecated, but some older drivers may still be
- // returning it.
- const __u32 kCapsRequiredCompat = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE |
- V4L2_CAP_STREAMING;
- if ((caps.capabilities & kCapsRequiredCompat) != kCapsRequiredCompat) {
- LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP: "
- "caps check failed: 0x" << std::hex << caps.capabilities;
- return false;
- }
+ LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP: "
+ "caps check failed: 0x" << std::hex << caps.capabilities;
+ return false;
}
if (!SetFormats(input_format, output_profile)) {
- LOG(ERROR) << "Failed setting up formats";
+ DLOG(ERROR) << "Failed setting up formats";
return false;
}
@@ -231,15 +221,14 @@ void V4L2VideoEncodeAccelerator::UseOutputBitstreamBuffer(
return;
}
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(buffer.handle(), false));
- if (!shm->Map(buffer.size())) {
+ scoped_ptr<SharedMemoryRegion> shm(new SharedMemoryRegion(buffer, false));
+ if (!shm->Map()) {
NOTIFY_ERROR(kPlatformFailureError);
return;
}
scoped_ptr<BitstreamBufferRef> buffer_ref(
- new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
+ new BitstreamBufferRef(buffer.id(), std::move(shm)));
encoder_thread_.message_loop()->PostTask(
FROM_HERE,
base::Bind(&V4L2VideoEncodeAccelerator::UseOutputBitstreamBufferTask,
@@ -318,7 +307,13 @@ V4L2VideoEncodeAccelerator::GetSupportedProfiles() {
profiles.push_back(profile);
break;
case V4L2_PIX_FMT_VP9:
- profile.profile = media::VP9PROFILE_ANY;
+ profile.profile = media::VP9PROFILE_PROFILE0;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE1;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE2;
+ profiles.push_back(profile);
+ profile.profile = media::VP9PROFILE_PROFILE3;
profiles.push_back(profile);
break;
}
@@ -365,13 +360,21 @@ void V4L2VideoEncodeAccelerator::EncodeTask(
std::vector<struct v4l2_ext_control> ctrls;
struct v4l2_ext_control ctrl;
memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE;
- ctrl.value = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME;
+ ctrl.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME;
ctrls.push_back(ctrl);
if (!SetExtCtrls(ctrls)) {
- LOG(ERROR) << "Failed requesting keyframe";
- NOTIFY_ERROR(kPlatformFailureError);
- return;
+ // Some platforms still use the old control; fall back to it until they
+ // are updated.
+ ctrls.clear();
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE;
+ ctrl.value = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME;
+ ctrls.push_back(ctrl);
+ if (!SetExtCtrls(ctrls)) {
+ LOG(ERROR) << "Failed requesting keyframe";
+ NOTIFY_ERROR(kPlatformFailureError);
+ return;
+ }
}
}
}
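
Editor's note: the EncodeTask() hunk above first tries the standard V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME control and only falls back to the legacy MFC51 control when the driver rejects it. A hedged sketch of that try-new-then-legacy pattern; the control IDs and SetExtCtrls() below are stand-ins, not the real V4L2 definitions:

    // Standalone sketch of the keyframe-control fallback.
    #include <cstdint>
    #include <iostream>
    #include <vector>

    constexpr uint32_t kForceKeyFrame = 1;        // V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME
    constexpr uint32_t kMfc51ForceFrameType = 2;  // legacy MFC51 control

    struct Control { uint32_t id; int32_t value; };

    // Pretend the driver only understands the legacy control.
    bool SetExtCtrls(const std::vector<Control>& ctrls) {
      return !ctrls.empty() && ctrls[0].id == kMfc51ForceFrameType;
    }

    bool RequestKeyFrame() {
      std::vector<Control> ctrls{{kForceKeyFrame, 0}};
      if (SetExtCtrls(ctrls))
        return true;
      // Some drivers have not been updated yet; retry with the legacy control.
      ctrls = {{kMfc51ForceFrameType, /*I_FRAME=*/1}};
      return SetExtCtrls(ctrls);
    }

    int main() {
      std::cout << (RequestKeyFrame() ? "keyframe requested\n" : "failed\n");
    }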
@@ -893,7 +896,7 @@ bool V4L2VideoEncodeAccelerator::NegotiateInputFormat(
uint32_t input_format_fourcc =
V4L2Device::VideoPixelFormatToV4L2PixFmt(input_format);
if (!input_format_fourcc) {
- LOG(ERROR) << "Unsupported input format";
+ LOG(ERROR) << "Unsupported input format" << input_format_fourcc;
return false;
}
@@ -913,8 +916,10 @@ bool V4L2VideoEncodeAccelerator::NegotiateInputFormat(
input_format_fourcc = device_->PreferredInputFormat();
input_format =
V4L2Device::V4L2PixFmtToVideoPixelFormat(input_format_fourcc);
- if (input_format == media::PIXEL_FORMAT_UNKNOWN)
+ if (input_format == media::PIXEL_FORMAT_UNKNOWN) {
+ LOG(ERROR) << "Unsupported input format" << input_format_fourcc;
return false;
+ }
input_planes_count = media::VideoFrame::NumPlanes(input_format);
DCHECK_LE(input_planes_count, static_cast<size_t>(VIDEO_MAX_PLANES));
@@ -930,9 +935,14 @@ bool V4L2VideoEncodeAccelerator::NegotiateInputFormat(
DCHECK_EQ(format.fmt.pix_mp.num_planes, input_planes_count);
}
- // Take device-adjusted sizes for allocated size.
+ // Take device-adjusted sizes for allocated size. If the size is adjusted
+ // down, it means the input is too big and the hardware does not support it.
input_allocated_size_ = V4L2Device::CodedSizeFromV4L2Format(format);
- DCHECK(gfx::Rect(input_allocated_size_).Contains(gfx::Rect(visible_size_)));
+ if (!gfx::Rect(input_allocated_size_).Contains(gfx::Rect(visible_size_))) {
+ DVLOG(1) << "Input size too big " << visible_size_.ToString()
+ << ", adjusted to " << input_allocated_size_.ToString();
+ return false;
+ }
device_input_format_ = input_format;
input_planes_count_ = input_planes_count;
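
Editor's note: turning the DCHECK into a runtime check matters because a driver may legally shrink the requested format; the new code reports an oversized input instead of crashing. A small sketch of the containment test, with Size standing in for gfx::Size and Contains() for gfx::Rect::Contains():

    // Standalone sketch of the device-adjusted-size check.
    #include <iostream>

    struct Size { int width; int height; };

    // True if |inner| fits inside |outer| anchored at the origin.
    bool Contains(const Size& outer, const Size& inner) {
      return inner.width <= outer.width && inner.height <= outer.height;
    }

    int main() {
      Size visible{1920, 1080};
      Size allocated{1280, 720};  // The driver adjusted the size down.
      if (!Contains(allocated, visible)) {
        std::cerr << "Input size too big: 1920x1080, adjusted to 1280x720\n";
        return 1;  // NegotiateInputFormat() would return false here.
      }
      return 0;
    }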
@@ -1031,30 +1041,35 @@ bool V4L2VideoEncodeAccelerator::InitControls() {
ctrls.push_back(ctrl);
}
- // Enable "tight" bitrate mode. For this to work properly, frame- and mb-level
- // bitrate controls have to be enabled as well.
+ // Enable macroblock-level bitrate control.
memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF;
+ ctrl.id = V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE;
ctrl.value = 1;
ctrls.push_back(ctrl);
- // Force bitrate control to average over a GOP (for tight bitrate
- // tolerance).
+ // Disable periodic key frames.
memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT;
- ctrl.value = 1;
+ ctrl.id = V4L2_CID_MPEG_VIDEO_GOP_SIZE;
+ ctrl.value = 0;
ctrls.push_back(ctrl);
- // Enable macroblock-level bitrate control.
+ // Ignore return value as these controls are optional.
+ SetExtCtrls(ctrls);
+
+ // Optional Exynos specific controls.
+ ctrls.clear();
+ // Enable "tight" bitrate mode. For this to work properly, frame- and mb-level
+ // bitrate controls have to be enabled as well.
memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE;
+ ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF;
ctrl.value = 1;
ctrls.push_back(ctrl);
- // Disable periodic key frames.
+ // Force bitrate control to average over a GOP (for tight bitrate
+ // tolerance).
memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_GOP_SIZE;
- ctrl.value = 0;
+ ctrl.id = V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT;
+ ctrl.value = 1;
ctrls.push_back(ctrl);
// Ignore return value as these controls are optional.
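
Editor's note: the InitControls() reshuffle applies the generic V4L2 controls in one batch and the Exynos-only MFC51 controls in a second, so a driver that rejects the vendor batch still receives the generic one. A sketch of that batching under stand-in control IDs:

    // Standalone sketch; the ids stand in for the V4L2/MFC51 control IDs.
    #include <cstdint>
    #include <vector>

    struct Control { uint32_t id; int32_t value; };

    bool SetExtCtrls(const std::vector<Control>& ctrls) {
      (void)ctrls;
      return true;  // A real driver may reject a batch as a whole.
    }

    void InitOptionalControls() {
      // Batch 1: generic controls (MB-level rate control, GOP size).
      std::vector<Control> generic{{/*MB_RC_ENABLE=*/10, 1},
                                   {/*GOP_SIZE=*/11, 0}};
      SetExtCtrls(generic);  // Optional; ignore failure.

      // Batch 2: Exynos-specific controls, applied separately so their
      // rejection cannot take the generic batch down with them.
      std::vector<Control> exynos{{/*RC_REACTION_COEFF=*/20, 1},
                                  {/*RC_FIXED_TARGET_BIT=*/21, 1}};
      SetExtCtrls(exynos);  // Optional; ignore failure.
    }

    int main() { InitOptionalControls(); }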
diff --git a/chromium/content/common/gpu/media/vaapi_drm_picture.cc b/chromium/content/common/gpu/media/vaapi_drm_picture.cc
index f20716426fd..ab5a4f28b1a 100644
--- a/chromium/content/common/gpu/media/vaapi_drm_picture.cc
+++ b/chromium/content/common/gpu/media/vaapi_drm_picture.cc
@@ -27,16 +27,16 @@ namespace content {
VaapiDrmPicture::VaapiDrmPicture(
const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)>& make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size)
: VaapiPicture(picture_buffer_id, texture_id, size),
vaapi_wrapper_(vaapi_wrapper),
- make_context_current_(make_context_current) {}
+ make_context_current_cb_(make_context_current_cb) {}
VaapiDrmPicture::~VaapiDrmPicture() {
- if (gl_image_ && make_context_current_.Run()) {
+ if (gl_image_ && make_context_current_cb_.Run()) {
gl_image_->ReleaseTexImage(GL_TEXTURE_EXTERNAL_OES);
gl_image_->Destroy(true);
@@ -67,7 +67,7 @@ bool VaapiDrmPicture::Initialize() {
pixmap_->SetProcessingCallback(
base::Bind(&VaapiWrapper::ProcessPixmap, vaapi_wrapper_));
- if (!make_context_current_.Run())
+ if (!make_context_current_cb_.Run())
return false;
gfx::ScopedTextureBinder texture_binder(GL_TEXTURE_EXTERNAL_OES,
diff --git a/chromium/content/common/gpu/media/vaapi_drm_picture.h b/chromium/content/common/gpu/media/vaapi_drm_picture.h
index 066192b25ca..7f5fc8a1780 100644
--- a/chromium/content/common/gpu/media/vaapi_drm_picture.h
+++ b/chromium/content/common/gpu/media/vaapi_drm_picture.h
@@ -11,7 +11,6 @@
#include <stdint.h>
-#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -35,7 +34,7 @@ class VaapiWrapper;
class VaapiDrmPicture : public VaapiPicture {
public:
VaapiDrmPicture(const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)>& make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size);
@@ -52,7 +51,7 @@ class VaapiDrmPicture : public VaapiPicture {
private:
scoped_refptr<VaapiWrapper> vaapi_wrapper_;
- base::Callback<bool(void)> make_context_current_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
// Ozone buffer, the storage of the EGLImage and the VASurface.
scoped_refptr<ui::NativePixmap> pixmap_;
diff --git a/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc b/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc
index 8efb362180d..a0cbc6e059d 100644
--- a/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.cc
@@ -13,8 +13,9 @@
#include "base/metrics/histogram.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
-#include "content/common/gpu/gpu_channel.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/vaapi_picture.h"
+#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/video_frame.h"
#include "media/filters/jpeg_parser.h"
#include "third_party/libyuv/include/libyuv.h"
@@ -76,10 +77,10 @@ static unsigned int VaSurfaceFormatForJpeg(
} // namespace
VaapiJpegDecodeAccelerator::DecodeRequest::DecodeRequest(
- const media::BitstreamBuffer& bitstream_buffer,
- scoped_ptr<base::SharedMemory> shm,
+ int32_t bitstream_buffer_id,
+ scoped_ptr<SharedMemoryRegion> shm,
const scoped_refptr<media::VideoFrame>& video_frame)
- : bitstream_buffer(bitstream_buffer),
+ : bitstream_buffer_id(bitstream_buffer_id),
shm(std::move(shm)),
video_frame(video_frame) {}
@@ -226,9 +227,9 @@ void VaapiJpegDecodeAccelerator::DecodeTask(
media::JpegParseResult parse_result;
if (!media::ParseJpegPicture(
reinterpret_cast<const uint8_t*>(request->shm->memory()),
- request->bitstream_buffer.size(), &parse_result)) {
+ request->shm->size(), &parse_result)) {
DLOG(ERROR) << "ParseJpegPicture failed";
- NotifyErrorFromDecoderThread(request->bitstream_buffer.id(),
+ NotifyErrorFromDecoderThread(request->bitstream_buffer_id,
PARSE_JPEG_FAILED);
return;
}
@@ -237,7 +238,7 @@ void VaapiJpegDecodeAccelerator::DecodeTask(
VaSurfaceFormatForJpeg(parse_result.frame_header);
if (!new_va_rt_format) {
DLOG(ERROR) << "Unsupported subsampling";
- NotifyErrorFromDecoderThread(request->bitstream_buffer.id(),
+ NotifyErrorFromDecoderThread(request->bitstream_buffer_id,
UNSUPPORTED_JPEG);
return;
}
@@ -255,7 +256,7 @@ void VaapiJpegDecodeAccelerator::DecodeTask(
if (!vaapi_wrapper_->CreateSurfaces(va_rt_format_, new_coded_size, 1,
&va_surfaces)) {
LOG(ERROR) << "Create VA surface failed";
- NotifyErrorFromDecoderThread(request->bitstream_buffer.id(),
+ NotifyErrorFromDecoderThread(request->bitstream_buffer_id,
PLATFORM_FAILURE);
return;
}
@@ -266,15 +267,15 @@ void VaapiJpegDecodeAccelerator::DecodeTask(
if (!VaapiJpegDecoder::Decode(vaapi_wrapper_.get(), parse_result,
va_surface_id_)) {
LOG(ERROR) << "Decode JPEG failed";
- NotifyErrorFromDecoderThread(request->bitstream_buffer.id(),
+ NotifyErrorFromDecoderThread(request->bitstream_buffer_id,
PLATFORM_FAILURE);
return;
}
- if (!OutputPicture(va_surface_id_, request->bitstream_buffer.id(),
+ if (!OutputPicture(va_surface_id_, request->bitstream_buffer_id,
request->video_frame)) {
LOG(ERROR) << "Output picture failed";
- NotifyErrorFromDecoderThread(request->bitstream_buffer.id(),
+ NotifyErrorFromDecoderThread(request->bitstream_buffer_id,
PLATFORM_FAILURE);
return;
}
@@ -289,17 +290,25 @@ void VaapiJpegDecodeAccelerator::Decode(
DVLOG(4) << "Mapping new input buffer id: " << bitstream_buffer.id()
<< " size: " << bitstream_buffer.size();
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(bitstream_buffer.handle(), true));
- if (!shm->Map(bitstream_buffer.size())) {
+ // SharedMemoryRegion takes ownership of |bitstream_buffer.handle()|.
+ scoped_ptr<SharedMemoryRegion> shm(
+ new SharedMemoryRegion(bitstream_buffer, true));
+
+ if (bitstream_buffer.id() < 0) {
+ LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+ NotifyErrorFromDecoderThread(bitstream_buffer.id(), INVALID_ARGUMENT);
+ return;
+ }
+
+ if (!shm->Map()) {
LOG(ERROR) << "Failed to map input buffer";
NotifyErrorFromDecoderThread(bitstream_buffer.id(), UNREADABLE_INPUT);
return;
}
scoped_ptr<DecodeRequest> request(
- new DecodeRequest(bitstream_buffer, std::move(shm), video_frame));
+ new DecodeRequest(bitstream_buffer.id(), std::move(shm), video_frame));
decoder_task_runner_->PostTask(
FROM_HERE, base::Bind(&VaapiJpegDecodeAccelerator::DecodeTask,
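
Editor's note: one subtlety in the Decode() hunk above is that the SharedMemoryRegion is constructed before the id check, so the shared-memory handle is owned, and therefore released, on every early-error path. A standalone sketch of that take-ownership-first ordering; Handle and Region are stand-ins for the Chromium types:

    // Standalone sketch of owning the handle before validating the request.
    #include <cstdint>
    #include <cstdio>
    #include <memory>

    struct Handle { int fd = -1; };

    class Region {
     public:
      explicit Region(Handle h) : h_(h) {}
      ~Region() { std::printf("handle %d closed\n", h_.fd); }  // Always runs.
      bool Map() { return h_.fd >= 0; }
     private:
      Handle h_;
    };

    bool Decode(int32_t id, Handle handle) {
      // Take ownership first: even early returns release the handle.
      auto shm = std::make_unique<Region>(handle);
      if (id < 0)
        return false;  // NotifyError(INVALID_ARGUMENT) in the real code.
      if (!shm->Map())
        return false;  // NotifyError(UNREADABLE_INPUT) in the real code.
      return true;
    }

    int main() {
      const bool ok = Decode(-1, Handle{3});  // "handle 3 closed" still prints.
      return ok ? 1 : 0;  // Expect failure (id < 0), so this returns 0.
    }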
diff --git a/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h b/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h
index 7d78a5503e9..232b04de829 100644
--- a/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/vaapi_jpeg_decode_accelerator.h
@@ -15,6 +15,7 @@
#include "base/threading/non_thread_safe.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/vaapi_jpeg_decoder.h"
#include "content/common/gpu/media/vaapi_wrapper.h"
#include "media/base/bitstream_buffer.h"
@@ -47,13 +48,13 @@ class CONTENT_EXPORT VaapiJpegDecodeAccelerator
// An input buffer and the corresponding output video frame awaiting
// consumption, provided by the client.
struct DecodeRequest {
- DecodeRequest(const media::BitstreamBuffer& bitstream_buffer,
- scoped_ptr<base::SharedMemory> shm,
+ DecodeRequest(int32_t bitstream_buffer_id,
+ scoped_ptr<SharedMemoryRegion> shm,
const scoped_refptr<media::VideoFrame>& video_frame);
~DecodeRequest();
- media::BitstreamBuffer bitstream_buffer;
- scoped_ptr<base::SharedMemory> shm;
+ int32_t bitstream_buffer_id;
+ scoped_ptr<SharedMemoryRegion> shm;
scoped_refptr<media::VideoFrame> video_frame;
};
diff --git a/chromium/content/common/gpu/media/vaapi_picture.cc b/chromium/content/common/gpu/media/vaapi_picture.cc
index 5222bd23504..cdf8c355974 100644
--- a/chromium/content/common/gpu/media/vaapi_picture.cc
+++ b/chromium/content/common/gpu/media/vaapi_picture.cc
@@ -18,16 +18,16 @@ namespace content {
// static
linked_ptr<VaapiPicture> VaapiPicture::CreatePicture(
const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)> make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size) {
linked_ptr<VaapiPicture> picture;
#if defined(USE_X11)
- picture.reset(new VaapiTFPPicture(vaapi_wrapper, make_context_current,
+ picture.reset(new VaapiTFPPicture(vaapi_wrapper, make_context_current_cb,
picture_buffer_id, texture_id, size));
#elif defined(USE_OZONE)
- picture.reset(new VaapiDrmPicture(vaapi_wrapper, make_context_current,
+ picture.reset(new VaapiDrmPicture(vaapi_wrapper, make_context_current_cb,
picture_buffer_id, texture_id, size));
#endif // USE_X11
diff --git a/chromium/content/common/gpu/media/vaapi_picture.h b/chromium/content/common/gpu/media/vaapi_picture.h
index 921f80344ec..4bd51e11620 100644
--- a/chromium/content/common/gpu/media/vaapi_picture.h
+++ b/chromium/content/common/gpu/media/vaapi_picture.h
@@ -12,11 +12,11 @@
#include <stdint.h>
-#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/threading/non_thread_safe.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "ui/gfx/geometry/size.h"
namespace gl {
@@ -52,10 +52,10 @@ class VaapiPicture : public base::NonThreadSafe {
// Create a VaapiPicture of |size| to be associated with
// |picture_buffer_id| and bound to |texture_id|.
- // |make_context_current| is provided for the GL operations.
+ // |make_context_current_cb| is provided for the GL operations.
static linked_ptr<VaapiPicture> CreatePicture(
const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)> make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size);
diff --git a/chromium/content/common/gpu/media/vaapi_tfp_picture.cc b/chromium/content/common/gpu/media/vaapi_tfp_picture.cc
index 3de593b62fd..074ba98ed73 100644
--- a/chromium/content/common/gpu/media/vaapi_tfp_picture.cc
+++ b/chromium/content/common/gpu/media/vaapi_tfp_picture.cc
@@ -14,18 +14,18 @@ namespace content {
VaapiTFPPicture::VaapiTFPPicture(
const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)> make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size)
: VaapiPicture(picture_buffer_id, texture_id, size),
vaapi_wrapper_(vaapi_wrapper),
- make_context_current_(make_context_current),
+ make_context_current_cb_(make_context_current_cb),
x_display_(gfx::GetXDisplay()),
x_pixmap_(0) {}
VaapiTFPPicture::~VaapiTFPPicture() {
- if (glx_image_.get() && make_context_current_.Run()) {
+ if (glx_image_.get() && make_context_current_cb_.Run()) {
glx_image_->ReleaseTexImage(GL_TEXTURE_2D);
glx_image_->Destroy(true);
DCHECK_EQ(glGetError(), static_cast<GLenum>(GL_NO_ERROR));
@@ -36,7 +36,7 @@ VaapiTFPPicture::~VaapiTFPPicture() {
}
bool VaapiTFPPicture::Initialize() {
- if (!make_context_current_.Run())
+ if (!make_context_current_cb_.Run())
return false;
XWindowAttributes win_attr;
diff --git a/chromium/content/common/gpu/media/vaapi_tfp_picture.h b/chromium/content/common/gpu/media/vaapi_tfp_picture.h
index 3b66e10800b..5ef35653202 100644
--- a/chromium/content/common/gpu/media/vaapi_tfp_picture.h
+++ b/chromium/content/common/gpu/media/vaapi_tfp_picture.h
@@ -11,7 +11,6 @@
#include <stdint.h>
-#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "content/common/gpu/media/vaapi_picture.h"
@@ -34,7 +33,7 @@ class VaapiWrapper;
class VaapiTFPPicture : public VaapiPicture {
public:
VaapiTFPPicture(const scoped_refptr<VaapiWrapper>& vaapi_wrapper,
- const base::Callback<bool(void)> make_context_current,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
int32_t picture_buffer_id,
uint32_t texture_id,
const gfx::Size& size);
@@ -50,7 +49,7 @@ class VaapiTFPPicture : public VaapiPicture {
private:
scoped_refptr<VaapiWrapper> vaapi_wrapper_;
- base::Callback<bool(void)> make_context_current_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
Display* x_display_;
Pixmap x_pixmap_;
diff --git a/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.cc b/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.cc
index 271a0f7a1c9..d8caeec94da 100644
--- a/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.cc
+++ b/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.cc
@@ -14,12 +14,12 @@
#include "base/strings/string_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/trace_event/trace_event.h"
-#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/media/accelerated_video_decoder.h"
#include "content/common/gpu/media/h264_decoder.h"
#include "content/common/gpu/media/vaapi_picture.h"
#include "content/common/gpu/media/vp8_decoder.h"
#include "content/common/gpu/media/vp9_decoder.h"
+#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/bind_to_current_loop.h"
#include "media/video/picture.h"
#include "third_party/libva/va/va_dec_vp8.h"
@@ -256,8 +256,7 @@ class VaapiVideoDecodeAccelerator::VaapiVP9Accelerator
DISALLOW_COPY_AND_ASSIGN(VaapiVP9Accelerator);
};
-VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0), size(0) {
-}
+VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0) {}
VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {
}
@@ -293,11 +292,9 @@ VaapiPicture* VaapiVideoDecodeAccelerator::PictureById(
}
VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- const base::Callback<void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>&
- bind_image)
- : make_context_current_(make_context_current),
- state_(kUninitialized),
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb)
+ : state_(kUninitialized),
input_ready_(&lock_),
surfaces_available_(&lock_),
message_loop_(base::MessageLoop::current()),
@@ -307,7 +304,8 @@ VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
finish_flush_pending_(false),
awaiting_va_surfaces_recycle_(false),
requested_num_pics_(0),
- bind_image_(bind_image),
+ make_context_current_cb_(make_context_current_cb),
+ bind_image_cb_(bind_image_cb),
weak_this_factory_(this) {
weak_this_ = weak_this_factory_.GetWeakPtr();
va_surface_release_cb_ = media::BindToCurrentLoop(
@@ -322,6 +320,11 @@ bool VaapiVideoDecodeAccelerator::Initialize(const Config& config,
Client* client) {
DCHECK_EQ(message_loop_, base::MessageLoop::current());
+ if (make_context_current_cb_.is_null() || bind_image_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
if (config.is_encrypted) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
return false;
@@ -447,10 +450,10 @@ void VaapiVideoDecodeAccelerator::MapAndQueueNewInputBuffer(
DVLOG(4) << "Mapping new input buffer id: " << bitstream_buffer.id()
<< " size: " << (int)bitstream_buffer.size();
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(bitstream_buffer.handle(), true));
- RETURN_AND_NOTIFY_ON_FAILURE(shm->Map(bitstream_buffer.size()),
- "Failed to map input buffer", UNREADABLE_INPUT,);
+ scoped_ptr<SharedMemoryRegion> shm(
+ new SharedMemoryRegion(bitstream_buffer, true));
+ RETURN_AND_NOTIFY_ON_FAILURE(shm->Map(), "Failed to map input buffer",
+ UNREADABLE_INPUT, );
base::AutoLock auto_lock(lock_);
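
Editor's note: the trailing ", )" above supplies an empty final macro argument. The macro's return-value parameter expands to nothing, so the generated "return ret;" becomes a bare "return;" inside this void method. A minimal sketch of the pattern; the macro body below is an assumption, and the real RETURN_AND_NOTIFY_ON_FAILURE additionally notifies the client of the error:

    // Standalone sketch of an empty-|ret| error macro.
    #include <iostream>

    #define RETURN_ON_FAILURE(result, log, ret) \
      do {                                      \
        if (!(result)) {                        \
          std::cerr << (log) << "\n";           \
          return ret;                           \
        }                                       \
      } while (0)

    void VoidMethod(bool ok) {
      RETURN_ON_FAILURE(ok, "Failed to map input buffer", );  // bare return;
    }

    bool BoolMethod(bool ok) {
      RETURN_ON_FAILURE(ok, "Failed to map input buffer", false);
      return true;
    }

    int main() {
      VoidMethod(false);
      return BoolMethod(true) ? 0 : 1;
    }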
@@ -458,7 +461,6 @@ void VaapiVideoDecodeAccelerator::MapAndQueueNewInputBuffer(
linked_ptr<InputBuffer> input_buffer(new InputBuffer());
input_buffer->shm.reset(shm.release());
input_buffer->id = bitstream_buffer.id();
- input_buffer->size = bitstream_buffer.size();
++num_stream_bufs_at_decoder_;
TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
@@ -497,13 +499,12 @@ bool VaapiVideoDecodeAccelerator::GetInputBuffer_Locked() {
curr_input_buffer_ = input_buffers_.front();
input_buffers_.pop();
- DVLOG(4) << "New current bitstream buffer, id: "
- << curr_input_buffer_->id
- << " size: " << curr_input_buffer_->size;
+ DVLOG(4) << "New current bitstream buffer, id: " << curr_input_buffer_->id
+ << " size: " << curr_input_buffer_->shm->size();
decoder_->SetStream(
static_cast<uint8_t*>(curr_input_buffer_->shm->memory()),
- curr_input_buffer_->size);
+ curr_input_buffer_->shm->size());
return true;
default:
@@ -663,7 +664,7 @@ void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
message_loop_->PostTask(
FROM_HERE,
base::Bind(&Client::ProvidePictureBuffers, client_, requested_num_pics_,
- requested_pic_size_, VaapiPicture::GetGLTextureTarget()));
+ 1, requested_pic_size_, VaapiPicture::GetGLTextureTarget()));
}
void VaapiVideoDecodeAccelerator::Decode(
@@ -673,6 +674,12 @@ void VaapiVideoDecodeAccelerator::Decode(
TRACE_EVENT1("Video Decoder", "VAVDA::Decode", "Buffer id",
bitstream_buffer.id());
+ RETURN_AND_NOTIFY_ON_FAILURE(
+ bitstream_buffer.id() >= 0 &&
+ base::SharedMemory::IsHandleValid(bitstream_buffer.handle()),
+ "Invalid bitstream_buffer, id: " << bitstream_buffer.id(),
+ INVALID_ARGUMENT, );
+
// We got a new input buffer from the client, map it and queue for later use.
MapAndQueueNewInputBuffer(bitstream_buffer);
@@ -734,18 +741,22 @@ void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
DCHECK_EQ(va_surface_ids.size(), buffers.size());
for (size_t i = 0; i < buffers.size(); ++i) {
+ DCHECK_LE(1u, buffers[i].texture_ids().size());
DVLOG(2) << "Assigning picture id: " << buffers[i].id()
- << " to texture id: " << buffers[i].texture_id()
+ << " to texture id: " << buffers[i].texture_ids()[0]
<< " VASurfaceID: " << va_surface_ids[i];
linked_ptr<VaapiPicture> picture(VaapiPicture::CreatePicture(
- vaapi_wrapper_, make_context_current_, buffers[i].id(),
- buffers[i].texture_id(), requested_pic_size_));
+ vaapi_wrapper_, make_context_current_cb_, buffers[i].id(),
+ buffers[i].texture_ids()[0], requested_pic_size_));
scoped_refptr<gl::GLImage> image = picture->GetImageToBind();
if (image) {
- bind_image_.Run(buffers[i].internal_texture_id(),
- VaapiPicture::GetGLTextureTarget(), image);
+ DCHECK_LE(1u, buffers[i].internal_texture_ids().size());
+ RETURN_AND_NOTIFY_ON_FAILURE(
+ bind_image_cb_.Run(buffers[i].internal_texture_ids()[0],
+ VaapiPicture::GetGLTextureTarget(), image, true),
+ "Failed to bind image", PLATFORM_FAILURE, );
}
RETURN_AND_NOTIFY_ON_FAILURE(
@@ -960,7 +971,9 @@ void VaapiVideoDecodeAccelerator::Destroy() {
delete this;
}
-bool VaapiVideoDecodeAccelerator::CanDecodeOnIOThread() {
+bool VaapiVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
return false;
}
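
Editor's note: CanDecodeOnIOThread() was a pure capability query; its replacement both asks and wires up. A VDA that supports threaded decode stores |decode_client| and |decode_task_runner| and returns true, while this VAAPI implementation returns false so the caller keeps calling Decode() on its own thread. A hedged sketch of both responses, with std types standing in for base::WeakPtr and the task-runner types:

    // Standalone sketch; Client/TaskRunner stand in for the media/base types.
    #include <memory>

    struct Client {};
    struct TaskRunner {};

    class VdaWithThreadedDecode {
     public:
      bool TryToSetupDecodeOnSeparateThread(std::weak_ptr<Client> client,
                                            std::shared_ptr<TaskRunner> runner) {
        decode_client_ = client;      // Decode()/PictureReady() now use these.
        decode_task_runner_ = runner;
        return true;
      }
     private:
      std::weak_ptr<Client> decode_client_;
      std::shared_ptr<TaskRunner> decode_task_runner_;
    };

    class VdaMainThreadOnly {
     public:
      bool TryToSetupDecodeOnSeparateThread(std::weak_ptr<Client>,
                                            std::shared_ptr<TaskRunner>) {
        return false;  // Caller must keep decoding on the main thread.
      }
    };

    int main() {
      auto client = std::make_shared<Client>();
      auto runner = std::make_shared<TaskRunner>();
      VdaWithThreadedDecode a;
      VdaMainThreadOnly b;
      return a.TryToSetupDecodeOnSeparateThread(client, runner) &&
                     !b.TryToSetupDecodeOnSeparateThread(client, runner)
                 ? 0
                 : 1;
    }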
diff --git a/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.h b/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.h
index 11cc082a627..f9cfb90376c 100644
--- a/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.h
+++ b/chromium/content/common/gpu/media/vaapi_video_decode_accelerator.h
@@ -20,13 +20,14 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/linked_ptr.h"
-#include "base/memory/shared_memory.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "content/common/gpu/media/vaapi_wrapper.h"
#include "media/base/bitstream_buffer.h"
#include "media/video/picture.h"
@@ -55,9 +56,9 @@ class CONTENT_EXPORT VaapiVideoDecodeAccelerator
class VaapiDecodeSurface;
VaapiVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- const base::Callback<
- void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>& bind_image);
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb);
+
~VaapiVideoDecodeAccelerator() override;
// media::VideoDecodeAccelerator implementation.
@@ -69,7 +70,10 @@ class CONTENT_EXPORT VaapiVideoDecodeAccelerator
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
static media::VideoDecodeAccelerator::SupportedProfiles
GetSupportedProfiles();
@@ -180,10 +184,6 @@ class CONTENT_EXPORT VaapiVideoDecodeAccelerator
// available.
scoped_refptr<VaapiDecodeSurface> CreateSurface();
-
- // Client-provided GL state.
- base::Callback<bool(void)> make_context_current_;
-
// VAVDA state.
enum State {
// Initialize() not called yet or failed.
@@ -210,8 +210,7 @@ class CONTENT_EXPORT VaapiVideoDecodeAccelerator
~InputBuffer();
int32_t id;
- size_t size;
- scoped_ptr<base::SharedMemory> shm;
+ scoped_ptr<SharedMemoryRegion> shm;
};
// Queue for incoming input buffers.
@@ -305,10 +304,11 @@ class CONTENT_EXPORT VaapiVideoDecodeAccelerator
size_t requested_num_pics_;
gfx::Size requested_pic_size_;
- // Binds the provided GLImage to a givenr client texture ID & texture target
- // combination in GLES.
- base::Callback<void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>
- bind_image_;
+ // Callback to make GL context current.
+ MakeGLContextCurrentCallback make_context_current_cb_;
+
+ // Callback to bind a GLImage to a given texture.
+ BindGLImageCallback bind_image_cb_;
// The WeakPtrFactory for |weak_this_|.
base::WeakPtrFactory<VaapiVideoDecodeAccelerator> weak_this_factory_;
diff --git a/chromium/content/common/gpu/media/vaapi_video_encode_accelerator.cc b/chromium/content/common/gpu/media/vaapi_video_encode_accelerator.cc
index 049cd7a5547..520d411e21b 100644
--- a/chromium/content/common/gpu/media/vaapi_video_encode_accelerator.cc
+++ b/chromium/content/common/gpu/media/vaapi_video_encode_accelerator.cc
@@ -13,6 +13,7 @@
#include "base/metrics/histogram.h"
#include "base/numerics/safe_conversions.h"
#include "content/common/gpu/media/h264_dpb.h"
+#include "content/common/gpu/media/shared_memory_region.h"
#include "media/base/bind_to_current_loop.h"
#include "third_party/libva/va/va_enc_h264.h"
@@ -100,13 +101,10 @@ struct VaapiVideoEncodeAccelerator::InputFrameRef {
};
struct VaapiVideoEncodeAccelerator::BitstreamBufferRef {
- BitstreamBufferRef(int32_t id,
- scoped_ptr<base::SharedMemory> shm,
- size_t size)
- : id(id), shm(std::move(shm)), size(size) {}
+ BitstreamBufferRef(int32_t id, scoped_ptr<SharedMemoryRegion> shm)
+ : id(id), shm(std::move(shm)) {}
const int32_t id;
- const scoped_ptr<base::SharedMemory> shm;
- const size_t size;
+ const scoped_ptr<SharedMemoryRegion> shm;
};
media::VideoEncodeAccelerator::SupportedProfiles
@@ -176,9 +174,19 @@ bool VaapiVideoEncodeAccelerator::Initialize(
client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
client_ = client_ptr_factory_->GetWeakPtr();
- if (output_profile < media::H264PROFILE_BASELINE ||
- output_profile > media::H264PROFILE_MAIN) {
- DVLOGF(1) << "Unsupported output profile: " << output_profile;
+ const SupportedProfiles& profiles = GetSupportedProfiles();
+ auto profile = std::find_if(profiles.begin(), profiles.end(),
+ [output_profile](const SupportedProfile& p) {
+ return p.profile == output_profile;
+ });
+ if (profile == profiles.end()) {
+ DVLOGF(1) << "Unsupported output profile " << output_profile;
+ return false;
+ }
+ if (input_visible_size.width() > profile->max_resolution.width() ||
+ input_visible_size.height() > profile->max_resolution.height()) {
+ DVLOGF(1) << "Input size too big: " << input_visible_size.ToString()
+ << ", max supported size: " << profile->max_resolution.ToString();
return false;
}
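
Editor's note: Initialize() now derives its validation from GetSupportedProfiles() instead of a hard-coded H.264 range, so each codec is checked automatically against its own maximum resolution. A compilable sketch of the lookup; the struct below is a stand-in for media::VideoEncodeAccelerator::SupportedProfile:

    // Standalone sketch of profile and resolution validation.
    #include <algorithm>
    #include <vector>

    struct Size { int width; int height; };
    struct SupportedProfile { int profile; Size max_resolution; };

    bool ValidateConfig(const std::vector<SupportedProfile>& profiles,
                        int output_profile, Size input_visible_size) {
      auto it = std::find_if(profiles.begin(), profiles.end(),
                             [output_profile](const SupportedProfile& p) {
                               return p.profile == output_profile;
                             });
      if (it == profiles.end())
        return false;  // Unsupported output profile.
      return input_visible_size.width <= it->max_resolution.width &&
             input_visible_size.height <= it->max_resolution.height;
    }

    int main() {
      std::vector<SupportedProfile> profiles{{/*H264_MAIN=*/1, {1920, 1088}}};
      return ValidateConfig(profiles, 1, {1280, 720}) ? 0 : 1;
    }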
@@ -546,11 +554,8 @@ void VaapiVideoEncodeAccelerator::TryToReturnBitstreamBuffer() {
size_t data_size = 0;
if (!vaapi_wrapper_->DownloadAndDestroyCodedBuffer(
- encode_job->coded_buffer,
- encode_job->input_surface->id(),
- target_data,
- buffer->size,
- &data_size)) {
+ encode_job->coded_buffer, encode_job->input_surface->id(),
+ target_data, buffer->shm->size(), &data_size)) {
NOTIFY_ERROR(kPlatformFailureError, "Failed downloading coded buffer");
return;
}
@@ -669,15 +674,14 @@ void VaapiVideoEncodeAccelerator::UseOutputBitstreamBuffer(
return;
}
- scoped_ptr<base::SharedMemory> shm(
- new base::SharedMemory(buffer.handle(), false));
- if (!shm->Map(buffer.size())) {
+ scoped_ptr<SharedMemoryRegion> shm(new SharedMemoryRegion(buffer, false));
+ if (!shm->Map()) {
NOTIFY_ERROR(kPlatformFailureError, "Failed mapping shared memory.");
return;
}
scoped_ptr<BitstreamBufferRef> buffer_ref(
- new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
+ new BitstreamBufferRef(buffer.id(), std::move(shm)));
encoder_thread_task_runner_->PostTask(
FROM_HERE,
diff --git a/chromium/content/common/gpu/media/vaapi_wrapper.cc b/chromium/content/common/gpu/media/vaapi_wrapper.cc
index db38f32f7f8..19303e1e6d6 100644
--- a/chromium/content/common/gpu/media/vaapi_wrapper.cc
+++ b/chromium/content/common/gpu/media/vaapi_wrapper.cc
@@ -9,7 +9,6 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/command_line.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
@@ -18,7 +17,6 @@
// Auto-generated for dlopen libva libraries
#include "content/common/gpu/media/va_stubs.h"
#include "content/common/gpu/media/vaapi_picture.h"
-#include "content/public/common/content_switches.h"
#include "third_party/libyuv/include/libyuv.h"
#include "ui/gl/gl_bindings.h"
#if defined(USE_X11)
@@ -127,7 +125,9 @@ static const ProfileMap kProfileMap[] = {
// media::H264PROFILE_HIGH*.
{media::H264PROFILE_HIGH, VAProfileH264High},
{media::VP8PROFILE_ANY, VAProfileVP8Version0_3},
- {media::VP9PROFILE_ANY, VAProfileVP9Profile0},
+ // TODO(servolk): Need to add VP9 profiles 1,2,3 here after rolling
+ // third_party/libva to 1.7. crbug.com/598118
+ {media::VP9PROFILE_PROFILE0, VAProfileVP9Profile0},
};
static std::vector<VAConfigAttrib> GetRequiredAttribs(
@@ -214,10 +214,6 @@ scoped_refptr<VaapiWrapper> VaapiWrapper::CreateForVideoCodec(
media::VideoEncodeAccelerator::SupportedProfiles
VaapiWrapper::GetSupportedEncodeProfiles() {
media::VideoEncodeAccelerator::SupportedProfiles profiles;
- const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kDisableVaapiAcceleratedVideoEncode))
- return profiles;
-
std::vector<ProfileInfo> encode_profile_infos =
profile_infos_.Get().GetSupportedProfileInfosForCodecMode(kEncode);
@@ -369,11 +365,8 @@ bool VaapiWrapper::VaInitialize(const base::Closure& report_error_to_uma_cb) {
return false;
}
- VAStatus va_res = VA_STATUS_SUCCESS;
- if (!va_display_state->Initialize(&va_res)) {
- VA_LOG_ON_ERROR(va_res, "vaInitialize failed");
+ if (!va_display_state->Initialize())
return false;
- }
va_display_ = va_display_state->va_display();
return true;
@@ -1218,7 +1211,7 @@ VaapiWrapper::VADisplayState::VADisplayState()
VaapiWrapper::VADisplayState::~VADisplayState() {}
-bool VaapiWrapper::VADisplayState::Initialize(VAStatus* status) {
+bool VaapiWrapper::VADisplayState::Initialize() {
va_lock_.AssertAcquired();
if (refcount_++ == 0) {
#if defined(USE_X11)
@@ -1232,9 +1225,12 @@ bool VaapiWrapper::VADisplayState::Initialize(VAStatus* status) {
return false;
}
- *status = vaInitialize(va_display_, &major_version_, &minor_version_);
- if (*status != VA_STATUS_SUCCESS)
+ VAStatus va_res =
+ vaInitialize(va_display_, &major_version_, &minor_version_);
+ if (va_res != VA_STATUS_SUCCESS) {
+ LOG(WARNING) << "vaInitialize failed: " << vaErrorStr(va_res);
return false;
+ }
va_initialized_ = true;
DVLOG(1) << "VAAPI version: " << major_version_ << "." << minor_version_;
diff --git a/chromium/content/common/gpu/media/vaapi_wrapper.h b/chromium/content/common/gpu/media/vaapi_wrapper.h
index 7f14b49be11..4394bc36b92 100644
--- a/chromium/content/common/gpu/media/vaapi_wrapper.h
+++ b/chromium/content/common/gpu/media/vaapi_wrapper.h
@@ -247,7 +247,7 @@ class CONTENT_EXPORT VaapiWrapper
~VADisplayState();
// |va_lock_| must be held on entry.
- bool Initialize(VAStatus* status);
+ bool Initialize();
void Deinitialize(VAStatus* status);
base::Lock* va_lock() { return &va_lock_; }
diff --git a/chromium/content/common/gpu/media/video_decode_accelerator_unittest.cc b/chromium/content/common/gpu/media/video_decode_accelerator_unittest.cc
index 36466304a3c..91339668867 100644
--- a/chromium/content/common/gpu/media/video_decode_accelerator_unittest.cc
+++ b/chromium/content/common/gpu/media/video_decode_accelerator_unittest.cc
@@ -47,9 +47,10 @@
#include "base/threading/thread.h"
#include "build/build_config.h"
#include "content/common/gpu/media/fake_video_decode_accelerator.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
#include "content/common/gpu/media/rendering_helper.h"
#include "content/common/gpu/media/video_accelerator_unittest_helpers.h"
-#include "content/public/common/content_switches.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
#include "media/filters/h264_parser.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/codec/png_codec.h"
@@ -334,6 +335,7 @@ class GLRenderingVDAClient
// VideoDecodeAccelerator::Client implementation.
// The heart of the Client.
void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
+ uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) override;
void DismissPictureBuffer(int32_t picture_buffer_id) override;
@@ -359,16 +361,6 @@ class GLRenderingVDAClient
private:
typedef std::map<int32_t, scoped_refptr<TextureRef>> TextureRefMap;
- scoped_ptr<media::VideoDecodeAccelerator> CreateFakeVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateDXVAVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2VDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateV4L2SliceVDA();
- scoped_ptr<media::VideoDecodeAccelerator> CreateVaapiVDA();
-
- void BindImage(uint32_t client_texture_id,
- uint32_t texture_target,
- scoped_refptr<gl::GLImage> image);
-
void SetState(ClientState new_state);
void FinishInitialization();
void ReturnPicture(int32_t picture_buffer_id);
@@ -401,8 +393,10 @@ class GLRenderingVDAClient
int next_bitstream_buffer_id_;
ClientStateNotification<ClientState>* note_;
scoped_ptr<VideoDecodeAccelerator> decoder_;
- scoped_ptr<base::WeakPtrFactory<VideoDecodeAccelerator> >
- weak_decoder_factory_;
+ base::WeakPtr<VideoDecodeAccelerator> weak_vda_;
+ scoped_ptr<base::WeakPtrFactory<VideoDecodeAccelerator>>
+ weak_vda_ptr_factory_;
+ scoped_ptr<GpuVideoDecodeAcceleratorFactoryImpl> vda_factory_;
int remaining_play_throughs_;
int reset_after_frame_num_;
int delete_decoder_state_;
@@ -440,9 +434,23 @@ class GLRenderingVDAClient
int32_t next_picture_buffer_id_;
+ base::WeakPtr<GLRenderingVDAClient> weak_this_;
+ base::WeakPtrFactory<GLRenderingVDAClient> weak_this_factory_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(GLRenderingVDAClient);
};
+static bool DoNothingReturnTrue() {
+ return true;
+}
+
+static bool DummyBindImage(uint32_t client_texture_id,
+ uint32_t texture_target,
+ const scoped_refptr<gl::GLImage>& image,
+ bool can_bind_to_sampler) {
+ return true;
+}
+
GLRenderingVDAClient::GLRenderingVDAClient(
size_t window_id,
RenderingHelper* rendering_helper,
@@ -483,7 +491,8 @@ GLRenderingVDAClient::GLRenderingVDAClient(
delay_reuse_after_frame_num_(delay_reuse_after_frame_num),
decode_calls_per_second_(decode_calls_per_second),
render_as_thumbnails_(render_as_thumbnails),
- next_picture_buffer_id_(1) {
+ next_picture_buffer_id_(1),
+ weak_this_factory_(this) {
LOG_ASSERT(num_in_flight_decodes > 0);
LOG_ASSERT(num_play_throughs > 0);
// |num_in_flight_decodes_| is unsupported if |decode_calls_per_second_| > 0.
@@ -494,6 +503,8 @@ GLRenderingVDAClient::GLRenderingVDAClient(
profile_ = (profile != media::VIDEO_CODEC_PROFILE_UNKNOWN
? profile
: media::H264PROFILE_BASELINE);
+
+ weak_this_ = weak_this_factory_.GetWeakPtr();
}
GLRenderingVDAClient::~GLRenderingVDAClient() {
@@ -502,119 +513,49 @@ GLRenderingVDAClient::~GLRenderingVDAClient() {
SetState(CS_DESTROYED);
}
-static bool DoNothingReturnTrue() { return true; }
+void GLRenderingVDAClient::CreateAndStartDecoder() {
+ LOG_ASSERT(decoder_deleted());
+ LOG_ASSERT(!decoder_.get());
-scoped_ptr<media::VideoDecodeAccelerator>
-GLRenderingVDAClient::CreateFakeVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
if (fake_decoder_) {
- decoder.reset(new FakeVideoDecodeAccelerator(
- static_cast<gfx::GLContext*> (rendering_helper_->GetGLContextHandle()),
- frame_size_,
- base::Bind(&DoNothingReturnTrue)));
- }
- return decoder;
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GLRenderingVDAClient::CreateDXVAVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_WIN)
- if (base::win::GetVersion() >= base::win::VERSION_WIN7)
- decoder.reset(
- new DXVAVideoDecodeAccelerator(
- base::Bind(&DoNothingReturnTrue),
- rendering_helper_->GetGLContext().get()));
-#endif
- return decoder;
-}
-
-scoped_ptr<media::VideoDecodeAccelerator>
-GLRenderingVDAClient::CreateV4L2VDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
- scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
- if (device.get()) {
- base::WeakPtr<VideoDecodeAccelerator::Client> weak_client = AsWeakPtr();
- decoder.reset(new V4L2VideoDecodeAccelerator(
- static_cast<EGLDisplay>(rendering_helper_->GetGLDisplay()),
- static_cast<EGLContext>(rendering_helper_->GetGLContextHandle()),
- weak_client, base::Bind(&DoNothingReturnTrue), device,
- base::ThreadTaskRunnerHandle::Get()));
- }
-#endif
- return decoder;
-}
+ decoder_.reset(new FakeVideoDecodeAccelerator(
+ frame_size_, base::Bind(&DoNothingReturnTrue)));
+ LOG_ASSERT(decoder_->Initialize(profile_, this));
+ } else {
+ if (!vda_factory_) {
+ vda_factory_ = GpuVideoDecodeAcceleratorFactoryImpl::Create(
+ base::Bind(&RenderingHelper::GetGLContext,
+ base::Unretained(rendering_helper_)),
+ base::Bind(&DoNothingReturnTrue), base::Bind(&DummyBindImage));
+ LOG_ASSERT(vda_factory_);
+ }
-scoped_ptr<media::VideoDecodeAccelerator>
-GLRenderingVDAClient::CreateV4L2SliceVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
- scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
- if (device.get()) {
- base::WeakPtr<VideoDecodeAccelerator::Client> weak_client = AsWeakPtr();
- decoder.reset(new V4L2SliceVideoDecodeAccelerator(
- device, static_cast<EGLDisplay>(rendering_helper_->GetGLDisplay()),
- static_cast<EGLContext>(rendering_helper_->GetGLContextHandle()),
- weak_client, base::Bind(&DoNothingReturnTrue),
- base::ThreadTaskRunnerHandle::Get()));
+ VideoDecodeAccelerator::Config config(profile_);
+ gpu::GpuPreferences gpu_preferences;
+ decoder_ = vda_factory_->CreateVDA(this, config, gpu_preferences);
}
-#endif
- return decoder;
-}
-scoped_ptr<media::VideoDecodeAccelerator>
-GLRenderingVDAClient::CreateVaapiVDA() {
- scoped_ptr<media::VideoDecodeAccelerator> decoder;
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
- decoder.reset(new VaapiVideoDecodeAccelerator(
- base::Bind(&DoNothingReturnTrue),
- base::Bind(&GLRenderingVDAClient::BindImage, base::Unretained(this))));
-#endif
- return decoder;
-}
+ LOG_ASSERT(decoder_) << "Failed creating a VDA";
-void GLRenderingVDAClient::BindImage(uint32_t client_texture_id,
- uint32_t texture_target,
- scoped_refptr<gl::GLImage> image) {}
+ decoder_->TryToSetupDecodeOnSeparateThread(
+ weak_this_, base::ThreadTaskRunnerHandle::Get());
-void GLRenderingVDAClient::CreateAndStartDecoder() {
- LOG_ASSERT(decoder_deleted());
- LOG_ASSERT(!decoder_.get());
-
- VideoDecodeAccelerator::Client* client = this;
-
- scoped_ptr<media::VideoDecodeAccelerator> decoders[] = {
- CreateFakeVDA(),
- CreateDXVAVDA(),
- CreateV4L2VDA(),
- CreateV4L2SliceVDA(),
- CreateVaapiVDA(),
- };
+ weak_vda_ptr_factory_.reset(
+ new base::WeakPtrFactory<VideoDecodeAccelerator>(decoder_.get()));
+ weak_vda_ = weak_vda_ptr_factory_->GetWeakPtr();
- for (size_t i = 0; i < arraysize(decoders); ++i) {
- if (!decoders[i])
- continue;
- decoder_ = std::move(decoders[i]);
- weak_decoder_factory_.reset(
- new base::WeakPtrFactory<VideoDecodeAccelerator>(decoder_.get()));
- if (decoder_->Initialize(profile_, client)) {
- SetState(CS_DECODER_SET);
- FinishInitialization();
- return;
- }
- }
- // Decoders are all initialize failed.
- LOG(ERROR) << "VideoDecodeAccelerator::Initialize() failed";
- LOG_ASSERT(false);
+ SetState(CS_DECODER_SET);
+ FinishInitialization();
}
void GLRenderingVDAClient::ProvidePictureBuffers(
uint32_t requested_num_of_buffers,
+ uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) {
if (decoder_deleted())
return;
+ LOG_ASSERT(textures_per_buffer == 1u);
std::vector<media::PictureBuffer> buffers;
requested_num_of_buffers += kExtraPictureBuffers;
@@ -637,8 +578,9 @@ void GLRenderingVDAClient::ProvidePictureBuffers(
texture_id))))
.second);
- buffers.push_back(
- media::PictureBuffer(picture_buffer_id, dimensions, texture_id));
+ media::PictureBuffer::TextureIds ids;
+ ids.push_back(texture_id);
+ buffers.push_back(media::PictureBuffer(picture_buffer_id, dimensions, ids));
}
decoder_->AssignPictureBuffers(buffers);
}
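
Editor's note: after the CreateAndStartDecoder() rewrite above, the test no longer enumerates one Create*VDA() helper per platform; it asks GpuVideoDecodeAcceleratorFactoryImpl for whatever VDA the platform supports. A generic sketch of that simplification; DecoderFactory and the types below are illustrative stand-ins, not the real factory API:

    // Standalone sketch of replacing a try-each-backend loop with a factory.
    #include <iostream>
    #include <memory>

    struct Decoder { virtual ~Decoder() = default; };
    struct PlatformDecoder : Decoder {};

    class DecoderFactory {
     public:
      // Returns the platform's decoder, or nullptr if none is usable; the
      // caller no longer needs one CreateXxxVDA() helper per backend.
      std::unique_ptr<Decoder> CreateVDA() {
        return std::make_unique<PlatformDecoder>();
      }
    };

    int main() {
      DecoderFactory factory;
      std::unique_ptr<Decoder> decoder = factory.CreateVDA();
      if (!decoder) {
        std::cerr << "Failed creating a VDA\n";
        return 1;
      }
      std::cout << "decoder ready\n";
    }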
@@ -710,10 +652,8 @@ void GLRenderingVDAClient::ReturnPicture(int32_t picture_buffer_id) {
if (num_decoded_frames_ > delay_reuse_after_frame_num_) {
base::MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&VideoDecodeAccelerator::ReusePictureBuffer,
- weak_decoder_factory_->GetWeakPtr(),
- picture_buffer_id),
+ FROM_HERE, base::Bind(&VideoDecodeAccelerator::ReusePictureBuffer,
+ weak_vda_, picture_buffer_id),
kReuseDelay);
} else {
decoder_->ReusePictureBuffer(picture_buffer_id);
@@ -835,7 +775,7 @@ void GLRenderingVDAClient::FinishInitialization() {
void GLRenderingVDAClient::DeleteDecoder() {
if (decoder_deleted())
return;
- weak_decoder_factory_.reset();
+ weak_vda_ptr_factory_->InvalidateWeakPtrs();
decoder_.reset();
STLClearObject(&encoded_data_);
active_textures_.clear();
@@ -1196,17 +1136,6 @@ class VideoDecodeAcceleratorParamTest
base::Tuple<int, int, int, ResetPoint, ClientState, bool, bool> > {
};
-// Helper so that gtest failures emit a more readable version of the tuple than
-// its byte representation.
-::std::ostream& operator<<(
- ::std::ostream& os,
- const base::Tuple<int, int, int, ResetPoint, ClientState, bool, bool>& t) {
- return os << base::get<0>(t) << ", " << base::get<1>(t) << ", "
- << base::get<2>(t) << ", " << base::get<3>(t) << ", "
- << base::get<4>(t) << ", " << base::get<5>(t) << ", "
- << base::get<6>(t);
-}
-
// Wait for |note| to report a state and if it's not |expected_state| then
// assert |client| has deleted its decoder.
static void AssertWaitForStateOrDeleted(
diff --git a/chromium/content/common/gpu/media/video_encode_accelerator_unittest.cc b/chromium/content/common/gpu/media/video_encode_accelerator_unittest.cc
index 9224e89c72f..09f1c63ed23 100644
--- a/chromium/content/common/gpu/media/video_encode_accelerator_unittest.cc
+++ b/chromium/content/common/gpu/media/video_encode_accelerator_unittest.cc
@@ -55,6 +55,8 @@
// Status has been defined as int in Xlib.h.
#undef Status
#endif // defined(ARCH_CPU_X86_FAMILY)
+#elif defined(OS_MACOSX)
+#include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
#else
#error The VideoEncodeAcceleratorUnittest is not supported on this platform.
#endif
@@ -126,7 +128,11 @@ const unsigned int kLoggedLatencyPercentiles[] = {50, 75, 95};
// of the stream.
// Bitrate is only forced for tests that test bitrate.
const char* g_default_in_filename = "bear_320x192_40frames.yuv";
+#if !defined(OS_MACOSX)
const char* g_default_in_parameters = ":320:192:1:out.h264:200000";
+#else
+const char* g_default_in_parameters = ":320:192:0:out.h264:200000";
+#endif
// Enabled by including a --fake_encoder flag to the command line invoking the
// test.
@@ -623,8 +629,8 @@ class VideoFrameQualityValidator {
private:
void InitializeCB(bool success);
- void DecodeDone(media::VideoDecoder::Status status);
- void FlushDone(media::VideoDecoder::Status status);
+ void DecodeDone(media::DecodeStatus status);
+ void FlushDone(media::DecodeStatus status);
void VerifyOutputFrame(const scoped_refptr<media::VideoFrame>& output_frame);
void Decode();
@@ -670,16 +676,18 @@ void VideoFrameQualityValidator::Initialize(const gfx::Size& coded_size,
if (IsVP8(profile_))
config.Initialize(media::kCodecVP8, media::VP8PROFILE_ANY, kInputFormat,
media::COLOR_SPACE_UNSPECIFIED, coded_size, visible_size,
- natural_size, media::EmptyExtraData(), false);
+ natural_size, media::EmptyExtraData(),
+ media::Unencrypted());
else if (IsH264(profile_))
config.Initialize(media::kCodecH264, media::H264PROFILE_MAIN, kInputFormat,
media::COLOR_SPACE_UNSPECIFIED, coded_size, visible_size,
- natural_size, media::EmptyExtraData(), false);
+ natural_size, media::EmptyExtraData(),
+ media::Unencrypted());
else
LOG_ASSERT(0) << "Invalid profile " << profile_;
decoder_->Initialize(
- config, false, media::SetCdmReadyCB(),
+ config, false, nullptr,
base::Bind(&VideoFrameQualityValidator::InitializeCB,
base::Unretained(this)),
base::Bind(&VideoFrameQualityValidator::VerifyOutputFrame,
@@ -704,9 +712,8 @@ void VideoFrameQualityValidator::AddOriginalFrame(
original_frames_.push(frame);
}
-void VideoFrameQualityValidator::DecodeDone(
- media::VideoDecoder::Status status) {
- if (status == media::VideoDecoder::kOk) {
+void VideoFrameQualityValidator::DecodeDone(media::DecodeStatus status) {
+ if (status == media::DecodeStatus::OK) {
decoder_state_ = INITIALIZED;
Decode();
} else {
@@ -716,7 +723,7 @@ void VideoFrameQualityValidator::DecodeDone(
}
}
-void VideoFrameQualityValidator::FlushDone(media::VideoDecoder::Status status) {
+void VideoFrameQualityValidator::FlushDone(media::DecodeStatus status) {
flush_complete_cb_.Run();
}
@@ -810,6 +817,7 @@ class VEAClient : public VideoEncodeAccelerator::Client {
scoped_ptr<media::VideoEncodeAccelerator> CreateFakeVEA();
scoped_ptr<media::VideoEncodeAccelerator> CreateV4L2VEA();
scoped_ptr<media::VideoEncodeAccelerator> CreateVaapiVEA();
+ scoped_ptr<media::VideoEncodeAccelerator> CreateVTVEA();
void SetState(ClientState new_state);
@@ -1071,6 +1079,14 @@ scoped_ptr<media::VideoEncodeAccelerator> VEAClient::CreateVaapiVEA() {
return encoder;
}
+scoped_ptr<media::VideoEncodeAccelerator> VEAClient::CreateVTVEA() {
+ scoped_ptr<media::VideoEncodeAccelerator> encoder;
+#if defined(OS_MACOSX)
+ encoder.reset(new VTVideoEncodeAccelerator());
+#endif
+ return encoder;
+}
+
void VEAClient::CreateEncoder() {
DCHECK(thread_checker_.CalledOnValidThread());
LOG_ASSERT(!has_encoder());
@@ -1078,7 +1094,8 @@ void VEAClient::CreateEncoder() {
scoped_ptr<media::VideoEncodeAccelerator> encoders[] = {
CreateFakeVEA(),
CreateV4L2VEA(),
- CreateVaapiVEA()
+ CreateVaapiVEA(),
+ CreateVTVEA()
};
DVLOG(1) << "Profile: " << test_stream_->requested_profile
@@ -1649,6 +1666,7 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
encoder_thread.Stop();
}
+#if !defined(OS_MACOSX)
INSTANTIATE_TEST_CASE_P(
SimpleEncode,
VideoEncodeAcceleratorTest,
@@ -1693,6 +1711,26 @@ INSTANTIATE_TEST_CASE_P(
base::MakeTuple(3, false, 0, false, false, false, false, false),
base::MakeTuple(3, false, 0, true, false, false, true, false),
base::MakeTuple(3, false, 0, true, false, true, false, false)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ SimpleEncode,
+ VideoEncodeAcceleratorTest,
+ ::testing::Values(
+ base::MakeTuple(1, true, 0, false, false, false, false, false),
+ base::MakeTuple(1, true, 0, false, false, false, false, true)));
+
+INSTANTIATE_TEST_CASE_P(
+ EncoderPerf,
+ VideoEncodeAcceleratorTest,
+ ::testing::Values(
+ base::MakeTuple(1, false, 0, false, true, false, false, false)));
+
+INSTANTIATE_TEST_CASE_P(
+ MultipleEncoders,
+ VideoEncodeAcceleratorTest,
+ ::testing::Values(
+ base::MakeTuple(3, false, 0, false, false, false, false, false)));
+#endif
// TODO(posciak): more tests:
// - async FeedEncoderWithOutput
diff --git a/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.cc b/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.cc
index e74e6f64d55..1571e834620 100644
--- a/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.cc
+++ b/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.cc
@@ -10,7 +10,6 @@
#include <stddef.h>
#include "base/bind.h"
-#include "base/command_line.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "base/macros.h"
@@ -20,7 +19,6 @@
#include "base/thread_task_runner_handle.h"
#include "base/version.h"
#include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
-#include "content/public/common/content_switches.h"
#include "media/base/limits.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image_io_surface.h"
@@ -46,7 +44,11 @@ static const media::VideoCodecProfile kSupportedProfiles[] = {
media::H264PROFILE_MAIN,
media::H264PROFILE_EXTENDED,
media::H264PROFILE_HIGH,
- media::H264PROFILE_HIGH10PROFILE,
+ // TODO(hubbe): Try to re-enable this profile. Currently it seems that
+ // some codecs fail to check the profile during initialization and then
+ // fail on the first frame decode, which results in a pipeline failure.
+ // media::H264PROFILE_HIGH10PROFILE,
media::H264PROFILE_SCALABLEBASELINE,
media::H264PROFILE_SCALABLEHIGH,
media::H264PROFILE_STEREOHIGH,
@@ -72,9 +74,9 @@ static base::ScopedCFTypeRef<CFMutableDictionaryRef>
BuildImageConfig(CMVideoDimensions coded_dimensions) {
base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config;
- // 4:2:2 is used over the native 4:2:0 because only 4:2:2 can be directly
- // bound to a texture by CGLTexImageIOSurface2D().
- int32_t pixel_format = kCVPixelFormatType_422YpCbCr8;
+ // Note that 4:2:0 textures cannot be used directly as RGBA in OpenGL, but
+ // they consume less power than 4:2:2 when composited directly by
+ // CoreAnimation.
+ int32_t pixel_format = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
#define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i)
base::ScopedCFTypeRef<CFNumberRef> cf_pixel_format(CFINT(pixel_format));
base::ScopedCFTypeRef<CFNumberRef> cf_width(CFINT(coded_dimensions.width));
@@ -86,7 +88,7 @@ BuildImageConfig(CMVideoDimensions coded_dimensions) {
image_config.reset(
CFDictionaryCreateMutable(
kCFAllocatorDefault,
- 4, // capacity
+ 3, // capacity
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks));
if (!image_config.get())
@@ -96,8 +98,6 @@ BuildImageConfig(CMVideoDimensions coded_dimensions) {
cf_pixel_format);
CFDictionarySetValue(image_config, kCVPixelBufferWidthKey, cf_width);
CFDictionarySetValue(image_config, kCVPixelBufferHeightKey, cf_height);
- CFDictionarySetValue(image_config, kCVPixelBufferOpenGLCompatibilityKey,
- kCFBooleanTrue);
return image_config;
}
@@ -175,11 +175,6 @@ static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
// session fails, hardware decoding will be disabled (Initialize() will always
// return false).
static bool InitializeVideoToolboxInternal() {
- if (base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableAcceleratedVideoDecode)) {
- return false;
- }
-
if (!IsVtInitialized()) {
// CoreVideo is also required, but the loader stops after the first path is
// loaded. Instead we rely on the transitive dependency from VideoToolbox to
@@ -255,6 +250,8 @@ static void OutputThunk(
VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {
}
+VTVideoDecodeAccelerator::Task::Task(const Task& other) = default;
+
VTVideoDecodeAccelerator::Task::~Task() {
}
@@ -291,11 +288,10 @@ bool VTVideoDecodeAccelerator::FrameOrder::operator()(
}
VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- const base::Callback<void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>&
- bind_image)
- : make_context_current_(make_context_current),
- bind_image_(bind_image),
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb)
+ : make_context_current_cb_(make_context_current_cb),
+ bind_image_cb_(bind_image_cb),
client_(nullptr),
state_(STATE_DECODING),
format_(nullptr),
@@ -307,7 +303,6 @@ VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()),
decoder_thread_("VTDecoderThread"),
weak_this_factory_(this) {
- DCHECK(!make_context_current_.is_null());
callback_.decompressionOutputCallback = OutputThunk;
callback_.decompressionOutputRefCon = this;
weak_this_ = weak_this_factory_.GetWeakPtr();
@@ -321,6 +316,11 @@ bool VTVideoDecodeAccelerator::Initialize(const Config& config,
Client* client) {
DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ if (make_context_current_cb_.is_null() || bind_image_cb_.is_null()) {
+ NOTREACHED() << "GL callbacks are required for this VDA";
+ return false;
+ }
+
if (config.is_encrypted) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
return false;
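The callback types referenced here come from gpu_video_decode_accelerator_helpers.h, newly included by the header diff below. Their shapes, inferred from the call sites in this patch rather than quoted from that header, are roughly:

// Inferred from usage; see gpu_video_decode_accelerator_helpers.h for the
// authoritative definitions.
using MakeGLContextCurrentCallback = base::Callback<bool(void)>;
using BindGLImageCallback =
    base::Callback<bool(uint32_t client_texture_id,
                        uint32_t texture_target,
                        const scoped_refptr<gl::GLImage>& image,
                        bool can_bind_to_sampler)>;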
@@ -622,23 +622,21 @@ void VTVideoDecodeAccelerator::DecodeTask(
config_changed_ = true;
}
if (config_changed_) {
- if (last_sps_.empty()) {
- config_changed_ = false;
- DLOG(ERROR) << "Invalid configuration; no SPS";
- NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
- return;
- }
- if (last_pps_.empty()) {
- config_changed_ = false;
- DLOG(ERROR) << "Invalid configuration; no PPS";
- NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
- return;
- }
-
// Only reconfigure at IDRs to avoid corruption.
if (frame->is_idr) {
config_changed_ = false;
+ if (last_sps_.empty()) {
+ DLOG(ERROR) << "Invalid configuration; no SPS";
+ NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
+ return;
+ }
+ if (last_pps_.empty()) {
+ DLOG(ERROR) << "Invalid configuration; no PPS";
+ NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
+ return;
+ }
+
// ConfigureDecoder() calls NotifyError() on failure.
if (!ConfigureDecoder())
return;
@@ -825,6 +823,13 @@ void VTVideoDecodeAccelerator::FlushDone(TaskType type) {
void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) {
DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ if (bitstream.id() < 0) {
+ DLOG(ERROR) << "Invalid bitstream, id: " << bitstream.id();
+ if (base::SharedMemory::IsHandleValid(bitstream.handle()))
+ base::SharedMemory::CloseHandle(bitstream.handle());
+ NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
+ return;
+ }
DCHECK_EQ(0u, assigned_bitstream_ids_.count(bitstream.id()));
assigned_bitstream_ids_.insert(bitstream.id());
Frame* frame = new Frame(bitstream.id());
@@ -842,10 +847,12 @@ void VTVideoDecodeAccelerator::AssignPictureBuffers(
DCHECK(!picture_info_map_.count(picture.id()));
assigned_picture_ids_.insert(picture.id());
available_picture_ids_.push_back(picture.id());
+ DCHECK_LE(1u, picture.internal_texture_ids().size());
+ DCHECK_LE(1u, picture.texture_ids().size());
picture_info_map_.insert(std::make_pair(
picture.id(),
- make_scoped_ptr(new PictureInfo(picture.internal_texture_id(),
- picture.texture_id()))));
+ make_scoped_ptr(new PictureInfo(picture.internal_texture_ids()[0],
+ picture.texture_ids()[0]))));
}
// Pictures are not marked as uncleared until after this method returns, and
@@ -859,7 +866,7 @@ void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
DCHECK(gpu_thread_checker_.CalledOnValidThread());
DCHECK(picture_info_map_.count(picture_id));
PictureInfo* picture_info = picture_info_map_.find(picture_id)->second.get();
- DCHECK_EQ(CFGetRetainCount(picture_info->cv_image), 1);
+ DCHECK_EQ(CFGetRetainCount(picture_info->cv_image), 2);
picture_info->cv_image.reset();
picture_info->gl_image->Destroy(false);
picture_info->gl_image = nullptr;
@@ -1002,8 +1009,8 @@ bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) {
// Request new pictures.
picture_size_ = frame.coded_size;
- client_->ProvidePictureBuffers(
- kNumPictureBuffers, coded_size_, GL_TEXTURE_RECTANGLE_ARB);
+ client_->ProvidePictureBuffers(kNumPictureBuffers, 1, coded_size_,
+ GL_TEXTURE_RECTANGLE_ARB);
return false;
}
if (!SendFrame(frame))
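The ProvidePictureBuffers() call above gains a textures-per-buffer argument in this change; the decoder passes 1 because both NV12 planes travel through a single GLImage, matching the plural internal_texture_ids()/texture_ids() accessors used in AssignPictureBuffers(). A sketch of the updated Client signature, inferred from the call site rather than quoted verbatim:

virtual void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
                                   uint32_t textures_per_buffer,
                                   const gfx::Size& dimensions,
                                   uint32_t texture_target) = 0;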
@@ -1026,47 +1033,27 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
DCHECK(!picture_info->cv_image);
DCHECK(!picture_info->gl_image);
- if (!make_context_current_.Run()) {
+ if (!make_context_current_cb_.Run()) {
DLOG(ERROR) << "Failed to make GL context current";
NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
return false;
}
- IOSurfaceRef surface = CVPixelBufferGetIOSurface(frame.image.get());
- if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGLCoreProfile)
- glEnable(GL_TEXTURE_RECTANGLE_ARB);
- gfx::ScopedTextureBinder texture_binder(GL_TEXTURE_RECTANGLE_ARB,
- picture_info->service_texture_id);
- CGLContextObj cgl_context =
- static_cast<CGLContextObj>(gfx::GLContext::GetCurrent()->GetHandle());
- CGLError status = CGLTexImageIOSurface2D(
- cgl_context, // ctx
- GL_TEXTURE_RECTANGLE_ARB, // target
- GL_RGB, // internal_format
- frame.coded_size.width(), // width
- frame.coded_size.height(), // height
- GL_YCBCR_422_APPLE, // format
- GL_UNSIGNED_SHORT_8_8_APPLE, // type
- surface, // io_surface
- 0); // plane
- if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGLCoreProfile)
- glDisable(GL_TEXTURE_RECTANGLE_ARB);
- if (status != kCGLNoError) {
- NOTIFY_STATUS("CGLTexImageIOSurface2D()", status, SFT_PLATFORM_ERROR);
- return false;
- }
-
- bool allow_overlay = false;
scoped_refptr<gl::GLImageIOSurface> gl_image(
new gl::GLImageIOSurface(frame.coded_size, GL_BGRA_EXT));
- if (gl_image->Initialize(surface, gfx::GenericSharedMemoryId(),
- gfx::BufferFormat::BGRA_8888)) {
- allow_overlay = true;
- } else {
- gl_image = nullptr;
+ if (!gl_image->InitializeWithCVPixelBuffer(
+ frame.image.get(), gfx::GenericSharedMemoryId(),
+ gfx::BufferFormat::YUV_420_BIPLANAR)) {
+ NOTIFY_STATUS("Failed to initialize GLImageIOSurface", PLATFORM_FAILURE,
+ SFT_PLATFORM_ERROR);
+ }
+
+ if (!bind_image_cb_.Run(picture_info->client_texture_id,
+ GL_TEXTURE_RECTANGLE_ARB, gl_image, false)) {
+ DLOG(ERROR) << "Failed to bind image";
+ NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
+ return false;
}
- bind_image_.Run(picture_info->client_texture_id, GL_TEXTURE_RECTANGLE_ARB,
- gl_image);
// Assign the new image(s) to the picture info.
picture_info->gl_image = gl_image;
@@ -1080,7 +1067,7 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
// coded size and fix it.
client_->PictureReady(media::Picture(picture_id, frame.bitstream_id,
gfx::Rect(frame.coded_size),
- allow_overlay));
+ true));
return true;
}
@@ -1143,7 +1130,9 @@ void VTVideoDecodeAccelerator::Destroy() {
QueueFlush(TASK_DESTROY);
}
-bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() {
+bool VTVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
return false;
}
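Returning false preserves the old CanDecodeOnIOThread() == false behavior: all Decode() calls stay on the GPU thread. A VDA that did support off-thread decode would instead stash the client and task runner and return true; a hypothetical sketch, with |decode_client_| and |decode_task_runner_| as assumed members:

bool SomeVDA::TryToSetupDecodeOnSeparateThread(
    const base::WeakPtr<Client>& decode_client,
    const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
  // Weak pointer; only to be dereferenced on |decode_task_runner|.
  decode_client_ = decode_client;
  decode_task_runner_ = decode_task_runner;
  return true;
}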
diff --git a/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.h b/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.h
index 2d222163823..22fc8b1d6ad 100644
--- a/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.h
+++ b/chromium/content/common/gpu/media/vt_video_decode_accelerator_mac.h
@@ -17,6 +17,7 @@
#include "base/message_loop/message_loop.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
+#include "content/common/gpu/media/gpu_video_decode_accelerator_helpers.h"
#include "content/common/gpu/media/vt_mac.h"
#include "media/filters/h264_parser.h"
#include "media/video/h264_poc.h"
@@ -35,9 +36,9 @@ bool InitializeVideoToolbox();
class VTVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
public:
explicit VTVideoDecodeAccelerator(
- const base::Callback<bool(void)>& make_context_current,
- const base::Callback<
- void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>& bind_image);
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const BindGLImageCallback& bind_image_cb);
+
~VTVideoDecodeAccelerator() override;
// VideoDecodeAccelerator implementation.
@@ -49,7 +50,10 @@ class VTVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
void Flush() override;
void Reset() override;
void Destroy() override;
- bool CanDecodeOnIOThread() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
// Called by OutputThunk() when VideoToolbox finishes decoding a frame.
void Output(
@@ -114,6 +118,7 @@ class VTVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
struct Task {
Task(TaskType type);
+ Task(const Task& other);
~Task();
TaskType type;
@@ -189,9 +194,9 @@ class VTVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
//
// GPU thread state.
//
- base::Callback<bool(void)> make_context_current_;
- base::Callback<void(uint32_t, uint32_t, scoped_refptr<gl::GLImage>)>
- bind_image_;
+ MakeGLContextCurrentCallback make_context_current_cb_;
+ BindGLImageCallback bind_image_cb_;
+
media::VideoDecodeAccelerator::Client* client_;
State state_;
diff --git a/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.cc b/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.cc
new file mode 100644
index 00000000000..71c80ef3a9f
--- /dev/null
+++ b/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.cc
@@ -0,0 +1,552 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
+
+#include "base/thread_task_runner_handle.h"
+#include "media/base/mac/coremedia_glue.h"
+#include "media/base/mac/corevideo_glue.h"
+#include "media/base/mac/video_frame_mac.h"
+
+namespace content {
+
+namespace {
+
+// TODO(emircan): Check if we can find the actual system capabilities by
+// creating VTCompressionSessions with varying requirements.
+// See crbug.com/584784.
+const size_t kBitsPerByte = 8;
+const size_t kDefaultResolutionWidth = 640;
+const size_t kDefaultResolutionHeight = 480;
+const size_t kMaxFrameRateNumerator = 30;
+const size_t kMaxFrameRateDenominator = 1;
+const size_t kMaxResolutionWidth = 4096;
+const size_t kMaxResolutionHeight = 2160;
+const size_t kNumInputBuffers = 3;
+
+} // namespace
+
+struct VTVideoEncodeAccelerator::InProgressFrameEncode {
+ InProgressFrameEncode(base::TimeDelta rtp_timestamp,
+ base::TimeTicks ref_time)
+ : timestamp(rtp_timestamp), reference_time(ref_time) {}
+ const base::TimeDelta timestamp;
+ const base::TimeTicks reference_time;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(InProgressFrameEncode);
+};
+
+struct VTVideoEncodeAccelerator::EncodeOutput {
+ EncodeOutput(VTEncodeInfoFlags info_flags, CMSampleBufferRef sbuf)
+ : info(info_flags), sample_buffer(sbuf, base::scoped_policy::RETAIN) {}
+ const VTEncodeInfoFlags info;
+ const base::ScopedCFTypeRef<CMSampleBufferRef> sample_buffer;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(EncodeOutput);
+};
+
+struct VTVideoEncodeAccelerator::BitstreamBufferRef {
+ BitstreamBufferRef(int32_t id,
+ scoped_ptr<base::SharedMemory> shm,
+ size_t size)
+ : id(id), shm(std::move(shm)), size(size) {}
+ const int32_t id;
+ const scoped_ptr<base::SharedMemory> shm;
+ const size_t size;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BitstreamBufferRef);
+};
+
+VTVideoEncodeAccelerator::VTVideoEncodeAccelerator()
+ : client_task_runner_(base::ThreadTaskRunnerHandle::Get()),
+ encoder_thread_("VTEncoderThread"),
+ encoder_task_weak_factory_(this) {
+ encoder_weak_ptr_ = encoder_task_weak_factory_.GetWeakPtr();
+}
+
+VTVideoEncodeAccelerator::~VTVideoEncodeAccelerator() {
+ DVLOG(3) << __FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ Destroy();
+ DCHECK(!encoder_thread_.IsRunning());
+ DCHECK(!encoder_task_weak_factory_.HasWeakPtrs());
+}
+
+media::VideoEncodeAccelerator::SupportedProfiles
+VTVideoEncodeAccelerator::GetSupportedProfiles() {
+ DVLOG(3) << __FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ SupportedProfiles profiles;
+ // Check if HW encoder is supported initially.
+ videotoolbox_glue_ = VideoToolboxGlue::Get();
+ if (!videotoolbox_glue_) {
+ DLOG(ERROR) << "Failed creating VideoToolbox glue.";
+ return profiles;
+ }
+ const bool rv = CreateCompressionSession(
+ media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0),
+ gfx::Size(kDefaultResolutionWidth, kDefaultResolutionHeight), true);
+ DestroyCompressionSession();
+ if (!rv) {
+ VLOG(1)
+ << "Hardware encode acceleration is not available on this platform.";
+ return profiles;
+ }
+
+ SupportedProfile profile;
+ profile.profile = media::H264PROFILE_BASELINE;
+ profile.max_framerate_numerator = kMaxFrameRateNumerator;
+ profile.max_framerate_denominator = kMaxFrameRateDenominator;
+ profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
+ profiles.push_back(profile);
+ return profiles;
+}
+
+bool VTVideoEncodeAccelerator::Initialize(
+ media::VideoPixelFormat format,
+ const gfx::Size& input_visible_size,
+ media::VideoCodecProfile output_profile,
+ uint32_t initial_bitrate,
+ Client* client) {
+ DVLOG(3) << __FUNCTION__
+ << ": input_format=" << media::VideoPixelFormatToString(format)
+ << ", input_visible_size=" << input_visible_size.ToString()
+ << ", output_profile=" << output_profile
+ << ", initial_bitrate=" << initial_bitrate;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(client);
+
+  if (media::PIXEL_FORMAT_I420 != format) {
+    DLOG(ERROR) << "Input format not supported: "
+                << media::VideoPixelFormatToString(format);
+    return false;
+  }
+  if (media::H264PROFILE_BASELINE != output_profile) {
+    DLOG(ERROR) << "Output profile not supported: " << output_profile;
+    return false;
+  }
+
+ videotoolbox_glue_ = VideoToolboxGlue::Get();
+ if (!videotoolbox_glue_) {
+ DLOG(ERROR) << "Failed creating VideoToolbox glue.";
+ return false;
+ }
+
+ client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
+ client_ = client_ptr_factory_->GetWeakPtr();
+ input_visible_size_ = input_visible_size;
+ frame_rate_ = kMaxFrameRateNumerator / kMaxFrameRateDenominator;
+ target_bitrate_ = initial_bitrate;
+ bitstream_buffer_size_ = input_visible_size.GetArea();
+
+ if (!encoder_thread_.Start()) {
+ DLOG(ERROR) << "Failed spawning encoder thread.";
+ return false;
+ }
+ encoder_thread_task_runner_ = encoder_thread_.task_runner();
+
+ if (!ResetCompressionSession()) {
+ DLOG(ERROR) << "Failed creating compression session.";
+ return false;
+ }
+
+ client_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&Client::RequireBitstreamBuffers, client_, kNumInputBuffers,
+ input_visible_size_, bitstream_buffer_size_));
+ return true;
+}
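A note on the sizing above — GetArea() reserves one byte per visible pixel of compressed output, a generous bound given that even raw I420 input is only 1.5 bytes per pixel:

// For the 640x480 default resolution:
//   bitstream_buffer_size_ = 640 * 480 = 307200 bytes per output buffer.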
+
+void VTVideoEncodeAccelerator::Encode(
+ const scoped_refptr<media::VideoFrame>& frame,
+ bool force_keyframe) {
+ DVLOG(3) << __FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ encoder_thread_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::EncodeTask,
+ base::Unretained(this), frame, force_keyframe));
+}
+
+void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer(
+ const media::BitstreamBuffer& buffer) {
+ DVLOG(3) << __FUNCTION__ << ": buffer size=" << buffer.size();
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (buffer.size() < bitstream_buffer_size_) {
+ DLOG(ERROR) << "Output BitstreamBuffer isn't big enough: " << buffer.size()
+ << " vs. " << bitstream_buffer_size_;
+ client_->NotifyError(kInvalidArgumentError);
+ return;
+ }
+
+ scoped_ptr<base::SharedMemory> shm(
+ new base::SharedMemory(buffer.handle(), false));
+ if (!shm->Map(buffer.size())) {
+ DLOG(ERROR) << "Failed mapping shared memory.";
+ client_->NotifyError(kPlatformFailureError);
+ return;
+ }
+
+ scoped_ptr<BitstreamBufferRef> buffer_ref(
+ new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
+
+ encoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask,
+ base::Unretained(this), base::Passed(&buffer_ref)));
+}
+
+void VTVideoEncodeAccelerator::RequestEncodingParametersChange(
+ uint32_t bitrate,
+ uint32_t framerate) {
+ DVLOG(3) << __FUNCTION__ << ": bitrate=" << bitrate
+ << ": framerate=" << framerate;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ encoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VTVideoEncodeAccelerator::RequestEncodingParametersChangeTask,
+ base::Unretained(this), bitrate, framerate));
+}
+
+void VTVideoEncodeAccelerator::Destroy() {
+ DVLOG(3) << __FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Cancel all callbacks.
+ client_ptr_factory_.reset();
+
+ if (encoder_thread_.IsRunning()) {
+ encoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VTVideoEncodeAccelerator::DestroyTask,
+ base::Unretained(this)));
+ encoder_thread_.Stop();
+ } else {
+ DestroyTask();
+ }
+}
+
+void VTVideoEncodeAccelerator::EncodeTask(
+ const scoped_refptr<media::VideoFrame>& frame,
+ bool force_keyframe) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+ DCHECK(compression_session_);
+ DCHECK(frame);
+
+  // TODO(emircan): See if we can eliminate a copy here by using
+  // CVPixelBufferPool for the allocation of incoming VideoFrames; a sketch
+  // follows this function.
+ base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer =
+ media::WrapVideoFrameInCVPixelBuffer(*frame);
+ base::ScopedCFTypeRef<CFDictionaryRef> frame_props =
+ media::video_toolbox::DictionaryWithKeyValue(
+ videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
+ force_keyframe ? kCFBooleanTrue : kCFBooleanFalse);
+
+ base::TimeTicks ref_time;
+ if (!frame->metadata()->GetTimeTicks(
+ media::VideoFrameMetadata::REFERENCE_TIME, &ref_time)) {
+ ref_time = base::TimeTicks::Now();
+ }
+ auto timestamp_cm = CoreMediaGlue::CMTimeMake(
+ frame->timestamp().InMicroseconds(), USEC_PER_SEC);
+ // Wrap information we'll need after the frame is encoded in a heap object.
+ // We'll get the pointer back from the VideoToolbox completion callback.
+ scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode(
+ frame->timestamp(), ref_time));
+
+ // We can pass the ownership of |request| to the encode callback if
+ // successful. Otherwise let it fall out of scope.
+ OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame(
+ compression_session_, pixel_buffer, timestamp_cm,
+ CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props,
+ reinterpret_cast<void*>(request.get()), nullptr);
+ if (status != noErr) {
+ DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
+ NotifyError(kPlatformFailureError);
+ } else {
+ CHECK(request.release());
+ }
+}
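A minimal sketch of the pooled allocation the TODO refers to, assuming a pool created once from the session's pixel buffer attributes dictionary (here called |attributes|; plain CoreVideo calls, not part of this patch):

base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool;
CVPixelBufferPoolCreate(kCFAllocatorDefault, nullptr /* poolAttributes */,
                        attributes /* pixel buffer attributes */,
                        pool.InitializeInto());
base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer;
CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool,
                                   pixel_buffer.InitializeInto());
// The incoming frame's planes would then be copied (or mapped) into
// |pixel_buffer| rather than allocating a fresh buffer per frame.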
+
+void VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask(
+ scoped_ptr<BitstreamBufferRef> buffer_ref) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+
+ // If there is already EncodeOutput waiting, copy its output first.
+ if (!encoder_output_queue_.empty()) {
+ scoped_ptr<VTVideoEncodeAccelerator::EncodeOutput> encode_output =
+ std::move(encoder_output_queue_.front());
+ encoder_output_queue_.pop_front();
+ ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
+ return;
+ }
+
+ bitstream_buffer_queue_.push_back(std::move(buffer_ref));
+}
+
+void VTVideoEncodeAccelerator::RequestEncodingParametersChangeTask(
+ uint32_t bitrate,
+ uint32_t framerate) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+
+ frame_rate_ = framerate > 1 ? framerate : 1;
+ target_bitrate_ = bitrate > 1 ? bitrate : 1;
+
+ if (!compression_session_) {
+ NotifyError(kPlatformFailureError);
+ return;
+ }
+
+ media::video_toolbox::SessionPropertySetter session_property_setter(
+ compression_session_, videotoolbox_glue_);
+ // TODO(emircan): See crbug.com/425352.
+ bool rv = session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
+ target_bitrate_);
+ rv &= session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
+ frame_rate_);
+ rv &= session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_DataRateLimits(),
+ media::video_toolbox::ArrayWithIntegerAndFloat(
+ target_bitrate_ / kBitsPerByte, 1.0f));
+ DLOG_IF(ERROR, !rv) << "Couldn't change session encoding parameters.";
+}
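Worked example of the properties set above, assuming a 1 Mbps target:

//   AverageBitRate = 1000000              // bits per second
//   DataRateLimits = [1000000 / 8, 1.0f]  // 125000 bytes per 1.0 s window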
+
+void VTVideoEncodeAccelerator::DestroyTask() {
+ DCHECK(thread_checker_.CalledOnValidThread() ||
+ (encoder_thread_.IsRunning() &&
+ encoder_thread_task_runner_->BelongsToCurrentThread()));
+
+ // Cancel all encoder thread callbacks.
+ encoder_task_weak_factory_.InvalidateWeakPtrs();
+
+ // This call blocks until all pending frames are flushed out.
+ DestroyCompressionSession();
+}
+
+void VTVideoEncodeAccelerator::NotifyError(
+ media::VideoEncodeAccelerator::Error error) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+ client_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&Client::NotifyError, client_, error));
+}
+
+// static
+void VTVideoEncodeAccelerator::CompressionCallback(void* encoder_opaque,
+ void* request_opaque,
+ OSStatus status,
+ VTEncodeInfoFlags info,
+ CMSampleBufferRef sbuf) {
+ // This function may be called asynchronously, on a different thread from the
+ // one that calls VTCompressionSessionEncodeFrame.
+ DVLOG(3) << __FUNCTION__;
+
+ auto encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque);
+ DCHECK(encoder);
+
+  // Release the InProgressFrameEncode request, since returning timestamps is
+  // not yet supported.
+ scoped_ptr<InProgressFrameEncode> request(
+ reinterpret_cast<InProgressFrameEncode*>(request_opaque));
+ request.reset();
+
+ // EncodeOutput holds onto CMSampleBufferRef when posting task between
+ // threads.
+ scoped_ptr<EncodeOutput> encode_output(new EncodeOutput(info, sbuf));
+
+ // This method is NOT called on |encoder_thread_|, so we still need to
+ // post a task back to it to do work.
+ encoder->encoder_thread_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::CompressionCallbackTask,
+ encoder->encoder_weak_ptr_, status,
+ base::Passed(&encode_output)));
+}
+
+void VTVideoEncodeAccelerator::CompressionCallbackTask(
+ OSStatus status,
+ scoped_ptr<EncodeOutput> encode_output) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+
+ if (status != noErr) {
+ DLOG(ERROR) << " encode failed: " << status;
+ NotifyError(kPlatformFailureError);
+ return;
+ }
+
+ // If there isn't any BitstreamBuffer to copy into, add it to a queue for
+ // later use.
+ if (bitstream_buffer_queue_.empty()) {
+ encoder_output_queue_.push_back(std::move(encode_output));
+ return;
+ }
+
+ scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref =
+ std::move(bitstream_buffer_queue_.front());
+ bitstream_buffer_queue_.pop_front();
+ ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
+}
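Encoded outputs and client bitstream buffers arrive independently on |encoder_thread_|, so whichever side finds no waiting partner is queued and the other side drains it later. The pairing invariant, in sketch form:

// At most one of the two FIFOs is non-empty at any given time:
//   EncodeOutput arrives:    bitstream_buffer_queue_ empty ? enqueue output
//                                                          : pair with front
//   BitstreamBuffer arrives: encoder_output_queue_ empty   ? enqueue buffer
//                                                          : pair with front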
+
+void VTVideoEncodeAccelerator::ReturnBitstreamBuffer(
+ scoped_ptr<EncodeOutput> encode_output,
+ scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref) {
+ DVLOG(3) << __FUNCTION__;
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+
+ if (encode_output->info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped) {
+ DVLOG(2) << " frame dropped";
+ client_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
+ buffer_ref->id, 0, false));
+ return;
+ }
+
+ auto sample_attachments = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
+ CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(
+ encode_output->sample_buffer.get(), true),
+ 0));
+ const bool keyframe =
+ !CFDictionaryContainsKey(sample_attachments,
+ CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
+
+ size_t used_buffer_size = 0;
+ const bool copy_rv = media::video_toolbox::CopySampleBufferToAnnexBBuffer(
+ encode_output->sample_buffer.get(), keyframe, buffer_ref->size,
+ reinterpret_cast<char*>(buffer_ref->shm->memory()), &used_buffer_size);
+ if (!copy_rv) {
+ DLOG(ERROR) << "Cannot copy output from SampleBuffer to AnnexBBuffer.";
+ used_buffer_size = 0;
+ }
+
+ client_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
+ buffer_ref->id, used_buffer_size, keyframe));
+}
+
+bool VTVideoEncodeAccelerator::ResetCompressionSession() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ DestroyCompressionSession();
+
+ CFTypeRef attributes_keys[] = {
+ kCVPixelBufferOpenGLCompatibilityKey,
+ kCVPixelBufferIOSurfacePropertiesKey,
+ kCVPixelBufferPixelFormatTypeKey
+ };
+ const int format[] = {
+ CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
+ CFTypeRef attributes_values[] = {
+ kCFBooleanTrue,
+ media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0)
+ .release(),
+ media::video_toolbox::ArrayWithIntegers(format, arraysize(format))
+ .release()};
+ const base::ScopedCFTypeRef<CFDictionaryRef> attributes =
+ media::video_toolbox::DictionaryWithKeysAndValues(
+ attributes_keys, attributes_values, arraysize(attributes_keys));
+ for (auto& v : attributes_values)
+ CFRelease(v);
+
+ bool session_rv =
+ CreateCompressionSession(attributes, input_visible_size_, false);
+ if (!session_rv) {
+ DestroyCompressionSession();
+ return false;
+ }
+
+ const bool configure_rv = ConfigureCompressionSession();
+ if (configure_rv)
+ RequestEncodingParametersChange(target_bitrate_, frame_rate_);
+ return configure_rv;
+}
+
+bool VTVideoEncodeAccelerator::CreateCompressionSession(
+ base::ScopedCFTypeRef<CFDictionaryRef> attributes,
+ const gfx::Size& input_size,
+ bool require_hw_encoding) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ std::vector<CFTypeRef> encoder_keys;
+ std::vector<CFTypeRef> encoder_values;
+ if (require_hw_encoding) {
+ encoder_keys.push_back(videotoolbox_glue_
+ ->kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder());
+ encoder_values.push_back(kCFBooleanTrue);
+ } else {
+ encoder_keys.push_back(videotoolbox_glue_
+ ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder());
+ encoder_values.push_back(kCFBooleanTrue);
+ }
+ base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec =
+ media::video_toolbox::DictionaryWithKeysAndValues(
+ encoder_keys.data(), encoder_values.data(), encoder_keys.size());
+
+ // Create the compression session.
+ // Note that the encoder object is given to the compression session as the
+ // callback context using a raw pointer. The C API does not allow us to use a
+ // smart pointer, nor is this encoder ref counted. However, this is still
+ // safe, because 1) we own the compression session and 2) we tear it down
+ // safely. When destructing the encoder, the compression session is flushed
+ // and invalidated. Internally, VideoToolbox will join all of its threads
+ // before returning to the client. Therefore, when control returns to us, we
+ // are guaranteed that the output callback will not execute again.
+ OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate(
+ kCFAllocatorDefault,
+ input_size.width(),
+ input_size.height(),
+ CoreMediaGlue::kCMVideoCodecType_H264,
+ encoder_spec,
+ attributes,
+ nullptr /* compressedDataAllocator */,
+ &VTVideoEncodeAccelerator::CompressionCallback,
+ reinterpret_cast<void*>(this),
+ compression_session_.InitializeInto());
+ if (status != noErr) {
+ DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
+ return false;
+ }
+ DVLOG(3) << " VTCompressionSession created with HW encode: "
+ << require_hw_encoding << ", input size=" << input_size.ToString();
+ return true;
+}
+
+bool VTVideoEncodeAccelerator::ConfigureCompressionSession() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(compression_session_);
+
+ media::video_toolbox::SessionPropertySetter session_property_setter(
+ compression_session_, videotoolbox_glue_);
+ bool rv = true;
+ rv &= session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
+ videotoolbox_glue_->kVTProfileLevel_H264_Baseline_AutoLevel());
+ rv &= session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), true);
+ rv &= session_property_setter.Set(
+ videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
+ false);
+ DLOG_IF(ERROR, !rv) << " Setting session property failed.";
+ return rv;
+}
+
+void VTVideoEncodeAccelerator::DestroyCompressionSession() {
+ DCHECK(thread_checker_.CalledOnValidThread() ||
+ (encoder_thread_.IsRunning() &&
+ encoder_thread_task_runner_->BelongsToCurrentThread()));
+
+ if (compression_session_) {
+ videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_);
+ compression_session_.reset();
+ }
+}
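If pending frames ever needed to be emitted rather than dropped at teardown, VideoToolbox's VTCompressionSessionCompleteFrames() could run first; a sketch, assuming the glue wraps that entry point:

// An invalid CMTime asks the session to complete all pending frames
// (assumption: videotoolbox_glue_ exposes this call).
videotoolbox_glue_->VTCompressionSessionCompleteFrames(
    compression_session_, CoreMediaGlue::CMTime{0, 0, 0, 0});
videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_);
compression_session_.reset();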
+
+} // namespace content
diff --git a/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.h b/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.h
new file mode 100644
index 00000000000..aa4b37ed22d
--- /dev/null
+++ b/chromium/content/common/gpu/media/vt_video_encode_accelerator_mac.h
@@ -0,0 +1,142 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_
+#define CONTENT_COMMON_GPU_MEDIA_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_
+
+#include "base/mac/scoped_cftyperef.h"
+#include "content/common/content_export.h"
+#include "media/base/mac/videotoolbox_glue.h"
+#include "media/base/mac/videotoolbox_helpers.h"
+#include "media/video/video_encode_accelerator.h"
+
+namespace content {
+
+// VideoToolbox.framework implementation of the VideoEncodeAccelerator
+// interface for MacOSX. VideoToolbox makes no guarantees that it is thread
+// safe, so this object is pinned to the thread on which it is constructed.
+class CONTENT_EXPORT VTVideoEncodeAccelerator
+ : public media::VideoEncodeAccelerator {
+ public:
+ VTVideoEncodeAccelerator();
+ ~VTVideoEncodeAccelerator() override;
+
+ // media::VideoEncodeAccelerator implementation.
+ media::VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles()
+ override;
+ bool Initialize(media::VideoPixelFormat format,
+ const gfx::Size& input_visible_size,
+ media::VideoCodecProfile output_profile,
+ uint32_t initial_bitrate,
+ Client* client) override;
+ void Encode(const scoped_refptr<media::VideoFrame>& frame,
+ bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(const media::BitstreamBuffer& buffer) override;
+ void RequestEncodingParametersChange(uint32_t bitrate,
+ uint32_t framerate) override;
+ void Destroy() override;
+
+ private:
+ using CMSampleBufferRef = CoreMediaGlue::CMSampleBufferRef;
+ using VTCompressionSessionRef = VideoToolboxGlue::VTCompressionSessionRef;
+ using VTEncodeInfoFlags = VideoToolboxGlue::VTEncodeInfoFlags;
+
+ // Holds the associated data of a video frame being processed.
+ struct InProgressFrameEncode;
+
+ // Holds output buffers coming from the encoder.
+ struct EncodeOutput;
+
+ // Holds output buffers coming from the client ready to be filled.
+ struct BitstreamBufferRef;
+
+ // Encoding tasks to be run on |encoder_thread_|.
+ void EncodeTask(const scoped_refptr<media::VideoFrame>& frame,
+ bool force_keyframe);
+ void UseOutputBitstreamBufferTask(scoped_ptr<BitstreamBufferRef> buffer_ref);
+ void RequestEncodingParametersChangeTask(uint32_t bitrate,
+ uint32_t framerate);
+ void DestroyTask();
+
+ // Helper function to notify the client of an error on |client_task_runner_|.
+ void NotifyError(media::VideoEncodeAccelerator::Error error);
+
+ // Compression session callback function to handle compressed frames.
+ static void CompressionCallback(void* encoder_opaque,
+ void* request_opaque,
+ OSStatus status,
+ VTEncodeInfoFlags info,
+ CMSampleBufferRef sbuf);
+ void CompressionCallbackTask(OSStatus status,
+ scoped_ptr<EncodeOutput> encode_output);
+
+ // Copy CMSampleBuffer into a BitstreamBuffer and return it to the |client_|.
+ void ReturnBitstreamBuffer(
+ scoped_ptr<EncodeOutput> encode_output,
+ scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref);
+
+ // Reset the encoder's compression session by destroying the existing one
+ // using DestroyCompressionSession() and creating a new one. The new session
+ // is configured using ConfigureCompressionSession().
+ bool ResetCompressionSession();
+
+ // Create a compression session, with HW encoder enforced if
+ // |require_hw_encoding| is set.
+ bool CreateCompressionSession(
+ base::ScopedCFTypeRef<CFDictionaryRef> attributes,
+ const gfx::Size& input_size,
+ bool require_hw_encoding);
+
+ // Configure the current compression session using current encoder settings.
+ bool ConfigureCompressionSession();
+
+ // Destroy the current compression session if any. Blocks until all pending
+ // frames have been flushed out (similar to EmitFrames without doing any
+ // encoding work).
+ void DestroyCompressionSession();
+
+ // VideoToolboxGlue provides access to VideoToolbox at runtime.
+ const VideoToolboxGlue* videotoolbox_glue_;
+ base::ScopedCFTypeRef<VTCompressionSessionRef> compression_session_;
+
+ gfx::Size input_visible_size_;
+ size_t bitstream_buffer_size_;
+ int32_t frame_rate_;
+ int32_t target_bitrate_;
+
+ // Bitstream buffers ready to be used to return encoded output as a FIFO.
+ std::deque<scoped_ptr<BitstreamBufferRef>> bitstream_buffer_queue_;
+
+ // EncodeOutput needs to be copied into a BitstreamBufferRef as a FIFO.
+ std::deque<scoped_ptr<EncodeOutput>> encoder_output_queue_;
+
+ // Our original calling task runner for the child thread.
+ const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner_;
+
+ // To expose client callbacks from VideoEncodeAccelerator.
+ // NOTE: all calls to this object *MUST* be executed on
+ // |client_task_runner_|.
+ base::WeakPtr<Client> client_;
+ scoped_ptr<base::WeakPtrFactory<Client> > client_ptr_factory_;
+
+ // Thread checker to enforce that this object is used on a specific thread.
+  // It is pinned to the |client_task_runner_| thread.
+ base::ThreadChecker thread_checker_;
+
+  // This thread services tasks posted from the VEA API entry points by the
+  // GPU child thread, as well as CompressionCallback() tasks posted from the
+  // device thread.
+ base::Thread encoder_thread_;
+ scoped_refptr<base::SingleThreadTaskRunner> encoder_thread_task_runner_;
+
+ // Declared last to ensure that all weak pointers are invalidated before
+ // other destructors run.
+ base::WeakPtr<VTVideoEncodeAccelerator> encoder_weak_ptr_;
+ base::WeakPtrFactory<VTVideoEncodeAccelerator> encoder_task_weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(VTVideoEncodeAccelerator);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_VT_VIDEO_ENCODE_ACCELERATOR_MAC_H_