summaryrefslogtreecommitdiff
path: root/chromium/media/gpu
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2017-07-12 14:07:37 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2017-07-17 10:29:26 +0000
commitec02ee4181c49b61fce1c8fb99292dbb8139cc90 (patch)
tree25cde714b2b71eb639d1cd53f5a22e9ba76e14ef /chromium/media/gpu
parentbb09965444b5bb20b096a291445170876225268d (diff)
downloadqtwebengine-chromium-ec02ee4181c49b61fce1c8fb99292dbb8139cc90.tar.gz
BASELINE: Update Chromium to 59.0.3071.134
Change-Id: Id02ef6fb2204c5fd21668a1c3e6911c83b17585a Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/media/gpu')
-rw-r--r--chromium/media/gpu/BUILD.gn15
-rw-r--r--chromium/media/gpu/DEPS1
-rw-r--r--chromium/media/gpu/PRESUBMIT.py1
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder_unittest.cc5
-rw-r--r--chromium/media/gpu/android_video_decode_accelerator.cc161
-rw-r--r--chromium/media/gpu/android_video_decode_accelerator.h46
-rw-r--r--chromium/media/gpu/android_video_encode_accelerator.cc2
-rw-r--r--chromium/media/gpu/avda_codec_allocator.cc195
-rw-r--r--chromium/media/gpu/avda_codec_allocator.h78
-rw-r--r--chromium/media/gpu/avda_codec_allocator_unittest.cc70
-rw-r--r--chromium/media/gpu/avda_picture_buffer_manager.cc48
-rw-r--r--chromium/media/gpu/avda_picture_buffer_manager.h24
-rw-r--r--chromium/media/gpu/avda_shared_state.cc19
-rw-r--r--chromium/media/gpu/avda_shared_state.h28
-rw-r--r--chromium/media/gpu/avda_surface_bundle.cc24
-rw-r--r--chromium/media/gpu/avda_surface_bundle.h70
-rw-r--r--chromium/media/gpu/content_video_view_overlay.cc72
-rw-r--r--chromium/media/gpu/content_video_view_overlay.h47
-rw-r--r--chromium/media/gpu/content_video_view_overlay_allocator.cc149
-rw-r--r--chromium/media/gpu/content_video_view_overlay_allocator.h86
-rw-r--r--chromium/media/gpu/content_video_view_overlay_allocator_unittest.cc147
-rw-r--r--chromium/media/gpu/dxva_picture_buffer_win.cc63
-rw-r--r--chromium/media/gpu/dxva_picture_buffer_win.h8
-rw-r--r--chromium/media/gpu/dxva_video_decode_accelerator_win.cc137
-rw-r--r--chromium/media/gpu/dxva_video_decode_accelerator_win.h12
-rw-r--r--chromium/media/gpu/fake_jpeg_decode_accelerator.cc103
-rw-r--r--chromium/media/gpu/fake_jpeg_decode_accelerator.h69
-rw-r--r--chromium/media/gpu/gpu_video_decode_accelerator_factory.cc8
-rw-r--r--chromium/media/gpu/gpu_video_decode_accelerator_factory.h3
-rw-r--r--chromium/media/gpu/ipc/client/gpu_jpeg_decode_accelerator_host.cc38
-rw-r--r--chromium/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc49
-rw-r--r--chromium/media/gpu/ipc/service/gpu_jpeg_decode_accelerator_unittest.cc3
-rw-r--r--chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc14
-rw-r--r--chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h4
-rw-r--r--chromium/media/gpu/ipc/service/gpu_video_encode_accelerator.cc56
-rw-r--r--chromium/media/gpu/ipc/service/gpu_video_encode_accelerator.h6
-rw-r--r--chromium/media/gpu/ipc/service/media_gpu_channel.cc2
-rw-r--r--chromium/media/gpu/jpeg_decode_accelerator_unittest.cc23
-rw-r--r--chromium/media/gpu/media_foundation_video_encode_accelerator_win.cc15
-rw-r--r--chromium/media/gpu/rendering_helper.cc6
-rw-r--r--chromium/media/gpu/surface_texture_gl_owner.cc50
-rw-r--r--chromium/media/gpu/surface_texture_gl_owner.h53
-rw-r--r--chromium/media/gpu/surface_texture_gl_owner_unittest.cc113
-rw-r--r--chromium/media/gpu/v4l2_image_processor.cc2
-rw-r--r--chromium/media/gpu/v4l2_video_encode_accelerator.cc2
-rw-r--r--chromium/media/gpu/vaapi_drm_picture.cc12
-rw-r--r--chromium/media/gpu/vaapi_drm_picture.h4
-rw-r--r--chromium/media/gpu/vaapi_wrapper.cc6
-rw-r--r--chromium/media/gpu/vaapi_wrapper.h8
-rw-r--r--chromium/media/gpu/video_decode_accelerator_unittest.cc204
-rw-r--r--chromium/media/gpu/video_encode_accelerator_unittest.cc136
-rw-r--r--chromium/media/gpu/vt_video_decode_accelerator_mac.cc61
-rw-r--r--chromium/media/gpu/vt_video_decode_accelerator_mac.h12
-rw-r--r--chromium/media/gpu/vt_video_encode_accelerator_mac.cc3
54 files changed, 1901 insertions, 672 deletions
diff --git a/chromium/media/gpu/BUILD.gn b/chromium/media/gpu/BUILD.gn
index e026eee6778..03626237536 100644
--- a/chromium/media/gpu/BUILD.gn
+++ b/chromium/media/gpu/BUILD.gn
@@ -128,6 +128,8 @@ component("gpu") {
defines = [ "MEDIA_GPU_IMPLEMENTATION" ]
sources = [
+ "fake_jpeg_decode_accelerator.cc",
+ "fake_jpeg_decode_accelerator.h",
"fake_video_decode_accelerator.cc",
"fake_video_decode_accelerator.h",
"gpu_video_accelerator_util.cc",
@@ -187,6 +189,14 @@ component("gpu") {
"avda_shared_state.cc",
"avda_shared_state.h",
"avda_state_provider.h",
+ "avda_surface_bundle.cc",
+ "avda_surface_bundle.h",
+ "content_video_view_overlay.cc",
+ "content_video_view_overlay.h",
+ "content_video_view_overlay_allocator.cc",
+ "content_video_view_overlay_allocator.h",
+ "surface_texture_gl_owner.cc",
+ "surface_texture_gl_owner.h",
]
if (enable_webrtc) {
@@ -330,6 +340,7 @@ component("gpu") {
deps += [
"//third_party/angle:includes",
"//third_party/libyuv",
+ "//ui/display",
]
libs += [
"d3d9.lib",
@@ -434,6 +445,8 @@ source_set("android_video_decode_accelerator_unittests") {
sources = [
"android_video_decode_accelerator_unittest.cc",
"avda_codec_allocator_unittest.cc",
+ "content_video_view_overlay_allocator_unittest.cc",
+ "surface_texture_gl_owner_unittest.cc",
]
if (enable_media_codec_video_decoder) {
sources += [ "android/media_codec_video_decoder_unittest.cc" ]
@@ -442,7 +455,7 @@ source_set("android_video_decode_accelerator_unittests") {
":gpu",
"//base/test:test_support",
"//gpu:test_support",
- "//media/base/android",
+ "//media",
"//testing/gmock",
"//testing/gtest",
]
diff --git a/chromium/media/gpu/DEPS b/chromium/media/gpu/DEPS
index 6ce29b6a093..d96abcaf7c2 100644
--- a/chromium/media/gpu/DEPS
+++ b/chromium/media/gpu/DEPS
@@ -7,6 +7,7 @@ include_rules = [
"+third_party/webrtc/common_video",
"+third_party/webrtc/system_wrappers",
"+ui/base",
+ "+ui/display/display_switches.h",
"+ui/display/manager/chromeos",
"+ui/display/types",
"+ui/platform_window",
diff --git a/chromium/media/gpu/PRESUBMIT.py b/chromium/media/gpu/PRESUBMIT.py
index 6e1cdd1746e..180cd747ab5 100644
--- a/chromium/media/gpu/PRESUBMIT.py
+++ b/chromium/media/gpu/PRESUBMIT.py
@@ -23,5 +23,6 @@ def PostUploadHook(cl, change, output_api):
'master.tryserver.chromium.linux:linux_optional_gpu_tests_rel',
'master.tryserver.chromium.mac:mac_optional_gpu_tests_rel',
'master.tryserver.chromium.win:win_optional_gpu_tests_rel',
+ 'master.tryserver.chromium.android:android_optional_gpu_tests_rel',
],
'Automatically added optional GPU tests to run on CQ.')
diff --git a/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc b/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
index 7ce5e5da15f..826386f406c 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
+++ b/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "media/gpu/android/media_codec_video_decoder.h"
#include "base/bind.h"
#include "base/run_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "media/base/android/media_codec_util.h"
#include "media/base/test_helpers.h"
-#include "media/gpu/android/media_codec_video_decoder.h"
#include "testing/gtest/include/gtest/gtest.h"
#define SKIP_IF_MEDIA_CODEC_IS_BLACKLISTED() \
@@ -42,7 +43,7 @@ class MediaCodecVideoDecoderTest : public testing::Test {
private:
MediaCodecVideoDecoder mcvd_;
- base::MessageLoop message_loop_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
};
TEST_F(MediaCodecVideoDecoderTest, DestructWithoutInit) {
diff --git a/chromium/media/gpu/android_video_decode_accelerator.cc b/chromium/media/gpu/android_video_decode_accelerator.cc
index 6b145ea7820..773b09e5785 100644
--- a/chromium/media/gpu/android_video_decode_accelerator.cc
+++ b/chromium/media/gpu/android_video_decode_accelerator.cc
@@ -35,6 +35,7 @@
#include "media/base/timestamp_constants.h"
#include "media/base/video_decoder_config.h"
#include "media/gpu/avda_picture_buffer_manager.h"
+#include "media/gpu/content_video_view_overlay.h"
#include "media/gpu/shared_memory_region.h"
#include "media/video/picture.h"
#include "ui/gl/android/scoped_java_surface.h"
@@ -42,7 +43,7 @@
#include "ui/gl/gl_bindings.h"
#if defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
-#include "media/mojo/services/mojo_cdm_service.h"
+#include "media/mojo/services/mojo_cdm_service.h" // nogncheck
#endif
#define NOTIFY_ERROR(error_code, error_message) \
@@ -242,7 +243,6 @@ AndroidVideoDecodeAccelerator::AndroidVideoDecodeAccelerator(
deferred_initialization_pending_(false),
codec_needs_reset_(false),
defer_surface_creation_(false),
- last_release_task_type_(TaskType::AUTO_CODEC),
weak_this_factory_(this) {}
AndroidVideoDecodeAccelerator::~AndroidVideoDecodeAccelerator() {
@@ -289,6 +289,7 @@ bool AndroidVideoDecodeAccelerator::Initialize(const Config& config,
codec_config_->codec = VideoCodecProfileToVideoCodec(config.profile);
codec_config_->initial_expected_coded_size =
config.initial_expected_coded_size;
+ incoming_bundle_ = new AVDASurfaceBundle(config_.surface_id);
if (codec_config_->codec != kCodecVP8 && codec_config_->codec != kCodecVP9 &&
#if BUILDFLAG(ENABLE_HEVC_DEMUXING)
@@ -330,10 +331,10 @@ bool AndroidVideoDecodeAccelerator::Initialize(const Config& config,
// If we're low on resources, we may decide to defer creation of the surface
// until the codec is actually used.
- if (ShouldDeferSurfaceCreation(codec_allocator_, config_.surface_id,
+ if (ShouldDeferSurfaceCreation(codec_allocator_, surface_id(),
codec_config_->codec)) {
// We should never be here if a SurfaceView is required.
- DCHECK_EQ(config_.surface_id, SurfaceManager::kNoSurfaceID);
+ DCHECK_EQ(surface_id(), SurfaceManager::kNoSurfaceID);
defer_surface_creation_ = true;
}
@@ -362,6 +363,14 @@ bool AndroidVideoDecodeAccelerator::Initialize(const Config& config,
void AndroidVideoDecodeAccelerator::StartSurfaceCreation() {
// We might be called during Initialize, during deferred initialization, or
// afterwards (::Decode, for deferred surface init, UpdateSurface).
+ DCHECK(incoming_bundle_);
+
+ // We should not yet have an overlay.
+ DCHECK(!incoming_bundle_->overlay);
+
+ // Note that we don't enforce that for any SurfaceTexture or its Surface,
+ // since there might be a codec that's using them. They'll get cleared
+ // later, in InitializePictureBufferManager.
// If surface creation is deferred, then do nothing except signal that init
// is complete, if needed. We might still fail to get a surface or codec,
@@ -376,60 +385,110 @@ void AndroidVideoDecodeAccelerator::StartSurfaceCreation() {
return;
}
- if (!codec_allocator_->AllocateSurface(this, config_.surface_id)) {
+ if (incoming_bundle_->surface_id != SurfaceManager::kNoSurfaceID) {
+ // Create the overlay. Note that it will never call us back immediately.
+ // It will post when the surface is available.
+ AndroidOverlay::Config overlay_config;
+ // We use weak ptrs here since |overlay| can outlive us, if we send it for
+ // async codec config.
+ overlay_config.ready_cb =
+ base::Bind(&AndroidVideoDecodeAccelerator::OnOverlayReady,
+ weak_this_factory_.GetWeakPtr());
+ overlay_config.failed_cb =
+ base::Bind(&AndroidVideoDecodeAccelerator::OnOverlayFailed,
+ weak_this_factory_.GetWeakPtr());
+ overlay_config.destroyed_cb =
+ base::Bind(&AndroidVideoDecodeAccelerator::OnSurfaceDestroyed,
+ weak_this_factory_.GetWeakPtr());
+ // TODO(liberato): make |surface_id| the overlay config token. If we're
+ // using CVV, then we'll need a CVV factory impl that understands it.
+ incoming_bundle_->overlay = base::MakeUnique<ContentVideoViewOverlay>(
+ incoming_bundle_->surface_id, overlay_config);
// We have to wait for some other AVDA instance to free up the surface.
- // OnSurfaceAvailable will be called when it's available.
+ // OnOverlayReady will be called when it's available.
// Note that if we aren't deferring init, then we'll signal success, and
// if we fail later then it will fail decoding instead. However, since
// nobody that provides a SurfaceView requires sync init, it doesn't matter.
+ // Also remember that ContentVideoViewOverlay will not call OnOverlayReady
+ // before it returns.
state_ = WAITING_FOR_SURFACE;
return;
}
- // We now own the surface, so finish initialization.
+ // We're creating a SurfaceTexture.
InitializePictureBufferManager();
}
-void AndroidVideoDecodeAccelerator::OnSurfaceAvailable(bool success) {
+void AndroidVideoDecodeAccelerator::OnOverlayReady(AndroidOverlay* overlay) {
DCHECK(!defer_surface_creation_);
DCHECK_EQ(state_, WAITING_FOR_SURFACE);
-
- if (!success) {
- NOTIFY_ERROR(PLATFORM_FAILURE, "Surface is not available");
- return;
- }
+ DCHECK(incoming_bundle_);
InitializePictureBufferManager();
}
+void AndroidVideoDecodeAccelerator::OnOverlayFailed(AndroidOverlay* overlay) {
+ NOTIFY_ERROR(PLATFORM_FAILURE, "Surface is not available");
+}
+
void AndroidVideoDecodeAccelerator::InitializePictureBufferManager() {
DCHECK(!defer_surface_creation_);
+ DCHECK(incoming_bundle_);
if (!make_context_current_cb_.Run()) {
NOTIFY_ERROR(PLATFORM_FAILURE,
"Failed to make this decoder's GL context current");
+ incoming_bundle_ = nullptr;
return;
}
- codec_config_->surface =
- picture_buffer_manager_.Initialize(config_.surface_id);
- codec_config_->surface_texture = picture_buffer_manager_.surface_texture();
- if (codec_config_->surface.IsEmpty()) {
- NOTIFY_ERROR(PLATFORM_FAILURE, "Codec surface is empty");
- return;
+ // Move |incoming_bundle_| to |codec_config_|. Our caller must set up an
+ // incoming bundle properly, since we don't want to accidentally overwrite
+ // |surface_bundle| for a codec that's being released elsewhere.
+ // TODO(liberato): it doesn't make sense anymore for the PictureBufferManager
+ // to create the surface texture. We can probably make an overlay impl out
+ // of it, and provide the surface texture to |picture_buffer_manager_|.
+ if (incoming_bundle_->overlay) {
+ picture_buffer_manager_.InitializeForOverlay();
+ } else {
+ incoming_bundle_->surface_texture_surface =
+ picture_buffer_manager_.InitializeForSurfaceTexture();
+ incoming_bundle_->surface_texture =
+ picture_buffer_manager_.surface_texture();
+
+ if (!incoming_bundle_->surface_texture) {
+ NOTIFY_ERROR(PLATFORM_FAILURE, "Could not allocate surface texture");
+ incoming_bundle_ = nullptr;
+ return;
+ }
}
- // If we have a media codec, then setSurface. If that doesn't work, then we
+ // If we have a media codec, then SetSurface. If that doesn't work, then we
// do not try to allocate a new codec; we might not be at a keyframe, etc.
// If we get here with a codec, then we must setSurface.
if (media_codec_) {
// TODO(liberato): fail on api check?
- if (!media_codec_->SetSurface(codec_config_->surface.j_surface().obj())) {
+ if (!media_codec_->SetSurface(incoming_bundle_->j_surface().obj())) {
NOTIFY_ERROR(PLATFORM_FAILURE, "MediaCodec failed to switch surfaces.");
+ // We're not going to use |incoming_bundle_|.
+ } else {
+ // We've switched surfaces, so replace |surface_bundle|.
+ codec_config_->surface_bundle = incoming_bundle_;
+ // We could be in WAITING_FOR_SURFACE, but we're not anymore.
+ state_ = NO_ERROR;
}
+ incoming_bundle_ = nullptr;
return;
}
+ // We're going to create a codec with |incoming_bundle_|. It might fail, but
+ // either way, we're done with any previous bundle. Note that, since we
+ // never get here after init (i.e., we never change surfaces without using
+ // SetSurface), there shouldn't be any previous bundle. However, this is the
+ // right thing to do even if we can switch.
+ codec_config_->surface_bundle = incoming_bundle_;
+ incoming_bundle_ = nullptr;
+
// If the client doesn't support deferred initialization (WebRTC), then we
// should complete it now and return a meaningful result. Note that it would
// be nice if we didn't have to worry about starting codec configuration at
@@ -632,7 +691,7 @@ bool AndroidVideoDecodeAccelerator::DequeueOutput() {
if (!UpdateSurface())
return false;
// If we can't allocate the incoming surface yet, then stop here.
- if (state_ == WAITING_FOR_SURFACE)
+ if (state_ != NO_ERROR)
return false;
}
@@ -982,7 +1041,7 @@ void AndroidVideoDecodeAccelerator::ConfigureMediaCodecSynchronously() {
codec_config_->task_type = task_type.value();
std::unique_ptr<MediaCodecBridge> media_codec =
- AVDACodecAllocator::Instance()->CreateMediaCodecSync(codec_config_);
+ AVDACodecAllocator::GetInstance()->CreateMediaCodecSync(codec_config_);
// Note that |media_codec| might be null, which will NotifyError.
OnCodecConfigured(std::move(media_codec));
}
@@ -991,7 +1050,6 @@ void AndroidVideoDecodeAccelerator::OnCodecConfigured(
std::unique_ptr<MediaCodecBridge> media_codec) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(state_ == WAITING_FOR_CODEC || state_ == SURFACE_DESTROYED);
-
// If we are supposed to notify that initialization is complete, then do so
// before returning. Otherwise, this is a reconfiguration.
@@ -1121,7 +1179,8 @@ void AndroidVideoDecodeAccelerator::ResetCodecState() {
// If we've just completed a flush don't reset the codec yet. Instead defer
// until the next decode call. This prevents us from unbacking frames that
// might be out for display at end of stream.
- codec_needs_reset_ = drain_type_ == DRAIN_FOR_FLUSH;
+ codec_needs_reset_ =
+ (drain_type_ == DRAIN_FOR_FLUSH) || (drain_type_ == DRAIN_FOR_RESET);
if (codec_needs_reset_)
return;
@@ -1135,6 +1194,10 @@ void AndroidVideoDecodeAccelerator::ResetCodecState() {
} else {
DVLOG(3) << __func__ << " Deleting the MediaCodec and creating a new one.";
GetManager()->StopTimer(this);
+ // Note that this will release the codec, then allocate a new one. It will
+ // not wait for the old one to finish up with the surface, which is bad.
+ // It works (usually) because it ends up allocating the codec on the same
+ // thread as is used to release the old one, so it's serialized anyway.
ConfigureMediaCodecAsynchronously();
}
}
@@ -1177,7 +1240,7 @@ void AndroidVideoDecodeAccelerator::SetSurface(int32_t surface_id) {
DVLOG(1) << __func__;
DCHECK(thread_checker_.CalledOnValidThread());
- if (surface_id == config_.surface_id) {
+ if (surface_id == this->surface_id()) {
pending_surface_id_.reset();
return;
}
@@ -1211,23 +1274,6 @@ void AndroidVideoDecodeAccelerator::ActualDestroy() {
GetManager()->StopTimer(this);
ReleaseCodec();
- // We no longer care about |surface_id|, in case we did before. It's okay
- // if we have no surface and/or weren't the owner or a waiter.
- codec_allocator_->DeallocateSurface(this, config_.surface_id);
-
- // Hop the SurfaceTexture release call through the task runner used last time
- // we released a codec. This ensures that we release the surface texture after
- // the codec it's attached to (if any) is released. It's not sufficient to use
- // |codec_config_->task_type| because that might have changed since we
- // released the codec this surface was attached to.
- if (codec_config_->surface_texture) {
- codec_allocator_->TaskRunnerFor(last_release_task_type_)
- ->PostTaskAndReply(
- FROM_HERE, base::Bind(&base::DoNothing),
- base::Bind(&gl::SurfaceTexture::ReleaseSurfaceTexture,
- codec_config_->surface_texture));
- }
-
delete this;
}
@@ -1246,7 +1292,8 @@ AndroidVideoDecodeAccelerator::GetGlDecoder() const {
return get_gles2_decoder_cb_.Run();
}
-void AndroidVideoDecodeAccelerator::OnSurfaceDestroyed() {
+void AndroidVideoDecodeAccelerator::OnSurfaceDestroyed(
+ AndroidOverlay* overlay) {
DVLOG(1) << __func__;
TRACE_EVENT0("media", "AVDA::OnSurfaceDestroyed");
DCHECK(thread_checker_.CalledOnValidThread());
@@ -1267,7 +1314,7 @@ void AndroidVideoDecodeAccelerator::OnSurfaceDestroyed() {
picture_buffer_manager_.ReleaseCodecBuffers(output_picture_buffers_);
// Switch away from the surface being destroyed to a surface texture.
- DCHECK_NE(config_.surface_id, SurfaceManager::kNoSurfaceID);
+ DCHECK_NE(surface_id(), SurfaceManager::kNoSurfaceID);
// The leaving fullscreen notification may come in before this point.
if (pending_surface_id_)
@@ -1545,11 +1592,10 @@ bool AndroidVideoDecodeAccelerator::IsMediaCodecSoftwareDecodingForbidden()
bool AndroidVideoDecodeAccelerator::UpdateSurface() {
DCHECK(pending_surface_id_);
- DCHECK_NE(config_.surface_id, pending_surface_id_.value());
- DCHECK(config_.surface_id == SurfaceManager::kNoSurfaceID ||
+ DCHECK_NE(surface_id(), pending_surface_id_.value());
+ DCHECK(surface_id() == SurfaceManager::kNoSurfaceID ||
pending_surface_id_.value() == SurfaceManager::kNoSurfaceID);
- const int previous_surface_id = config_.surface_id;
const int new_surface_id = pending_surface_id_.value();
pending_surface_id_.reset();
@@ -1557,7 +1603,8 @@ bool AndroidVideoDecodeAccelerator::UpdateSurface() {
// then this must complete synchronously or it will DCHECK. Otherwise, we
// might still be using the destroyed surface. We don't enforce this, but
// it's worth remembering that there are cases where it's required.
- config_.surface_id = new_surface_id;
+ // Note that we don't re-use |surface_bundle|, since the codec is using it!
+ incoming_bundle_ = new AVDASurfaceBundle(new_surface_id);
StartSurfaceCreation();
if (state_ == ERROR) {
// This might be called from OnSurfaceDestroyed(), so we have to release the
@@ -1568,15 +1615,13 @@ bool AndroidVideoDecodeAccelerator::UpdateSurface() {
// wouldn't be necessarily true anymore.
// Also note that we might not have switched surfaces yet, which is also bad
// for OnSurfaceDestroyed, because of WAITING_FOR_SURFACE. Shouldn't
- // happen with SurfaceTexture, and OnSurfaceDestroyed checks for it.
- config_.surface_id = previous_surface_id;
+ // happen with SurfaceTexture, and OnSurfaceDestroyed checks for it. In
+ // either case, we definitely should not still have an incoming bundle; it
+ // should have been dropped.
+ DCHECK(!incoming_bundle_);
ReleaseCodec();
- codec_allocator_->DeallocateSurface(this, new_surface_id);
}
- // Regardless of whether we succeeded, we no longer own the previous surface.
- codec_allocator_->DeallocateSurface(this, previous_surface_id);
-
return state_ != ERROR;
}
@@ -1585,9 +1630,9 @@ void AndroidVideoDecodeAccelerator::ReleaseCodec() {
return;
picture_buffer_manager_.CodecChanged(nullptr);
- codec_allocator_->ReleaseMediaCodec(
- std::move(media_codec_), codec_config_->task_type, config_.surface_id);
- last_release_task_type_ = codec_config_->task_type;
+ codec_allocator_->ReleaseMediaCodec(std::move(media_codec_),
+ codec_config_->task_type,
+ codec_config_->surface_bundle);
}
} // namespace media
diff --git a/chromium/media/gpu/android_video_decode_accelerator.h b/chromium/media/gpu/android_video_decode_accelerator.h
index 95962377d62..507dccc867f 100644
--- a/chromium/media/gpu/android_video_decode_accelerator.h
+++ b/chromium/media/gpu/android_video_decode_accelerator.h
@@ -76,8 +76,6 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
void NotifyError(Error error) override;
// AVDACodecAllocatorClient implementation:
- void OnSurfaceAvailable(bool success) override;
- void OnSurfaceDestroyed() override;
void OnCodecConfigured(
std::unique_ptr<MediaCodecBridge> media_codec) override;
@@ -112,12 +110,23 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// Entry point for configuring / reconfiguring a codec with a new surface.
// Start surface creation by trying to allocate the surface id. Will either
// InitializePictureBufferManager if the surface is available immediately, or
- // will wait for OnSurfaceAvailable to do it. This will transition |state_|
+ // will wait for OnOverlayReady to do it. This will transition |state_|
// to WAITING_FOR_SURFACE or WAITING_FOR_CODEC, as needed (or NO_ERROR if it
// gets the surface and the codec without waiting).
+ // Note that this requires that you create a new |incoming_bundle_| with the
+ // appropriate surface id.
void StartSurfaceCreation();
- // Initialize of the picture buffer manager to use the current surface, once
+ // Called by AndroidOverlay when a surface becomes available.
+ void OnOverlayReady(AndroidOverlay* overlay);
+
+ // Called by AndroidOverlay when the overlay will not call OnOverlayReady.
+ void OnOverlayFailed(AndroidOverlay* overlay);
+
+ // Called by AndroidOverlay when a surface is lost.
+ void OnSurfaceDestroyed(AndroidOverlay* overlay);
+
+ // Initializes the picture buffer manager to use the current surface, once
// it is available. This is not normally called directly, but rather via
// StartSurfaceCreation. If we have a media codec already, then this will
// attempt to setSurface the new surface. Otherwise, it will start codec
@@ -125,6 +134,9 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// ready even if this succeeds, but async config will be started. If
// setSurface fails, this will not replace the codec. On failure, this will
// transition |state_| to ERROR.
+ // Note that this assumes that there is an |incoming_bundle_| that we'll use.
+ // On success, we'll replace the bundle in |codec_config_|. On failure, we'll
+ // delete the incoming bundle.
void InitializePictureBufferManager();
// A part of destruction process that is sometimes postponed after the drain.
@@ -143,11 +155,6 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// concern with modifying |codec_config_| after this returns.
void ConfigureMediaCodecSynchronously();
- // Instantiate a media codec using |codec_config|.
- // This may be called on any thread.
- static std::unique_ptr<MediaCodecBridge> ConfigureMediaCodecOnAnyThread(
- scoped_refptr<CodecConfig> codec_config);
-
// Sends the decoded frame specified by |codec_buffer_index| to the client.
void SendDecodedFrameToClient(int32_t codec_buffer_index,
int32_t bitstream_id);
@@ -240,6 +247,16 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// |picture_buffer_manager_|.
void ReleaseCodec();
+ // Returns the surface ID from the incoming bundle, if we have one, or
+ // the current surface bundle if not. The reasoning is that, if we have an
+ // incoming bundle, then the current (outgoing) one has already been returned
+ // to the codec allocator via DeallocateSurface. The only place this happens
+ // is UpdateSurface, which handles it specially.
+ int surface_id() const {
+ return incoming_bundle_ ? incoming_bundle_->surface_id
+ : codec_config_->surface_bundle->surface_id;
+ }
+
// Used to DCHECK that we are called on the correct thread.
base::ThreadChecker thread_checker_;
@@ -353,13 +370,16 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// pictures have been rendered in DequeueOutput().
base::Optional<int32_t> pending_surface_id_;
- // The task type used for the last codec release. For posting SurfaceTexture
- // release to the same thread.
- TaskType last_release_task_type_;
-
// Copy of the VDA::Config we were given.
Config config_;
+ // SurfaceBundle that we're going to use for StartSurfaceCreation. This is
+ // separate than the bundle in |codec_config_|, since we can start surface
+ // creation while another codec is using the old surface. For example, if
+ // we're going to SetSurface, then the current codec will depend on the
+ // current bundle until then.
+ scoped_refptr<AVDASurfaceBundle> incoming_bundle_;
+
// WeakPtrFactory for posting tasks back to |this|.
base::WeakPtrFactory<AndroidVideoDecodeAccelerator> weak_this_factory_;
diff --git a/chromium/media/gpu/android_video_encode_accelerator.cc b/chromium/media/gpu/android_video_encode_accelerator.cc
index 24237bb971e..95c542c08ed 100644
--- a/chromium/media/gpu/android_video_encode_accelerator.cc
+++ b/chromium/media/gpu/android_video_encode_accelerator.cc
@@ -428,7 +428,7 @@ void AndroidVideoEncodeAccelerator::DequeueOutput() {
FROM_HERE,
base::Bind(&VideoEncodeAccelerator::Client::BitstreamBufferReady,
client_ptr_factory_->GetWeakPtr(), bitstream_buffer.id(), size,
- key_frame, base::Time::Now() - base::Time()));
+ key_frame, base::TimeDelta()));
}
} // namespace media
diff --git a/chromium/media/gpu/avda_codec_allocator.cc b/chromium/media/gpu/avda_codec_allocator.cc
index d9b1c8e429f..ac0fca9c61a 100644
--- a/chromium/media/gpu/avda_codec_allocator.cc
+++ b/chromium/media/gpu/avda_codec_allocator.cc
@@ -41,6 +41,11 @@ void DeleteMediaCodecAndSignal(std::unique_ptr<MediaCodecBridge> codec,
done_event->Signal();
}
+void DropReferenceToSurfaceBundle(
+ scoped_refptr<AVDASurfaceBundle> surface_bundle) {
+ // Do nothing. Let |surface_bundle| go out of scope.
+}
+
} // namespace
CodecConfig::CodecConfig() {}
@@ -71,7 +76,7 @@ bool AVDACodecAllocator::HangDetector::IsThreadLikelyHung() {
}
// static
-AVDACodecAllocator* AVDACodecAllocator::Instance() {
+AVDACodecAllocator* AVDACodecAllocator::GetInstance() {
static AVDACodecAllocator* allocator = new AVDACodecAllocator();
return allocator;
}
@@ -137,103 +142,6 @@ scoped_refptr<base::SingleThreadTaskRunner> AVDACodecAllocator::TaskRunnerFor(
return threads_[task_type]->thread.task_runner();
}
-bool AVDACodecAllocator::AllocateSurface(AVDACodecAllocatorClient* client,
- int surface_id) {
- DVLOG(1) << __func__ << ": " << surface_id;
- DCHECK(thread_checker_.CalledOnValidThread());
-
- if (surface_id == SurfaceManager::kNoSurfaceID)
- return true;
-
- // If it's not owned or being released, |client| now owns it.
- if (!surface_owners_.count(surface_id) &&
- !pending_codec_releases_.count(surface_id)) {
- surface_owners_[surface_id].owner = client;
- return true;
- }
-
- // Otherwise |client| replaces the previous waiter (if any).
- OwnerRecord& record = surface_owners_[surface_id];
- if (record.waiter)
- record.waiter->OnSurfaceAvailable(false);
- record.waiter = client;
- return false;
-}
-
-void AVDACodecAllocator::DeallocateSurface(AVDACodecAllocatorClient* client,
- int surface_id) {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (surface_id == SurfaceManager::kNoSurfaceID ||
- !surface_owners_.count(surface_id)) {
- return;
- }
-
- OwnerRecord& record = surface_owners_[surface_id];
- if (record.owner == client)
- record.owner = nullptr;
- else if (record.waiter == client)
- record.waiter = nullptr;
-
- // Promote the waiter if possible.
- if (record.waiter && !record.owner &&
- !pending_codec_releases_.count(surface_id)) {
- record.owner = record.waiter;
- record.waiter = nullptr;
- record.owner->OnSurfaceAvailable(true);
- return;
- }
-
- // Remove the record if it's now unused.
- if (!record.owner && !record.waiter)
- surface_owners_.erase(surface_id);
-}
-
-// During surface teardown we have to handle the following cases.
-// 1) No AVDA has acquired the surface, or the surface has already been
-// completely released.
-// 2) A MediaCodec is currently being configured with the surface on another
-// thread. Whether an AVDA owns the surface or has already deallocated it,
-// the MediaCodec should be dropped when configuration completes.
-// 3) An AVDA owns the surface and it responds to OnSurfaceDestroyed() by:
-// a) Replacing the destroyed surface by calling MediaCodec#setSurface().
-// b) Releasing the MediaCodec it's attached to.
-// 4) No AVDA owns the surface, but the MediaCodec it's attached to is currently
-// being destroyed on another thread.
-void AVDACodecAllocator::OnSurfaceDestroyed(int surface_id) {
- DVLOG(1) << __func__ << ": " << surface_id;
- DCHECK(thread_checker_.CalledOnValidThread());
-
- // Notify the owner and waiter (if any).
- if (surface_owners_.count(surface_id)) {
- OwnerRecord& record = surface_owners_[surface_id];
- if (record.waiter) {
- record.waiter->OnSurfaceAvailable(false);
- record.waiter = nullptr;
- }
-
- if (record.owner)
- record.owner->OnSurfaceDestroyed();
-
- surface_owners_.erase(surface_id);
- }
-
- // The codec might have been released above in OnSurfaceDestroyed(), or was
- // already pending release.
- if (!pending_codec_releases_.count(surface_id))
- return;
-
- // The codec is being released so we have to wait for it here. It's a
- // TimedWait() because the MediaCodec release may hang due to framework bugs.
- // And in that case we don't want to hang the browser UI thread. Android ANRs
- // occur when the UI thread is blocked for 5 seconds, so waiting for 2 seconds
- // gives us leeway to avoid an ANR. Verified no ANR on a Nexus 7.
- base::WaitableEvent& released =
- pending_codec_releases_.find(surface_id)->second;
- released.TimedWait(base::TimeDelta::FromSeconds(2));
- if (!released.IsSignaled())
- DLOG(WARNING) << __func__ << ": timed out waiting for MediaCodec#release()";
-}
-
std::unique_ptr<MediaCodecBridge> AVDACodecAllocator::CreateMediaCodecSync(
scoped_refptr<CodecConfig> codec_config) {
TRACE_EVENT0("media", "AVDA::CreateMediaCodecSync");
@@ -249,7 +157,7 @@ std::unique_ptr<MediaCodecBridge> AVDACodecAllocator::CreateMediaCodecSync(
MediaCodecBridgeImpl::CreateVideoDecoder(
codec_config->codec, codec_config->needs_protected_surface,
codec_config->initial_expected_coded_size,
- codec_config->surface.j_surface().obj(), media_crypto,
+ codec_config->surface_bundle->j_surface().obj(), media_crypto,
codec_config->csd0, codec_config->csd1, true,
require_software_codec));
@@ -259,55 +167,80 @@ std::unique_ptr<MediaCodecBridge> AVDACodecAllocator::CreateMediaCodecSync(
void AVDACodecAllocator::CreateMediaCodecAsync(
base::WeakPtr<AVDACodecAllocatorClient> client,
scoped_refptr<CodecConfig> codec_config) {
+ // Allocate the codec on the appropriate thread, and reply to this one with
+ // the result. If |client| is gone by then, we handle cleanup.
base::PostTaskAndReplyWithResult(
TaskRunnerFor(codec_config->task_type).get(), FROM_HERE,
base::Bind(&AVDACodecAllocator::CreateMediaCodecSync,
base::Unretained(this), codec_config),
- base::Bind(&AVDACodecAllocatorClient::OnCodecConfigured, client));
+ base::Bind(&AVDACodecAllocator::ForwardOrDropCodec,
+ base::Unretained(this), client, codec_config->task_type,
+ codec_config->surface_bundle));
+}
+
+void AVDACodecAllocator::ForwardOrDropCodec(
+ base::WeakPtr<AVDACodecAllocatorClient> client,
+ TaskType task_type,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle,
+ std::unique_ptr<MediaCodecBridge> media_codec) {
+ if (!client) {
+ // |client| has been destroyed. Free |media_codec| on the right thread.
+ // Note that this also preserves |surface_bundle| until |media_codec| has
+ // been released, in case our ref to it is the last one.
+ ReleaseMediaCodec(std::move(media_codec), task_type, surface_bundle);
+ return;
+ }
+
+ client->OnCodecConfigured(std::move(media_codec));
}
void AVDACodecAllocator::ReleaseMediaCodec(
std::unique_ptr<MediaCodecBridge> media_codec,
TaskType task_type,
- int surface_id) {
+ const scoped_refptr<AVDASurfaceBundle>& surface_bundle) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(media_codec);
- // No need to track the release if it's a SurfaceTexture.
- if (surface_id == SurfaceManager::kNoSurfaceID) {
- TaskRunnerFor(task_type)->PostTask(
- FROM_HERE, base::Bind(&DeleteMediaCodecAndSignal,
- base::Passed(std::move(media_codec)), nullptr));
+ // No need to track the release if it's a SurfaceTexture. We still forward
+ // the reference to |surface_bundle|, though, so that the SurfaceTexture
+ // lasts at least as long as the codec.
+ if (!surface_bundle->overlay) {
+ TaskRunnerFor(task_type)->PostTaskAndReply(
+ FROM_HERE,
+ base::Bind(&DeleteMediaCodecAndSignal,
+ base::Passed(std::move(media_codec)), nullptr),
+ base::Bind(&DropReferenceToSurfaceBundle, surface_bundle));
return;
}
+ DCHECK(!surface_bundle->surface_texture);
pending_codec_releases_.emplace(
- std::piecewise_construct, std::forward_as_tuple(surface_id),
+ std::piecewise_construct,
+ std::forward_as_tuple(surface_bundle->overlay.get()),
std::forward_as_tuple(base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED));
base::WaitableEvent* released =
- &pending_codec_releases_.find(surface_id)->second;
+ &pending_codec_releases_.find(surface_bundle->overlay.get())->second;
+ // Note that we forward |surface_bundle|, too, so that the surface outlasts
+ // the codec. This doesn't matter so much for CVV surfaces, since they don't
+ // auto-release when they're dropped. However, for surface owners, this will
+ // become important, so we still handle it. Plus, it makes sense.
TaskRunnerFor(task_type)->PostTaskAndReply(
- FROM_HERE, base::Bind(&DeleteMediaCodecAndSignal,
- base::Passed(std::move(media_codec)), released),
- base::Bind(&AVDACodecAllocator::OnMediaCodecAndSurfaceReleased,
- base::Unretained(this), surface_id));
+ FROM_HERE,
+ base::Bind(&DeleteMediaCodecAndSignal,
+ base::Passed(std::move(media_codec)), released),
+ base::Bind(&AVDACodecAllocator::OnMediaCodecReleased,
+ base::Unretained(this), surface_bundle));
}
-void AVDACodecAllocator::OnMediaCodecAndSurfaceReleased(int surface_id) {
+void AVDACodecAllocator::OnMediaCodecReleased(
+ scoped_refptr<AVDASurfaceBundle> surface_bundle) {
DCHECK(thread_checker_.CalledOnValidThread());
- pending_codec_releases_.erase(surface_id);
- if (!surface_owners_.count(surface_id))
- return;
+ pending_codec_releases_.erase(surface_bundle->overlay.get());
- OwnerRecord& record = surface_owners_[surface_id];
- if (!record.owner && record.waiter) {
- record.owner = record.waiter;
- record.waiter = nullptr;
- record.owner->OnSurfaceAvailable(true);
- }
+ // Also note that |surface_bundle| lasted at least as long as the codec.
}
// Returns a hint about whether the construction thread has hung for
@@ -337,6 +270,24 @@ base::Thread& AVDACodecAllocator::GetThreadForTesting(TaskType task_type) {
return threads_[task_type]->thread;
}
+bool AVDACodecAllocator::WaitForPendingRelease(AndroidOverlay* overlay) {
+ if (!pending_codec_releases_.count(overlay))
+ return true;
+
+ // The codec is being released so we have to wait for it here. It's a
+ // TimedWait() because the MediaCodec release may hang due to framework bugs.
+ // And in that case we don't want to hang the browser UI thread. Android ANRs
+ // occur when the UI thread is blocked for 5 seconds, so waiting for 2 seconds
+ // gives us leeway to avoid an ANR. Verified no ANR on a Nexus 7.
+ base::WaitableEvent& released = pending_codec_releases_.find(overlay)->second;
+ released.TimedWait(base::TimeDelta::FromSeconds(2));
+ if (released.IsSignaled())
+ return true;
+
+ DLOG(WARNING) << __func__ << ": timed out waiting for MediaCodec#release()";
+ return false;
+}
+
AVDACodecAllocator::AVDACodecAllocator(base::TickClock* tick_clock,
base::WaitableEvent* stop_event)
: stop_event_for_testing_(stop_event), weak_this_factory_(this) {
diff --git a/chromium/media/gpu/avda_codec_allocator.h b/chromium/media/gpu/avda_codec_allocator.h
index d8428de7fed..be1d61396c4 100644
--- a/chromium/media/gpu/avda_codec_allocator.h
+++ b/chromium/media/gpu/avda_codec_allocator.h
@@ -19,11 +19,13 @@
#include "base/threading/thread_checker.h"
#include "base/time/tick_clock.h"
#include "base/trace_event/trace_event.h"
+#include "media/base/android/android_overlay.h"
#include "media/base/android/media_codec_bridge_impl.h"
#include "media/base/android/media_drm_bridge_cdm_context.h"
#include "media/base/media.h"
#include "media/base/surface_manager.h"
#include "media/base/video_codecs.h"
+#include "media/gpu/avda_surface_bundle.h"
#include "media/gpu/media_gpu_export.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/android/scoped_java_surface.h"
@@ -50,12 +52,7 @@ class CodecConfig : public base::RefCountedThreadSafe<CodecConfig> {
VideoCodec codec = kUnknownVideoCodec;
// The surface that MediaCodec is configured to output to.
- gl::ScopedJavaSurface surface;
- int surface_id = SurfaceManager::kNoSurfaceID;
-
- // The SurfaceTexture attached to |surface|, or nullptr if |surface| is
- // SurfaceView backed.
- scoped_refptr<gl::SurfaceTexture> surface_texture;
+ scoped_refptr<AVDASurfaceBundle> surface_bundle;
// The MediaCrypto that MediaCodec is configured with for an encrypted stream.
MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto;
@@ -84,7 +81,7 @@ class CodecConfig : public base::RefCountedThreadSafe<CodecConfig> {
DISALLOW_COPY_AND_ASSIGN(CodecConfig);
};
-class AVDACodecAllocatorClient {
+class AVDASurfaceAllocatorClient {
public:
// Called when the requested SurfaceView becomes available after a call to
// AllocateSurface()
@@ -96,6 +93,12 @@ class AVDACodecAllocatorClient {
// need to call DeallocateSurface();
virtual void OnSurfaceDestroyed() = 0;
+ protected:
+ ~AVDASurfaceAllocatorClient() {}
+};
+
+class AVDACodecAllocatorClient {
+ public:
// Called on the main thread when a new MediaCodec is configured.
// |media_codec| will be null if configuration failed.
virtual void OnCodecConfigured(
@@ -111,11 +114,7 @@ class AVDACodecAllocatorClient {
// on them to allow software fallback if the HW path is hung up.
class MEDIA_GPU_EXPORT AVDACodecAllocator {
public:
- static AVDACodecAllocator* Instance();
-
- // Called synchronously when the given surface is being destroyed on the
- // browser UI thread.
- void OnSurfaceDestroyed(int surface_id);
+ static AVDACodecAllocator* GetInstance();
// Make sure the construction threads are started for |client|. Returns true
// on success.
@@ -123,17 +122,6 @@ class MEDIA_GPU_EXPORT AVDACodecAllocator {
void StopThread(AVDACodecAllocatorClient* client);
- // Returns true if the caller now owns the surface, or false if someone else
- // owns the surface. |client| will be notified when the surface is available
- // via OnSurfaceAvailable().
- bool AllocateSurface(AVDACodecAllocatorClient* client, int surface_id);
-
- // Relinquish ownership of the surface or stop waiting for it to be available.
- // The caller must guarantee that when calling this the surface is either no
- // longer attached to a MediaCodec, or the MediaCodec it was attached to is
- // was released with ReleaseMediaCodec().
- void DeallocateSurface(AVDACodecAllocatorClient* client, int surface_id);
-
// Create and configure a MediaCodec synchronously.
std::unique_ptr<MediaCodecBridge> CreateMediaCodecSync(
scoped_refptr<CodecConfig> codec_config);
@@ -144,12 +132,18 @@ class MEDIA_GPU_EXPORT AVDACodecAllocator {
base::WeakPtr<AVDACodecAllocatorClient> client,
scoped_refptr<CodecConfig> codec_config);
- // Asynchronously release |media_codec| with the attached surface.
+ // Asynchronously release |media_codec| with the attached surface. We will
+ // drop our reference to |surface_bundle| on the main thread after the codec
+ // is deallocated, since the codec isn't using it anymore. We will not take
+ // other action on it (e.g., calling ReleaseSurfaceTexture if it has one),
+ // since some other codec might be going to use it. We just want to be sure
+ // that it outlives |media_codec|.
// TODO(watk): Bundle the MediaCodec and surface together so you can't get
// this pairing wrong.
- void ReleaseMediaCodec(std::unique_ptr<MediaCodecBridge> media_codec,
- TaskType task_type,
- int surface_id);
+ void ReleaseMediaCodec(
+ std::unique_ptr<MediaCodecBridge> media_codec,
+ TaskType task_type,
+ const scoped_refptr<AVDASurfaceBundle>& surface_bundle);
// Returns a hint about whether the construction thread has hung for
// |task_type|. Note that if a thread isn't started, then we'll just return
@@ -170,18 +164,31 @@ class MEDIA_GPU_EXPORT AVDACodecAllocator {
// Return a reference to the thread for unit tests.
base::Thread& GetThreadForTesting(TaskType task_type);
+ // Wait for a bounded amount of time for |overlay| to be freed, if it's
+ // in use pending release of a codec. Returns true on success, or false if
+ // the wait times out.
+ bool WaitForPendingRelease(AndroidOverlay* overlay);
+
protected:
// |tick_clock| and |stop_event| are for tests only.
AVDACodecAllocator(base::TickClock* tick_clock = nullptr,
base::WaitableEvent* stop_event = nullptr);
virtual ~AVDACodecAllocator();
+ // Forward |media_codec|, which is configured to output to |surface_bundle|,
+ // to |client| if |client| is still around. Otherwise, release the codec and
+ // then drop our ref to |surface_bundle|.
+ void ForwardOrDropCodec(base::WeakPtr<AVDACodecAllocatorClient> client,
+ TaskType task_type,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle,
+ std::unique_ptr<MediaCodecBridge> media_codec);
+
private:
friend class AVDACodecAllocatorTest;
struct OwnerRecord {
- AVDACodecAllocatorClient* owner = nullptr;
- AVDACodecAllocatorClient* waiter = nullptr;
+ AVDASurfaceAllocatorClient* owner = nullptr;
+ AVDASurfaceAllocatorClient* waiter = nullptr;
};
class HangDetector : public base::MessageLoop::TaskObserver {
@@ -210,7 +217,9 @@ class MEDIA_GPU_EXPORT AVDACodecAllocator {
HangDetector hang_detector;
};
- void OnMediaCodecAndSurfaceReleased(int surface_id);
+ // Called on the gpu main thread when a codec is freed on a codec thread.
+ // |surface_bundle| is the surface bundle that the codec was using.
+ void OnMediaCodecReleased(scoped_refptr<AVDASurfaceBundle> surface_bundle);
// Stop the thread indicated by |index|. This signals stop_event_for_testing_
// after both threads are stopped.
@@ -219,12 +228,11 @@ class MEDIA_GPU_EXPORT AVDACodecAllocator {
// All registered AVDAs.
std::set<AVDACodecAllocatorClient*> clients_;
- // Indexed by surface id.
- std::map<int32_t, OwnerRecord> surface_owners_;
-
- // Waitable events for ongoing release tasks indexed by surface id so we can
+ // Waitable events for ongoing release tasks indexed by overlay so we can
// wait on the codec release if the surface attached to it is being destroyed.
- std::map<int32_t, base::WaitableEvent> pending_codec_releases_;
+ // This really is needed only for ContentVideoViewOverlay, since it requires
+ // synchronous releases with respect to the main thread.
+ std::map<AndroidOverlay*, base::WaitableEvent> pending_codec_releases_;
// Threads for each of TaskType. They are started / stopped as avda instances
// show and and request them. The vector indicies must match TaskType.
diff --git a/chromium/media/gpu/avda_codec_allocator_unittest.cc b/chromium/media/gpu/avda_codec_allocator_unittest.cc
index 4be7e91fe7a..030cd48b999 100644
--- a/chromium/media/gpu/avda_codec_allocator_unittest.cc
+++ b/chromium/media/gpu/avda_codec_allocator_unittest.cc
@@ -44,9 +44,6 @@ void SignalImmediately(base::WaitableEvent* event) {
class MockClient : public AVDACodecAllocatorClient {
public:
- MOCK_METHOD1(OnSurfaceAvailable, void(bool success));
- MOCK_METHOD0(OnSurfaceDestroyed, void());
-
// Gmock doesn't let us mock methods taking move-only types.
MOCK_METHOD1(OnCodecConfiguredMock, void(MediaCodecBridge* media_codec));
void OnCodecConfigured(
@@ -238,71 +235,4 @@ TEST_F(AVDACodecAllocatorTest, TestHangThread) {
ASSERT_EQ(AUTO_CODEC, TaskTypeForAllocation().value());
}
-TEST_F(AVDACodecAllocatorTest, AllocatingASurfaceTextureAlwaysSucceeds) {
- ASSERT_TRUE(
- allocator2_->AllocateSurface(avda1_, SurfaceManager::kNoSurfaceID));
- ASSERT_TRUE(
- allocator2_->AllocateSurface(avda2_, SurfaceManager::kNoSurfaceID));
-}
-
-TEST_F(AVDACodecAllocatorTest, AllocatingAnOwnedSurfaceFails) {
- ASSERT_TRUE(allocator2_->AllocateSurface(avda1_, 1));
- ASSERT_FALSE(allocator2_->AllocateSurface(avda2_, 1));
-}
-
-TEST_F(AVDACodecAllocatorTest, LaterWaitersReplaceEarlierWaiters) {
- allocator2_->AllocateSurface(avda1_, 1);
- allocator2_->AllocateSurface(avda2_, 1);
- EXPECT_CALL(*avda2_, OnSurfaceAvailable(false));
- allocator2_->AllocateSurface(avda3_, 1);
-}
-
-TEST_F(AVDACodecAllocatorTest, WaitersBecomeOwnersWhenSurfacesAreReleased) {
- allocator2_->AllocateSurface(avda1_, 1);
- allocator2_->AllocateSurface(avda2_, 1);
- EXPECT_CALL(*avda2_, OnSurfaceAvailable(true));
- allocator2_->DeallocateSurface(avda1_, 1);
- // The surface should still be owned.
- ASSERT_FALSE(allocator2_->AllocateSurface(avda1_, 1));
-}
-
-TEST_F(AVDACodecAllocatorTest, DeallocatingUnownedSurfacesIsSafe) {
- allocator2_->DeallocateSurface(avda1_, 1);
- allocator2_->DeallocateSurface(avda1_, SurfaceManager::kNoSurfaceID);
-}
-
-TEST_F(AVDACodecAllocatorTest, WaitersAreRemovedIfTheyDeallocate) {
- allocator2_->AllocateSurface(avda1_, 1);
- allocator2_->AllocateSurface(avda2_, 1);
- allocator2_->DeallocateSurface(avda2_, 1);
- // avda2_ should should not receive a notification.
- EXPECT_CALL(*avda2_, OnSurfaceAvailable(_)).Times(0);
- allocator2_->DeallocateSurface(avda1_, 1);
-}
-
-TEST_F(AVDACodecAllocatorTest, OwnersAreNotifiedOnDestruction) {
- allocator2_->AllocateSurface(avda1_, 1);
- // Not notified for a surface it doesn't own.
- EXPECT_CALL(*avda1_, OnSurfaceDestroyed()).Times(0);
- allocator2_->OnSurfaceDestroyed(123);
- // But notified for a surface it does own.
- EXPECT_CALL(*avda1_, OnSurfaceDestroyed());
- allocator2_->OnSurfaceDestroyed(1);
-}
-
-TEST_F(AVDACodecAllocatorTest, WaitersAreNotifiedOnDestruction) {
- allocator2_->AllocateSurface(avda1_, 1);
- allocator2_->AllocateSurface(avda2_, 1);
- EXPECT_CALL(*avda2_, OnSurfaceAvailable(false));
- allocator2_->OnSurfaceDestroyed(1);
-}
-
-TEST_F(AVDACodecAllocatorTest, DeallocatingIsSafeDuringSurfaceDestroyed) {
- allocator2_->AllocateSurface(avda1_, 1);
- EXPECT_CALL(*avda1_, OnSurfaceDestroyed()).WillOnce(Invoke([=]() {
- allocator2_->DeallocateSurface(avda1_, 1);
- }));
- allocator2_->OnSurfaceDestroyed(1);
-}
-
} // namespace media
diff --git a/chromium/media/gpu/avda_picture_buffer_manager.cc b/chromium/media/gpu/avda_picture_buffer_manager.cc
index 0050c534742..27a7eb82d6b 100644
--- a/chromium/media/gpu/avda_picture_buffer_manager.cc
+++ b/chromium/media/gpu/avda_picture_buffer_manager.cc
@@ -16,11 +16,11 @@
#include "gpu/command_buffer/service/gl_stream_texture_image.h"
#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
#include "gpu/command_buffer/service/texture_manager.h"
-#include "gpu/ipc/common/gpu_surface_lookup.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/android/media_codec_bridge_impl.h"
#include "media/gpu/avda_codec_image.h"
#include "media/gpu/avda_shared_state.h"
+#include "ui/gl/android/scoped_java_surface.h"
#include "ui/gl/android/surface_texture.h"
#include "ui/gl/egl_util.h"
#include "ui/gl/gl_bindings.h"
@@ -42,16 +42,16 @@
namespace media {
namespace {
-// Creates a SurfaceTexture and attaches a new gl texture to it. |*service_id|
-// is set to the new texture id.
-scoped_refptr<gl::SurfaceTexture> CreateAttachedSurfaceTexture(
- base::WeakPtr<gpu::gles2::GLES2Decoder> gl_decoder,
- GLuint* service_id) {
- GLuint texture_id;
- glGenTextures(1, &texture_id);
+// Creates a SurfaceTexture and attaches a new gl texture to it.
+scoped_refptr<SurfaceTextureGLOwner> CreateAttachedSurfaceTexture(
+ base::WeakPtr<gpu::gles2::GLES2Decoder> gl_decoder) {
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture =
+ SurfaceTextureGLOwner::Create();
+ if (!surface_texture)
+ return nullptr;
glActiveTexture(GL_TEXTURE0);
- glBindTexture(GL_TEXTURE_EXTERNAL_OES, texture_id);
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, surface_texture->texture_id());
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
@@ -61,14 +61,7 @@ scoped_refptr<gl::SurfaceTexture> CreateAttachedSurfaceTexture(
gl_decoder->RestoreActiveTexture();
DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
- *service_id = texture_id;
- // Previously, to reduce context switching, we used to create an unattached
- // SurfaceTexture and attach it lazily in the compositor's context. But that
- // was flaky because SurfaceTexture#detachFromGLContext() is buggy on a lot of
- // devices. Now we attach it to the current context, which means we might have
- // to context switch later to call updateTexImage(). Fortunately, if virtual
- // contexts are in use, we won't have to context switch.
- return gl::SurfaceTexture::Create(texture_id);
+ return surface_texture;
}
} // namespace
@@ -79,19 +72,22 @@ AVDAPictureBufferManager::AVDAPictureBufferManager(
AVDAPictureBufferManager::~AVDAPictureBufferManager() {}
-gl::ScopedJavaSurface AVDAPictureBufferManager::Initialize(int surface_id) {
+void AVDAPictureBufferManager::InitializeForOverlay() {
+ shared_state_ = new AVDASharedState();
+ surface_texture_ = nullptr;
+}
+
+gl::ScopedJavaSurface AVDAPictureBufferManager::InitializeForSurfaceTexture() {
shared_state_ = new AVDASharedState();
surface_texture_ = nullptr;
- // Acquire the SurfaceView surface if given a valid id.
- if (surface_id != SurfaceManager::kNoSurfaceID)
- return gpu::GpuSurfaceLookup::GetInstance()->AcquireJavaSurface(surface_id);
+ // Create a SurfaceTexture.
+ surface_texture_ =
+ CreateAttachedSurfaceTexture(state_provider_->GetGlDecoder());
+ if (!surface_texture_)
+ return gl::ScopedJavaSurface();
- // Otherwise create a SurfaceTexture.
- GLuint service_id;
- surface_texture_ = CreateAttachedSurfaceTexture(
- state_provider_->GetGlDecoder(), &service_id);
- shared_state_->SetSurfaceTexture(surface_texture_, service_id);
+ shared_state_->SetSurfaceTexture(surface_texture_);
return gl::ScopedJavaSurface(surface_texture_.get());
}
diff --git a/chromium/media/gpu/avda_picture_buffer_manager.h b/chromium/media/gpu/avda_picture_buffer_manager.h
index 37f7dd23547..377da600327 100644
--- a/chromium/media/gpu/avda_picture_buffer_manager.h
+++ b/chromium/media/gpu/avda_picture_buffer_manager.h
@@ -11,6 +11,7 @@
#include "base/macros.h"
#include "media/gpu/avda_state_provider.h"
#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/surface_texture_gl_owner.h"
namespace gpu {
namespace gles2 {
@@ -42,14 +43,21 @@ class MEDIA_GPU_EXPORT AVDAPictureBufferManager {
explicit AVDAPictureBufferManager(AVDAStateProvider* state_provider);
virtual ~AVDAPictureBufferManager();
- // Must be called before anything else. If |surface_id| is |kNoSurfaceID|
- // then a new SurfaceTexture will be returned. Otherwise, the corresponding
- // SurfaceView will be returned.
+ // Call either InitializeForOverlay or InitializeForSurfaceTexture before
+ // anything else. InitializeForOverlay will set us up to render codec buffers
+ // anything else. InitializeForOverlay will set us up to render codec buffers
+ // at the appropriate time for display, but will assume that consuming the
+ // resulting buffers is handled elsewhere (e.g., SurfaceFlinger).
//
- // May be called multiple times to switch to a new |surface_id|. Picture
- // buffers will be updated to use the new surface during the call to
- // UseCodecBufferForPictureBuffer().
- gl::ScopedJavaSurface Initialize(int surface_id);
+ // InitializeForSurfaceTexture will create a SurfaceTexture and return the
+ // surface for it. We will arrange to consume the buffers at the right time,
+ // in addition to releasing codec buffers for rendering.
+ //
+ // One may call these multiple times to change between overlay and ST.
+ //
+ // Picture buffers will be updated to reflect the new surface during the call
+ // to UseCodecBufferForPictureBuffer().
+ void InitializeForOverlay();
+ gl::ScopedJavaSurface InitializeForSurfaceTexture();
void Destroy(const PictureBufferMap& buffers);
@@ -110,7 +118,7 @@ class MEDIA_GPU_EXPORT AVDAPictureBufferManager {
// The SurfaceTexture to render to. Non-null after Initialize() if
// we're not rendering to a SurfaceView.
- scoped_refptr<gl::SurfaceTexture> surface_texture_;
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture_;
MediaCodecBridge* media_codec_;
diff --git a/chromium/media/gpu/avda_shared_state.cc b/chromium/media/gpu/avda_shared_state.cc
index 48ad88cc591..1309dabe238 100644
--- a/chromium/media/gpu/avda_shared_state.cc
+++ b/chromium/media/gpu/avda_shared_state.cc
@@ -62,8 +62,7 @@ class AVDASharedState::OnFrameAvailableHandler
};
AVDASharedState::AVDASharedState()
- : surface_texture_service_id_(0),
- frame_available_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ : frame_available_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED),
gl_matrix_{
@@ -74,15 +73,10 @@ AVDASharedState::AVDASharedState()
} {}
AVDASharedState::~AVDASharedState() {
- if (!surface_texture_service_id_)
+ if (!surface_texture_)
return;
on_frame_available_handler_->ClearListener();
- ui::ScopedMakeCurrent scoped_make_current(context_.get(), surface_.get());
- if (scoped_make_current.Succeeded()) {
- glDeleteTextures(1, &surface_texture_service_id_);
- DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
- }
}
void AVDASharedState::SignalFrameAvailable() {
@@ -120,16 +114,9 @@ void AVDASharedState::WaitForFrameAvailable() {
}
void AVDASharedState::SetSurfaceTexture(
- scoped_refptr<gl::SurfaceTexture> surface_texture,
- GLuint attached_service_id) {
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture) {
DCHECK(surface_texture);
- DCHECK(attached_service_id);
surface_texture_ = surface_texture;
- surface_texture_service_id_ = attached_service_id;
- context_ = gl::GLContext::GetCurrent();
- surface_ = gl::GLSurface::GetCurrent();
- DCHECK(context_);
- DCHECK(surface_);
on_frame_available_handler_ =
new OnFrameAvailableHandler(this, surface_texture_.get());
}
diff --git a/chromium/media/gpu/avda_shared_state.h b/chromium/media/gpu/avda_shared_state.h
index a85a882b7ad..68ecf676a47 100644
--- a/chromium/media/gpu/avda_shared_state.h
+++ b/chromium/media/gpu/avda_shared_state.h
@@ -8,15 +8,12 @@
#include "base/synchronization/waitable_event.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "media/base/android/media_codec_bridge.h"
+#include "media/gpu/surface_texture_gl_owner.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface.h"
-namespace gl {
-class SurfaceTexture;
-}
-
namespace media {
// Shared state to allow communication between the AVDA and the
@@ -26,7 +23,7 @@ class AVDASharedState : public base::RefCounted<AVDASharedState> {
AVDASharedState();
GLuint surface_texture_service_id() const {
- return surface_texture_service_id_;
+ return surface_texture_ ? surface_texture_->texture_id() : 0;
}
// Signal the "frame available" event. This may be called from any thread.
@@ -34,14 +31,17 @@ class AVDASharedState : public base::RefCounted<AVDASharedState> {
void WaitForFrameAvailable();
- void SetSurfaceTexture(scoped_refptr<gl::SurfaceTexture> surface_texture,
- GLuint attached_service_id);
+ void SetSurfaceTexture(scoped_refptr<SurfaceTextureGLOwner> surface_texture);
// Context and surface that |surface_texture_| is bound to, if
// |surface_texture_| is not null.
- gl::GLContext* context() const { return context_.get(); }
+ gl::GLContext* context() const {
+ return surface_texture_ ? surface_texture_->context() : nullptr;
+ }
- gl::GLSurface* surface() const { return surface_.get(); }
+ gl::GLSurface* surface() const {
+ return surface_texture_ ? surface_texture_->surface() : nullptr;
+ }
// Helper method for coordinating the interactions between
// MediaCodec::ReleaseOutputBuffer() and WaitForFrameAvailable() when
@@ -80,19 +80,11 @@ class AVDASharedState : public base::RefCounted<AVDASharedState> {
private:
friend class base::RefCounted<AVDASharedState>;
- scoped_refptr<gl::SurfaceTexture> surface_texture_;
-
- // Platform gl texture id for |surface_texture_|.
- GLuint surface_texture_service_id_;
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture_;
// For signalling OnFrameAvailable().
base::WaitableEvent frame_available_event_;
- // Context and surface that |surface_texture_| is bound to, if
- // |surface_texture_| is not null.
- scoped_refptr<gl::GLContext> context_;
- scoped_refptr<gl::GLSurface> surface_;
-
// The time of the last call to RenderCodecBufferToSurfaceTexture(), null if
// if there has been no last call or WaitForFrameAvailable() has been called
// since the last call.
diff --git a/chromium/media/gpu/avda_surface_bundle.cc b/chromium/media/gpu/avda_surface_bundle.cc
new file mode 100644
index 00000000000..608519bb936
--- /dev/null
+++ b/chromium/media/gpu/avda_surface_bundle.cc
@@ -0,0 +1,24 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/avda_surface_bundle.h"
+#include "media/base/android/android_overlay.h"
+
+namespace media {
+
+AVDASurfaceBundle::AVDASurfaceBundle(int surface_id) : surface_id(surface_id) {}
+
+AVDASurfaceBundle::~AVDASurfaceBundle() {
+ // Explicitly free the surface first, just to be sure that it's deleted before
+ // the SurfaceTexture is.
+ surface_texture_surface = gl::ScopedJavaSurface();
+
+ // Also release the back buffers. We might want to let the consumer do this
+ // after it has swapped the back buffer it wants, but we don't.
+ if (surface_texture)
+ surface_texture->ReleaseSurfaceTexture();
+ surface_texture = nullptr;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/avda_surface_bundle.h b/chromium/media/gpu/avda_surface_bundle.h
new file mode 100644
index 00000000000..a65264c9a49
--- /dev/null
+++ b/chromium/media/gpu/avda_surface_bundle.h
@@ -0,0 +1,70 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_AVDA_SURFACE_BUNDLE_H_
+#define MEDIA_GPU_AVDA_SURFACE_BUNDLE_H_
+
+#include "base/memory/ref_counted.h"
+#include "media/base/android/android_overlay.h"
+#include "media/base/surface_manager.h"
+#include "ui/gl/android/scoped_java_surface.h"
+#include "ui/gl/android/surface_texture.h"
+
+namespace media {
+
+// AVDASurfaceBundle is a collection of everything that the producer-side of
+// the output surface needs. In other words, it's the surface, and any
+// SurfaceTexture that backs it. The SurfaceTexture isn't needed directly by
+// the producer, but destroying it causes the surface not to work.
+//
+// The idea is that a reference to this should be kept with the codec, even if
+// the codec is sent to another thread. This will prevent the output surface
+// from being destroyed while the codec depends on it.
+// While you may send a reference to this to other threads, be sure that it
+// doesn't drop the reference there without creating another one. This has to
+// be destroyed on the gpu main thread.
+class AVDASurfaceBundle : public base::RefCountedThreadSafe<AVDASurfaceBundle> {
+ public:
+ explicit AVDASurfaceBundle(int surface_id);
+
+ // The surface that MediaCodec is configured to output to. This can be either
+ // a SurfaceTexture or other Surface provider.
+ // TODO(liberato): it would be nice if we had an abstraction that included
+ // SurfaceTexture and Overlay, but we don't right now.
+ const base::android::JavaRef<jobject>& j_surface() const {
+ if (overlay)
+ return overlay->GetJavaSurface();
+ else
+ return surface_texture_surface.j_surface();
+ }
+
+ int surface_id = SurfaceManager::kNoSurfaceID;
+
+ // The surface onto which the codec is writing.
+ // TODO(liberato): this isn't true if we have an overlay. The overlay keeps
+ // the Java surface.
+ // gl::ScopedJavaSurface surface;
+
+ // If |overlay| is non-null, then |overlay| owns |surface|.
+ std::unique_ptr<AndroidOverlay> overlay;
+
+ // The SurfaceTexture attached to |surface|, or nullptr if |surface| is
+ // SurfaceView backed.
+ scoped_refptr<gl::SurfaceTexture> surface_texture;
+
+ // If |surface_texture| is not null, then this is the surface for it.
+ // TODO(liberato): |surface| is the same thing, since overlays own their own
+ // surfaces anyway.
+ gl::ScopedJavaSurface surface_texture_surface;
+
+ private:
+ ~AVDASurfaceBundle();
+ friend class base::RefCountedThreadSafe<AVDASurfaceBundle>;
+
+ DISALLOW_COPY_AND_ASSIGN(AVDASurfaceBundle);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_AVDA_SURFACE_BUNDLE_H_
diff --git a/chromium/media/gpu/content_video_view_overlay.cc b/chromium/media/gpu/content_video_view_overlay.cc
new file mode 100644
index 00000000000..31da81eec53
--- /dev/null
+++ b/chromium/media/gpu/content_video_view_overlay.cc
@@ -0,0 +1,72 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/content_video_view_overlay.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "gpu/ipc/common/gpu_surface_lookup.h"
+
+namespace media {
+
+ContentVideoViewOverlay::ContentVideoViewOverlay(
+ int surface_id,
+ const AndroidOverlay::Config& config)
+ : surface_id_(surface_id), config_(config), weak_factory_(this) {
+ if (ContentVideoViewOverlayAllocator::GetInstance()->AllocateSurface(this)) {
+ // We have the surface -- post a callback to our OnSurfaceAvailable.
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&ContentVideoViewOverlay::OnSurfaceAvailable,
+ weak_factory_.GetWeakPtr(), true));
+ }
+}
+
+ContentVideoViewOverlay::~ContentVideoViewOverlay() {
+ // Deallocate the surface. It's okay if we don't own it.
+ // Note that this only happens once any codec is done with us.
+ ContentVideoViewOverlayAllocator::GetInstance()->DeallocateSurface(this);
+}
+
+void ContentVideoViewOverlay::ScheduleLayout(const gfx::Rect& rect) {
+ NOTIMPLEMENTED();
+}
+
+const base::android::JavaRef<jobject>& ContentVideoViewOverlay::GetJavaSurface()
+ const {
+ return surface_.j_surface();
+}
+
+void ContentVideoViewOverlay::OnSurfaceAvailable(bool success) {
+ if (!success) {
+ // Notify that the surface won't be available.
+ config_.failed_cb.Run(this);
+ // |this| may be deleted.
+ return;
+ }
+
+ // Get the surface and notify our client.
+ surface_ =
+ gpu::GpuSurfaceLookup::GetInstance()->AcquireJavaSurface(surface_id_);
+
+ // If no surface was returned, then fail instead.
+ if (surface_.IsEmpty()) {
+ config_.failed_cb.Run(this);
+ // |this| may be deleted.
+ return;
+ }
+
+ config_.ready_cb.Run(this);
+}
+
+void ContentVideoViewOverlay::OnSurfaceDestroyed() {
+ config_.destroyed_cb.Run(this);
+ // |this| may be deleted, or deletion might be posted elsewhere.
+}
+
+int32_t ContentVideoViewOverlay::GetSurfaceId() {
+ return surface_id_;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/content_video_view_overlay.h b/chromium/media/gpu/content_video_view_overlay.h
new file mode 100644
index 00000000000..ef6a6c4cf97
--- /dev/null
+++ b/chromium/media/gpu/content_video_view_overlay.h
@@ -0,0 +1,47 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_CONTENT_VIDEO_VIEW_OVERLAY_H_
+#define MEDIA_GPU_CONTENT_VIDEO_VIEW_OVERLAY_H_
+
+#include "base/memory/weak_ptr.h"
+#include "media/base/android/android_overlay.h"
+#include "media/gpu/content_video_view_overlay_allocator.h"
+#include "ui/gl/android/scoped_java_surface.h"
+
+namespace media {
+
+class ContentVideoViewOverlay
+ : public ContentVideoViewOverlayAllocator::Client {
+ public:
+ // |config| is ignored except for callbacks. Callbacks will not be called
+ // before this returns.
+ ContentVideoViewOverlay(int surface_id, const AndroidOverlay::Config& config);
+ ~ContentVideoViewOverlay() override;
+
+ // AndroidOverlay (via ContentVideoViewOverlayAllocator::Client)
+ // ContentVideoView ignores this, unfortunately.
+ void ScheduleLayout(const gfx::Rect& rect) override;
+ const base::android::JavaRef<jobject>& GetJavaSurface() const override;
+
+ // ContentVideoViewOverlayAllocator::Client
+ void OnSurfaceAvailable(bool success) override;
+ void OnSurfaceDestroyed() override;
+ int32_t GetSurfaceId() override;
+
+ protected:
+ // For tests.
+ ContentVideoViewOverlay();
+
+ private:
+ int surface_id_;
+ AndroidOverlay::Config config_;
+ gl::ScopedJavaSurface surface_;
+
+ base::WeakPtrFactory<ContentVideoViewOverlay> weak_factory_;
+};
+
+} // namespace media
+
+#endif  // MEDIA_GPU_CONTENT_VIDEO_VIEW_OVERLAY_H_
diff --git a/chromium/media/gpu/content_video_view_overlay_allocator.cc b/chromium/media/gpu/content_video_view_overlay_allocator.cc
new file mode 100644
index 00000000000..e9fb1610076
--- /dev/null
+++ b/chromium/media/gpu/content_video_view_overlay_allocator.cc
@@ -0,0 +1,149 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/content_video_view_overlay_allocator.h"
+
+#include "media/gpu/avda_codec_allocator.h"
+
+namespace media {
+
+// static
+ContentVideoViewOverlayAllocator*
+ContentVideoViewOverlayAllocator::GetInstance() {
+ static ContentVideoViewOverlayAllocator* allocator =
+ new ContentVideoViewOverlayAllocator();
+ return allocator;
+}
+
+ContentVideoViewOverlayAllocator::ContentVideoViewOverlayAllocator()
+ : allocator_(AVDACodecAllocator::GetInstance()) {}
+
+ContentVideoViewOverlayAllocator::~ContentVideoViewOverlayAllocator() {}
+
+bool ContentVideoViewOverlayAllocator::AllocateSurface(Client* client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ const int32_t surface_id = client->GetSurfaceId();
+ DVLOG(1) << __func__ << ": " << surface_id;
+ DCHECK_NE(surface_id, SurfaceManager::kNoSurfaceID);
+
+ // If it's not owned or being released, |client| now owns it.
+ // Note: it's owned until it's released, since AVDACodecAllocator does that.
+ // It keeps the bundle around (and also the overlay that's the current owner)
+ // until the codec is done with it. That's required to use AndroidOverlay.
+ // So, we don't need to check for 'being released'; the owner is good enough.
+ auto it = surface_owners_.find(surface_id);
+ if (it == surface_owners_.end()) {
+ OwnerRecord record;
+ record.owner = client;
+ surface_owners_.insert(OwnerMap::value_type(surface_id, record));
+ return true;
+ }
+
+ // Otherwise |client| replaces the previous waiter (if any).
+ OwnerRecord& record = it->second;
+ if (record.waiter)
+ record.waiter->OnSurfaceAvailable(false);
+ record.waiter = client;
+ return false;
+}
+
+void ContentVideoViewOverlayAllocator::DeallocateSurface(Client* client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ const int32_t surface_id = client->GetSurfaceId();
+ DCHECK_NE(surface_id, SurfaceManager::kNoSurfaceID);
+
+ // If we time out waiting for the surface to be destroyed, then we might have
+ // already removed |surface_id|. If it's now trying to deallocate, then
+ // maybe we just weren't patient enough, or mediaserver restarted.
+ auto it = surface_owners_.find(surface_id);
+ if (it == surface_owners_.end())
+ return;
+
+ OwnerRecord& record = it->second;
+ if (record.owner == client)
+ record.owner = nullptr;
+ else if (record.waiter == client)
+ record.waiter = nullptr;
+
+ // Promote the waiter if possible.
+ if (record.waiter && !record.owner) {
+ record.owner = record.waiter;
+ record.waiter = nullptr;
+ record.owner->OnSurfaceAvailable(true);
+ return;
+ }
+
+ // Remove the record if it's now unused.
+ if (!record.owner && !record.waiter)
+ surface_owners_.erase(it);
+}
+
+// During surface teardown we have to handle the following cases.
+// 1) No AVDA has acquired the surface, or the surface has already been
+// completely released.
+// This case is easy -- there's no owner or waiter, and we can return.
+//
+// 2) A MediaCodec is currently being configured with the surface on another
+// thread. Whether an AVDA owns the surface or has already deallocated it,
+// the MediaCodec should be dropped when configuration completes.
+// In this case, we'll find an owner. We'll notify it about the destruction.
+// Note that AVDA doesn't handle this case correctly right now, since it
+// doesn't know the state of codec creation on the codec thread. This is
+// only a problem because CVV has the 'wait on main thread' semantics.
+//
+// 3) An AVDA owns the surface and it responds to OnSurfaceDestroyed() by:
+// a) Replacing the destroyed surface by calling MediaCodec#setSurface().
+// b) Releasing the MediaCodec it's attached to.
+// In case a, the surface will be destroyed during OnSurfaceDestroyed.
+// In case b, we'll have to wait for the release to complete.
+//
+// 4) No AVDA owns the surface, but the MediaCodec it's attached to is currently
+// being destroyed on another thread.
+// This is the same as 3b.
+void ContentVideoViewOverlayAllocator::OnSurfaceDestroyed(int32_t surface_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(1) << __func__ << ": " << surface_id;
+
+  // If it isn't currently owned, then we're done. Remember that the overlay
+ // must outlive any user of it (MediaCodec!), and currently AVDACodecAllocator
+ // is responsible for making sure that happens for AVDA.
+ auto it = surface_owners_.find(surface_id);
+ if (it == surface_owners_.end())
+ return;
+
+ // Notify the owner and waiter (if any).
+ OwnerRecord& record = it->second;
+ if (record.waiter) {
+ record.waiter->OnSurfaceAvailable(false);
+ record.waiter = nullptr;
+ }
+
+ DCHECK(record.owner);
+
+ // |record| could be removed by the callback, if it deallocates the surface.
+ record.owner->OnSurfaceDestroyed();
+
+ // If owner deallocated the surface, then we don't need to wait. Note that
+ // the owner might have been deleted in that case. Since CVVOverlay only
+ // deallocates the surface during destruction, it's a safe bet.
+ it = surface_owners_.find(surface_id);
+ if (it == surface_owners_.end())
+ return;
+
+ // The surface is still in use, but should have been posted to another thread
+ // for destruction. Note that this isn't technically required for overlays
+ // in general, but CVV requires it. All of the pending release stuff should
+ // be moved here, or to custom deleters of CVVOverlay. However, in the
+ // interest of not changing too much at once, we let AVDACodecAllocator
+ // handle it. Since we're deprecating CVVOverlay anyway, all of this can be
+ // removed eventually.
+ // If the wait fails, then clean up |surface_owners_| anyway, since the codec
+ // release is probably hung up.
+ if (!allocator_->WaitForPendingRelease(record.owner))
+ surface_owners_.erase(it);
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/content_video_view_overlay_allocator.h b/chromium/media/gpu/content_video_view_overlay_allocator.h
new file mode 100644
index 00000000000..47255475816
--- /dev/null
+++ b/chromium/media/gpu/content_video_view_overlay_allocator.h
@@ -0,0 +1,86 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_CONTENT_VIDEO_VIEW_OVERLAY_ALLOCATOR_H_
+#define MEDIA_GPU_CONTENT_VIDEO_VIEW_OVERLAY_ALLOCATOR_H_
+
+#include <stddef.h>
+
+#include "base/containers/flat_map.h"
+#include "base/macros.h"
+#include "base/threading/thread_checker.h"
+#include "media/base/android/android_overlay.h"
+#include "media/gpu/media_gpu_export.h"
+
+namespace media {
+
+class AVDACodecAllocator;
+class ContentVideoViewOverlayAllocatorTest;
+
+// ContentVideoViewOverlayAllocator allows different instances of CVVOverlay
+// that share the same surface ID to be synchronized with respect to each other.
+// It also manages synchronous surface destruction.
+class MEDIA_GPU_EXPORT ContentVideoViewOverlayAllocator {
+ public:
+ class Client : public AndroidOverlay {
+ public:
+ // Called when the requested SurfaceView becomes available after a call to
+    // AllocateSurface().
+ virtual void OnSurfaceAvailable(bool success) = 0;
+
+ // Called when the allocated surface is being destroyed. This must either
+ // replace the surface with MediaCodec#setSurface, or release the MediaCodec
+ // it's attached to. The client no longer owns the surface and doesn't
+    // need to call DeallocateSurface().
+ virtual void OnSurfaceDestroyed() = 0;
+
+ // Return the surface id of the client's ContentVideoView.
+ virtual int32_t GetSurfaceId() = 0;
+
+ protected:
+ ~Client() override {}
+ };
+
+ static ContentVideoViewOverlayAllocator* GetInstance();
+
+ // Called synchronously when the given surface is being destroyed on the
+ // browser UI thread.
+ void OnSurfaceDestroyed(int32_t surface_id);
+
+ // Returns true if the caller now owns the surface, or false if someone else
+ // owns the surface. |client| will be notified when the surface is available
+ // via OnSurfaceAvailable().
+ bool AllocateSurface(Client* client);
+
+ // Relinquish ownership of the surface or stop waiting for it to be available.
+ // The caller must guarantee that when calling this the surface is either no
+  // longer attached to a MediaCodec, or the MediaCodec it was attached to
+ // was released with ReleaseMediaCodec().
+ void DeallocateSurface(Client* client);
+
+ private:
+ friend class ContentVideoViewOverlayAllocatorTest;
+
+ ContentVideoViewOverlayAllocator();
+ ~ContentVideoViewOverlayAllocator();
+
+ struct OwnerRecord {
+ Client* owner = nullptr;
+ Client* waiter = nullptr;
+ };
+
+ // Indexed by surface id.
+ using OwnerMap = base::flat_map<int32_t, OwnerRecord>;
+ OwnerMap surface_owners_;
+
+ AVDACodecAllocator* allocator_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(ContentVideoViewOverlayAllocator);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_CONTENT_VIDEO_VIEW_OVERLAY_ALLOCATOR_H_
diff --git a/chromium/media/gpu/content_video_view_overlay_allocator_unittest.cc b/chromium/media/gpu/content_video_view_overlay_allocator_unittest.cc
new file mode 100644
index 00000000000..99cd9e60786
--- /dev/null
+++ b/chromium/media/gpu/content_video_view_overlay_allocator_unittest.cc
@@ -0,0 +1,147 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/content_video_view_overlay_allocator.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "media/base/surface_manager.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::AnyNumber;
+using testing::Invoke;
+using testing::Return;
+using testing::StrictMock;
+using testing::_;
+
+namespace media {
+class ContentVideoViewOverlayAllocatorTest : public testing::Test {
+ public:
+ class MockClient
+ : public StrictMock<ContentVideoViewOverlayAllocator::Client> {
+ public:
+ MOCK_METHOD1(ScheduleLayout, void(const gfx::Rect&));
+ MOCK_CONST_METHOD0(GetJavaSurface,
+ const base::android::JavaRef<jobject>&());
+
+ MOCK_METHOD1(OnSurfaceAvailable, void(bool success));
+ MOCK_METHOD0(OnSurfaceDestroyed, void());
+ MOCK_METHOD0(GetSurfaceId, int32_t());
+ };
+
+ ContentVideoViewOverlayAllocatorTest() {}
+
+ ~ContentVideoViewOverlayAllocatorTest() override {}
+
+ protected:
+ void SetUp() override {
+ allocator_ = new ContentVideoViewOverlayAllocator();
+
+ avda1_ = new MockClient();
+ avda2_ = new MockClient();
+ avda3_ = new MockClient();
+ // Default all |avda*| instances to surface ID 1.
+ SetSurfaceId(avda1_, 1);
+ SetSurfaceId(avda2_, 1);
+ SetSurfaceId(avda3_, 1);
+ }
+
+ void TearDown() override {
+ delete avda3_;
+ delete avda2_;
+ delete avda1_;
+ delete allocator_;
+ }
+
+ void SetSurfaceId(MockClient* client, int32_t surface_id) {
+ ON_CALL(*client, GetSurfaceId()).WillByDefault(Return(surface_id));
+ EXPECT_CALL(*client, GetSurfaceId()).Times(AnyNumber());
+ }
+
+ protected:
+ ContentVideoViewOverlayAllocator* allocator_;
+
+ MockClient* avda1_;
+ MockClient* avda2_;
+ MockClient* avda3_;
+};
+
+TEST_F(ContentVideoViewOverlayAllocatorTest, AllocatingAnOwnedSurfaceFails) {
+ ASSERT_TRUE(allocator_->AllocateSurface(avda1_));
+ ASSERT_FALSE(allocator_->AllocateSurface(avda2_));
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ LaterWaitersReplaceEarlierWaiters) {
+ allocator_->AllocateSurface(avda1_);
+ allocator_->AllocateSurface(avda2_);
+ EXPECT_CALL(*avda2_, OnSurfaceAvailable(false));
+ allocator_->AllocateSurface(avda3_);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ WaitersBecomeOwnersWhenSurfacesAreReleased) {
+ allocator_->AllocateSurface(avda1_);
+ allocator_->AllocateSurface(avda2_);
+ EXPECT_CALL(*avda2_, OnSurfaceAvailable(true));
+ allocator_->DeallocateSurface(avda1_);
+ // The surface should still be owned.
+ ASSERT_FALSE(allocator_->AllocateSurface(avda1_));
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ DeallocatingUnownedSurfacesIsSafe) {
+ allocator_->DeallocateSurface(avda1_);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ WaitersAreRemovedIfTheyDeallocate) {
+ allocator_->AllocateSurface(avda1_);
+ allocator_->AllocateSurface(avda2_);
+ allocator_->DeallocateSurface(avda2_);
+  // |avda2_| should not receive a notification.
+ EXPECT_CALL(*avda2_, OnSurfaceAvailable(_)).Times(0);
+ allocator_->DeallocateSurface(avda1_);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest, OwnersAreNotifiedOnDestruction) {
+ allocator_->AllocateSurface(avda1_);
+ // Owner is notified for a surface it owns.
+ EXPECT_CALL(*avda1_, OnSurfaceDestroyed());
+ allocator_->OnSurfaceDestroyed(1);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ NonOwnersAreNotNotifiedOnDestruction) {
+ allocator_->AllocateSurface(avda1_);
+ // Not notified for a surface it doesn't own.
+ EXPECT_CALL(*avda1_, OnSurfaceDestroyed()).Times(0);
+ allocator_->OnSurfaceDestroyed(123);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest, WaitersAreNotifiedOnDestruction) {
+ allocator_->AllocateSurface(avda1_);
+ allocator_->AllocateSurface(avda2_);
+ EXPECT_CALL(*avda1_, OnSurfaceDestroyed());
+ EXPECT_CALL(*avda2_, OnSurfaceAvailable(false));
+ allocator_->OnSurfaceDestroyed(1);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ DeallocatingIsSafeDuringSurfaceDestroyed) {
+ allocator_->AllocateSurface(avda1_);
+ EXPECT_CALL(*avda1_, OnSurfaceDestroyed()).WillOnce(Invoke([=]() {
+ allocator_->DeallocateSurface(avda1_);
+ }));
+ allocator_->OnSurfaceDestroyed(1);
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/dxva_picture_buffer_win.cc b/chromium/media/gpu/dxva_picture_buffer_win.cc
index a6f417da290..4e36b91332a 100644
--- a/chromium/media/gpu/dxva_picture_buffer_win.cc
+++ b/chromium/media/gpu/dxva_picture_buffer_win.cc
@@ -11,6 +11,7 @@
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_image.h"
+#include "ui/gl/gl_image_dxgi.h"
#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/scoped_binders.h"
@@ -75,20 +76,6 @@ class GLImagePbuffer : public DummyGLImage {
EGLSurface surface_;
};
-class GLImageEGLStream : public DummyGLImage {
- public:
- GLImageEGLStream(const gfx::Size& size, EGLStreamKHR stream)
- : DummyGLImage(size), stream_(stream) {}
-
- private:
- ~GLImageEGLStream() override {
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
- eglDestroyStreamKHR(egl_display, stream_);
- }
-
- EGLStreamKHR stream_;
-};
-
} // namespace
#define RETURN_ON_FAILURE(result, log, ret) \
@@ -324,8 +311,8 @@ bool PbufferPictureBuffer::CopyOutputSampleDataToPictureBuffer(
// The same picture buffer can be reused for a different frame. Release the
// target surface and the decoder references here.
- target_surface_.Release();
- decoder_surface_.Release();
+ target_surface_.Reset();
+ decoder_surface_.Reset();
// Grab a reference on the decoder surface and the target surface. These
// references will be released when we receive a notification that the
@@ -362,11 +349,11 @@ bool PbufferPictureBuffer::CopySurfaceComplete(
if (src_surface && dest_surface) {
DCHECK_EQ(src_surface, decoder_surface_.get());
DCHECK_EQ(dest_surface, target_surface_.get());
- decoder_surface_.Release();
- target_surface_.Release();
+ decoder_surface_.Reset();
+ target_surface_.Reset();
} else {
DCHECK(decoder_dx11_texture_.get());
- decoder_dx11_texture_.Release();
+ decoder_dx11_texture_.Reset();
}
if (egl_keyed_mutex_) {
keyed_mutex_value_++;
@@ -383,6 +370,10 @@ bool PbufferPictureBuffer::CopySurfaceComplete(
return true;
}
+bool PbufferPictureBuffer::AllowOverlay() const {
+ return false;
+}
+
PbufferPictureBuffer::PbufferPictureBuffer(const PictureBuffer& buffer)
: DXVAPictureBuffer(buffer),
decoding_surface_(NULL),
@@ -400,9 +391,9 @@ bool PbufferPictureBuffer::ReusePictureBuffer() {
EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
eglReleaseTexImage(egl_display, decoding_surface_, EGL_BACK_BUFFER);
- decoder_surface_.Release();
- target_surface_.Release();
- decoder_dx11_texture_.Release();
+ decoder_surface_.Reset();
+ target_surface_.Reset();
+ decoder_dx11_texture_.Reset();
state_ = UNUSED;
if (egl_keyed_mutex_) {
HRESULT hr = egl_keyed_mutex_->ReleaseSync(++keyed_mutex_value_);
@@ -432,7 +423,7 @@ bool EGLStreamPictureBuffer::Initialize() {
};
stream_ = eglCreateStreamKHR(egl_display, stream_attributes);
RETURN_ON_FAILURE(!!stream_, "Could not create stream", false);
- gl_image_ = make_scoped_refptr(new GLImageEGLStream(size(), stream_));
+ gl_image_ = make_scoped_refptr(new gl::GLImageDXGI(size(), stream_));
gl::ScopedActiveTexture texture0(GL_TEXTURE0);
gl::ScopedTextureBinder texture0_binder(
GL_TEXTURE_EXTERNAL_OES, picture_buffer_.service_texture_ids()[0]);
@@ -474,8 +465,8 @@ bool EGLStreamPictureBuffer::ReusePictureBuffer() {
RETURN_ON_FAILURE(result, "Could not release stream", false);
}
if (current_d3d_sample_) {
- dx11_decoding_texture_.Release();
- current_d3d_sample_.Release();
+ dx11_decoding_texture_.Reset();
+ current_d3d_sample_.Reset();
}
state_ = UNUSED;
return true;
@@ -513,6 +504,15 @@ bool EGLStreamPictureBuffer::BindSampleToTexture(
RETURN_ON_FAILURE(result, "Could not post texture", false);
result = eglStreamConsumerAcquireKHR(egl_display, stream_);
RETURN_ON_FAILURE(result, "Could not post acquire stream", false);
+ gl::GLImageDXGI* gl_image_dxgi =
+ gl::GLImageDXGI::FromGLImage(gl_image_.get());
+ DCHECK(gl_image_dxgi);
+
+ gl_image_dxgi->SetTexture(dx11_decoding_texture_, subresource);
+ return true;
+}
+
+bool EGLStreamPictureBuffer::AllowOverlay() const {
return true;
}
@@ -539,7 +539,7 @@ bool EGLStreamCopyPictureBuffer::Initialize(
};
stream_ = eglCreateStreamKHR(egl_display, stream_attributes);
RETURN_ON_FAILURE(!!stream_, "Could not create stream", false);
- gl_image_ = make_scoped_refptr(new GLImageEGLStream(size(), stream_));
+ gl_image_ = make_scoped_refptr(new gl::GLImageDXGI(size(), stream_));
gl::ScopedActiveTexture texture0(GL_TEXTURE0);
gl::ScopedTextureBinder texture0_binder(
GL_TEXTURE_EXTERNAL_OES, picture_buffer_.service_texture_ids()[0]);
@@ -635,7 +635,7 @@ bool EGLStreamCopyPictureBuffer::CopySurfaceComplete(
DCHECK_EQ(COPYING, state_);
state_ = IN_CLIENT;
- dx11_decoding_texture_.Release();
+ dx11_decoding_texture_.Reset();
HRESULT hr =
egl_keyed_mutex_->AcquireSync(keyed_mutex_value_, kAcquireSyncWaitMs);
@@ -653,6 +653,11 @@ bool EGLStreamCopyPictureBuffer::CopySurfaceComplete(
RETURN_ON_FAILURE(result, "Could not post stream", false);
result = eglStreamConsumerAcquireKHR(egl_display, stream_);
RETURN_ON_FAILURE(result, "Could not post acquire stream", false);
+ gl::GLImageDXGI* gl_image_dxgi =
+ gl::GLImageDXGI::FromGLImage(gl_image_.get());
+ DCHECK(gl_image_dxgi);
+
+ gl_image_dxgi->SetTexture(angle_copy_texture_, 0);
return true;
}
@@ -674,4 +679,8 @@ bool EGLStreamCopyPictureBuffer::ReusePictureBuffer() {
return true;
}
+bool EGLStreamCopyPictureBuffer::AllowOverlay() const {
+ return true;
+}
+
} // namespace media
diff --git a/chromium/media/gpu/dxva_picture_buffer_win.h b/chromium/media/gpu/dxva_picture_buffer_win.h
index ee4ce0c6c79..29d502d1d09 100644
--- a/chromium/media/gpu/dxva_picture_buffer_win.h
+++ b/chromium/media/gpu/dxva_picture_buffer_win.h
@@ -61,6 +61,11 @@ class DXVAPictureBuffer {
color_space_ = color_space;
}
+ // Returns true if these could in theory be used as an overlay. May
+ // still be drawn using GL depending on the scene and precise hardware
+ // support.
+ virtual bool AllowOverlay() const = 0;
+
bool waiting_to_reuse() const { return state_ == WAITING_TO_REUSE; }
virtual gl::GLFence* reuse_fence();
@@ -102,6 +107,7 @@ class PbufferPictureBuffer : public DXVAPictureBuffer {
gl::GLFence* reuse_fence() override;
bool CopySurfaceComplete(IDirect3DSurface9* src_surface,
IDirect3DSurface9* dest_surface) override;
+ bool AllowOverlay() const override;
protected:
EGLSurface decoding_surface_;
@@ -144,6 +150,7 @@ class EGLStreamPictureBuffer : public DXVAPictureBuffer {
bool Initialize();
bool ReusePictureBuffer() override;
bool BindSampleToTexture(base::win::ScopedComPtr<IMFSample> sample) override;
+ bool AllowOverlay() const override;
private:
EGLStreamKHR stream_;
@@ -167,6 +174,7 @@ class EGLStreamCopyPictureBuffer : public DXVAPictureBuffer {
int input_buffer_id) override;
bool CopySurfaceComplete(IDirect3DSurface9* src_surface,
IDirect3DSurface9* dest_surface) override;
+ bool AllowOverlay() const override;
private:
EGLStreamKHR stream_;
diff --git a/chromium/media/gpu/dxva_video_decode_accelerator_win.cc b/chromium/media/gpu/dxva_video_decode_accelerator_win.cc
index a77d3d69ecb..6d954163791 100644
--- a/chromium/media/gpu/dxva_video_decode_accelerator_win.cc
+++ b/chromium/media/gpu/dxva_video_decode_accelerator_win.cc
@@ -50,7 +50,7 @@
#include "media/video/video_decode_accelerator.h"
#include "third_party/angle/include/EGL/egl.h"
#include "third_party/angle/include/EGL/eglext.h"
-#include "ui/base/ui_base_switches.h"
+#include "ui/display/display_switches.h"
#include "ui/gfx/color_space_win.h"
#include "ui/gl/gl_angle_util_win.h"
#include "ui/gl/gl_bindings.h"
@@ -451,14 +451,14 @@ bool H264ConfigChangeDetector::DetectConfig(const uint8_t* stream,
return true;
}
-gfx::ColorSpace H264ConfigChangeDetector::current_color_space() const {
+VideoColorSpace H264ConfigChangeDetector::current_color_space() const {
if (!parser_)
- return gfx::ColorSpace();
+ return VideoColorSpace();
// TODO(hubbe): Is using last_sps_id_ correct here?
const H264SPS* sps = parser_->GetSPS(last_sps_id_);
if (sps)
return sps->GetColorSpace();
- return gfx::ColorSpace();
+ return VideoColorSpace();
}
DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
@@ -504,7 +504,9 @@ DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
use_keyed_mutex_(false),
using_angle_device_(false),
enable_accelerated_vpx_decode_(
- gpu_preferences.enable_accelerated_vpx_decode),
+ workarounds.disable_accelerated_vpx_decode
+ ? gpu::GpuPreferences::VpxDecodeVendors::VPX_VENDOR_NONE
+ : gpu_preferences.enable_accelerated_vpx_decode),
processing_config_changed_(false),
weak_this_factory_(this) {
weak_ptr_ = weak_this_factory_.GetWeakPtr();
@@ -567,8 +569,7 @@ bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
// Unfortunately, the profile is currently unreliable for
// VP9 (crbug.com/592074) so also try to use fp16 if HDR is on.
- if (base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableHDROutput)) {
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(switches::kEnableHDR)) {
use_fp16_ = true;
}
@@ -616,7 +617,9 @@ bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
"Initialize: invalid state: " << state,
ILLEGAL_STATE, false);
- InitializeMediaFoundation();
+ RETURN_AND_NOTIFY_ON_FAILURE(InitializeMediaFoundation(),
+ "Could not initialize Media Foundartion",
+ PLATFORM_FAILURE, false);
config_ = config;
@@ -1192,7 +1195,8 @@ GLenum DXVAVideoDecodeAccelerator::GetSurfaceInternalFormat() const {
// static
VideoDecodeAccelerator::SupportedProfiles
DXVAVideoDecodeAccelerator::GetSupportedProfiles(
- const gpu::GpuPreferences& preferences) {
+ const gpu::GpuPreferences& preferences,
+ const gpu::GpuDriverBugWorkarounds& workarounds) {
TRACE_EVENT0("gpu,startup",
"DXVAVideoDecodeAccelerator::GetSupportedProfiles");
@@ -1208,7 +1212,8 @@ DXVAVideoDecodeAccelerator::GetSupportedProfiles(
}
}
for (const auto& supported_profile : kSupportedProfiles) {
- if (!preferences.enable_accelerated_vpx_decode &&
+ if ((!preferences.enable_accelerated_vpx_decode ||
+ workarounds.disable_accelerated_vpx_decode) &&
(supported_profile >= VP8PROFILE_MIN) &&
(supported_profile <= VP9PROFILE_MAX)) {
continue;
@@ -1977,7 +1982,7 @@ void DXVAVideoDecodeAccelerator::Invalidate() {
weak_this_factory_.InvalidateWeakPtrs();
weak_ptr_ = weak_this_factory_.GetWeakPtr();
pending_output_samples_.clear();
- decoder_.Release();
+ decoder_.Reset();
config_change_detector_.reset();
// If we are processing a config change, then leave the d3d9/d3d11 objects
@@ -1993,21 +1998,21 @@ void DXVAVideoDecodeAccelerator::Invalidate() {
pending_input_buffers_.clear();
pictures_requested_ = false;
if (use_dx11_) {
- d3d11_processor_.Release();
- enumerator_.Release();
- video_context_.Release();
- video_device_.Release();
- d3d11_device_context_.Release();
- d3d11_device_.Release();
- d3d11_device_manager_.Release();
- d3d11_query_.Release();
- multi_threaded_.Release();
+ d3d11_processor_.Reset();
+ enumerator_.Reset();
+ video_context_.Reset();
+ video_device_.Reset();
+ d3d11_device_context_.Reset();
+ d3d11_device_.Reset();
+ d3d11_device_manager_.Reset();
+ d3d11_query_.Reset();
+ multi_threaded_.Reset();
processor_width_ = processor_height_ = 0;
} else {
- d3d9_.Release();
- d3d9_device_ex_.Release();
- device_manager_.Release();
- query_.Release();
+ d3d9_.Reset();
+ d3d9_device_ex_.Reset();
+ device_manager_.Reset();
+ query_.Reset();
}
}
sent_drain_message_ = false;
@@ -2087,14 +2092,15 @@ void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) {
void DXVAVideoDecodeAccelerator::NotifyPictureReady(
int picture_buffer_id,
int input_buffer_id,
- const gfx::ColorSpace& color_space) {
+ const gfx::ColorSpace& color_space,
+ bool allow_overlay) {
DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
// This task could execute after the decoder has been torn down.
if (GetState() != kUninitialized && client_) {
// TODO(henryhsu): Use correct visible size instead of (0, 0). We can't use
// coded size here so use (0, 0) intentionally to have the client choose.
Picture picture(picture_buffer_id, input_buffer_id, gfx::Rect(0, 0),
- color_space, false);
+ color_space, allow_overlay);
client_->PictureReady(picture);
}
}
@@ -2172,10 +2178,10 @@ void DXVAVideoDecodeAccelerator::FlushInternal() {
// Attempt to retrieve an output frame from the decoder. If we have one,
// return and proceed when the output frame is processed. If we don't have a
// frame then we are done.
- gfx::ColorSpace color_space = config_change_detector_->current_color_space();
- if (!color_space.IsValid())
+ VideoColorSpace color_space = config_change_detector_->current_color_space();
+ if (color_space == VideoColorSpace())
color_space = config_.color_space;
- DoDecode(color_space);
+ DoDecode(color_space.ToGfxColorSpace());
if (OutputSamplesPresent())
return;
@@ -2224,8 +2230,8 @@ void DXVAVideoDecodeAccelerator::DecodeInternal(
return;
}
- gfx::ColorSpace color_space = config_change_detector_->current_color_space();
- if (!color_space.IsValid())
+ VideoColorSpace color_space = config_change_detector_->current_color_space();
+ if (color_space == VideoColorSpace())
color_space = config_.color_space;
if (!inputs_before_decode_) {
@@ -2247,7 +2253,7 @@ void DXVAVideoDecodeAccelerator::DecodeInternal(
// process the input again. Failure in either of these steps is treated as a
// decoder failure.
if (hr == MF_E_NOTACCEPTING) {
- DoDecode(color_space);
+ DoDecode(color_space.ToGfxColorSpace());
// If the DoDecode call resulted in an output frame then we should not
// process any more input until that frame is copied to the target surface.
if (!OutputSamplesPresent()) {
@@ -2279,7 +2285,7 @@ void DXVAVideoDecodeAccelerator::DecodeInternal(
RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample",
PLATFORM_FAILURE, );
- DoDecode(color_space);
+ DoDecode(color_space.ToGfxColorSpace());
State state = GetState();
RETURN_AND_NOTIFY_ON_FAILURE(
@@ -2505,7 +2511,8 @@ void DXVAVideoDecodeAccelerator::CopySurfaceComplete(
PLATFORM_FAILURE, );
NotifyPictureReady(picture_buffer->id(), input_buffer_id,
- picture_buffer->color_space());
+ picture_buffer->color_space(),
+ picture_buffer->AllowOverlay());
{
base::AutoLock lock(decoder_lock_);
@@ -2558,7 +2565,8 @@ void DXVAVideoDecodeAccelerator::BindPictureBufferToSample(
PLATFORM_FAILURE, );
NotifyPictureReady(picture_buffer->id(), input_buffer_id,
- picture_buffer->color_space());
+ picture_buffer->color_space(),
+ picture_buffer->AllowOverlay());
{
base::AutoLock lock(decoder_lock_);
@@ -2777,11 +2785,10 @@ bool DXVAVideoDecodeAccelerator::InitializeID3D11VideoProcessor(
int height,
const gfx::ColorSpace& color_space) {
if (width < processor_width_ || height != processor_height_) {
- d3d11_processor_.Release();
- enumerator_.Release();
+ d3d11_processor_.Reset();
+ enumerator_.Reset();
processor_width_ = 0;
processor_height_ = 0;
- dx11_converter_color_space_ = gfx::ColorSpace();
D3D11_VIDEO_PROCESSOR_CONTENT_DESC desc;
desc.InputFrameFormat = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
@@ -2825,24 +2832,51 @@ bool DXVAVideoDecodeAccelerator::InitializeID3D11VideoProcessor(
dx11_converter_output_color_space_ = color_space;
} else {
dx11_converter_output_color_space_ = gfx::ColorSpace::CreateSRGB();
- // Not sure if this call is expensive, let's only do it if the color
- // space changes.
- if ((use_color_info_ || use_fp16_) &&
- dx11_converter_color_space_ != color_space) {
+ if (use_color_info_ || use_fp16_) {
base::win::ScopedComPtr<ID3D11VideoContext1> video_context1;
HRESULT hr = video_context_.QueryInterface(video_context1.Receive());
if (SUCCEEDED(hr)) {
- if (use_fp16_ && base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableHDROutput)) {
+ if (use_fp16_ &&
+ base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableHDR) &&
+ color_space.IsHDR()) {
+ // Note, we only use the SCRGBLinear output color space when
+ // the input is PQ, because nvidia drivers will not convert
+ // G22 to G10 for some reason.
dx11_converter_output_color_space_ =
gfx::ColorSpace::CreateSCRGBLinear();
}
- video_context1->VideoProcessorSetStreamColorSpace1(
- d3d11_processor_.get(), 0,
- gfx::ColorSpaceWin::GetDXGIColorSpace(color_space));
- video_context1->VideoProcessorSetOutputColorSpace1(
- d3d11_processor_.get(), gfx::ColorSpaceWin::GetDXGIColorSpace(
- dx11_converter_output_color_space_));
+ // Since the video processor doesn't support HLG, let's just do the
+ // YUV->RGB conversion and let the output color space be HLG.
+ // This won't work well unless color management is on, but if color
+ // management is off we don't support HLG anyways.
+ if (color_space ==
+ gfx::ColorSpace(gfx::ColorSpace::PrimaryID::BT2020,
+ gfx::ColorSpace::TransferID::ARIB_STD_B67,
+ gfx::ColorSpace::MatrixID::BT709,
+ gfx::ColorSpace::RangeID::LIMITED)) {
+ video_context1->VideoProcessorSetStreamColorSpace1(
+ d3d11_processor_.get(), 0,
+ DXGI_COLOR_SPACE_YCBCR_STUDIO_G2084_LEFT_P2020);
+ video_context1->VideoProcessorSetOutputColorSpace1(
+ d3d11_processor_.get(),
+ DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020);
+ dx11_converter_output_color_space_ = color_space.GetAsFullRangeRGB();
+ } else {
+ DVLOG(2) << "input color space: " << color_space
+ << " DXGIColorSpace: "
+ << gfx::ColorSpaceWin::GetDXGIColorSpace(color_space);
+ DVLOG(2) << "output color space:"
+ << dx11_converter_output_color_space_ << " DXGIColorSpace: "
+ << gfx::ColorSpaceWin::GetDXGIColorSpace(
+ dx11_converter_output_color_space_);
+ video_context1->VideoProcessorSetStreamColorSpace1(
+ d3d11_processor_.get(), 0,
+ gfx::ColorSpaceWin::GetDXGIColorSpace(color_space));
+ video_context1->VideoProcessorSetOutputColorSpace1(
+ d3d11_processor_.get(), gfx::ColorSpaceWin::GetDXGIColorSpace(
+ dx11_converter_output_color_space_));
+ }
} else {
D3D11_VIDEO_PROCESSOR_COLOR_SPACE d3d11_color_space =
gfx::ColorSpaceWin::GetD3D11ColorSpace(color_space);
@@ -2853,7 +2887,6 @@ bool DXVAVideoDecodeAccelerator::InitializeID3D11VideoProcessor(
video_context_->VideoProcessorSetOutputColorSpace(
d3d11_processor_.get(), &d3d11_color_space);
}
- dx11_converter_color_space_ = color_space;
}
}
return true;
@@ -2921,7 +2954,7 @@ bool DXVAVideoDecodeAccelerator::SetTransformOutputType(IMFTransform* transform,
RETURN_ON_HR_FAILURE(hr, "Failed to set output type", false);
return true;
}
- media_type.Release();
+ media_type.Reset();
}
return false;
}
diff --git a/chromium/media/gpu/dxva_video_decode_accelerator_win.h b/chromium/media/gpu/dxva_video_decode_accelerator_win.h
index df3ff4dc692..46ee4ad499c 100644
--- a/chromium/media/gpu/dxva_video_decode_accelerator_win.h
+++ b/chromium/media/gpu/dxva_video_decode_accelerator_win.h
@@ -32,6 +32,7 @@
#include "base/threading/thread.h"
#include "base/win/scoped_comptr.h"
#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "media/base/video_color_space.h"
#include "media/filters/h264_parser.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
#include "media/gpu/media_gpu_export.h"
@@ -73,7 +74,7 @@ class H264ConfigChangeDetector {
bool DetectConfig(const uint8_t* stream, unsigned int size);
bool config_changed() const { return config_changed_; }
- gfx::ColorSpace current_color_space() const;
+ VideoColorSpace current_color_space() const;
private:
// These fields are used to track the SPS/PPS in the H.264 bitstream and
@@ -135,7 +136,8 @@ class MEDIA_GPU_EXPORT DXVAVideoDecodeAccelerator
GLenum GetSurfaceInternalFormat() const override;
static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles(
- const gpu::GpuPreferences& gpu_preferences);
+ const gpu::GpuPreferences& gpu_preferences,
+ const gpu::GpuDriverBugWorkarounds& workarounds);
// Preload dlls required for decoding.
static void PreSandboxInitialization();
@@ -241,7 +243,8 @@ class MEDIA_GPU_EXPORT DXVAVideoDecodeAccelerator
// Notifies the client about the availability of a picture.
void NotifyPictureReady(int picture_buffer_id,
int input_buffer_id,
- const gfx::ColorSpace& color_space);
+ const gfx::ColorSpace& color_space,
+ bool allow_overlay);
// Sends pending input buffer processed acks to the client if we don't have
// output samples waiting to be processed.
@@ -535,9 +538,6 @@ class MEDIA_GPU_EXPORT DXVAVideoDecodeAccelerator
// contexts.
bool use_keyed_mutex_;
- // Color spaced used when initializing the dx11 format converter.
- gfx::ColorSpace dx11_converter_color_space_;
-
// Outputs from the dx11 format converter will be in this color space.
gfx::ColorSpace dx11_converter_output_color_space_;
diff --git a/chromium/media/gpu/fake_jpeg_decode_accelerator.cc b/chromium/media/gpu/fake_jpeg_decode_accelerator.cc
new file mode 100644
index 00000000000..cfaba2b7bb8
--- /dev/null
+++ b/chromium/media/gpu/fake_jpeg_decode_accelerator.cc
@@ -0,0 +1,103 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/fake_jpeg_decode_accelerator.h"
+
+#include "base/bind.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "media/gpu/shared_memory_region.h"
+
+namespace media {
+
+FakeJpegDecodeAccelerator::FakeJpegDecodeAccelerator(
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ : client_task_runner_(base::ThreadTaskRunnerHandle::Get()),
+ io_task_runner_(std::move(io_task_runner)),
+ decoder_thread_("FakeJpegDecoderThread"),
+ weak_factory_(this) {}
+
+FakeJpegDecodeAccelerator::~FakeJpegDecodeAccelerator() {
+ DCHECK(client_task_runner_->BelongsToCurrentThread());
+}
+
+bool FakeJpegDecodeAccelerator::Initialize(
+ JpegDecodeAccelerator::Client* client) {
+ DCHECK(client_task_runner_->BelongsToCurrentThread());
+ client_ = client;
+
+ if (!decoder_thread_.Start()) {
+ DLOG(ERROR) << "Failed to start decoding thread.";
+ return false;
+ }
+ decoder_task_runner_ = decoder_thread_.task_runner();
+
+ return true;
+}
+
+void FakeJpegDecodeAccelerator::Decode(
+ const BitstreamBuffer& bitstream_buffer,
+ const scoped_refptr<VideoFrame>& video_frame) {
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+
+ // SharedMemoryRegion will take over the |bitstream_buffer.handle()|.
+ std::unique_ptr<SharedMemoryRegion> src_shm(
+ new SharedMemoryRegion(bitstream_buffer, true));
+ if (!src_shm->Map()) {
+ DLOG(ERROR) << "Unable to map shared memory in FakeJpegDecodeAccelerator";
+ NotifyError(bitstream_buffer.id(), JpegDecodeAccelerator::UNREADABLE_INPUT);
+ return;
+ }
+
+ // Unretained |this| is safe because |this| owns |decoder_thread_|.
+ decoder_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&FakeJpegDecodeAccelerator::DecodeOnDecoderThread,
+ base::Unretained(this), bitstream_buffer,
+ video_frame, base::Passed(&src_shm)));
+}
+
+void FakeJpegDecodeAccelerator::DecodeOnDecoderThread(
+ const BitstreamBuffer& bitstream_buffer,
+ const scoped_refptr<VideoFrame>& video_frame,
+ std::unique_ptr<SharedMemoryRegion> src_shm) {
+ DCHECK(decoder_task_runner_->BelongsToCurrentThread());
+
+ // Do not actually decode the Jpeg data.
+ // Instead, just fill the output buffer with zeros.
+ size_t allocation_size =
+ VideoFrame::AllocationSize(PIXEL_FORMAT_I420, video_frame->coded_size());
+ memset(video_frame->data(0), 0, allocation_size);
+
+ client_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&FakeJpegDecodeAccelerator::OnDecodeDoneOnClientThread,
+ weak_factory_.GetWeakPtr(), bitstream_buffer.id()));
+}
+
+bool FakeJpegDecodeAccelerator::IsSupported() {
+ return true;
+}
+
+void FakeJpegDecodeAccelerator::NotifyError(int32_t bitstream_buffer_id,
+ Error error) {
+ client_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&FakeJpegDecodeAccelerator::NotifyErrorOnClientThread,
+ weak_factory_.GetWeakPtr(), bitstream_buffer_id, error));
+}
+
+void FakeJpegDecodeAccelerator::NotifyErrorOnClientThread(
+ int32_t bitstream_buffer_id,
+ Error error) {
+ DCHECK(client_task_runner_->BelongsToCurrentThread());
+ client_->NotifyError(bitstream_buffer_id, error);
+}
+
+void FakeJpegDecodeAccelerator::OnDecodeDoneOnClientThread(
+ int32_t input_buffer_id) {
+ DCHECK(client_task_runner_->BelongsToCurrentThread());
+ client_->VideoFrameReady(input_buffer_id);
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/fake_jpeg_decode_accelerator.h b/chromium/media/gpu/fake_jpeg_decode_accelerator.h
new file mode 100644
index 00000000000..0a25dbb729c
--- /dev/null
+++ b/chromium/media/gpu/fake_jpeg_decode_accelerator.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_FAKE_JPEG_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_FAKE_JPEG_DECODE_ACCELERATOR_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread.h"
+#include "media/base/bitstream_buffer.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/video/jpeg_decode_accelerator.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+namespace media {
+
+class SharedMemoryRegion;
+
+// Uses software-based decoding. The purpose of this class is to enable testing
+// of communication to the JpegDecodeAccelerator without requiring an actual
+// hardware decoder.
+class MEDIA_GPU_EXPORT FakeJpegDecodeAccelerator
+ : public JpegDecodeAccelerator {
+ public:
+ FakeJpegDecodeAccelerator(
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
+ ~FakeJpegDecodeAccelerator() override;
+
+ // JpegDecodeAccelerator implementation.
+ bool Initialize(JpegDecodeAccelerator::Client* client) override;
+ void Decode(const BitstreamBuffer& bitstream_buffer,
+ const scoped_refptr<VideoFrame>& video_frame) override;
+ bool IsSupported() override;
+
+ private:
+ void DecodeOnDecoderThread(const BitstreamBuffer& bitstream_buffer,
+ const scoped_refptr<VideoFrame>& video_frame,
+ std::unique_ptr<SharedMemoryRegion> src_shm);
+ void NotifyError(int32_t bitstream_buffer_id, Error error);
+ void NotifyErrorOnClientThread(int32_t bitstream_buffer_id, Error error);
+ void OnDecodeDoneOnClientThread(int32_t input_buffer_id);
+
+ // Task runner for calls to |client_|.
+ const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner_;
+
+ // GPU IO task runner.
+ const scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+
+ Client* client_ = nullptr;
+
+ base::Thread decoder_thread_;
+ scoped_refptr<base::SingleThreadTaskRunner> decoder_task_runner_;
+
+ base::WeakPtrFactory<FakeJpegDecodeAccelerator> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeJpegDecodeAccelerator);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_FAKE_JPEG_DECODE_ACCELERATOR_H_
diff --git a/chromium/media/gpu/gpu_video_decode_accelerator_factory.cc b/chromium/media/gpu/gpu_video_decode_accelerator_factory.cc
index 0065548b5e1..c7a4dfd382b 100644
--- a/chromium/media/gpu/gpu_video_decode_accelerator_factory.cc
+++ b/chromium/media/gpu/gpu_video_decode_accelerator_factory.cc
@@ -67,7 +67,8 @@ GpuVideoDecodeAcceleratorFactory::CreateWithNoGL() {
// static
MEDIA_GPU_EXPORT gpu::VideoDecodeAcceleratorCapabilities
GpuVideoDecodeAcceleratorFactory::GetDecoderCapabilities(
- const gpu::GpuPreferences& gpu_preferences) {
+ const gpu::GpuPreferences& gpu_preferences,
+ const gpu::GpuDriverBugWorkarounds& workarounds) {
VideoDecodeAccelerator::Capabilities capabilities;
if (gpu_preferences.disable_accelerated_video_decode)
return gpu::VideoDecodeAcceleratorCapabilities();
@@ -81,7 +82,8 @@ GpuVideoDecodeAcceleratorFactory::GetDecoderCapabilities(
// resolutions and other supported profile parameters.
#if defined(OS_WIN)
capabilities.supported_profiles =
- DXVAVideoDecodeAccelerator::GetSupportedProfiles(gpu_preferences);
+ DXVAVideoDecodeAccelerator::GetSupportedProfiles(gpu_preferences,
+ workarounds);
#elif defined(OS_CHROMEOS)
VideoDecodeAccelerator::SupportedProfiles vda_profiles;
#if defined(USE_V4L2_CODEC)
@@ -249,7 +251,7 @@ GpuVideoDecodeAcceleratorFactory::CreateAndroidVDA(
const gpu::GpuPreferences& gpu_preferences) const {
std::unique_ptr<VideoDecodeAccelerator> decoder;
decoder.reset(new AndroidVideoDecodeAccelerator(
- AVDACodecAllocator::Instance(), make_context_current_cb_,
+ AVDACodecAllocator::GetInstance(), make_context_current_cb_,
get_gles2_decoder_cb_));
return decoder;
}
diff --git a/chromium/media/gpu/gpu_video_decode_accelerator_factory.h b/chromium/media/gpu/gpu_video_decode_accelerator_factory.h
index 5f5a9d2ecdb..71d5a304ba4 100644
--- a/chromium/media/gpu/gpu_video_decode_accelerator_factory.h
+++ b/chromium/media/gpu/gpu_video_decode_accelerator_factory.h
@@ -70,7 +70,8 @@ class MEDIA_GPU_EXPORT GpuVideoDecodeAcceleratorFactory {
static std::unique_ptr<GpuVideoDecodeAcceleratorFactory> CreateWithNoGL();
static gpu::VideoDecodeAcceleratorCapabilities GetDecoderCapabilities(
- const gpu::GpuPreferences& gpu_preferences);
+ const gpu::GpuPreferences& gpu_preferences,
+ const gpu::GpuDriverBugWorkarounds& workarounds);
std::unique_ptr<VideoDecodeAccelerator> CreateVDA(
VideoDecodeAccelerator::Client* client,
diff --git a/chromium/media/gpu/ipc/client/gpu_jpeg_decode_accelerator_host.cc b/chromium/media/gpu/ipc/client/gpu_jpeg_decode_accelerator_host.cc
index 1afe39c3db2..ec2a43364fd 100644
--- a/chromium/media/gpu/ipc/client/gpu_jpeg_decode_accelerator_host.cc
+++ b/chromium/media/gpu/ipc/client/gpu_jpeg_decode_accelerator_host.cc
@@ -31,15 +31,23 @@ class GpuJpegDecodeAcceleratorHost::Receiver : public IPC::Listener,
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
: client_(client),
io_task_runner_(io_task_runner),
- weak_factory_for_io_(this) {
+ weak_factory_for_io_(
+ base::MakeUnique<base::WeakPtrFactory<Receiver>>(this)),
+ weak_ptr_for_io_(weak_factory_for_io_->GetWeakPtr()) {
DCHECK(CalledOnValidThread());
}
- ~Receiver() override { DCHECK(CalledOnValidThread()); }
+ ~Receiver() override {
+ DCHECK(CalledOnValidThread());
+ // If |io_task_runner_| no longer accepts tasks, |weak_factory_for_io_|
+ // will leak. This is acceptable, because that should only happen on
+ // Browser shutdown.
+ io_task_runner_->DeleteSoon(FROM_HERE, weak_factory_for_io_.release());
+ }
- void InvalidateWeakPtr(base::WaitableEvent* event) {
+ void InvalidateWeakPtrOnIOThread(base::WaitableEvent* event) {
DCHECK(io_task_runner_->BelongsToCurrentThread());
- weak_factory_for_io_.InvalidateWeakPtrs();
+ weak_factory_for_io_->InvalidateWeakPtrs();
event->Signal();
}
@@ -62,9 +70,7 @@ class GpuJpegDecodeAcceleratorHost::Receiver : public IPC::Listener,
return handled;
}
- base::WeakPtr<IPC::Listener> AsWeakPtrForIO() {
- return weak_factory_for_io_.GetWeakPtr();
- }
+ base::WeakPtr<IPC::Listener> AsWeakPtrForIO() { return weak_ptr_for_io_; }
private:
void OnDecodeAck(int32_t bitstream_buffer_id, Error error) {
@@ -91,7 +97,8 @@ class GpuJpegDecodeAcceleratorHost::Receiver : public IPC::Listener,
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
// Weak pointers will be invalidated on IO thread.
- base::WeakPtrFactory<Receiver> weak_factory_for_io_;
+ std::unique_ptr<base::WeakPtrFactory<Receiver>> weak_factory_for_io_;
+ base::WeakPtr<Receiver> weak_ptr_for_io_;
DISALLOW_COPY_AND_ASSIGN(Receiver);
};
@@ -118,11 +125,16 @@ GpuJpegDecodeAcceleratorHost::~GpuJpegDecodeAcceleratorHost() {
// routed to |receiver_| on IO thread.
base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED);
- io_task_runner_->PostTask(FROM_HERE,
- base::Bind(&Receiver::InvalidateWeakPtr,
- base::Unretained(receiver_.get()),
- base::Unretained(&event)));
- event.Wait();
+ // Use of Unretained() is safe, because if the task executes, we block
+ // until it is finished by waiting on |event| below.
+ bool task_expected_to_run = io_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&Receiver::InvalidateWeakPtrOnIOThread,
+ base::Unretained(receiver_.get()),
+ base::Unretained(&event)));
+ // If the current call is happening during the browser shutdown, the
+ // |io_task_runner_| may no longer be accepting tasks.
+ if (task_expected_to_run)
+ event.Wait();
}
}
diff --git a/chromium/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc b/chromium/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc
index af17901fcac..2bafbeebd11 100644
--- a/chromium/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc
+++ b/chromium/media/gpu/ipc/service/gpu_jpeg_decode_accelerator.cc
@@ -10,6 +10,7 @@
#include <utility>
#include "base/bind.h"
+#include "base/command_line.h"
#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
@@ -21,7 +22,9 @@
#include "gpu/ipc/service/gpu_channel.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/message_filter.h"
+#include "media/base/media_switches.h"
#include "media/filters/jpeg_parser.h"
+#include "media/gpu/fake_jpeg_decode_accelerator.h"
#include "media/gpu/ipc/common/media_messages.h"
#include "ui/gfx/geometry/size.h"
@@ -59,6 +62,12 @@ std::unique_ptr<media::JpegDecodeAccelerator> CreateVaapiJDA(
return decoder;
}
+std::unique_ptr<media::JpegDecodeAccelerator> CreateFakeJDA(
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) {
+ return base::MakeUnique<media::FakeJpegDecodeAccelerator>(
+ std::move(io_task_runner));
+}
+
void DecodeFinished(std::unique_ptr<base::SharedMemory> shm) {
// Do nothing. Because VideoFrame is backed by |shm|, the purpose of this
// function is to just keep reference of |shm| to make sure it lives until
@@ -94,17 +103,22 @@ bool VerifyDecodeParams(const AcceleratedJpegDecoderMsg_Decode_Params& params) {
namespace media {
-class GpuJpegDecodeAccelerator::Client : public JpegDecodeAccelerator::Client,
- public base::NonThreadSafe {
+class GpuJpegDecodeAccelerator::Client : public JpegDecodeAccelerator::Client {
public:
- Client(GpuJpegDecodeAccelerator* owner, int32_t route_id)
- : owner_(owner->AsWeakPtr()), route_id_(route_id) {}
+ Client(base::WeakPtr<GpuJpegDecodeAccelerator> owner,
+ int32_t route_id,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner)
+ : owner_(std::move(owner)),
+ route_id_(route_id),
+ io_task_runner_(std::move(io_task_runner)) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ }
- ~Client() override { DCHECK(CalledOnValidThread()); }
+ ~Client() override { DCHECK(thread_checker_.CalledOnValidThread()); }
// JpegDecodeAccelerator::Client implementation.
void VideoFrameReady(int32_t bitstream_buffer_id) override {
- DCHECK(CalledOnValidThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
if (owner_)
owner_->NotifyDecodeStatus(route_id_, bitstream_buffer_id,
JpegDecodeAccelerator::NO_ERRORS);
@@ -112,31 +126,34 @@ class GpuJpegDecodeAccelerator::Client : public JpegDecodeAccelerator::Client,
void NotifyError(int32_t bitstream_buffer_id,
JpegDecodeAccelerator::Error error) override {
- DCHECK(CalledOnValidThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
if (owner_)
owner_->NotifyDecodeStatus(route_id_, bitstream_buffer_id, error);
}
void Decode(const BitstreamBuffer& bitstream_buffer,
const scoped_refptr<VideoFrame>& video_frame) {
- DCHECK(CalledOnValidThread());
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
DCHECK(accelerator_);
accelerator_->Decode(bitstream_buffer, video_frame);
}
void set_accelerator(std::unique_ptr<JpegDecodeAccelerator> accelerator) {
- DCHECK(CalledOnValidThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
accelerator_ = std::move(accelerator);
}
private:
+ base::ThreadChecker thread_checker_;
base::WeakPtr<GpuJpegDecodeAccelerator> owner_;
- int32_t route_id_;
+ const int32_t route_id_;
+ const scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
std::unique_ptr<JpegDecodeAccelerator> accelerator_;
};
// Create, destroy, and RemoveClient run on child thread. All other methods run
// on IO thread.
+// TODO(chfremer): Migrate this to Mojo. See https://crbug.com/699255
class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
public:
explicit MessageFilter(GpuJpegDecodeAccelerator* owner)
@@ -331,8 +348,13 @@ std::vector<GpuJpegDecodeAcceleratorFactoryProvider::CreateAcceleratorCB>
GpuJpegDecodeAcceleratorFactoryProvider::GetAcceleratorFactories() {
// This list is ordered by priority of use.
std::vector<CreateAcceleratorCB> result;
- result.push_back(base::Bind(&CreateV4L2JDA));
- result.push_back(base::Bind(&CreateVaapiJDA));
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kUseFakeJpegDecodeAccelerator)) {
+ result.push_back(base::Bind(&CreateFakeJDA));
+ } else {
+ result.push_back(base::Bind(&CreateV4L2JDA));
+ result.push_back(base::Bind(&CreateVaapiJDA));
+ }
return result;
}
@@ -369,7 +391,8 @@ void GpuJpegDecodeAccelerator::AddClient(int32_t route_id,
// When adding non-chromeos platforms, VideoCaptureGpuJpegDecoder::Initialize
// needs to be updated.
- std::unique_ptr<Client> client(new Client(this, route_id));
+ std::unique_ptr<Client> client(
+ new Client(AsWeakPtr(), route_id, io_task_runner_));
std::unique_ptr<JpegDecodeAccelerator> accelerator;
for (const auto& create_jda_function : accelerator_factory_functions_) {
std::unique_ptr<JpegDecodeAccelerator> tmp_accelerator =
diff --git a/chromium/media/gpu/ipc/service/gpu_jpeg_decode_accelerator_unittest.cc b/chromium/media/gpu/ipc/service/gpu_jpeg_decode_accelerator_unittest.cc
index 82dd28f05c5..a4c8f7abc6e 100644
--- a/chromium/media/gpu/ipc/service/gpu_jpeg_decode_accelerator_unittest.cc
+++ b/chromium/media/gpu/ipc/service/gpu_jpeg_decode_accelerator_unittest.cc
@@ -5,6 +5,7 @@
#include "media/gpu/ipc/service/gpu_jpeg_decode_accelerator.h"
#include "base/bind.h"
#include "base/run_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "base/threading/thread.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "media/gpu/ipc/common/media_messages.h"
@@ -80,7 +81,7 @@ class GpuJpegDecodeAcceleratorTest : public ::testing::Test {
private:
// This is required to allow base::ThreadTaskRunnerHandle::Get() from the
// test execution thread.
- base::MessageLoop message_loop_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
};
// Tests that the communication for decoding a frame between the caller of
diff --git a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
index 9ed062425b8..3609e5da2f1 100644
--- a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
+++ b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
@@ -188,9 +188,10 @@ GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
// static
gpu::VideoDecodeAcceleratorCapabilities
GpuVideoDecodeAccelerator::GetCapabilities(
- const gpu::GpuPreferences& gpu_preferences) {
+ const gpu::GpuPreferences& gpu_preferences,
+ const gpu::GpuDriverBugWorkarounds& workarounds) {
return GpuVideoDecodeAcceleratorFactory::GetDecoderCapabilities(
- gpu_preferences);
+ gpu_preferences, workarounds);
}
bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
@@ -343,7 +344,7 @@ bool GpuVideoDecodeAccelerator::Initialize(
const VideoDecodeAccelerator::Config& config) {
DCHECK(!video_decode_accelerator_);
- if (!stub_->channel()->AddRoute(host_route_id_, stub_->stream_id(), this)) {
+ if (!stub_->channel()->AddRoute(host_route_id_, stub_->sequence_id(), this)) {
DLOG(ERROR) << "Initialize(): failed to add route";
return false;
}
@@ -450,7 +451,7 @@ void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, GL_RGBA,
texture_dimensions_.width(),
texture_dimensions_.height(), 1, 0,
- GL_RGBA, 0, gfx::Rect());
+ GL_RGBA, GL_UNSIGNED_BYTE, gfx::Rect());
} else {
// For other targets, texture dimensions should already be defined.
GLsizei width = 0, height = 0;
@@ -467,9 +468,10 @@ void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
GLenum format =
video_decode_accelerator_.get()->GetSurfaceInternalFormat();
if (format != GL_RGBA) {
+ DCHECK(format == GL_BGRA_EXT);
texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, format,
- width, height, 1, 0, format, 0,
- gfx::Rect());
+ width, height, 1, 0, format,
+ GL_UNSIGNED_BYTE, gfx::Rect());
}
}
service_ids.push_back(texture_ref->service_id());
diff --git a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h
index c194c7567ff..6c6d0bff669 100644
--- a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h
+++ b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h
@@ -26,6 +26,7 @@
#include "ui/gfx/geometry/size.h"
namespace gpu {
+class GpuDriverBugWorkarounds;
struct GpuPreferences;
} // namespace gpu
@@ -49,7 +50,8 @@ class GpuVideoDecodeAccelerator
// This query calls the appropriate platform-specific version. The returned
// capabilities will not contain duplicate supported profile entries.
static gpu::VideoDecodeAcceleratorCapabilities GetCapabilities(
- const gpu::GpuPreferences& gpu_preferences);
+ const gpu::GpuPreferences& gpu_preferences,
+ const gpu::GpuDriverBugWorkarounds& workarounds);
// IPC::Listener implementation.
bool OnMessageReceived(const IPC::Message& message) override;
diff --git a/chromium/media/gpu/ipc/service/gpu_video_encode_accelerator.cc b/chromium/media/gpu/ipc/service/gpu_video_encode_accelerator.cc
index eac1572efc1..7d31fcf60a3 100644
--- a/chromium/media/gpu/ipc/service/gpu_video_encode_accelerator.cc
+++ b/chromium/media/gpu/ipc/service/gpu_video_encode_accelerator.cc
@@ -173,6 +173,9 @@ GpuVideoEncodeAccelerator::GpuVideoEncodeAccelerator(
encode_task_runner_(main_task_runner_),
weak_this_factory_for_encoder_worker_(this),
weak_this_factory_(this) {
+ weak_this_for_encoder_worker_ =
+ weak_this_factory_for_encoder_worker_.GetWeakPtr();
+ weak_this_ = weak_this_factory_.GetWeakPtr();
stub_->AddDestructionObserver(this);
make_context_current_ =
base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
@@ -182,13 +185,7 @@ GpuVideoEncodeAccelerator::~GpuVideoEncodeAccelerator() {
// This class can only be self-deleted from OnWillDestroyStub(), which means
// the VEA has already been destroyed in there.
DCHECK(!encoder_);
- if (encoder_worker_thread_.IsRunning()) {
- encoder_worker_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&GpuVideoEncodeAccelerator::DestroyOnEncoderWorker,
- weak_this_factory_for_encoder_worker_.GetWeakPtr()));
- encoder_worker_thread_.Stop();
- }
+ DCHECK(!encoder_worker_thread_.IsRunning());
}
bool GpuVideoEncodeAccelerator::Initialize(VideoPixelFormat input_format,
@@ -203,7 +200,7 @@ bool GpuVideoEncodeAccelerator::Initialize(VideoPixelFormat input_format,
<< ", initial_bitrate=" << initial_bitrate;
DCHECK(!encoder_);
- if (!stub_->channel()->AddRoute(host_route_id_, stub_->stream_id(), this)) {
+ if (!stub_->channel()->AddRoute(host_route_id_, stub_->sequence_id(), this)) {
DLOG(ERROR) << __func__ << " failed to add route";
return false;
}
@@ -229,8 +226,8 @@ bool GpuVideoEncodeAccelerator::Initialize(VideoPixelFormat input_format,
input_visible_size_ = input_visible_size;
// Attempt to set up performing encoding tasks on IO thread, if supported
// by the VEA.
- if (encoder_->TryToSetupEncodeOnSeparateThread(
- weak_this_factory_.GetWeakPtr(), io_task_runner_)) {
+ if (encoder_->TryToSetupEncodeOnSeparateThread(weak_this_,
+ io_task_runner_)) {
filter_ = new MessageFilter(this, host_route_id_);
stub_->channel()->AddFilter(filter_.get());
encode_task_runner_ = io_task_runner_;
@@ -325,6 +322,16 @@ void GpuVideoEncodeAccelerator::OnWillDestroyStub() {
filter_removed_.Wait();
}
+ // We should stop |encoder_worker_thread_| before releasing |encoder_|, see
+ // crbug.com/715759.
+ if (encoder_worker_thread_.IsRunning()) {
+ encoder_worker_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&GpuVideoEncodeAccelerator::DestroyOnEncoderWorker,
+ weak_this_for_encoder_worker_));
+ encoder_worker_thread_.Stop();
+ }
+
stub_->channel()->RemoveRoute(host_route_id_);
stub_->RemoveDestructionObserver(this);
encoder_.reset();
@@ -404,7 +411,7 @@ void GpuVideoEncodeAccelerator::OnEncode(
encoder_worker_task_runner_->PostTask(
FROM_HERE,
base::Bind(&GpuVideoEncodeAccelerator::CreateEncodeFrameOnEncoderWorker,
- weak_this_factory_for_encoder_worker_.GetWeakPtr(), params));
+ weak_this_for_encoder_worker_, params));
}
void GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(
@@ -465,18 +472,18 @@ void GpuVideoEncodeAccelerator::CreateEncodeFrameOnEncoderWorker(
if (!map_offset.IsValid() || !map_size.IsValid()) {
DLOG(ERROR) << __func__ << " invalid map_offset or map_size";
encode_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuVideoEncodeAccelerator::NotifyError,
- weak_this_factory_.GetWeakPtr(),
- VideoEncodeAccelerator::kPlatformFailureError));
+ FROM_HERE,
+ base::Bind(&GpuVideoEncodeAccelerator::NotifyError, weak_this_,
+ VideoEncodeAccelerator::kPlatformFailureError));
return;
}
if (!shm->MapAt(map_offset.ValueOrDie(), map_size.ValueOrDie())) {
DLOG(ERROR) << __func__ << " could not map frame_id=" << params.frame_id;
encode_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuVideoEncodeAccelerator::NotifyError,
- weak_this_factory_.GetWeakPtr(),
- VideoEncodeAccelerator::kPlatformFailureError));
+ FROM_HERE,
+ base::Bind(&GpuVideoEncodeAccelerator::NotifyError, weak_this_,
+ VideoEncodeAccelerator::kPlatformFailureError));
return;
}
@@ -489,9 +496,9 @@ void GpuVideoEncodeAccelerator::CreateEncodeFrameOnEncoderWorker(
if (!frame) {
DLOG(ERROR) << __func__ << " could not create a frame";
encode_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuVideoEncodeAccelerator::NotifyError,
- weak_this_factory_.GetWeakPtr(),
- VideoEncodeAccelerator::kPlatformFailureError));
+ FROM_HERE,
+ base::Bind(&GpuVideoEncodeAccelerator::NotifyError, weak_this_,
+ VideoEncodeAccelerator::kPlatformFailureError));
return;
}
@@ -500,9 +507,9 @@ void GpuVideoEncodeAccelerator::CreateEncodeFrameOnEncoderWorker(
frame->AddDestructionObserver(
base::Bind(&DropSharedMemory, base::Passed(&shm)));
encode_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuVideoEncodeAccelerator::OnEncodeFrameCreated,
- weak_this_factory_.GetWeakPtr(), params.frame_id,
- params.force_keyframe, frame));
+ FROM_HERE,
+ base::Bind(&GpuVideoEncodeAccelerator::OnEncodeFrameCreated, weak_this_,
+ params.frame_id, params.force_keyframe, frame));
}
void GpuVideoEncodeAccelerator::DestroyOnEncoderWorker() {
@@ -517,6 +524,9 @@ void GpuVideoEncodeAccelerator::OnEncodeFrameCreated(
DVLOG(3) << __func__;
DCHECK(CheckIfCalledOnCorrectThread());
+ if (filter_removed_.IsSignaled())
+ return;
+
if (!frame) {
DLOG(ERROR) << __func__ << " could not create a frame";
NotifyError(VideoEncodeAccelerator::kPlatformFailureError);
diff --git a/chromium/media/gpu/ipc/service/gpu_video_encode_accelerator.h b/chromium/media/gpu/ipc/service/gpu_video_encode_accelerator.h
index 351a97c3eef..f6ed332ddc9 100644
--- a/chromium/media/gpu/ipc/service/gpu_video_encode_accelerator.h
+++ b/chromium/media/gpu/ipc/service/gpu_video_encode_accelerator.h
@@ -159,12 +159,16 @@ class GpuVideoEncodeAccelerator
// otherwise |main_thread_task_runner_|.
scoped_refptr<base::SingleThreadTaskRunner> encode_task_runner_;
+ base::WeakPtr<GpuVideoEncodeAccelerator> weak_this_for_encoder_worker_;
+ base::WeakPtr<GpuVideoEncodeAccelerator> weak_this_;
+
// Weak pointer for referring back to |this| on |encoder_worker_task_runner_|.
base::WeakPtrFactory<GpuVideoEncodeAccelerator>
weak_this_factory_for_encoder_worker_;
// Weak pointer for VideoFrames that refer back to |this| on
- // |main_task_runner| or |io_task_runner_|.
+ // |main_task_runner| or |io_task_runner_|. |io_task_runner_| is used if and
+ // only if |filter_| is applied.
base::WeakPtrFactory<GpuVideoEncodeAccelerator> weak_this_factory_;
DISALLOW_COPY_AND_ASSIGN(GpuVideoEncodeAccelerator);
diff --git a/chromium/media/gpu/ipc/service/media_gpu_channel.cc b/chromium/media/gpu/ipc/service/media_gpu_channel.cc
index da1c62bc08f..2f5732d046b 100644
--- a/chromium/media/gpu/ipc/service/media_gpu_channel.cc
+++ b/chromium/media/gpu/ipc/service/media_gpu_channel.cc
@@ -132,7 +132,7 @@ void MediaGpuChannel::OnCreateJpegDecoder(int32_t route_id,
jpeg_decoder_->AddClient(
route_id, base::Bind(&SendCreateJpegDecoderResult, base::Passed(&msg),
channel_->io_task_runner(), channel_->AsWeakPtr(),
- make_scoped_refptr(channel_->filter())));
+ channel_->filter()));
}
void MediaGpuChannel::OnCreateVideoDecoder(
diff --git a/chromium/media/gpu/jpeg_decode_accelerator_unittest.cc b/chromium/media/gpu/jpeg_decode_accelerator_unittest.cc
index 00fa4f5b212..4262eb684ef 100644
--- a/chromium/media/gpu/jpeg_decode_accelerator_unittest.cc
+++ b/chromium/media/gpu/jpeg_decode_accelerator_unittest.cc
@@ -439,7 +439,7 @@ class JpegDecodeAcceleratorTest : public ::testing::Test {
};
void JpegDecodeAcceleratorTest::TestDecode(size_t num_concurrent_decoders) {
- LOG_ASSERT(test_image_files_.size() == expected_status_.size());
+ LOG_ASSERT(test_image_files_.size() >= expected_status_.size());
base::Thread decoder_thread("DecoderThread");
ASSERT_TRUE(decoder_thread.Start());
@@ -461,8 +461,10 @@ void JpegDecodeAcceleratorTest::TestDecode(size_t num_concurrent_decoders) {
FROM_HERE, base::Bind(&JpegClient::StartDecode,
base::Unretained(clients[i]), index));
}
- for (size_t i = 0; i < num_concurrent_decoders; i++) {
- ASSERT_EQ(notes[i]->Wait(), expected_status_[index]);
+ if (index < expected_status_.size()) {
+ for (size_t i = 0; i < num_concurrent_decoders; i++) {
+ ASSERT_EQ(notes[i]->Wait(), expected_status_[index]);
+ }
}
}
@@ -475,7 +477,7 @@ void JpegDecodeAcceleratorTest::TestDecode(size_t num_concurrent_decoders) {
}
TEST_F(JpegDecodeAcceleratorTest, SimpleDecode) {
- for (const auto& image : g_env->image_data_user_) {
+ for (auto* image : g_env->image_data_user_) {
test_image_files_.push_back(image);
expected_status_.push_back(CS_DECODE_PASS);
}
@@ -483,7 +485,7 @@ TEST_F(JpegDecodeAcceleratorTest, SimpleDecode) {
}
TEST_F(JpegDecodeAcceleratorTest, MultipleDecoders) {
- for (const auto& image : g_env->image_data_user_) {
+ for (auto* image : g_env->image_data_user_) {
test_image_files_.push_back(image);
expected_status_.push_back(CS_DECODE_PASS);
}
@@ -530,6 +532,17 @@ TEST_F(JpegDecodeAcceleratorTest, KeepDecodeAfterFailure) {
TestDecode(1);
}
+TEST_F(JpegDecodeAcceleratorTest, Abort) {
+ const size_t kNumOfJpegToDecode = 5;
+ for (size_t i = 0; i < kNumOfJpegToDecode; i++)
+ test_image_files_.push_back(g_env->image_data_1280x720_default_.get());
+ // Verify only one decode success to ensure both decoders have started the
+ // decoding. Then destroy the first decoder when it is still decoding. The
+ // kernel should not crash during this test.
+ expected_status_.push_back(CS_DECODE_PASS);
+ TestDecode(2);
+}
+
} // namespace
} // namespace media
diff --git a/chromium/media/gpu/media_foundation_video_encode_accelerator_win.cc b/chromium/media/gpu/media_foundation_video_encode_accelerator_win.cc
index 050dfb77043..6723aa7a0b9 100644
--- a/chromium/media/gpu/media_foundation_video_encode_accelerator_win.cc
+++ b/chromium/media/gpu/media_foundation_video_encode_accelerator_win.cc
@@ -337,7 +337,8 @@ bool MediaFoundationVideoEncodeAccelerator::CreateHardwareEncoderMFT() {
}
}
- InitializeMediaFoundation();
+ if (!InitializeMediaFoundation())
+ return false;
uint32_t flags = MFT_ENUM_FLAG_HARDWARE | MFT_ENUM_FLAG_SORTANDFILTER;
MFT_REGISTER_TYPE_INFO input_info;
@@ -693,12 +694,12 @@ void MediaFoundationVideoEncodeAccelerator::DestroyTask() {
}
void MediaFoundationVideoEncodeAccelerator::ReleaseEncoderResources() {
- encoder_.Release();
- codec_api_.Release();
- imf_input_media_type_.Release();
- imf_output_media_type_.Release();
- input_sample_.Release();
- output_sample_.Release();
+ encoder_.Reset();
+ codec_api_.Reset();
+ imf_input_media_type_.Reset();
+ imf_output_media_type_.Reset();
+ input_sample_.Reset();
+ output_sample_.Reset();
}
} // namespace content
diff --git a/chromium/media/gpu/rendering_helper.cc b/chromium/media/gpu/rendering_helper.cc
index 69d631b30cd..1031ed43d59 100644
--- a/chromium/media/gpu/rendering_helper.cc
+++ b/chromium/media/gpu/rendering_helper.cc
@@ -198,6 +198,12 @@ void RenderingHelper::InitializeOneOff(base::WaitableEvent* done) {
cmd_line->AppendSwitchASCII(switches::kUseGL, gl::kGLImplementationEGLName);
#endif
+#if defined(USE_OZONE)
+ ui::OzonePlatform::InitParams params;
+ params.single_process = true;
+ ui::OzonePlatform::InitializeForGPU(params);
+#endif
+
if (!gl::init::InitializeGLOneOff())
LOG(FATAL) << "Could not initialize GL";
done->Signal();
diff --git a/chromium/media/gpu/surface_texture_gl_owner.cc b/chromium/media/gpu/surface_texture_gl_owner.cc
new file mode 100644
index 00000000000..f64ff0d1fa2
--- /dev/null
+++ b/chromium/media/gpu/surface_texture_gl_owner.cc
@@ -0,0 +1,50 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/surface_texture_gl_owner.h"
+
+#include "base/logging.h"
+#include "ui/gl/scoped_make_current.h"
+
+namespace media {
+
+scoped_refptr<SurfaceTextureGLOwner> SurfaceTextureGLOwner::Create() {
+ GLuint texture_id;
+ glGenTextures(1, &texture_id);
+ if (!texture_id)
+ return nullptr;
+
+ return new SurfaceTextureGLOwner(texture_id);
+}
+
+SurfaceTextureGLOwner::SurfaceTextureGLOwner(GLuint texture_id)
+ : SurfaceTexture(CreateJavaSurfaceTexture(texture_id)),
+ context_(gl::GLContext::GetCurrent()),
+ surface_(gl::GLSurface::GetCurrent()),
+ texture_id_(texture_id) {
+ DCHECK(context_);
+ DCHECK(surface_);
+}
+
+SurfaceTextureGLOwner::~SurfaceTextureGLOwner() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Make sure that the SurfaceTexture isn't using the GL objects.
+ DestroyJavaObject();
+
+ ui::ScopedMakeCurrent scoped_make_current(context_.get(), surface_.get());
+ if (scoped_make_current.Succeeded()) {
+ glDeleteTextures(1, &texture_id_);
+ DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ }
+}
+
+void SurfaceTextureGLOwner::AttachToGLContext() {
+ NOTIMPLEMENTED();
+}
+
+void SurfaceTextureGLOwner::DetachFromGLContext() {
+ NOTIMPLEMENTED();
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/surface_texture_gl_owner.h b/chromium/media/gpu/surface_texture_gl_owner.h
new file mode 100644
index 00000000000..f28827ad309
--- /dev/null
+++ b/chromium/media/gpu/surface_texture_gl_owner.h
@@ -0,0 +1,53 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_SURFACE_TEXTURE_GL_OWNER_H_
+#define MEDIA_GPU_SURFACE_TEXTURE_GL_OWNER_H_
+
+#include "base/threading/thread_checker.h"
+#include "media/gpu/media_gpu_export.h"
+#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_surface.h"
+
+namespace media {
+
+// Handy subclass of SurfaceTexture which creates and maintains ownership of the
+// GL texture also. This texture is only destroyed with the object; one may
+// ReleaseSurfaceTexture without destroying the GL texture. It must be deleted
+// on the same thread on which it is constructed.
+class MEDIA_GPU_EXPORT SurfaceTextureGLOwner : public gl::SurfaceTexture {
+ public:
+ // Must be called with a platform gl context current. Creates the GL texture
+ // using this context, and memorizes it to delete the texture later.
+ static scoped_refptr<SurfaceTextureGLOwner> Create();
+
+ // Return the GL texture id that we created.
+ GLuint texture_id() const { return texture_id_; }
+
+ gl::GLContext* context() const { return context_.get(); }
+ gl::GLSurface* surface() const { return surface_.get(); }
+
+ // We don't support these. In principle, we could, but they're fairly buggy
+ // anyway on many devices.
+ void AttachToGLContext() override;
+ void DetachFromGLContext() override;
+
+ private:
+ SurfaceTextureGLOwner(GLuint texture_id);
+ ~SurfaceTextureGLOwner() override;
+
+ // Context and surface that were used to create |texture_id_|.
+ scoped_refptr<gl::GLContext> context_;
+ scoped_refptr<gl::GLSurface> surface_;
+
+ GLuint texture_id_;
+
+ base::ThreadChecker thread_checker_;
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_SURFACE_TEXTURE_GL_OWNER_H_
diff --git a/chromium/media/gpu/surface_texture_gl_owner_unittest.cc b/chromium/media/gpu/surface_texture_gl_owner_unittest.cc
new file mode 100644
index 00000000000..11ce8f3f08a
--- /dev/null
+++ b/chromium/media/gpu/surface_texture_gl_owner_unittest.cc
@@ -0,0 +1,113 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/surface_texture_gl_owner.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context_egl.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_share_group.h"
+#include "ui/gl/gl_surface_egl.h"
+#include "ui/gl/init/gl_factory.h"
+
+using testing::Invoke;
+using testing::NiceMock;
+using testing::_;
+
+namespace media {
+
+class SurfaceTextureGLOwnerTest : public testing::Test {
+ public:
+ SurfaceTextureGLOwnerTest() {}
+ ~SurfaceTextureGLOwnerTest() override {}
+
+ protected:
+ void SetUp() override {
+ gl::init::InitializeGLOneOffImplementation(gl::kGLImplementationEGLGLES2,
+ false, false, false);
+ surface_ = new gl::PbufferGLSurfaceEGL(gfx::Size(320, 240));
+ surface_->Initialize();
+
+ share_group_ = new gl::GLShareGroup;
+ context_ = new gl::GLContextEGL(share_group_.get());
+ context_->Initialize(surface_.get(), gl::GLContextAttribs());
+ ASSERT_TRUE(context_->MakeCurrent(surface_.get()));
+
+ surface_texture_ = SurfaceTextureGLOwner::Create();
+ texture_id_ = surface_texture_->texture_id();
+ // Bind and un-bind the texture, since that's required for glIsTexture to
+ // return true.
+ glBindTexture(GL_TEXTURE_2D, texture_id_);
+ glBindTexture(GL_TEXTURE_2D, 0);
+ ASSERT_TRUE(glIsTexture(texture_id_));
+ }
+
+ void TearDown() override {
+ surface_texture_ = nullptr;
+ context_ = nullptr;
+ share_group_ = nullptr;
+ surface_ = nullptr;
+ gl::init::ShutdownGL();
+ }
+
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture_;
+ GLuint texture_id_ = 0;
+
+ scoped_refptr<gl::GLContext> context_;
+ scoped_refptr<gl::GLShareGroup> share_group_;
+ scoped_refptr<gl::GLSurface> surface_;
+};
+
+// Verify that SurfaceTextureGLOwner creates a bindable GL texture, and deletes
+// it during destruction.
+TEST_F(SurfaceTextureGLOwnerTest, GLTextureIsCreatedAndDestroyed) {
+ // |texture_id| should not work anymore after we delete |surface_texture|.
+ surface_texture_ = nullptr;
+ ASSERT_FALSE(glIsTexture(texture_id_));
+}
+
+// Calling ReleaseSurfaceTexture shouldn't deallocate the texture handle.
+TEST_F(SurfaceTextureGLOwnerTest, ReleaseDoesntDestroyTexture) {
+ surface_texture_->ReleaseSurfaceTexture();
+ ASSERT_TRUE(glIsTexture(texture_id_));
+}
+
+// Make sure that |surface_texture_| remembers the correct context and surface.
+TEST_F(SurfaceTextureGLOwnerTest, ContextAndSurfaceAreCaptured) {
+ ASSERT_EQ(context_, surface_texture_->context());
+ ASSERT_EQ(surface_, surface_texture_->surface());
+}
+
+// Verify that destruction works even if some other context is current.
+TEST_F(SurfaceTextureGLOwnerTest, DestructionWorksWithWrongContext) {
+ scoped_refptr<gl::GLSurface> new_surface(
+ new gl::PbufferGLSurfaceEGL(gfx::Size(320, 240)));
+ new_surface->Initialize();
+
+ scoped_refptr<gl::GLShareGroup> new_share_group(new gl::GLShareGroup);
+ scoped_refptr<gl::GLContext> new_context(
+ new gl::GLContextEGL(new_share_group.get()));
+ new_context->Initialize(new_surface.get(), gl::GLContextAttribs());
+ ASSERT_TRUE(new_context->MakeCurrent(new_surface.get()));
+
+ surface_texture_ = nullptr;
+ ASSERT_FALSE(glIsTexture(texture_id_));
+
+ // |new_context| should still be current.
+ ASSERT_TRUE(new_context->IsCurrent(new_surface.get()));
+
+ new_context = nullptr;
+ new_share_group = nullptr;
+ new_surface = nullptr;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/v4l2_image_processor.cc b/chromium/media/gpu/v4l2_image_processor.cc
index 005c095dc05..bbbed742185 100644
--- a/chromium/media/gpu/v4l2_image_processor.cc
+++ b/chromium/media/gpu/v4l2_image_processor.cc
@@ -668,7 +668,7 @@ bool V4L2ImageProcessor::EnqueueInputRecord() {
qbuf.m.planes[i].m.userptr =
reinterpret_cast<unsigned long>(input_record.frame->data(i));
} else {
- qbuf.m.planes[i].m.fd = input_record.frame->dmabuf_fd(i);
+ qbuf.m.planes[i].m.fd = input_record.frame->DmabufFd(i);
}
}
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
diff --git a/chromium/media/gpu/v4l2_video_encode_accelerator.cc b/chromium/media/gpu/v4l2_video_encode_accelerator.cc
index 7312a237cda..b09b6420a3f 100644
--- a/chromium/media/gpu/v4l2_video_encode_accelerator.cc
+++ b/chromium/media/gpu/v4l2_video_encode_accelerator.cc
@@ -767,7 +767,7 @@ bool V4L2VideoEncodeAccelerator::EnqueueInputRecord() {
break;
case V4L2_MEMORY_DMABUF:
- qbuf.m.planes[i].m.fd = frame->dmabuf_fd(i);
+ qbuf.m.planes[i].m.fd = frame->DmabufFd(i);
DCHECK_NE(qbuf.m.planes[i].m.fd, -1);
break;
diff --git a/chromium/media/gpu/vaapi_drm_picture.cc b/chromium/media/gpu/vaapi_drm_picture.cc
index c61d12ce6bb..b6e1f13731f 100644
--- a/chromium/media/gpu/vaapi_drm_picture.cc
+++ b/chromium/media/gpu/vaapi_drm_picture.cc
@@ -2,17 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "media/gpu/vaapi_drm_picture.h"
+
#include "base/file_descriptor_posix.h"
#include "media/gpu/va_surface.h"
-#include "media/gpu/vaapi_drm_picture.h"
#include "media/gpu/vaapi_wrapper.h"
#include "third_party/libva/va/drm/va_drm.h"
#include "third_party/libva/va/va.h"
#include "ui/gfx/gpu_memory_buffer.h"
+#include "ui/gfx/native_pixmap.h"
#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_image_native_pixmap.h"
#include "ui/gl/scoped_binders.h"
-#include "ui/ozone/gl/gl_image_ozone_native_pixmap.h"
-#include "ui/ozone/public/native_pixmap.h"
#include "ui/ozone/public/ozone_platform.h"
#include "ui/ozone/public/surface_factory_ozone.h"
@@ -76,9 +77,8 @@ bool VaapiDrmPicture::Initialize() {
gfx::BufferFormat format = pixmap_->GetBufferFormat();
- scoped_refptr<ui::GLImageOzoneNativePixmap> image(
- new ui::GLImageOzoneNativePixmap(size_,
- BufferFormatToInternalFormat(format)));
+ scoped_refptr<gl::GLImageNativePixmap> image(new gl::GLImageNativePixmap(
+ size_, BufferFormatToInternalFormat(format)));
if (!image->Initialize(pixmap_.get(), format)) {
LOG(ERROR) << "Failed to create GLImage";
return false;
diff --git a/chromium/media/gpu/vaapi_drm_picture.h b/chromium/media/gpu/vaapi_drm_picture.h
index a1ba66150c8..e73617bad58 100644
--- a/chromium/media/gpu/vaapi_drm_picture.h
+++ b/chromium/media/gpu/vaapi_drm_picture.h
@@ -22,7 +22,7 @@ namespace gl {
class GLImage;
}
-namespace ui {
+namespace gfx {
class NativePixmap;
}
@@ -56,7 +56,7 @@ class VaapiDrmPicture : public VaapiPicture {
bool Initialize();
// Ozone buffer, the storage of the EGLImage and the VASurface.
- scoped_refptr<ui::NativePixmap> pixmap_;
+ scoped_refptr<gfx::NativePixmap> pixmap_;
// EGLImage bound to the GL textures used by the VDA client.
scoped_refptr<gl::GLImage> gl_image_;
diff --git a/chromium/media/gpu/vaapi_wrapper.cc b/chromium/media/gpu/vaapi_wrapper.cc
index 15d1f189446..21ffdabc4e8 100644
--- a/chromium/media/gpu/vaapi_wrapper.cc
+++ b/chromium/media/gpu/vaapi_wrapper.cc
@@ -601,7 +601,7 @@ scoped_refptr<VASurface> VaapiWrapper::CreateUnownedSurface(
#if defined(USE_OZONE)
scoped_refptr<VASurface> VaapiWrapper::CreateVASurfaceForPixmap(
- const scoped_refptr<ui::NativePixmap>& pixmap) {
+ const scoped_refptr<gfx::NativePixmap>& pixmap) {
// Create a VASurface for a NativePixmap by importing the underlying dmabufs.
VASurfaceAttribExternalBuffers va_attrib_extbuf;
memset(&va_attrib_extbuf, 0, sizeof(va_attrib_extbuf));
@@ -667,8 +667,8 @@ scoped_refptr<VASurface> VaapiWrapper::CreateVASurfaceForPixmap(
}
bool VaapiWrapper::ProcessPixmap(
- const scoped_refptr<ui::NativePixmap>& source_pixmap,
- scoped_refptr<ui::NativePixmap> target_pixmap) {
+ const scoped_refptr<gfx::NativePixmap>& source_pixmap,
+ scoped_refptr<gfx::NativePixmap> target_pixmap) {
scoped_refptr<VASurface> va_surface = CreateVASurfaceForPixmap(source_pixmap);
if (!va_surface) {
LOG(ERROR) << "Failed creating VA Surface for source_pixmap";
diff --git a/chromium/media/gpu/vaapi_wrapper.h b/chromium/media/gpu/vaapi_wrapper.h
index 6308f426634..e3f90068461 100644
--- a/chromium/media/gpu/vaapi_wrapper.h
+++ b/chromium/media/gpu/vaapi_wrapper.h
@@ -36,7 +36,7 @@
#endif // USE_X11
#if defined(USE_OZONE)
-namespace ui {
+namespace gfx {
class NativePixmap;
}
#endif
@@ -118,12 +118,12 @@ class MEDIA_GPU_EXPORT VaapiWrapper
// transferred to the caller. It differs from surfaces created using
// CreateSurfaces(), where VaapiWrapper is the owner of the surfaces.
scoped_refptr<VASurface> CreateVASurfaceForPixmap(
- const scoped_refptr<ui::NativePixmap>& pixmap);
+ const scoped_refptr<gfx::NativePixmap>& pixmap);
// Use VPP to process |source_pixmap| to |target_pixmap| with scaling and
// color space conversion.
- bool ProcessPixmap(const scoped_refptr<ui::NativePixmap>& source_pixmap,
- scoped_refptr<ui::NativePixmap> target_pixmap);
+ bool ProcessPixmap(const scoped_refptr<gfx::NativePixmap>& source_pixmap,
+ scoped_refptr<gfx::NativePixmap> target_pixmap);
#endif
diff --git a/chromium/media/gpu/video_decode_accelerator_unittest.cc b/chromium/media/gpu/video_decode_accelerator_unittest.cc
index cb1bed901e5..168bd597075 100644
--- a/chromium/media/gpu/video_decode_accelerator_unittest.cc
+++ b/chromium/media/gpu/video_decode_accelerator_unittest.cc
@@ -85,7 +85,7 @@
#endif // OS_WIN
#if defined(USE_OZONE)
-#include "ui/ozone/public/native_pixmap.h"
+#include "ui/gfx/native_pixmap.h"
#include "ui/ozone/public/ozone_gpu_test_helper.h"
#include "ui/ozone/public/ozone_platform.h"
#include "ui/ozone/public/surface_factory_ozone.h"
@@ -143,6 +143,10 @@ bool g_test_import = false;
// working directory.
base::FilePath g_test_file_path;
+// The location to output bad thumbnail image. If empty or invalid, fallback to
+// the original location.
+base::FilePath g_thumbnail_output_dir;
+
// Environment to store rendering thread.
class VideoDecodeAcceleratorTestEnvironment;
VideoDecodeAcceleratorTestEnvironment* g_env;
@@ -150,6 +154,8 @@ VideoDecodeAcceleratorTestEnvironment* g_env;
// Magic constants for differentiating the reasons for NotifyResetDone being
// called.
enum ResetPoint {
+ // Reset() right after calling Flush() (before getting NotifyFlushDone()).
+ RESET_BEFORE_NOTIFY_FLUSH_DONE = -5,
// Reset() just after calling Decode() with a fragment containing config info.
RESET_AFTER_FIRST_CONFIG_INFO = -4,
START_OF_STREAM_RESET = -3,
@@ -197,7 +203,14 @@ const int kMD5StringLength = 32;
base::FilePath GetTestDataFile(const base::FilePath& input_file) {
if (input_file.IsAbsolute())
return input_file;
- return base::MakeAbsoluteFilePath(g_test_file_path.Append(input_file));
+ // input_file needs to be existed, otherwise base::MakeAbsoluteFilePath will
+ // return an empty base::FilePath.
+ base::FilePath abs_path =
+ base::MakeAbsoluteFilePath(g_test_file_path.Append(input_file));
+ LOG_IF(ERROR, abs_path.empty())
+ << g_test_file_path.Append(input_file).value().c_str()
+ << " is not an existing path.";
+ return abs_path;
}
// Read in golden MD5s for the thumbnailed rendering of this video
@@ -325,7 +338,7 @@ class TextureRef : public base::RefCounted<TextureRef> {
uint32_t texture_id_;
base::Closure no_longer_needed_cb_;
#if defined(USE_OZONE)
- scoped_refptr<ui::NativePixmap> pixmap_;
+ scoped_refptr<gfx::NativePixmap> pixmap_;
#endif
};
@@ -387,7 +400,7 @@ gfx::GpuMemoryBufferHandle TextureRef::ExportGpuMemoryBufferHandle() const {
CHECK(pixmap_);
int duped_fd = HANDLE_EINTR(dup(pixmap_->GetDmaBufFd(0)));
LOG_ASSERT(duped_fd != -1) << "Failed duplicating dmabuf fd";
- handle.type = gfx::OZONE_NATIVE_PIXMAP;
+ handle.type = gfx::NATIVE_PIXMAP;
handle.native_pixmap_handle.fds.emplace_back(
base::FileDescriptor(duped_fd, true));
handle.native_pixmap_handle.planes.emplace_back(
@@ -476,6 +489,8 @@ class GLRenderingVDAClient
// Delete the associated decoder helper.
void DeleteDecoder();
+ // Reset the associated decoder after flushing.
+ void ResetDecoderAfterFlush();
// Compute & return the first encoded bytes (including a start frame) to send
// to the decoder, starting at |start_pos| and returning one fragment. Skips
@@ -807,6 +822,20 @@ void GLRenderingVDAClient::ReturnPicture(int32_t picture_buffer_id) {
}
}
+void GLRenderingVDAClient::ResetDecoderAfterFlush() {
+ --remaining_play_throughs_;
+ DCHECK_GE(remaining_play_throughs_, 0);
+ // SetState(CS_RESETTING) should be called before decoder_->Reset(), because
+ // VDA can call NotifyFlushDone() from Reset().
+ // TODO(johnylin): call SetState() before all decoder Flush() and Reset().
+ SetState(CS_RESETTING);
+ // It is necessary to check decoder deleted here because it is possible to
+ // delete decoder in SetState() in some cases.
+ if (decoder_deleted())
+ return;
+ decoder_->Reset();
+}
+
void GLRenderingVDAClient::NotifyEndOfBitstreamBuffer(
int32_t bitstream_buffer_id) {
if (decoder_deleted())
@@ -824,6 +853,10 @@ void GLRenderingVDAClient::NotifyEndOfBitstreamBuffer(
if (state_ != CS_FLUSHING) {
decoder_->Flush();
SetState(CS_FLUSHING);
+ if (reset_after_frame_num_ == RESET_BEFORE_NOTIFY_FLUSH_DONE) {
+ SetState(CS_FLUSHED);
+ ResetDecoderAfterFlush();
+ }
}
} else if (decode_calls_per_second_ == 0) {
DecodeNextFragment();
@@ -834,13 +867,16 @@ void GLRenderingVDAClient::NotifyFlushDone() {
if (decoder_deleted())
return;
- SetState(CS_FLUSHED);
- --remaining_play_throughs_;
- DCHECK_GE(remaining_play_throughs_, 0);
- if (decoder_deleted())
+ if (reset_after_frame_num_ == RESET_BEFORE_NOTIFY_FLUSH_DONE) {
+ // In ResetBeforeNotifyFlushDone case client is not necessary to wait for
+ // NotifyFlushDone(). But if client gets here, it should be always before
+ // NotifyResetDone().
+ ASSERT_EQ(state_, CS_RESETTING);
return;
- decoder_->Reset();
- SetState(CS_RESETTING);
+ }
+
+ SetState(CS_FLUSHED);
+ ResetDecoderAfterFlush();
}
void GLRenderingVDAClient::NotifyResetDone() {
@@ -1118,7 +1154,10 @@ class VideoDecodeAcceleratorTest : public ::testing::Test {
void InitializeRenderingHelper(const RenderingHelperParams& helper_params);
void CreateAndStartDecoder(GLRenderingVDAClient* client,
ClientStateNotification<ClientState>* note);
- void WaitUntilDecodeFinish(ClientStateNotification<ClientState>* note);
+
+ // Wait until decode finishes and return the last state.
+ ClientState WaitUntilDecodeFinish(ClientStateNotification<ClientState>* note);
+
void WaitUntilIdle();
void OutputLogFile(const base::FilePath::CharType* log_path,
const std::string& content);
@@ -1132,6 +1171,12 @@ class VideoDecodeAcceleratorTest : public ::testing::Test {
static void Delete(T item) {
// |item| is cleared when the scope of this function is left.
}
+ using NotesVector =
+ std::vector<std::unique_ptr<ClientStateNotification<ClientState>>>;
+ using ClientsVector = std::vector<std::unique_ptr<GLRenderingVDAClient>>;
+
+ NotesVector notes_;
+ ClientsVector clients_;
private:
// Required for Thread to work. Not used otherwise.
@@ -1147,6 +1192,15 @@ void VideoDecodeAcceleratorTest::SetUp() {
}
void VideoDecodeAcceleratorTest::TearDown() {
+ // |clients_| must be deleted first because |clients_| use |notes_|.
+ g_env->GetRenderingTaskRunner()->PostTask(
+ FROM_HERE, base::Bind(&Delete<ClientsVector>, base::Passed(&clients_)));
+
+ g_env->GetRenderingTaskRunner()->PostTask(
+ FROM_HERE, base::Bind(&Delete<NotesVector>, base::Passed(&notes_)));
+
+ WaitUntilIdle();
+
g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&Delete<TestFilesVector>, base::Passed(&test_video_files_)));
@@ -1250,12 +1304,15 @@ void VideoDecodeAcceleratorTest::CreateAndStartDecoder(
ASSERT_EQ(note->Wait(), CS_DECODER_SET);
}
-void VideoDecodeAcceleratorTest::WaitUntilDecodeFinish(
+ClientState VideoDecodeAcceleratorTest::WaitUntilDecodeFinish(
ClientStateNotification<ClientState>* note) {
+ ClientState state = CS_DESTROYED;
for (int i = 0; i < CS_MAX; i++) {
- if (note->Wait() == CS_DESTROYED)
+ state = note->Wait();
+ if (state == CS_DESTROYED || state == CS_ERROR)
break;
}
+ return state;
}
void VideoDecodeAcceleratorTest::WaitUntilIdle() {
@@ -1289,31 +1346,8 @@ class VideoDecodeAcceleratorParamTest
: public VideoDecodeAcceleratorTest,
public ::testing::WithParamInterface<
std::tuple<int, int, int, ResetPoint, ClientState, bool, bool>> {
- protected:
- using NotesVector =
- std::vector<std::unique_ptr<ClientStateNotification<ClientState>>>;
- using ClientsVector = std::vector<std::unique_ptr<GLRenderingVDAClient>>;
-
- void TearDown() override;
-
- NotesVector notes_;
- ClientsVector clients_;
};
-void VideoDecodeAcceleratorParamTest::TearDown() {
- // |clients_| must be deleted first because |clients_| use |notes_|.
- g_env->GetRenderingTaskRunner()->PostTask(
- FROM_HERE, base::Bind(&Delete<ClientsVector>, base::Passed(&clients_)));
-
- g_env->GetRenderingTaskRunner()->PostTask(
- FROM_HERE, base::Bind(&Delete<NotesVector>, base::Passed(&notes_)));
-
- WaitUntilIdle();
-
- // Do VideoDecodeAcceleratorTest clean-up after deleting clients and notes.
- VideoDecodeAcceleratorTest::TearDown();
-}
-
// Wait for |note| to report a state and if it's not |expected_state| then
// assert |client| has deleted its decoder.
static void AssertWaitForStateOrDeleted(
@@ -1463,7 +1497,10 @@ TEST_P(VideoDecodeAcceleratorParamTest, TestSimpleDecode) {
// could still be returned until resetting done.
if (video_file->reset_after_frame_num > 0)
EXPECT_GE(client->num_decoded_frames(), video_file->num_frames);
- else
+ // In ResetBeforeNotifyFlushDone case the decoded frames may be less than
+ // the video frames because decoder is reset before flush done.
+ else if (video_file->reset_after_frame_num !=
+ RESET_BEFORE_NOTIFY_FLUSH_DONE)
EXPECT_EQ(client->num_decoded_frames(), video_file->num_frames);
}
if (reset_point == END_OF_STREAM_RESET) {
@@ -1512,11 +1549,23 @@ TEST_P(VideoDecodeAcceleratorParamTest, TestSimpleDecode) {
LOG(ERROR) << "Unknown thumbnails MD5: " << md5_string;
base::FilePath filepath(test_video_files_[0]->file_name);
+ if (!g_thumbnail_output_dir.empty() &&
+ base::DirectoryExists(g_thumbnail_output_dir)) {
+ // Write bad thumbnails image to where --thumbnail_output_dir assigned.
+ filepath = g_thumbnail_output_dir.Append(filepath.BaseName());
+ } else {
+ // Fallback to write to test data directory.
+ // Note: test data directory is not writable by vda_unittest while
+ // running by autotest. It should assign its resultsdir as output
+ // directory.
+ filepath = GetTestDataFile(filepath);
+ }
filepath = filepath.AddExtension(FILE_PATH_LITERAL(".bad_thumbnails"));
filepath = filepath.AddExtension(FILE_PATH_LITERAL(".png"));
- int num_bytes =
- base::WriteFile(GetTestDataFile(filepath),
- reinterpret_cast<char*>(&png[0]), png.size());
+ LOG(INFO) << "Write bad thumbnails image to: "
+ << filepath.value().c_str();
+ int num_bytes = base::WriteFile(
+ filepath, reinterpret_cast<char*>(&png[0]), png.size());
EXPECT_EQ(num_bytes, static_cast<int>(png.size()));
}
EXPECT_NE(match, golden_md5s.end());
@@ -1566,6 +1615,18 @@ INSTANTIATE_TEST_CASE_P(
false,
false)));
+// Test Reset() immediately after Flush() and before NotifyFlushDone().
+INSTANTIATE_TEST_CASE_P(
+ ResetBeforeNotifyFlushDone,
+ VideoDecodeAcceleratorParamTest,
+ ::testing::Values(std::make_tuple(1,
+ 1,
+ 1,
+ RESET_BEFORE_NOTIFY_FLUSH_DONE,
+ CS_RESET,
+ false,
+ false)));
+
// Test that Reset() mid-stream works fine and doesn't affect decoding even when
// Decode() calls are made during the reset.
INSTANTIATE_TEST_CASE_P(
@@ -1696,39 +1757,22 @@ WRAPPED_INSTANTIATE_TEST_CASE_P(
// Measure the median of the decode time when VDA::Decode is called 30 times per
// second.
TEST_F(VideoDecodeAcceleratorTest, TestDecodeTimeMedian) {
+ notes_.push_back(base::MakeUnique<ClientStateNotification<ClientState>>());
+ clients_.push_back(base::MakeUnique<GLRenderingVDAClient>(
+ 0, &rendering_helper_, notes_[0].get(), test_video_files_[0]->data_str, 1,
+ 1, test_video_files_[0]->reset_after_frame_num, CS_RESET,
+ test_video_files_[0]->width, test_video_files_[0]->height,
+ test_video_files_[0]->profile, g_fake_decoder, true,
+ std::numeric_limits<int>::max(), kWebRtcDecodeCallsPerSecond, false));
RenderingHelperParams helper_params;
-
- // Disable rendering by setting the rendering_fps = 0.
- helper_params.rendering_fps = 0;
- helper_params.warm_up_iterations = 0;
- helper_params.render_as_thumbnails = false;
-
- ClientStateNotification<ClientState>* note =
- new ClientStateNotification<ClientState>();
- GLRenderingVDAClient* client =
- new GLRenderingVDAClient(0,
- &rendering_helper_,
- note,
- test_video_files_[0]->data_str,
- 1,
- 1,
- test_video_files_[0]->reset_after_frame_num,
- CS_RESET,
- test_video_files_[0]->width,
- test_video_files_[0]->height,
- test_video_files_[0]->profile,
- g_fake_decoder,
- true,
- std::numeric_limits<int>::max(),
- kWebRtcDecodeCallsPerSecond,
- false /* render_as_thumbnail */);
helper_params.window_sizes.push_back(
gfx::Size(test_video_files_[0]->width, test_video_files_[0]->height));
InitializeRenderingHelper(helper_params);
- CreateAndStartDecoder(client, note);
- WaitUntilDecodeFinish(note);
+ CreateAndStartDecoder(clients_[0].get(), notes_[0].get());
+ ClientState last_state = WaitUntilDecodeFinish(notes_[0].get());
+ EXPECT_NE(CS_ERROR, last_state);
- base::TimeDelta decode_time_median = client->decode_time_median();
+ base::TimeDelta decode_time_median = clients_[0]->decode_time_median();
std::string output_string =
base::StringPrintf("Decode time median: %" PRId64 " us",
decode_time_median.InMicroseconds());
@@ -1736,10 +1780,25 @@ TEST_F(VideoDecodeAcceleratorTest, TestDecodeTimeMedian) {
if (g_output_log != NULL)
OutputLogFile(g_output_log, output_string);
+};
- g_env->GetRenderingTaskRunner()->DeleteSoon(FROM_HERE, client);
- g_env->GetRenderingTaskRunner()->DeleteSoon(FROM_HERE, note);
- WaitUntilIdle();
+// This test passes as long as there is no crash. If VDA notifies an error, it
+// is not considered as a failure because the input may be unsupported or
+// corrupted videos.
+TEST_F(VideoDecodeAcceleratorTest, NoCrash) {
+ notes_.push_back(base::MakeUnique<ClientStateNotification<ClientState>>());
+ clients_.push_back(base::MakeUnique<GLRenderingVDAClient>(
+ 0, &rendering_helper_, notes_[0].get(), test_video_files_[0]->data_str, 1,
+ 1, test_video_files_[0]->reset_after_frame_num, CS_RESET,
+ test_video_files_[0]->width, test_video_files_[0]->height,
+ test_video_files_[0]->profile, g_fake_decoder, true,
+ std::numeric_limits<int>::max(), 0, false));
+ RenderingHelperParams helper_params;
+ helper_params.window_sizes.push_back(
+ gfx::Size(test_video_files_[0]->width, test_video_files_[0]->height));
+ InitializeRenderingHelper(helper_params);
+ CreateAndStartDecoder(clients_[0].get(), notes_[0].get());
+ WaitUntilDecodeFinish(notes_[0].get());
};
// TODO(fischman, vrk): add more tests! In particular:
@@ -1848,6 +1907,9 @@ int main(int argc, char** argv) {
media::g_test_file_path = media::GetTestDataFilePath("");
continue;
}
+ if (it->first == "thumbnail_output_dir") {
+ media::g_thumbnail_output_dir = base::FilePath(it->second.c_str());
+ }
}
base::ShadowingAtExitManager at_exit_manager;
diff --git a/chromium/media/gpu/video_encode_accelerator_unittest.cc b/chromium/media/gpu/video_encode_accelerator_unittest.cc
index 11f019b9b89..b330202b13e 100644
--- a/chromium/media/gpu/video_encode_accelerator_unittest.cc
+++ b/chromium/media/gpu/video_encode_accelerator_unittest.cc
@@ -19,6 +19,7 @@
#include "base/files/file_util.h"
#include "base/macros.h"
#include "base/memory/aligned_memory.h"
+#include "base/memory/ref_counted.h"
#include "base/memory/scoped_vector.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
@@ -38,6 +39,7 @@
#include "media/base/bitstream_buffer.h"
#include "media/base/cdm_context.h"
#include "media/base/decoder_buffer.h"
+#include "media/base/media_log.h"
#include "media/base/media_util.h"
#include "media/base/test_data_util.h"
#include "media/base/video_decoder.h"
@@ -706,7 +708,8 @@ std::unique_ptr<StreamValidator> StreamValidator::Create(
return validator;
}
-class VideoFrameQualityValidator {
+class VideoFrameQualityValidator
+ : public base::SupportsWeakPtr<VideoFrameQualityValidator> {
public:
VideoFrameQualityValidator(const VideoCodecProfile profile,
const base::Closure& flush_complete_cb,
@@ -738,6 +741,7 @@ class VideoFrameQualityValidator {
State decoder_state_;
std::queue<scoped_refptr<VideoFrame>> original_frames_;
std::queue<scoped_refptr<DecoderBuffer>> decode_buffers_;
+ base::ThreadChecker thread_checker_;
};
VideoFrameQualityValidator::VideoFrameQualityValidator(
@@ -745,11 +749,11 @@ VideoFrameQualityValidator::VideoFrameQualityValidator(
const base::Closure& flush_complete_cb,
const base::Closure& decode_error_cb)
: profile_(profile),
- decoder_(new FFmpegVideoDecoder()),
- decode_cb_(base::Bind(&VideoFrameQualityValidator::DecodeDone,
- base::Unretained(this))),
- eos_decode_cb_(base::Bind(&VideoFrameQualityValidator::FlushDone,
- base::Unretained(this))),
+ decoder_(new FFmpegVideoDecoder(make_scoped_refptr(new MediaLog()))),
+ decode_cb_(
+ base::Bind(&VideoFrameQualityValidator::DecodeDone, AsWeakPtr())),
+ eos_decode_cb_(
+ base::Bind(&VideoFrameQualityValidator::FlushDone, AsWeakPtr())),
flush_complete_cb_(flush_complete_cb),
decode_error_cb_(decode_error_cb),
decoder_state_(UNINITIALIZED) {
@@ -759,6 +763,7 @@ VideoFrameQualityValidator::VideoFrameQualityValidator(
void VideoFrameQualityValidator::Initialize(const gfx::Size& coded_size,
const gfx::Rect& visible_size) {
+ DCHECK(thread_checker_.CalledOnValidThread());
FFmpegGlue::InitializeFFmpeg();
gfx::Size natural_size(visible_size.size());
@@ -784,6 +789,8 @@ void VideoFrameQualityValidator::Initialize(const gfx::Size& coded_size,
}
void VideoFrameQualityValidator::InitializeCB(bool success) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
if (success) {
decoder_state_ = INITIALIZED;
Decode();
@@ -798,10 +805,14 @@ void VideoFrameQualityValidator::InitializeCB(bool success) {
void VideoFrameQualityValidator::AddOriginalFrame(
scoped_refptr<VideoFrame> frame) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
original_frames_.push(frame);
}
void VideoFrameQualityValidator::DecodeDone(DecodeStatus status) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
if (status == DecodeStatus::OK) {
decoder_state_ = INITIALIZED;
Decode();
@@ -813,10 +824,14 @@ void VideoFrameQualityValidator::DecodeDone(DecodeStatus status) {
}
void VideoFrameQualityValidator::FlushDone(DecodeStatus status) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
flush_complete_cb_.Run();
}
void VideoFrameQualityValidator::Flush() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
if (decoder_state_ != DECODER_ERROR) {
decode_buffers_.push(DecoderBuffer::CreateEOSBuffer());
Decode();
@@ -825,6 +840,8 @@ void VideoFrameQualityValidator::Flush() {
void VideoFrameQualityValidator::AddDecodeBuffer(
const scoped_refptr<DecoderBuffer>& buffer) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
if (decoder_state_ != DECODER_ERROR) {
decode_buffers_.push(buffer);
Decode();
@@ -832,6 +849,8 @@ void VideoFrameQualityValidator::AddDecodeBuffer(
}
void VideoFrameQualityValidator::Decode() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
if (decoder_state_ == INITIALIZED && !decode_buffers_.empty()) {
scoped_refptr<DecoderBuffer> next_buffer = decode_buffers_.front();
decode_buffers_.pop();
@@ -845,6 +864,8 @@ void VideoFrameQualityValidator::Decode() {
void VideoFrameQualityValidator::VerifyOutputFrame(
const scoped_refptr<VideoFrame>& output_frame) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
scoped_refptr<VideoFrame> original_frame = original_frames_.front();
original_frames_.pop();
gfx::Size visible_size = original_frame->visible_rect().size();
@@ -920,8 +941,8 @@ class VEAClient : public VEAClientBase {
void CreateEncoder();
void DestroyEncoder();
- void TryToSetupEncodeOnSeperateThread();
- void DestroyEncodeOnSeperateThread();
+ void TryToSetupEncodeOnSeparateThread();
+ void DestroyEncodeOnSeparateThread();
// VideoDecodeAccelerator::Client implementation.
void RequireBitstreamBuffers(unsigned int input_count,
@@ -933,10 +954,10 @@ class VEAClient : public VEAClientBase {
base::TimeDelta timestamp) override;
private:
- void BitstreamBufferReadyOnMainThread(int32_t bitstream_buffer_id,
- size_t payload_size,
- bool key_frame,
- base::TimeDelta timestamp);
+ void BitstreamBufferReadyOnVeaClientThread(int32_t bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame,
+ base::TimeDelta timestamp);
// Return the number of encoded frames per second.
double frames_per_second();
@@ -1116,18 +1137,20 @@ class VEAClient : public VEAClientBase {
base::Thread io_thread_;
// Task runner on which |encoder_| is created.
- scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner_;
+ // Most of VEA and VEA::Client functions are running on this thread. See
+ // comment of VEA::TryToSetupEncodeOnSeparateThread for exceptions.
+ scoped_refptr<base::SingleThreadTaskRunner> vea_client_task_runner_;
// Task runner used for posting encode tasks. If
- // TryToSetupEncodeOnSeperateThread() is true, |io_thread|'s task runner is
- // used, otherwise |main_thread_task_runner_|.
+ // TryToSetupEncodeOnSeparateThread() is true, |io_thread|'s task runner is
+ // used, otherwise |vea_client_task_runner_|.
scoped_refptr<base::SingleThreadTaskRunner> encode_task_runner_;
// Weak factory used for posting tasks on |encode_task_runner_|.
std::unique_ptr<base::WeakPtrFactory<VideoEncodeAccelerator>>
encoder_weak_factory_;
- // Weak factory used for TryToSetupEncodeOnSeperateThread().
+ // Weak factory used for TryToSetupEncodeOnSeparateThread().
base::WeakPtrFactory<VEAClient> client_weak_factory_for_io_;
};
@@ -1202,8 +1225,8 @@ void VEAClient::CreateEncoder() {
DCHECK(thread_checker_.CalledOnValidThread());
LOG_ASSERT(!has_encoder());
- main_thread_task_runner_ = base::ThreadTaskRunnerHandle::Get();
- encode_task_runner_ = main_thread_task_runner_;
+ vea_client_task_runner_ = base::ThreadTaskRunnerHandle::Get();
+ encode_task_runner_ = vea_client_task_runner_;
std::unique_ptr<VideoEncodeAccelerator> encoders[] = {
CreateFakeVEA(), CreateV4L2VEA(), CreateVaapiVEA(), CreateVTVEA(),
@@ -1221,7 +1244,7 @@ void VEAClient::CreateEncoder() {
requested_bitrate_, this)) {
encoder_weak_factory_.reset(
new base::WeakPtrFactory<VideoEncodeAccelerator>(encoder_.get()));
- TryToSetupEncodeOnSeperateThread();
+ TryToSetupEncodeOnSeparateThread();
SetStreamParameters(requested_bitrate_, requested_framerate_);
SetState(CS_INITIALIZED);
@@ -1239,10 +1262,14 @@ void VEAClient::CreateEncoder() {
}
void VEAClient::DecodeCompleted() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
SetState(CS_VALIDATED);
}
-void VEAClient::TryToSetupEncodeOnSeperateThread() {
+void VEAClient::TryToSetupEncodeOnSeparateThread() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
// Start dummy thread if not started already.
if (!io_thread_.IsRunning())
ASSERT_TRUE(io_thread_.Start());
@@ -1257,6 +1284,7 @@ void VEAClient::TryToSetupEncodeOnSeperateThread() {
}
void VEAClient::DecodeFailed() {
+ DCHECK(thread_checker_.CalledOnValidThread());
SetState(CS_ERROR);
}
@@ -1267,11 +1295,11 @@ void VEAClient::DestroyEncoder() {
if (io_thread_.IsRunning()) {
encode_task_runner_->PostTask(
- FROM_HERE, base::Bind(&VEAClient::DestroyEncodeOnSeperateThread,
+ FROM_HERE, base::Bind(&VEAClient::DestroyEncodeOnSeparateThread,
client_weak_factory_for_io_.GetWeakPtr()));
io_thread_.Stop();
} else {
- DestroyEncodeOnSeperateThread();
+ DestroyEncodeOnSeparateThread();
}
// Clear the objects that should be destroyed on the same thread as creation.
@@ -1280,14 +1308,14 @@ void VEAClient::DestroyEncoder() {
quality_validator_.reset();
}
-void VEAClient::DestroyEncodeOnSeperateThread() {
+void VEAClient::DestroyEncodeOnSeparateThread() {
encoder_weak_factory_->InvalidateWeakPtrs();
// |client_weak_factory_for_io_| is used only when
- // TryToSetupEncodeOnSeperateThread() returns true, in order to have weak
+ // TryToSetupEncodeOnSeparateThread() returns true, in order to have weak
// pointers to use when posting tasks on |io_thread_|. It is safe to
// invalidate here because |encode_task_runner_| points to |io_thread_| in
// this case. If not, it is safe to invalidate it on
- // |main_thread_task_runner_| as no weak pointers are used.
+ // |vea_client_task_runner_| as no weak pointers are used.
client_weak_factory_for_io_.InvalidateWeakPtrs();
}
@@ -1412,16 +1440,17 @@ void VEAClient::BitstreamBufferReady(int32_t bitstream_buffer_id,
bool key_frame,
base::TimeDelta timestamp) {
ASSERT_TRUE(encode_task_runner_->BelongsToCurrentThread());
- main_thread_task_runner_->PostTask(
- FROM_HERE, base::Bind(&VEAClient::BitstreamBufferReadyOnMainThread,
+ vea_client_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&VEAClient::BitstreamBufferReadyOnVeaClientThread,
base::Unretained(this), bitstream_buffer_id,
payload_size, key_frame, timestamp));
}
-void VEAClient::BitstreamBufferReadyOnMainThread(int32_t bitstream_buffer_id,
- size_t payload_size,
- bool key_frame,
- base::TimeDelta timestamp) {
+void VEAClient::BitstreamBufferReadyOnVeaClientThread(
+ int32_t bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame,
+ base::TimeDelta timestamp) {
DCHECK(thread_checker_.CalledOnValidThread());
ASSERT_LE(payload_size, output_buffer_size_);
@@ -1476,6 +1505,8 @@ void VEAClient::BitstreamBufferReadyOnMainThread(int32_t bitstream_buffer_id,
}
void VEAClient::SetState(ClientState new_state) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
DVLOG(4) << "Changing state " << state_ << "->" << new_state;
note_->Notify(new_state);
state_ = new_state;
@@ -1483,6 +1514,8 @@ void VEAClient::SetState(ClientState new_state) {
void VEAClient::SetStreamParameters(unsigned int bitrate,
unsigned int framerate) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
current_requested_bitrate_ = bitrate;
current_framerate_ = framerate;
LOG_ASSERT(current_requested_bitrate_ > 0UL);
@@ -1496,6 +1529,8 @@ void VEAClient::SetStreamParameters(unsigned int bitrate,
}
void VEAClient::InputNoLongerNeededCallback(int32_t input_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
std::set<int32_t>::iterator it = inputs_at_client_.find(input_id);
ASSERT_NE(it, inputs_at_client_.end());
inputs_at_client_.erase(it);
@@ -1504,6 +1539,8 @@ void VEAClient::InputNoLongerNeededCallback(int32_t input_id) {
}
scoped_refptr<VideoFrame> VEAClient::CreateFrame(off_t position) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
uint8_t* frame_data_y =
reinterpret_cast<uint8_t*>(&test_stream_->aligned_in_file_data[0]) +
position;
@@ -1526,6 +1563,7 @@ scoped_refptr<VideoFrame> VEAClient::CreateFrame(off_t position) {
scoped_refptr<VideoFrame> VEAClient::PrepareInputFrame(off_t position,
int32_t* input_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
CHECK_LE(position + test_stream_->aligned_buffer_size,
test_stream_->aligned_in_file_data.size());
@@ -1542,6 +1580,8 @@ scoped_refptr<VideoFrame> VEAClient::PrepareInputFrame(off_t position,
}
void VEAClient::OnInputTimer() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
if (!has_encoder() || state_ != CS_ENCODING)
input_timer_.reset();
else if (inputs_at_client_.size() <
@@ -1552,6 +1592,7 @@ void VEAClient::OnInputTimer() {
}
void VEAClient::FeedEncoderWithOneInput() {
+ DCHECK(thread_checker_.CalledOnValidThread());
if (!has_encoder() || state_ != CS_ENCODING)
return;
@@ -1597,6 +1638,7 @@ void VEAClient::FeedEncoderWithOneInput() {
}
void VEAClient::FeedEncoderWithOutput(base::SharedMemory* shm) {
+ DCHECK(thread_checker_.CalledOnValidThread());
if (!has_encoder())
return;
@@ -1619,6 +1661,7 @@ void VEAClient::FeedEncoderWithOutput(base::SharedMemory* shm) {
}
bool VEAClient::HandleEncodedFrame(bool keyframe) {
+ DCHECK(thread_checker_.CalledOnValidThread());
// This would be a bug in the test, which should not ignore false
// return value from this method.
LOG_ASSERT(num_encoded_frames_ <= num_frames_to_encode_);
@@ -1911,6 +1954,7 @@ void VEANoInputClient::RequireBitstreamBuffers(
unsigned int input_count,
const gfx::Size& input_coded_size,
size_t output_size) {
+ DCHECK(thread_checker_.CalledOnValidThread());
SimpleVEAClientBase::RequireBitstreamBuffers(input_count, input_coded_size,
output_size);
@@ -1963,6 +2007,7 @@ void VEACacheLineUnalignedInputClient::RequireBitstreamBuffers(
unsigned int input_count,
const gfx::Size& input_coded_size,
size_t output_size) {
+ DCHECK(thread_checker_.CalledOnValidThread());
SimpleVEAClientBase::RequireBitstreamBuffers(input_count, input_coded_size,
output_size);
@@ -1982,6 +2027,7 @@ void VEACacheLineUnalignedInputClient::BitstreamBufferReady(
void VEACacheLineUnalignedInputClient::FeedEncoderWithOneInput(
const gfx::Size& input_coded_size) {
+ DCHECK(thread_checker_.CalledOnValidThread());
if (!has_encoder())
return;
@@ -2038,8 +2084,8 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
ScopedVector<ClientStateNotification<ClientState>> notes;
ScopedVector<VEAClient> clients;
- base::Thread encoder_thread("EncoderThread");
- ASSERT_TRUE(encoder_thread.Start());
+ base::Thread vea_client_thread("EncoderClientThread");
+ ASSERT_TRUE(vea_client_thread.Start());
if (g_env->test_streams_.size() > 1)
num_concurrent_encoders = g_env->test_streams_.size();
@@ -2059,7 +2105,7 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
mid_stream_bitrate_switch, mid_stream_framerate_switch, verify_output,
verify_output_timestamp));
- encoder_thread.task_runner()->PostTask(
+ vea_client_thread.task_runner()->PostTask(
FROM_HERE, base::Bind(&VEAClient::CreateEncoder,
base::Unretained(clients.back())));
}
@@ -2085,13 +2131,13 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
}
for (size_t i = 0; i < num_concurrent_encoders; ++i) {
- encoder_thread.task_runner()->PostTask(
+ vea_client_thread.task_runner()->PostTask(
FROM_HERE,
base::Bind(&VEAClient::DestroyEncoder, base::Unretained(clients[i])));
}
// This ensures all tasks have finished.
- encoder_thread.Stop();
+ vea_client_thread.Stop();
}
// Test parameters:
@@ -2106,10 +2152,10 @@ void SimpleTestFunc() {
std::unique_ptr<ClientStateNotification<ClientState>> note(
new ClientStateNotification<ClientState>());
std::unique_ptr<TestClient> client(new TestClient(note.get()));
- base::Thread encoder_thread("EncoderThread");
- ASSERT_TRUE(encoder_thread.Start());
+ base::Thread vea_client_thread("EncoderClientThread");
+ ASSERT_TRUE(vea_client_thread.Start());
- encoder_thread.task_runner()->PostTask(
+ vea_client_thread.task_runner()->PostTask(
FROM_HERE,
base::Bind(&TestClient::CreateEncoder, base::Unretained(client.get())));
@@ -2124,12 +2170,12 @@ void SimpleTestFunc() {
}
}
- encoder_thread.task_runner()->PostTask(
+ vea_client_thread.task_runner()->PostTask(
FROM_HERE,
base::Bind(&TestClient::DestroyEncoder, base::Unretained(client.get())));
// This ensures all tasks have finished.
- encoder_thread.Stop();
+ vea_client_thread.Stop();
}
TEST_P(VideoEncodeAcceleratorSimpleTest, TestSimpleEncode) {
@@ -2143,12 +2189,12 @@ TEST_P(VideoEncodeAcceleratorSimpleTest, TestSimpleEncode) {
}
#if defined(OS_CHROMEOS)
+// TODO(kcwu): add back test of verify_output=true after crbug.com/694131 fixed.
INSTANTIATE_TEST_CASE_P(
SimpleEncode,
VideoEncodeAcceleratorTest,
::testing::Values(
- std::make_tuple(1, true, 0, false, false, false, false, false, false),
- std::make_tuple(1, true, 0, false, false, false, false, true, false)));
+ std::make_tuple(1, true, 0, false, false, false, false, false, false)));
INSTANTIATE_TEST_CASE_P(
EncoderPerf,
@@ -2180,8 +2226,9 @@ INSTANTIATE_TEST_CASE_P(
::testing::Values(
std::make_tuple(1, false, 0, true, false, true, false, false, false)));
+// TODO(kcwu): add back bitrate test after crbug.com/693336 fixed.
INSTANTIATE_TEST_CASE_P(
- MidStreamParamSwitchFPS,
+ DISABLED_MidStreamParamSwitchFPS,
VideoEncodeAcceleratorTest,
::testing::Values(
std::make_tuple(1, false, 0, true, false, false, true, false, false)));
@@ -2191,7 +2238,6 @@ INSTANTIATE_TEST_CASE_P(
VideoEncodeAcceleratorTest,
::testing::Values(
std::make_tuple(3, false, 0, false, false, false, false, false, false),
- std::make_tuple(3, false, 0, true, false, false, true, false, false),
std::make_tuple(3, false, 0, true, false, true, false, false, false)));
INSTANTIATE_TEST_CASE_P(
diff --git a/chromium/media/gpu/vt_video_decode_accelerator_mac.cc b/chromium/media/gpu/vt_video_decode_accelerator_mac.cc
index 59cb2ce2873..c16bb568f38 100644
--- a/chromium/media/gpu/vt_video_decode_accelerator_mac.cc
+++ b/chromium/media/gpu/vt_video_decode_accelerator_mac.cc
@@ -12,15 +12,18 @@
#include <algorithm>
#include <memory>
+#include "base/atomic_sequence_num.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
+#include "base/strings/stringprintf.h"
#include "base/sys_byteorder.h"
#include "base/sys_info.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_manager.h"
#include "base/version.h"
#include "media/base/limits.h"
#include "media/gpu/shared_memory_region.h"
@@ -40,6 +43,12 @@ namespace media {
namespace {
+// A sequence of ids for memory tracing.
+base::StaticAtomicSequenceNumber g_memory_dump_ids;
+
+// A sequence of shared memory ids for CVPixelBufferRefs.
+base::StaticAtomicSequenceNumber g_cv_pixel_buffer_ids;
+
// Only H.264 with 4:2:0 chroma sampling is supported.
const VideoCodecProfile kSupportedProfiles[] = {
H264PROFILE_BASELINE, H264PROFILE_EXTENDED, H264PROFILE_MAIN,
@@ -209,6 +218,13 @@ bool InitializeVideoToolboxInternal() {
// TODO(sandersd): Share this computation with the VAAPI decoder.
int32_t ComputeReorderWindow(const H264SPS* sps) {
+ // When |pic_order_cnt_type| == 2, decode order always matches presentation
+ // order.
+ // TODO(sandersd): For |pic_order_cnt_type| == 1, analyze the delta cycle to
+ // find the minimum required reorder window.
+ if (sps->pic_order_cnt_type == 2)
+ return 0;
+
// TODO(sandersd): Compute MaxDpbFrames.
int32_t max_dpb_frames = 16;
@@ -289,11 +305,34 @@ VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
callback_.decompressionOutputCallback = OutputThunk;
callback_.decompressionOutputRefCon = this;
weak_this_ = weak_this_factory_.GetWeakPtr();
+
+ memory_dump_id_ = g_memory_dump_ids.GetNext();
+ base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "VTVideoDecodeAccelerator", gpu_task_runner_);
}
VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() {
DVLOG(1) << __func__;
DCHECK(gpu_task_runner_->BelongsToCurrentThread());
+
+ base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+ this);
+}
+
+bool VTVideoDecodeAccelerator::OnMemoryDump(
+ const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) {
+ for (const auto& it : picture_info_map_) {
+ int32_t picture_id = it.first;
+ PictureInfo* picture_info = it.second.get();
+ if (picture_info->gl_image) {
+ std::string dump_name =
+ base::StringPrintf("media/vt_video_decode_accelerator_%d/picture_%d",
+ memory_dump_id_, picture_id);
+ picture_info->gl_image->OnMemoryDump(pmd, 0, dump_name);
+ }
+ }
+ return true;
}
bool VTVideoDecodeAccelerator::Initialize(const Config& config,
@@ -592,6 +631,7 @@ void VTVideoDecodeAccelerator::DecodeTask(const BitstreamBuffer& bitstream,
frame->has_slice = true;
frame->is_idr = nalu.nal_unit_type == media::H264NALU::kIDRSlice;
+ frame->has_mmco5 = poc_.IsPendingMMCO5();
frame->pic_order_cnt = pic_order_cnt;
frame->reorder_window = ComputeReorderWindow(sps);
}
@@ -604,9 +644,8 @@ void VTVideoDecodeAccelerator::DecodeTask(const BitstreamBuffer& bitstream,
}
}
- if (frame->is_idr) {
+ if (frame->is_idr)
waiting_for_idr_ = false;
- }
// If no IDR has been seen yet, skip decoding. Note that Flash sends
// configuration changes as a bitstream with only SPS/PPS; we don't print
@@ -941,9 +980,14 @@ bool VTVideoDecodeAccelerator::ProcessTaskQueue() {
const Task& task = task_queue_.front();
switch (task.type) {
- case TASK_FRAME:
- if (reorder_queue_.size() < kMaxReorderQueueSize &&
- (!task.frame->is_idr || reorder_queue_.empty())) {
+ case TASK_FRAME: {
+ bool reorder_queue_has_space =
+ reorder_queue_.size() < kMaxReorderQueueSize;
+ bool reorder_queue_flush_needed =
+ task.frame->is_idr || task.frame->has_mmco5;
+ bool reorder_queue_flush_done = reorder_queue_.empty();
+ if (reorder_queue_has_space &&
+ (!reorder_queue_flush_needed || reorder_queue_flush_done)) {
DVLOG(2) << "Decode(" << task.frame->bitstream_id << ") complete";
assigned_bitstream_ids_.erase(task.frame->bitstream_id);
client_->NotifyEndOfBitstreamBuffer(task.frame->bitstream_id);
@@ -952,6 +996,7 @@ bool VTVideoDecodeAccelerator::ProcessTaskQueue() {
return true;
}
return false;
+ }
case TASK_FLUSH:
DCHECK_EQ(task.type, pending_flush_tasks_.front());
@@ -995,7 +1040,8 @@ bool VTVideoDecodeAccelerator::ProcessReorderQueue() {
// the next frame.
bool flushing =
!task_queue_.empty() && (task_queue_.front().type != TASK_FRAME ||
- task_queue_.front().frame->is_idr);
+ task_queue_.front().frame->is_idr ||
+ task_queue_.front().frame->has_mmco5);
size_t reorder_window = std::max(0, reorder_queue_.top()->reorder_window);
DVLOG(3) << __func__ << " size=" << reorder_queue_.size()
@@ -1076,7 +1122,8 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
scoped_refptr<gl::GLImageIOSurface> gl_image(
new gl::GLImageIOSurface(frame.image_size, GL_BGRA_EXT));
if (!gl_image->InitializeWithCVPixelBuffer(
- frame.image.get(), gfx::GenericSharedMemoryId(),
+ frame.image.get(),
+ gfx::GenericSharedMemoryId(g_cv_pixel_buffer_ids.GetNext()),
gfx::BufferFormat::YUV_420_BIPLANAR)) {
NOTIFY_STATUS("Failed to initialize GLImageIOSurface", PLATFORM_FAILURE,
SFT_PLATFORM_ERROR);
diff --git a/chromium/media/gpu/vt_video_decode_accelerator_mac.h b/chromium/media/gpu/vt_video_decode_accelerator_mac.h
index aebe512cb7b..9a69ef5525d 100644
--- a/chromium/media/gpu/vt_video_decode_accelerator_mac.h
+++ b/chromium/media/gpu/vt_video_decode_accelerator_mac.h
@@ -20,6 +20,7 @@
#include "base/message_loop/message_loop.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
+#include "base/trace_event/memory_dump_provider.h"
#include "media/filters/h264_parser.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
#include "media/gpu/media_gpu_export.h"
@@ -36,7 +37,8 @@ MEDIA_GPU_EXPORT bool InitializeVideoToolbox();
// VideoToolbox.framework implementation of the VideoDecodeAccelerator
// interface for Mac OS X (currently limited to 10.9+).
-class VTVideoDecodeAccelerator : public VideoDecodeAccelerator {
+class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
+ public base::trace_event::MemoryDumpProvider {
public:
explicit VTVideoDecodeAccelerator(
const MakeGLContextCurrentCallback& make_context_current_cb,
@@ -58,6 +60,10 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator {
const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
override;
+ // MemoryDumpProvider implementation.
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) override;
+
// Called by OutputThunk() when VideoToolbox finishes decoding a frame.
void Output(void* source_frame_refcon,
OSStatus status,
@@ -102,6 +108,7 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator {
// Slice header information.
bool has_slice = false;
bool is_idr = false;
+ bool has_mmco5 = false;
int32_t pic_order_cnt = 0;
int32_t reorder_window = 0;
@@ -267,6 +274,9 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator {
bool missing_idr_logged_ = false;
H264POC poc_;
+ // Id number for this instance for memory dumps.
+ int memory_dump_id_ = 0;
+
//
// Shared state (set up and torn down on GPU thread).
//
diff --git a/chromium/media/gpu/vt_video_encode_accelerator_mac.cc b/chromium/media/gpu/vt_video_encode_accelerator_mac.cc
index e5fee2e14de..de0d8d7ff61 100644
--- a/chromium/media/gpu/vt_video_encode_accelerator_mac.cc
+++ b/chromium/media/gpu/vt_video_encode_accelerator_mac.cc
@@ -86,7 +86,6 @@ VTVideoEncodeAccelerator::~VTVideoEncodeAccelerator() {
DVLOG(3) << __func__;
DCHECK(thread_checker_.CalledOnValidThread());
- Destroy();
DCHECK(!encoder_thread_.IsRunning());
DCHECK(!encoder_task_weak_factory_.HasWeakPtrs());
}
@@ -230,6 +229,8 @@ void VTVideoEncodeAccelerator::Destroy() {
} else {
DestroyTask();
}
+
+ delete this;
}
void VTVideoEncodeAccelerator::EncodeTask(