author     Allan Sandfeld Jensen <allan.jensen@qt.io>   2018-10-24 11:30:15 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>   2018-10-30 12:56:19 +0000
commit     6036726eb981b6c4b42047513b9d3f4ac865daac (patch)
tree       673593e70678e7789766d1f732eb51f613a2703b /chromium/media/gpu/android
parent     466052c4e7c052268fd931888cd58961da94c586 (diff)
BASELINE: Update Chromium to 70.0.3538.78
Change-Id: Ie634710bf039e26c1957f4ae45e101bd4c434ae7
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/media/gpu/android')
 chromium/media/gpu/android/android_video_decode_accelerator.cc | 11
 chromium/media/gpu/android/android_video_decode_accelerator.h  |  2
 chromium/media/gpu/android/android_video_encode_accelerator.cc | 52
 chromium/media/gpu/android/android_video_encode_accelerator.h  |  6
 chromium/media/gpu/android/avda_codec_allocator.cc             |  5
 chromium/media/gpu/android/avda_codec_allocator.h              |  7
 chromium/media/gpu/android/avda_picture_buffer_manager.cc      |  6
 chromium/media/gpu/android/codec_image_unittest.cc             |  3
 chromium/media/gpu/android/codec_wrapper.cc                    | 71
 chromium/media/gpu/android/codec_wrapper.h                     | 13
 chromium/media/gpu/android/codec_wrapper_unittest.cc           | 61
 chromium/media/gpu/android/device_info.cc                      |  8
 chromium/media/gpu/android/device_info.h                       |  1
 chromium/media/gpu/android/image_reader_gl_owner.cc            | 98
 chromium/media/gpu/android/media_codec_video_decoder.cc        | 50
 chromium/media/gpu/android/media_codec_video_decoder.h         |  6
 chromium/media/gpu/android/mock_device_info.cc                 |  1
 chromium/media/gpu/android/mock_device_info.h                  |  1
 chromium/media/gpu/android/texture_owner.cc                    |  4
 19 files changed, 245 insertions(+), 161 deletions(-)
diff --git a/chromium/media/gpu/android/android_video_decode_accelerator.cc b/chromium/media/gpu/android/android_video_decode_accelerator.cc
index f8d5a17b884..de902b1ffd8 100644
--- a/chromium/media/gpu/android/android_video_decode_accelerator.cc
+++ b/chromium/media/gpu/android/android_video_decode_accelerator.cc
@@ -247,10 +247,8 @@ AndroidVideoDecodeAccelerator::BitstreamRecord::BitstreamRecord(
const BitstreamBuffer& bitstream_buffer)
: buffer(bitstream_buffer) {
if (buffer.id() != -1) {
- memory.reset(new WritableUnalignedMapping(buffer.handle(), buffer.size(),
- buffer.offset()));
- // The handle is no longer needed and can be closed.
- bitstream_buffer.handle().Close();
+ memory.reset(
+ new UnalignedSharedMemory(buffer.handle(), buffer.size(), true));
}
}
@@ -648,15 +646,16 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
return true;
}
- std::unique_ptr<WritableUnalignedMapping> shm;
+ std::unique_ptr<UnalignedSharedMemory> shm;
if (pending_input_buf_index_ == -1) {
// When |pending_input_buf_index_| is not -1, the buffer is already dequeued
// from MediaCodec, filled with data and bitstream_buffer.handle() is
// closed.
shm = std::move(pending_bitstream_records_.front().memory);
+ auto* buffer = &pending_bitstream_records_.front().buffer;
- if (!shm->IsValid()) {
+ if (!shm->MapAt(buffer->offset(), buffer->size())) {
NOTIFY_ERROR(UNREADABLE_INPUT, "UnalignedSharedMemory::Map() failed");
return false;
}
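
In isolation, the new mapping pattern is roughly the following (a minimal sketch assuming the UnalignedSharedMemory interface shown in this diff; the header paths and the MapBitstream helper are illustrative, and error reporting is left to the caller):

    // Construct with (handle, size, read_only), then map the region at the
    // bitstream buffer's offset before reading the payload.
    #include "media/base/bitstream_buffer.h"        // Assumed header path.
    #include "media/base/unaligned_shared_memory.h" // Assumed header path.

    std::unique_ptr<media::UnalignedSharedMemory> MapBitstream(
        const media::BitstreamBuffer& buffer) {
      auto shm = std::make_unique<media::UnalignedSharedMemory>(
          buffer.handle(), buffer.size(), /*read_only=*/true);
      if (!shm->MapAt(buffer.offset(), buffer.size()))
        return nullptr;  // Caller reports UNREADABLE_INPUT.
      return shm;
    }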
diff --git a/chromium/media/gpu/android/android_video_decode_accelerator.h b/chromium/media/gpu/android/android_video_decode_accelerator.h
index b2caf4159e1..082294f8c40 100644
--- a/chromium/media/gpu/android/android_video_decode_accelerator.h
+++ b/chromium/media/gpu/android/android_video_decode_accelerator.h
@@ -305,7 +305,7 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
BitstreamBuffer buffer;
// |memory| may be null if buffer has no data.
- std::unique_ptr<WritableUnalignedMapping> memory;
+ std::unique_ptr<UnalignedSharedMemory> memory;
};
// Encoded bitstream buffers to be passed to media codec, queued until an
diff --git a/chromium/media/gpu/android/android_video_encode_accelerator.cc b/chromium/media/gpu/android/android_video_encode_accelerator.cc
index 770d0bf2b15..b19e8bafbd7 100644
--- a/chromium/media/gpu/android/android_video_encode_accelerator.cc
+++ b/chromium/media/gpu/android/android_video_encode_accelerator.cc
@@ -138,16 +138,9 @@ AndroidVideoEncodeAccelerator::GetSupportedProfiles() {
return profiles;
}
-bool AndroidVideoEncodeAccelerator::Initialize(
- VideoPixelFormat format,
- const gfx::Size& input_visible_size,
- VideoCodecProfile output_profile,
- uint32_t initial_bitrate,
- Client* client) {
- DVLOG(3) << __func__ << " format: " << VideoPixelFormatToString(format)
- << ", input_visible_size: " << input_visible_size.ToString()
- << ", output_profile: " << GetProfileName(output_profile)
- << ", initial_bitrate: " << initial_bitrate;
+bool AndroidVideoEncodeAccelerator::Initialize(const Config& config,
+ Client* client) {
+ DVLOG(3) << __func__ << " " << config.AsHumanReadableString();
DCHECK(!media_codec_);
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(client);
@@ -155,9 +148,9 @@ bool AndroidVideoEncodeAccelerator::Initialize(
client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
if (!(MediaCodecUtil::SupportsSetParameters() &&
- format == PIXEL_FORMAT_I420)) {
- DLOG(ERROR) << "Unexpected combo: " << format << ", "
- << GetProfileName(output_profile);
+ config.input_format == PIXEL_FORMAT_I420)) {
+ DLOG(ERROR) << "Unexpected combo: " << config.input_format << ", "
+ << GetProfileName(config.output_profile);
return false;
}
@@ -168,13 +161,13 @@ bool AndroidVideoEncodeAccelerator::Initialize(
// need to hold onto some subset of inputs as reference pictures.
uint32_t frame_input_count;
uint32_t i_frame_interval;
- if (output_profile == VP8PROFILE_ANY) {
+ if (config.output_profile == VP8PROFILE_ANY) {
codec = kCodecVP8;
mime_type = "video/x-vnd.on2.vp8";
frame_input_count = 1;
i_frame_interval = IFRAME_INTERVAL_VPX;
- } else if (output_profile == H264PROFILE_BASELINE ||
- output_profile == H264PROFILE_MAIN) {
+ } else if (config.output_profile == H264PROFILE_BASELINE ||
+ config.output_profile == H264PROFILE_MAIN) {
codec = kCodecH264;
mime_type = "video/avc";
frame_input_count = 30;
@@ -183,8 +176,8 @@ bool AndroidVideoEncodeAccelerator::Initialize(
return false;
}
- frame_size_ = input_visible_size;
- last_set_bitrate_ = initial_bitrate;
+ frame_size_ = config.input_visible_size;
+ last_set_bitrate_ = config.initial_bitrate;
// Only consider using MediaCodec if it's likely backed by hardware.
if (MediaCodecUtil::IsKnownUnaccelerated(codec,
@@ -199,23 +192,25 @@ bool AndroidVideoEncodeAccelerator::Initialize(
return false;
}
media_codec_ = MediaCodecBridgeImpl::CreateVideoEncoder(
- codec, input_visible_size, initial_bitrate, INITIAL_FRAMERATE,
- i_frame_interval, pixel_format);
+ codec, config.input_visible_size, config.initial_bitrate,
+ INITIAL_FRAMERATE, i_frame_interval, pixel_format);
if (!media_codec_) {
DLOG(ERROR) << "Failed to create/start the codec: "
- << input_visible_size.ToString();
+ << config.input_visible_size.ToString();
return false;
}
// Conservative upper bound for output buffer size: decoded size + 2KB.
const size_t output_buffer_capacity =
- VideoFrame::AllocationSize(format, input_visible_size) + 2048;
+ VideoFrame::AllocationSize(config.input_format,
+ config.input_visible_size) +
+ 2048;
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&VideoEncodeAccelerator::Client::RequireBitstreamBuffers,
client_ptr_factory_->GetWeakPtr(), frame_input_count,
- input_visible_size, output_buffer_capacity));
+ config.input_visible_size, output_buffer_capacity));
return true;
}
@@ -429,12 +424,11 @@ void AndroidVideoEncodeAccelerator::DequeueOutput() {
BitstreamBuffer bitstream_buffer = available_bitstream_buffers_.back();
available_bitstream_buffers_.pop_back();
- auto shm = std::make_unique<WritableUnalignedMapping>(
- bitstream_buffer.handle(), bitstream_buffer.size(),
- bitstream_buffer.offset());
- // The handle is no longer needed and should be closed.
- bitstream_buffer.handle().Close();
- RETURN_ON_FAILURE(shm->IsValid(), "Failed to map SHM", kPlatformFailureError);
+ auto shm = std::make_unique<UnalignedSharedMemory>(
+ bitstream_buffer.handle(), bitstream_buffer.size(), false);
+ RETURN_ON_FAILURE(
+ shm->MapAt(bitstream_buffer.offset(), bitstream_buffer.size()),
+ "Failed to map SHM", kPlatformFailureError);
RETURN_ON_FAILURE(
size <= bitstream_buffer.size(),
"Encoded buffer too large: " << size << ">" << bitstream_buffer.size(),
diff --git a/chromium/media/gpu/android/android_video_encode_accelerator.h b/chromium/media/gpu/android/android_video_encode_accelerator.h
index 91ad14b6bf5..f7bd9015bc6 100644
--- a/chromium/media/gpu/android/android_video_encode_accelerator.h
+++ b/chromium/media/gpu/android/android_video_encode_accelerator.h
@@ -40,11 +40,7 @@ class MEDIA_GPU_EXPORT AndroidVideoEncodeAccelerator
// VideoEncodeAccelerator implementation.
VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
- bool Initialize(VideoPixelFormat format,
- const gfx::Size& input_visible_size,
- VideoCodecProfile output_profile,
- uint32_t initial_bitrate,
- Client* client) override;
+ bool Initialize(const Config& config, Client* client) override;
void Encode(const scoped_refptr<VideoFrame>& frame,
bool force_keyframe) override;
void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
diff --git a/chromium/media/gpu/android/avda_codec_allocator.cc b/chromium/media/gpu/android/avda_codec_allocator.cc
index 8f7a74653a7..63fff644eec 100644
--- a/chromium/media/gpu/android/avda_codec_allocator.cc
+++ b/chromium/media/gpu/android/avda_codec_allocator.cc
@@ -11,8 +11,8 @@
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/sys_info.h"
+#include "base/task/task_traits.h"
#include "base/task_runner_util.h"
-#include "base/task_scheduler/task_traits.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -63,7 +63,8 @@ std::unique_ptr<MediaCodecBridge> CreateMediaCodecInternal(
codec_config->initial_expected_coded_size,
codec_config->surface_bundle->GetJavaSurface(), media_crypto,
codec_config->csd0, codec_config->csd1,
- codec_config->container_color_space, codec_config->hdr_metadata, true));
+ codec_config->container_color_space, codec_config->hdr_metadata, true,
+ codec_config->on_buffers_available_cb));
return codec;
}
diff --git a/chromium/media/gpu/android/avda_codec_allocator.h b/chromium/media/gpu/android/avda_codec_allocator.h
index bd65661bb3b..15c1f5267c4 100644
--- a/chromium/media/gpu/android/avda_codec_allocator.h
+++ b/chromium/media/gpu/android/avda_codec_allocator.h
@@ -17,7 +17,7 @@
#include "base/sequenced_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
-#include "base/task_scheduler/post_task.h"
+#include "base/task/post_task.h"
#include "base/threading/thread.h"
#include "base/time/tick_clock.h"
#include "base/trace_event/trace_event.h"
@@ -78,6 +78,8 @@ class MEDIA_GPU_EXPORT CodecConfig
VideoColorSpace container_color_space;
base::Optional<HDRMetadata> hdr_metadata;
+ base::RepeatingClosure on_buffers_available_cb;
+
protected:
friend class base::RefCountedThreadSafe<CodecConfig>;
virtual ~CodecConfig();
@@ -134,7 +136,8 @@ class MEDIA_GPU_EXPORT AVDACodecAllocator {
const std::vector<uint8_t>& csd1,
const VideoColorSpace& color_space,
const base::Optional<HDRMetadata>& hdr_metadata,
- bool allow_adaptive_playback)>;
+ bool allow_adaptive_playback,
+ base::RepeatingClosure on_buffers_available_cb)>;
// Make sure the construction threads are started for |client|. If the
// threads fail to start, then codec allocation may fail.
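
A client wires the new field roughly as below, mirroring the media_codec_video_decoder.cc change later in this diff (MyDecoder and OnCodecBuffersAvailable are hypothetical names):

    // MediaCodec may signal buffer availability from an arbitrary thread, so
    // bounce the callback back to the client's own task runner.
    scoped_refptr<CodecConfig> codec_config = new CodecConfig();
    codec_config->on_buffers_available_cb = BindToCurrentLoop(
        base::BindRepeating(&MyDecoder::OnCodecBuffersAvailable,
                            weak_factory_.GetWeakPtr()));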
diff --git a/chromium/media/gpu/android/avda_picture_buffer_manager.cc b/chromium/media/gpu/android/avda_picture_buffer_manager.cc
index d760da02e96..e68b23faa08 100644
--- a/chromium/media/gpu/android/avda_picture_buffer_manager.cc
+++ b/chromium/media/gpu/android/avda_picture_buffer_manager.cc
@@ -11,6 +11,7 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
+#include "base/stl_util.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_stream_texture_image.h"
#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
@@ -171,10 +172,7 @@ void AVDAPictureBufferManager::ReleaseCodecBufferForPicture(
void AVDAPictureBufferManager::ReuseOnePictureBuffer(
const PictureBuffer& picture_buffer) {
- pictures_out_for_display_.erase(
- std::remove(pictures_out_for_display_.begin(),
- pictures_out_for_display_.end(), picture_buffer.id()),
- pictures_out_for_display_.end());
+ base::Erase(pictures_out_for_display_, picture_buffer.id());
// At this point, the CC must be done with the picture. We can't really
// check for that here directly. it's guaranteed in gpu_video_decoder.cc,
diff --git a/chromium/media/gpu/android/codec_image_unittest.cc b/chromium/media/gpu/android/codec_image_unittest.cc
index 52855a32940..0a71d0dfd0d 100644
--- a/chromium/media/gpu/android/codec_image_unittest.cc
+++ b/chromium/media/gpu/android/codec_image_unittest.cc
@@ -9,6 +9,7 @@
#include "base/logging.h"
#include "base/test/mock_callback.h"
#include "base/test/scoped_task_environment.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "media/base/android/media_codec_bridge.h"
#include "media/base/android/mock_media_codec_bridge.h"
@@ -42,7 +43,7 @@ class CodecImageTest : public testing::Test {
codec_ = codec.get();
wrapper_ = std::make_unique<CodecWrapper>(
CodecSurfacePair(std::move(codec), new AVDASurfaceBundle()),
- base::DoNothing());
+ base::DoNothing(), base::SequencedTaskRunnerHandle::Get());
ON_CALL(*codec_, DequeueOutputBuffer(_, _, _, _, _, _, _))
.WillByDefault(Return(MEDIA_CODEC_OK));
diff --git a/chromium/media/gpu/android/codec_wrapper.cc b/chromium/media/gpu/android/codec_wrapper.cc
index 7d851d1a369..77a8dc25e4c 100644
--- a/chromium/media/gpu/android/codec_wrapper.cc
+++ b/chromium/media/gpu/android/codec_wrapper.cc
@@ -23,8 +23,10 @@ namespace media {
// CodecOutputBuffer are the only two things that hold references to it.
class CodecWrapperImpl : public base::RefCountedThreadSafe<CodecWrapperImpl> {
public:
- CodecWrapperImpl(CodecSurfacePair codec_surface_pair,
- base::Closure output_buffer_release_cb);
+ CodecWrapperImpl(
+ CodecSurfacePair codec_surface_pair,
+ CodecWrapper::OutputReleasedCB output_buffer_release_cb,
+ scoped_refptr<base::SequencedTaskRunner> release_task_runner);
using DequeueStatus = CodecWrapper::DequeueStatus;
using QueueStatus = CodecWrapper::QueueStatus;
@@ -89,12 +91,16 @@ class CodecWrapperImpl : public base::RefCountedThreadSafe<CodecWrapperImpl> {
// A callback that's called whenever an output buffer is released back to the
// codec.
- base::Closure output_buffer_release_cb_;
+ CodecWrapper::OutputReleasedCB output_buffer_release_cb_;
// Do we owe the client an EOS in DequeueOutput, due to an eos that we elided
// while we're already flushed?
bool elided_eos_pending_ = false;
+ // Task runner on which we'll release codec buffers without rendering. May be
+ // null to always do this on the calling task runner.
+ scoped_refptr<base::SequencedTaskRunner> release_task_runner_;
+
DISALLOW_COPY_AND_ASSIGN(CodecWrapperImpl);
};
@@ -104,20 +110,28 @@ CodecOutputBuffer::CodecOutputBuffer(scoped_refptr<CodecWrapperImpl> codec,
: codec_(std::move(codec)), id_(id), size_(size) {}
CodecOutputBuffer::~CodecOutputBuffer() {
- codec_->ReleaseCodecOutputBuffer(id_, false);
+ // While it will work if we re-release the buffer, since CodecWrapper handles
+ // it properly, we can save a lock + (possibly) post by checking here if we
+ // know that it has been rendered already.
+ if (!was_rendered_)
+ codec_->ReleaseCodecOutputBuffer(id_, false);
}
bool CodecOutputBuffer::ReleaseToSurface() {
+ was_rendered_ = true;
return codec_->ReleaseCodecOutputBuffer(id_, true);
}
-CodecWrapperImpl::CodecWrapperImpl(CodecSurfacePair codec_surface_pair,
- base::Closure output_buffer_release_cb)
+CodecWrapperImpl::CodecWrapperImpl(
+ CodecSurfacePair codec_surface_pair,
+ CodecWrapper::OutputReleasedCB output_buffer_release_cb,
+ scoped_refptr<base::SequencedTaskRunner> release_task_runner)
: state_(State::kFlushed),
codec_(std::move(codec_surface_pair.first)),
surface_bundle_(std::move(codec_surface_pair.second)),
next_buffer_id_(0),
- output_buffer_release_cb_(std::move(output_buffer_release_cb)) {
+ output_buffer_release_cb_(std::move(output_buffer_release_cb)),
+ release_task_runner_(std::move(release_task_runner)) {
DVLOG(2) << __func__;
}
@@ -364,6 +378,34 @@ scoped_refptr<AVDASurfaceBundle> CodecWrapperImpl::SurfaceBundle() {
}
bool CodecWrapperImpl::ReleaseCodecOutputBuffer(int64_t id, bool render) {
+ if (!render && release_task_runner_ &&
+ !release_task_runner_->RunsTasksInCurrentSequence()) {
+ // Note that this can only delay releases, but that won't ultimately change
+ // the ordering at the codec, assuming that releases / renders originate
+ // from the same thread.
+ //
+ // We know that a render call that happens before a release call will still
+ // run before the release's posted task, since it happens before we even
+ // post it.
+ //
+ // Similarly, renders are kept in order with each other.
+ //
+ // It is possible that a render happens before the posted task(s) of some
+ // earlier release(s) (with no intervening renders, since those are
+ // ordered). In this case, though, the loop below will still release
+ // everything earlier than the rendered buffer, so the codec still sees the
+ // same sequence of calls -- some releases followed by a render.
+ //
+ // Of course, if releases and renders are posted from different threads,
+ // then it's unclear what the ordering was anyway.
+ release_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(
+ base::IgnoreResult(&CodecWrapperImpl::ReleaseCodecOutputBuffer),
+ this, id, render));
+ return true;
+ }
+
base::AutoLock l(lock_);
if (!codec_ || state_ == State::kError)
return false;
@@ -386,15 +428,20 @@ bool CodecWrapperImpl::ReleaseCodecOutputBuffer(int64_t id, bool render) {
int index = buffer_it->second;
codec_->ReleaseOutputBuffer(index, render);
buffer_ids_.erase(buffer_ids_.begin(), buffer_it + 1);
- if (output_buffer_release_cb_)
- output_buffer_release_cb_.Run();
+ if (output_buffer_release_cb_) {
+ output_buffer_release_cb_.Run(state_ == State::kDrained ||
+ state_ == State::kDraining);
+ }
return true;
}
-CodecWrapper::CodecWrapper(CodecSurfacePair codec_surface_pair,
- base::Closure output_buffer_release_cb)
+CodecWrapper::CodecWrapper(
+ CodecSurfacePair codec_surface_pair,
+ OutputReleasedCB output_buffer_release_cb,
+ scoped_refptr<base::SequencedTaskRunner> release_task_runner)
: impl_(new CodecWrapperImpl(std::move(codec_surface_pair),
- std::move(output_buffer_release_cb))) {}
+ std::move(output_buffer_release_cb),
+ std::move(release_task_runner))) {}
CodecWrapper::~CodecWrapper() {
// The codec must have already been taken.
diff --git a/chromium/media/gpu/android/codec_wrapper.h b/chromium/media/gpu/android/codec_wrapper.h
index b316f114d40..865d18a12b5 100644
--- a/chromium/media/gpu/android/codec_wrapper.h
+++ b/chromium/media/gpu/android/codec_wrapper.h
@@ -52,6 +52,7 @@ class MEDIA_GPU_EXPORT CodecOutputBuffer {
scoped_refptr<CodecWrapperImpl> codec_;
int64_t id_;
+ bool was_rendered_ = false;
gfx::Size size_;
DISALLOW_COPY_AND_ASSIGN(CodecOutputBuffer);
};
@@ -68,8 +69,18 @@ class MEDIA_GPU_EXPORT CodecWrapper {
// released back to the codec (whether it's rendered or not). This is a signal
// that the codec might be ready to accept more input. It may be run on any
// thread.
+ //
+ // OutputReleasedCB will be called with a bool indicating if CodecWrapper is
+ // currently draining or in the drained state.
+ //
+ // If not null, then we will only release codec buffers without rendering
+ // on |release_task_runner|, posting if needed. This does not change where
+ // we release them with rendering; that has to be done inline. This helps
+ // us avoid a common case of hanging up the GPU main thread.
+ using OutputReleasedCB = base::RepeatingCallback<void(bool)>;
CodecWrapper(CodecSurfacePair codec_surface_pair,
- base::Closure output_buffer_release_cb);
+ OutputReleasedCB output_buffer_release_cb,
+ scoped_refptr<base::SequencedTaskRunner> release_task_runner);
~CodecWrapper();
// Takes the backing codec and surface, implicitly discarding all outstanding
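
A construction sketch matching the call sites updated in this diff (codec_image_unittest.cc, codec_wrapper_unittest.cc, media_codec_video_decoder.cc); OnOutputReleased is a hypothetical handler whose bool argument reports whether the wrapper was draining or drained:

    void OnOutputReleased(bool is_drained_or_draining);

    auto wrapper = std::make_unique<CodecWrapper>(
        CodecSurfacePair(std::move(codec), std::move(surface_bundle)),
        base::BindRepeating(&OnOutputReleased),
        // Unrendered output buffers are released on (or posted to) this
        // sequence instead of whichever thread drops the CodecOutputBuffer.
        base::SequencedTaskRunnerHandle::Get());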
diff --git a/chromium/media/gpu/android/codec_wrapper_unittest.cc b/chromium/media/gpu/android/codec_wrapper_unittest.cc
index b01bb38919a..dc9ea6e896b 100644
--- a/chromium/media/gpu/android/codec_wrapper_unittest.cc
+++ b/chromium/media/gpu/android/codec_wrapper_unittest.cc
@@ -4,16 +4,19 @@
#include <memory>
-#include "media/gpu/android/codec_wrapper.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
+#include "base/synchronization/waitable_event.h"
#include "base/test/mock_callback.h"
#include "base/test/scoped_task_environment.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread.h"
#include "media/base/android/media_codec_bridge.h"
#include "media/base/android/mock_media_codec_bridge.h"
#include "media/base/encryption_scheme.h"
#include "media/base/subsample_entry.h"
+#include "media/gpu/android/codec_wrapper.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -28,13 +31,15 @@ namespace media {
class CodecWrapperTest : public testing::Test {
public:
- CodecWrapperTest() {
+ CodecWrapperTest() : other_thread_("Other thread") {
auto codec = std::make_unique<NiceMock<MockMediaCodecBridge>>();
codec_ = codec.get();
surface_bundle_ = base::MakeRefCounted<AVDASurfaceBundle>();
wrapper_ = std::make_unique<CodecWrapper>(
CodecSurfacePair(std::move(codec), surface_bundle_),
- output_buffer_release_cb_.Get());
+ output_buffer_release_cb_.Get(),
+ // Unrendered output buffers are released on our thread.
+ base::SequencedTaskRunnerHandle::Get());
ON_CALL(*codec_, DequeueOutputBuffer(_, _, _, _, _, _, _))
.WillByDefault(Return(MEDIA_CODEC_OK));
ON_CALL(*codec_, DequeueInputBuffer(_, _))
@@ -44,6 +49,9 @@ class CodecWrapperTest : public testing::Test {
uint8_t data = 0;
fake_decoder_buffer_ = DecoderBuffer::CopyFrom(&data, 1);
+
+ // May fail.
+ other_thread_.Start();
}
~CodecWrapperTest() override {
@@ -63,8 +71,11 @@ class CodecWrapperTest : public testing::Test {
NiceMock<MockMediaCodecBridge>* codec_;
std::unique_ptr<CodecWrapper> wrapper_;
scoped_refptr<AVDASurfaceBundle> surface_bundle_;
- NiceMock<base::MockCallback<base::Closure>> output_buffer_release_cb_;
+ NiceMock<base::MockCallback<CodecWrapper::OutputReleasedCB>>
+ output_buffer_release_cb_;
scoped_refptr<DecoderBuffer> fake_decoder_buffer_;
+
+ base::Thread other_thread_;
};
TEST_F(CodecWrapperTest, TakeCodecReturnsTheCodecFirstAndNullLater) {
@@ -211,13 +222,22 @@ TEST_F(CodecWrapperTest, CodecOutputBuffersHaveTheCorrectSize) {
TEST_F(CodecWrapperTest, OutputBufferReleaseCbIsCalledWhenRendering) {
auto codec_buffer = DequeueCodecOutputBuffer();
- EXPECT_CALL(output_buffer_release_cb_, Run()).Times(1);
+ EXPECT_CALL(output_buffer_release_cb_, Run(false)).Times(1);
codec_buffer->ReleaseToSurface();
}
TEST_F(CodecWrapperTest, OutputBufferReleaseCbIsCalledWhenDestructing) {
auto codec_buffer = DequeueCodecOutputBuffer();
- EXPECT_CALL(output_buffer_release_cb_, Run()).Times(1);
+ EXPECT_CALL(output_buffer_release_cb_, Run(false)).Times(1);
+}
+
+TEST_F(CodecWrapperTest, OutputBufferReflectsDrainingOrDrainedStatus) {
+ wrapper_->QueueInputBuffer(*fake_decoder_buffer_, EncryptionScheme());
+ auto eos = DecoderBuffer::CreateEOSBuffer();
+ wrapper_->QueueInputBuffer(*eos, EncryptionScheme());
+ ASSERT_TRUE(wrapper_->IsDraining());
+ auto codec_buffer = DequeueCodecOutputBuffer();
+ EXPECT_CALL(output_buffer_release_cb_, Run(true)).Times(1);
}
TEST_F(CodecWrapperTest, CodecStartsInFlushedState) {
@@ -308,4 +328,33 @@ TEST_F(CodecWrapperTest, EOSWhileFlushedOrDrainedIsElided) {
ASSERT_TRUE(is_eos);
}
+TEST_F(CodecWrapperTest, CodecWrapperPostsReleaseToProvidedThread) {
+ // Releasing an output buffer without rendering on some other thread should
+ // post back to the main thread.
+ scoped_refptr<base::SequencedTaskRunner> task_runner =
+ other_thread_.task_runner();
+ // If the thread failed to start, pass.
+ if (!task_runner)
+ return;
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ auto cb = base::BindOnce(
+ [](std::unique_ptr<CodecOutputBuffer> codec_buffer,
+ base::WaitableEvent* event) {
+ codec_buffer.reset();
+ event->Signal();
+ },
+ DequeueCodecOutputBuffer(), base::Unretained(&event));
+ task_runner->PostTask(FROM_HERE, std::move(cb));
+
+ // Wait until the CodecOutputBuffer is released. It should not release the
+ // underlying buffer, but should instead post a task to release it.
+ event.Wait();
+
+ // The underlying buffer should not be released until we RunUntilIdle.
+ EXPECT_CALL(*codec_, ReleaseOutputBuffer(_, false));
+ base::RunLoop().RunUntilIdle();
+}
+
} // namespace media
diff --git a/chromium/media/gpu/android/device_info.cc b/chromium/media/gpu/android/device_info.cc
index a7f163579e0..dbff9393442 100644
--- a/chromium/media/gpu/android/device_info.cc
+++ b/chromium/media/gpu/android/device_info.cc
@@ -48,4 +48,12 @@ bool DeviceInfo::CodecNeedsFlushWorkaround(MediaCodecBridge* codec) {
return MediaCodecUtil::CodecNeedsFlushWorkaround(codec);
}
+bool DeviceInfo::IsAsyncApiSupported() {
+ // Technically the base setCallback() API is available in L, but we
+ // need the version which accepts a Handler which is in M... but
+ // in M there's a MediaCodec bug that's not fixed until N :|
+ // https://crbug.com/873094 https://crbug.com/610523
+ return SdkVersion() >= base::android::SDK_VERSION_NOUGAT;
+}
+
} // namespace media
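
Tests can force either path through the MockDeviceInfo override added later in this diff (mock_device_info.h/.cc); by default that mock reports the async API as unsupported:

    testing::NiceMock<MockDeviceInfo> device_info;
    ON_CALL(device_info, IsAsyncApiSupported())
        .WillByDefault(testing::Return(true));  // Exercise the async path.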
diff --git a/chromium/media/gpu/android/device_info.h b/chromium/media/gpu/android/device_info.h
index 95fcbc888b3..095a5c1b8f2 100644
--- a/chromium/media/gpu/android/device_info.h
+++ b/chromium/media/gpu/android/device_info.h
@@ -23,6 +23,7 @@ struct MEDIA_GPU_EXPORT DeviceInfo {
virtual bool IsSetOutputSurfaceSupported();
virtual bool SupportsOverlaySurfaces();
virtual bool CodecNeedsFlushWorkaround(MediaCodecBridge* codec);
+ virtual bool IsAsyncApiSupported();
};
} // namespace media
diff --git a/chromium/media/gpu/android/image_reader_gl_owner.cc b/chromium/media/gpu/android/image_reader_gl_owner.cc
index 38c607bd4a7..1b7ef787ef2 100644
--- a/chromium/media/gpu/android/image_reader_gl_owner.cc
+++ b/chromium/media/gpu/android/image_reader_gl_owner.cc
@@ -15,6 +15,7 @@
#include "base/metrics/histogram_macros.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "gpu/ipc/common/android/android_image_reader_utils.h"
#include "ui/gl/gl_fence_android_native_fence_sync.h"
#include "ui/gl/scoped_binders.h"
#include "ui/gl/scoped_make_current.h"
@@ -57,13 +58,16 @@ ImageReaderGLOwner::ImageReaderGLOwner(GLuint texture_id)
// Set the width, height and format to some default values. These parameters
// may be overridden by the producer sending buffers to this imageReader's
// Surface.
- int32_t width = 1, height = 1, maxImages = 3;
+ int32_t width = 1, height = 1, max_images = 3;
AIMAGE_FORMATS format = AIMAGE_FORMAT_YUV_420_888;
AImageReader* reader = nullptr;
+ // The usage flag below should be used when the buffer will be read from by
+ // the GPU as a texture.
+ const uint64_t usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
// Create a new reader for images of the desired size and format.
- media_status_t return_code =
- loader_.AImageReader_new(width, height, format, maxImages, &reader);
+ media_status_t return_code = loader_.AImageReader_newWithUsage(
+ width, height, format, usage, max_images, &reader);
if (return_code != AMEDIA_OK) {
LOG(ERROR) << " Image reader creation failed.";
if (return_code == AMEDIA_ERROR_INVALID_PARAMETER)
@@ -140,10 +144,10 @@ void ImageReaderGLOwner::UpdateTexImage() {
// Acquire the latest image asynchronously
AImage* image = nullptr;
- int acquireFenceFd = 0;
+ int acquire_fence_fd = -1;
media_status_t return_code = AMEDIA_OK;
return_code = loader_.AImageReader_acquireLatestImageAsync(
- image_reader_, &image, &acquireFenceFd);
+ image_reader_, &image, &acquire_fence_fd);
// TODO(http://crbug.com/846050).
// Need to add some better error handling if below error occurs. Currently we
@@ -178,6 +182,7 @@ void ImageReaderGLOwner::UpdateTexImage() {
NOTREACHED();
return;
}
+ base::ScopedFD scoped_acquire_fence_fd(acquire_fence_fd);
// If there is no new image simply return. At this point previous image will
// still be bound to the texture.
@@ -185,89 +190,20 @@ void ImageReaderGLOwner::UpdateTexImage() {
return;
}
- // If we have a new Image, delete the previously acquired image (if any).
- if (current_image_) {
- // Delete the image synchronously. Create and insert a fence signal.
- std::unique_ptr<gl::GLFenceAndroidNativeFenceSync> android_native_fence =
- gl::GLFenceAndroidNativeFenceSync::CreateForGpuFence();
- if (!android_native_fence) {
- LOG(ERROR) << "Failed to create android native fence sync object.";
- return;
- }
- std::unique_ptr<gfx::GpuFence> gpu_fence =
- android_native_fence->GetGpuFence();
- if (!gpu_fence) {
- LOG(ERROR) << "Unable to get a gpu fence object.";
- return;
- }
- gfx::GpuFenceHandle fence_handle =
- gfx::CloneHandleForIPC(gpu_fence->GetGpuFenceHandle());
- if (fence_handle.is_null()) {
- LOG(ERROR) << "Gpu fence handle is null";
- return;
- }
- loader_.AImage_deleteAsync(current_image_, fence_handle.native_fd.fd);
- current_image_ = nullptr;
- }
+ // If we have a new Image, delete the previously acquired image.
+ if (!gpu::DeleteAImageAsync(current_image_, &loader_))
+ return;
// Make the newly acquired image the current image.
current_image_ = image;
- // If acquireFenceFd is -1, we do not need synchronization fence and image is
- // ready to be used immediately. Else we need to create a sync fence which is
- // used to signal when the buffer/image is ready to be consumed.
- if (acquireFenceFd != -1) {
- // Create a new egl sync object using the acquireFenceFd.
- EGLint attribs[] = {EGL_SYNC_NATIVE_FENCE_FD_ANDROID, acquireFenceFd,
- EGL_NONE};
- std::unique_ptr<gl::GLFenceEGL> egl_fence(
- gl::GLFenceEGL::Create(EGL_SYNC_NATIVE_FENCE_ANDROID, attribs));
-
- // Insert the fence sync gl command using the helper class in
- // gl_fence_egl.h.
- if (egl_fence == nullptr) {
- LOG(ERROR) << " Failed to created egl fence object ";
- return;
- }
- DCHECK(egl_fence);
-
- // Make the server wait and not the client.
- egl_fence->ServerWait();
- }
-
- // Get the hardware buffer from the image.
- AHardwareBuffer* buffer = nullptr;
- DCHECK(current_image_);
- if (loader_.AImage_getHardwareBuffer(current_image_, &buffer) != AMEDIA_OK) {
- LOG(ERROR) << "hardware buffer is null";
+ // Insert an EGL fence and make server wait for image to be available.
+ if (!gpu::InsertEglFenceAndWait(std::move(scoped_acquire_fence_fd)))
return;
- }
- // Create a egl image from the hardware buffer. Get the image size to create
- // egl image.
- int32_t image_height = 0, image_width = 0;
- if (loader_.AImage_getWidth(current_image_, &image_width) != AMEDIA_OK) {
- LOG(ERROR) << "image width is null OR image has been deleted";
- return;
- }
- if (loader_.AImage_getHeight(current_image_, &image_height) != AMEDIA_OK) {
- LOG(ERROR) << "image height is null OR image has been deleted";
- return;
- }
- gfx::Size image_size(image_width, image_height);
- scoped_refptr<gl::GLImageAHardwareBuffer> egl_image(
- new gl::GLImageAHardwareBuffer(image_size));
- if (!egl_image->Initialize(buffer, false)) {
- LOG(ERROR) << "Failed to create EGL image ";
- egl_image = nullptr;
+ // Create EGL image from the AImage and bind it to the texture.
+ if (!gpu::CreateAndBindEglImage(current_image_, texture_id_, &loader_))
return;
- }
-
- // Now bind this egl image to the texture target GL_TEXTURE_EXTERNAL_OES. Note
- // that once the egl image is bound, it can be destroyed safely without
- // affecting the rendering using this texture image.
- glBindTexture(GL_TEXTURE_EXTERNAL_OES, texture_id_);
- egl_image->BindTexImage(GL_TEXTURE_EXTERNAL_OES);
}
void ImageReaderGLOwner::GetTransformMatrix(float mtx[]) {
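
For reference, the equivalent standalone NDK calls look roughly like this (Chromium routes them through its |loader_| indirection rather than linking the symbols directly; requires API level 26 or later):

    #include <android/hardware_buffer.h>
    #include <media/NdkImageReader.h>

    AImageReader* reader = nullptr;
    media_status_t rc = AImageReader_newWithUsage(
        /*width=*/1, /*height=*/1, AIMAGE_FORMAT_YUV_420_888,
        AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE, /*maxImages=*/3, &reader);
    if (rc != AMEDIA_OK) {
      // e.g. AMEDIA_ERROR_INVALID_PARAMETER; reader creation failed.
    }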
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.cc b/chromium/media/gpu/android/media_codec_video_decoder.cc
index 04de2ca939e..c16bbf51db7 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.cc
+++ b/chromium/media/gpu/android/media_codec_video_decoder.cc
@@ -12,6 +12,7 @@
#include "base/logging.h"
#include "base/memory/weak_ptr.h"
#include "base/metrics/histogram_macros.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "media/base/android/media_codec_bridge_impl.h"
#include "media/base/android/media_codec_util.h"
#include "media/base/bind_to_current_loop.h"
@@ -89,6 +90,16 @@ bool ConfigSupported(const VideoDecoderConfig& config,
}
}
+void OutputBufferReleased(bool using_async_api,
+ base::RepeatingClosure pump_cb,
+ bool is_drained_or_draining) {
+ // The asynchronous API doesn't need pumping upon calls to ReleaseOutputBuffer
+ // unless we're draining or drained.
+ if (using_async_api && !is_drained_or_draining)
+ return;
+ pump_cb.Run();
+}
+
} // namespace
// static
@@ -284,7 +295,7 @@ void MediaCodecVideoDecoder::OnMediaCryptoReady(
void MediaCodecVideoDecoder::OnKeyAdded() {
DVLOG(2) << __func__;
waiting_for_key_ = false;
- StartTimer();
+ StartTimerOrPumpCodec();
}
void MediaCodecVideoDecoder::StartLazyInit() {
@@ -426,6 +437,15 @@ void MediaCodecVideoDecoder::CreateCodec() {
media_crypto_);
config->initial_expected_coded_size = decoder_config_.coded_size();
config->surface_bundle = target_surface_bundle_;
+
+ // Use the asynchronous API if we can.
+ if (device_info_->IsAsyncApiSupported()) {
+ using_async_api_ = true;
+ config->on_buffers_available_cb = BindToCurrentLoop(
+ base::BindRepeating(&MediaCodecVideoDecoder::StartTimerOrPumpCodec,
+ weak_factory_.GetWeakPtr()));
+ }
+
// Note that this might be the same surface bundle that we've been using, if
// we're reinitializing the codec without changing surfaces. That's fine.
video_frame_factory_->SetSurfaceBundle(target_surface_bundle_);
@@ -443,10 +463,14 @@ void MediaCodecVideoDecoder::OnCodecConfigured(
EnterTerminalState(State::kError);
return;
}
+
codec_ = std::make_unique<CodecWrapper>(
CodecSurfacePair(std::move(codec), std::move(surface_bundle)),
- BindToCurrentLoop(base::Bind(&MediaCodecVideoDecoder::StartTimer,
- weak_factory_.GetWeakPtr())));
+ base::BindRepeating(&OutputBufferReleased, using_async_api_,
+ BindToCurrentLoop(base::BindRepeating(
+ &MediaCodecVideoDecoder::StartTimerOrPumpCodec,
+ weak_factory_.GetWeakPtr()))),
+ base::SequencedTaskRunnerHandle::Get());
// If the target surface changed while codec creation was in progress,
// transition to it immediately.
@@ -459,7 +483,7 @@ void MediaCodecVideoDecoder::OnCodecConfigured(
// Cache the frame information that goes with this codec.
CacheFrameInformation();
- StartTimer();
+ StartTimerOrPumpCodec();
}
void MediaCodecVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
@@ -514,17 +538,25 @@ void MediaCodecVideoDecoder::PumpCodec(bool force_start_timer) {
did_work = true;
} while (did_input || did_output);
+ if (using_async_api_)
+ return;
+
if (did_work || force_start_timer)
- StartTimer();
+ StartTimerOrPumpCodec();
else
StopTimerIfIdle();
}
-void MediaCodecVideoDecoder::StartTimer() {
+void MediaCodecVideoDecoder::StartTimerOrPumpCodec() {
DVLOG(4) << __func__;
if (state_ != State::kRunning)
return;
+ if (using_async_api_) {
+ PumpCodec(false);
+ return;
+ }
+
idle_timer_ = base::ElapsedTimer();
// Poll at 10ms somewhat arbitrarily.
@@ -542,6 +574,8 @@ void MediaCodecVideoDecoder::StartTimer() {
void MediaCodecVideoDecoder::StopTimerIfIdle() {
DVLOG(4) << __func__;
+ DCHECK(!using_async_api_);
+
// Stop the timer if we've been idle for one second. Chosen arbitrarily.
const auto kTimeout = base::TimeDelta::FromSeconds(1);
if (idle_timer_.Elapsed() > kTimeout) {
@@ -701,6 +735,8 @@ void MediaCodecVideoDecoder::RunEosDecodeCb(int reset_generation) {
void MediaCodecVideoDecoder::ForwardVideoFrame(
int reset_generation,
const scoped_refptr<VideoFrame>& frame) {
+ DVLOG(3) << __func__ << " : "
+ << (frame ? frame->AsHumanReadableString() : "null");
if (reset_generation == reset_generation_) {
// TODO(liberato): We might actually have a SW decoder. Consider setting
// this to false if so, especially for higher bitrates.
@@ -769,7 +805,7 @@ void MediaCodecVideoDecoder::OnCodecDrained() {
if (drain_type == DrainType::kForDestroy) {
// Post the delete in case the caller uses |this| after we return.
- base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, this);
+ base::SequencedTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, this);
return;
}
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.h b/chromium/media/gpu/android/media_codec_video_decoder.h
index 4ca9eebd488..fbfc05c87d7 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.h
+++ b/chromium/media/gpu/android/media_codec_video_decoder.h
@@ -151,13 +151,13 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder
void FlushCodec();
// Attempts to queue input and dequeue output from the codec. Calls
- // StartTimer() even if the codec is idle when |force_start_timer|.
+ // StartTimerOrPumpCodec() even if the codec is idle when |force_start_timer|.
void PumpCodec(bool force_start_timer);
bool QueueInput();
bool DequeueOutput();
// Starts |pump_codec_timer_| if it's not started and resets the idle timeout.
- void StartTimer();
+ void StartTimerOrPumpCodec();
void StopTimerIfIdle();
// Runs |eos_decode_cb_| if it's valid and |reset_generation| matches
@@ -281,6 +281,8 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder
// Do we need a hw-secure codec?
bool requires_secure_codec_ = false;
+ bool using_async_api_ = false;
+
// Should we flush the codec on the next decode, and pretend that it is
// drained currently? Note that we'll automatically flush if the codec is
// drained; this flag indicates that we also elided the drain, so the codec is
diff --git a/chromium/media/gpu/android/mock_device_info.cc b/chromium/media/gpu/android/mock_device_info.cc
index eb8606ce0c8..c3a9aa0e331 100644
--- a/chromium/media/gpu/android/mock_device_info.cc
+++ b/chromium/media/gpu/android/mock_device_info.cc
@@ -20,6 +20,7 @@ MockDeviceInfo::MockDeviceInfo() {
ON_CALL(*this, IsSetOutputSurfaceSupported()).WillByDefault(Return(true));
ON_CALL(*this, SupportsOverlaySurfaces()).WillByDefault(Return(true));
ON_CALL(*this, CodecNeedsFlushWorkaround(_)).WillByDefault(Return(false));
+ ON_CALL(*this, IsAsyncApiSupported()).WillByDefault(Return(false));
}
MockDeviceInfo::~MockDeviceInfo() = default;
diff --git a/chromium/media/gpu/android/mock_device_info.h b/chromium/media/gpu/android/mock_device_info.h
index 67bb8b48013..eaa85f8f16e 100644
--- a/chromium/media/gpu/android/mock_device_info.h
+++ b/chromium/media/gpu/android/mock_device_info.h
@@ -24,6 +24,7 @@ class MockDeviceInfo : public DeviceInfo {
MOCK_METHOD0(IsSetOutputSurfaceSupported, bool());
MOCK_METHOD0(SupportsOverlaySurfaces, bool());
MOCK_METHOD1(CodecNeedsFlushWorkaround, bool(MediaCodecBridge* codec));
+ MOCK_METHOD0(IsAsyncApiSupported, bool());
};
} // namespace media
diff --git a/chromium/media/gpu/android/texture_owner.cc b/chromium/media/gpu/android/texture_owner.cc
index 28d0b2c737f..c6a6e731ec7 100644
--- a/chromium/media/gpu/android/texture_owner.cc
+++ b/chromium/media/gpu/android/texture_owner.cc
@@ -37,8 +37,8 @@ scoped_refptr<TextureOwner> TextureOwner::Create() {
DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
// If AImageReader is supported and is enabled by media flag, use it.
- if (base::FeatureList::IsEnabled(media::kAImageReaderVideoOutput) &&
- base::android::AndroidImageReader::GetInstance().IsSupported()) {
+ if (base::android::AndroidImageReader::GetInstance().IsSupported() &&
+ base::FeatureList::IsEnabled(media::kAImageReaderVideoOutput)) {
return new ImageReaderGLOwner(texture_id);
}