summaryrefslogtreecommitdiff
path: root/chromium/media/gpu/android
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/media/gpu/android')
-rw-r--r--chromium/media/gpu/android/android_video_decode_accelerator.cc1859
-rw-r--r--chromium/media/gpu/android/android_video_decode_accelerator.h446
-rw-r--r--chromium/media/gpu/android/android_video_decode_accelerator_unittest.cc542
-rw-r--r--chromium/media/gpu/android/android_video_encode_accelerator.cc433
-rw-r--r--chromium/media/gpu/android/android_video_encode_accelerator.h115
-rw-r--r--chromium/media/gpu/android/android_video_surface_chooser.h81
-rw-r--r--chromium/media/gpu/android/android_video_surface_chooser_impl.cc294
-rw-r--r--chromium/media/gpu/android/android_video_surface_chooser_impl.h102
-rw-r--r--chromium/media/gpu/android/android_video_surface_chooser_impl_unittest.cc574
-rw-r--r--chromium/media/gpu/android/avda_codec_allocator.cc482
-rw-r--r--chromium/media/gpu/android/avda_codec_allocator.h296
-rw-r--r--chromium/media/gpu/android/avda_codec_allocator_unittest.cc384
-rw-r--r--chromium/media/gpu/android/avda_codec_image.cc252
-rw-r--r--chromium/media/gpu/android/avda_codec_image.h164
-rw-r--r--chromium/media/gpu/android/avda_picture_buffer_manager.cc278
-rw-r--r--chromium/media/gpu/android/avda_picture_buffer_manager.h137
-rw-r--r--chromium/media/gpu/android/avda_shared_state.cc79
-rw-r--r--chromium/media/gpu/android/avda_shared_state.h113
-rw-r--r--chromium/media/gpu/android/avda_state_provider.h44
-rw-r--r--chromium/media/gpu/android/avda_surface_bundle.cc47
-rw-r--r--chromium/media/gpu/android/avda_surface_bundle.h51
-rw-r--r--chromium/media/gpu/android/codec_image.cc17
-rw-r--r--chromium/media/gpu/android/codec_image.h7
-rw-r--r--chromium/media/gpu/android/codec_image_unittest.cc36
-rw-r--r--chromium/media/gpu/android/codec_wrapper.cc297
-rw-r--r--chromium/media/gpu/android/codec_wrapper.h81
-rw-r--r--chromium/media/gpu/android/codec_wrapper_unittest.cc84
-rw-r--r--chromium/media/gpu/android/content_video_view_overlay.cc78
-rw-r--r--chromium/media/gpu/android/content_video_view_overlay.h51
-rw-r--r--chromium/media/gpu/android/content_video_view_overlay_allocator.cc152
-rw-r--r--chromium/media/gpu/android/content_video_view_overlay_allocator.h86
-rw-r--r--chromium/media/gpu/android/content_video_view_overlay_allocator_unittest.cc155
-rw-r--r--chromium/media/gpu/android/fake_android_video_surface_chooser.cc40
-rw-r--r--chromium/media/gpu/android/fake_android_video_surface_chooser.h49
-rw-r--r--chromium/media/gpu/android/fake_codec_allocator.cc93
-rw-r--r--chromium/media/gpu/android/fake_codec_allocator.h58
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder.cc632
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder.h179
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder_unittest.cc303
-rw-r--r--chromium/media/gpu/android/mock_surface_texture_gl_owner.cc39
-rw-r--r--chromium/media/gpu/android/mock_surface_texture_gl_owner.h53
-rw-r--r--chromium/media/gpu/android/promotion_hint_aggregator.h16
-rw-r--r--chromium/media/gpu/android/promotion_hint_aggregator_impl_unittest.cc3
-rw-r--r--chromium/media/gpu/android/surface_texture_gl_owner.cc163
-rw-r--r--chromium/media/gpu/android/surface_texture_gl_owner.h129
-rw-r--r--chromium/media/gpu/android/surface_texture_gl_owner_unittest.cc115
-rw-r--r--chromium/media/gpu/android/video_frame_factory.h18
-rw-r--r--chromium/media/gpu/android/video_frame_factory_impl.cc114
-rw-r--r--chromium/media/gpu/android/video_frame_factory_impl.h41
49 files changed, 8996 insertions, 866 deletions
diff --git a/chromium/media/gpu/android/android_video_decode_accelerator.cc b/chromium/media/gpu/android/android_video_decode_accelerator.cc
new file mode 100644
index 00000000000..4aa7deab85a
--- /dev/null
+++ b/chromium/media/gpu/android/android_video_decode_accelerator.cc
@@ -0,0 +1,1859 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/android_video_decode_accelerator.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/android/build_info.h"
+#include "base/auto_reset.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
+#include "base/command_line.h"
+#include "base/containers/queue.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/sys_info.h"
+#include "base/task_runner_util.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/trace_event.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "media/base/android/media_codec_bridge_impl.h"
+#include "media/base/android/media_codec_util.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/base/bitstream_buffer.h"
+#include "media/base/limits.h"
+#include "media/base/media.h"
+#include "media/base/media_switches.h"
+#include "media/base/timestamp_constants.h"
+#include "media/base/video_decoder_config.h"
+#include "media/gpu/android/android_video_surface_chooser_impl.h"
+#include "media/gpu/android/avda_picture_buffer_manager.h"
+#include "media/gpu/android/content_video_view_overlay.h"
+#include "media/gpu/android/device_info.h"
+#include "media/gpu/android/promotion_hint_aggregator_impl.h"
+#include "media/gpu/shared_memory_region.h"
+#include "media/mojo/features.h"
+#include "media/video/picture.h"
+#include "services/service_manager/public/cpp/service_context_ref.h"
+#include "ui/gl/android/scoped_java_surface.h"
+#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/gl_bindings.h"
+
+#if BUILDFLAG(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
+#include "media/cdm/cdm_manager.h" // nogncheck
+#endif
+
+#define NOTIFY_ERROR(error_code, error_message) \
+ do { \
+ DLOG(ERROR) << error_message; \
+ NotifyError(VideoDecodeAccelerator::error_code); \
+ } while (0)
+
+namespace media {
+
+namespace {
+
+enum { kNumPictureBuffers = limits::kMaxVideoFrames + 1 };
+
+// Max number of bitstreams notified to the client with
+// NotifyEndOfBitstreamBuffer() before getting output from the bitstream.
+enum { kMaxBitstreamsNotifiedInAdvance = 32 };
+
+// Number of frames to defer overlays for when entering fullscreen. This lets
+// blink relayout settle down a bit. If overlay positions were synchronous,
+// then we wouldn't need this.
+enum { kFrameDelayForFullscreenLayout = 15 };
+
+// MediaCodec is only guaranteed to support baseline, but some devices may
+// support others. Advertise support for all H264 profiles and let the
+// MediaCodec fail when decoding if it's not actually supported. It's assumed
+// that consumers won't have software fallback for H264 on Android anyway.
+constexpr VideoCodecProfile kSupportedH264Profiles[] = {
+ H264PROFILE_BASELINE,
+ H264PROFILE_MAIN,
+ H264PROFILE_EXTENDED,
+ H264PROFILE_HIGH,
+ H264PROFILE_HIGH10PROFILE,
+ H264PROFILE_HIGH422PROFILE,
+ H264PROFILE_HIGH444PREDICTIVEPROFILE,
+ H264PROFILE_SCALABLEBASELINE,
+ H264PROFILE_SCALABLEHIGH,
+ H264PROFILE_STEREOHIGH,
+ H264PROFILE_MULTIVIEWHIGH};
+
+#if BUILDFLAG(ENABLE_HEVC_DEMUXING)
+constexpr VideoCodecProfile kSupportedHevcProfiles[] = {HEVCPROFILE_MAIN,
+ HEVCPROFILE_MAIN10};
+#endif
+
+// Because MediaCodec is thread-hostile (must be poked on a single thread) and
+// has no callback mechanism (b/11990118), we must drive it by polling for
+// complete frames (and available input buffers, when the codec is fully
+// saturated). This function defines the polling delay. The value used is an
+// arbitrary choice that trades off CPU utilization (spinning) against latency.
+// Mirrors android_video_encode_accelerator.cc:EncodePollDelay().
+//
+// An alternative to this polling scheme could be to dedicate a new thread
+// (instead of using the ChildThread) to run the MediaCodec, and make that
+// thread use the timeout-based flavor of MediaCodec's dequeue methods when it
+// believes the codec should complete "soon" (e.g. waiting for an input
+// buffer, or waiting for a picture when it knows enough complete input
+// pictures have been fed to saturate any internal buffering). This is
+// speculative and it's unclear that this would be a win (nor that there's a
+// reasonably device-agnostic way to fill in the "believes" above).
+constexpr base::TimeDelta DecodePollDelay =
+ base::TimeDelta::FromMilliseconds(10);
+
+constexpr base::TimeDelta NoWaitTimeOut = base::TimeDelta::FromMicroseconds(0);
+
+constexpr base::TimeDelta IdleTimerTimeOut = base::TimeDelta::FromSeconds(1);
+
+// How often do we let the surface chooser try for an overlay? While we'll
+// retry if some relevant state changes on our side (e.g., fullscreen state),
+// there's plenty of state that we don't know about (e.g., power efficiency,
+// memory pressure => cancelling an old overlay, etc.). We just let the chooser
+// retry every once in a while for those things.
+constexpr base::TimeDelta RetryChooserTimeout = base::TimeDelta::FromSeconds(5);
+
+// On low end devices (< KitKat is always low-end due to buggy MediaCodec),
+// defer the surface creation until the codec is actually used if we know no
+// software fallback exists.
+bool ShouldDeferSurfaceCreation(AVDACodecAllocator* codec_allocator,
+ const OverlayInfo& overlay_info,
+ VideoCodec codec,
+ DeviceInfo* device_info) {
+ // TODO(liberato): We might still want to defer if we've got a routing
+ // token. It depends on whether we want to use it right away or not.
+ if (overlay_info.HasValidSurfaceId() || overlay_info.HasValidRoutingToken())
+ return false;
+
+ return codec == kCodecH264 && codec_allocator->IsAnyRegisteredAVDA() &&
+ device_info->SdkVersion() <= base::android::SDK_VERSION_JELLY_BEAN_MR2;
+}
+
+} // namespace
+
+// AVDAManager manages a RepeatingTimer so that AVDAs can get a regular callback
+// to DoIOTask().
+class AVDAManager {
+ public:
+ AVDAManager() {}
+
+ // Request periodic callback of |avda|->DoIOTask(). Does nothing if the
+ // instance is already registered and the timer started. The first request
+ // will start the repeating timer on an interval of DecodePollDelay.
+ void StartTimer(AndroidVideoDecodeAccelerator* avda) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ timer_avda_instances_.insert(avda);
+
+ // If the timer is running, StopTimer() might have been called earlier, if
+ // so remove the instance from the pending erasures.
+ if (timer_running_)
+ pending_erase_.erase(avda);
+
+ if (io_timer_.IsRunning())
+ return;
+ io_timer_.Start(FROM_HERE, DecodePollDelay, this, &AVDAManager::RunTimer);
+ }
+
+ // Stop callbacks to |avda|->DoIOTask(). Does nothing if the instance is not
+ // registered. If there are no instances left, the repeating timer will be
+ // stopped.
+ void StopTimer(AndroidVideoDecodeAccelerator* avda) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // If the timer is running, defer erasures to avoid iterator invalidation.
+ if (timer_running_) {
+ pending_erase_.insert(avda);
+ return;
+ }
+
+ timer_avda_instances_.erase(avda);
+ if (timer_avda_instances_.empty())
+ io_timer_.Stop();
+ }
+
+ private:
+ ~AVDAManager() = delete;
+
+ void RunTimer() {
+ {
+ // Call out to all AVDA instances, some of which may attempt to remove
+ // themselves from the list during this operation; those removals will be
+ // deferred until after all iterations are complete.
+ base::AutoReset<bool> scoper(&timer_running_, true);
+ for (auto* avda : timer_avda_instances_)
+ avda->DoIOTask(false);
+ }
+
+ // Take care of any deferred erasures.
+ for (auto* avda : pending_erase_)
+ StopTimer(avda);
+ pending_erase_.clear();
+
+ // TODO(dalecurtis): We may want to consider chunking this if task execution
+ // takes too long for the combined timer.
+ }
+
+ // All AVDA instances that would like us to poll DoIOTask.
+ std::set<AndroidVideoDecodeAccelerator*> timer_avda_instances_;
+
+ // Since we can't delete while iterating when using a set, defer erasure until
+ // after iteration complete.
+ bool timer_running_ = false;
+ std::set<AndroidVideoDecodeAccelerator*> pending_erase_;
+
+ // Repeating timer responsible for draining pending IO to the codecs.
+ base::RepeatingTimer io_timer_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(AVDAManager);
+};
+
+static AVDAManager* GetManager() {
+ static AVDAManager* manager = new AVDAManager();
+ return manager;
+}
+
+AndroidVideoDecodeAccelerator::BitstreamRecord::BitstreamRecord(
+ const BitstreamBuffer& bitstream_buffer)
+ : buffer(bitstream_buffer) {
+ if (buffer.id() != -1)
+ memory.reset(new SharedMemoryRegion(buffer, true));
+}
+
+AndroidVideoDecodeAccelerator::BitstreamRecord::BitstreamRecord(
+ BitstreamRecord&& other)
+ : buffer(std::move(other.buffer)), memory(std::move(other.memory)) {}
+
+AndroidVideoDecodeAccelerator::BitstreamRecord::~BitstreamRecord() {}
+
+AndroidVideoDecodeAccelerator::AndroidVideoDecodeAccelerator(
+ AVDACodecAllocator* codec_allocator,
+ std::unique_ptr<AndroidVideoSurfaceChooser> surface_chooser,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb,
+ const AndroidOverlayMojoFactoryCB& overlay_factory_cb,
+ DeviceInfo* device_info)
+ : client_(nullptr),
+ codec_allocator_(codec_allocator),
+ make_context_current_cb_(make_context_current_cb),
+ get_gles2_decoder_cb_(get_gles2_decoder_cb),
+ state_(BEFORE_OVERLAY_INIT),
+ picturebuffers_requested_(false),
+ picture_buffer_manager_(this),
+ media_drm_bridge_cdm_context_(nullptr),
+ cdm_registration_id_(0),
+ pending_input_buf_index_(-1),
+ during_initialize_(false),
+ deferred_initialization_pending_(false),
+ codec_needs_reset_(false),
+ defer_surface_creation_(false),
+ surface_chooser_(std::move(surface_chooser)),
+ device_info_(device_info),
+ force_defer_surface_creation_for_testing_(false),
+ overlay_factory_cb_(overlay_factory_cb),
+ promotion_hint_aggregator_(
+ base::MakeUnique<PromotionHintAggregatorImpl>()),
+ weak_this_factory_(this) {}
+
+AndroidVideoDecodeAccelerator::~AndroidVideoDecodeAccelerator() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ GetManager()->StopTimer(this);
+ codec_allocator_->StopThread(this);
+
+#if BUILDFLAG(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
+ if (!media_drm_bridge_cdm_context_)
+ return;
+
+ DCHECK(cdm_registration_id_);
+
+ // Cancel previously registered callback (if any).
+ media_drm_bridge_cdm_context_->SetMediaCryptoReadyCB(
+ MediaDrmBridgeCdmContext::MediaCryptoReadyCB());
+
+ media_drm_bridge_cdm_context_->UnregisterPlayer(cdm_registration_id_);
+#endif // BUILDFLAG(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
+}
+
+bool AndroidVideoDecodeAccelerator::Initialize(const Config& config,
+ Client* client) {
+ DVLOG(1) << __func__ << ": " << config.AsHumanReadableString();
+ TRACE_EVENT0("media", "AVDA::Initialize");
+ DCHECK(!media_codec_);
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::AutoReset<bool> scoper(&during_initialize_, true);
+
+ if (make_context_current_cb_.is_null() || get_gles2_decoder_cb_.is_null()) {
+ DLOG(ERROR) << "GL callbacks are required for this VDA";
+ return false;
+ }
+
+ if (config.output_mode != Config::OutputMode::ALLOCATE) {
+ DLOG(ERROR) << "Only ALLOCATE OutputMode is supported by this VDA";
+ return false;
+ }
+
+ DCHECK(client);
+ client_ = client;
+ config_ = config;
+ codec_config_ = new CodecConfig();
+ codec_config_->codec = VideoCodecProfileToVideoCodec(config.profile);
+ codec_config_->initial_expected_coded_size =
+ config.initial_expected_coded_size;
+
+ if (codec_config_->codec != kCodecVP8 && codec_config_->codec != kCodecVP9 &&
+#if BUILDFLAG(ENABLE_HEVC_DEMUXING)
+ codec_config_->codec != kCodecHEVC &&
+#endif
+ codec_config_->codec != kCodecH264) {
+ DLOG(ERROR) << "Unsupported profile: " << GetProfileName(config.profile);
+ return false;
+ }
+
+ codec_config_->software_codec_forbidden =
+ IsMediaCodecSoftwareDecodingForbidden();
+
+ if (codec_config_->codec == kCodecH264) {
+ codec_config_->csd0 = config.sps;
+ codec_config_->csd1 = config.pps;
+ }
+
+ // Only use MediaCodec for VP8/9 if it's likely backed by hardware
+ // or if the stream is encrypted.
+ if (IsMediaCodecSoftwareDecodingForbidden() &&
+ MediaCodecUtil::IsKnownUnaccelerated(codec_config_->codec,
+ MediaCodecDirection::DECODER)) {
+ DVLOG(1) << "Initialization failed: " << GetCodecName(codec_config_->codec)
+ << " is not hardware accelerated";
+ return false;
+ }
+
+ auto gles_decoder = get_gles2_decoder_cb_.Run();
+ if (!gles_decoder) {
+ DLOG(ERROR) << "Failed to get gles2 decoder instance.";
+ return false;
+ }
+
+ // SetSurface() can't be called before Initialize(), so we pick up our first
+ // surface ID from the codec configuration.
+ DCHECK(!pending_surface_id_);
+
+ // We signaled that we support deferred initialization, so see if the client
+ // does also.
+ deferred_initialization_pending_ = config.is_deferred_initialization_allowed;
+
+ // If we're low on resources, we may decide to defer creation of the surface
+ // until the codec is actually used.
+ if (force_defer_surface_creation_for_testing_ ||
+ ShouldDeferSurfaceCreation(codec_allocator_, config_.overlay_info,
+ codec_config_->codec, device_info_)) {
+ // We should never be here if a SurfaceView is required.
+ // TODO(liberato): This really isn't true with AndroidOverlay.
+ DCHECK(!config_.overlay_info.HasValidSurfaceId());
+ defer_surface_creation_ = true;
+ }
+
+ codec_allocator_->StartThread(this);
+
+ // If we're supposed to use overlays all the time, then they should always
+ // be marked as required.
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kForceVideoOverlays)) {
+ surface_chooser_state_.is_required = is_overlay_required_ = true;
+ }
+
+ // If we're trying for fullscreen-div cases, then we should promote more.
+ surface_chooser_state_.promote_aggressively =
+ base::FeatureList::IsEnabled(media::kUseAndroidOverlayAggressively);
+
+ // For encrypted media, start by initializing the CDM. Otherwise, start with
+ // the surface.
+ if (config_.is_encrypted()) {
+ if (!deferred_initialization_pending_) {
+ DLOG(ERROR)
+ << "Deferred initialization must be used for encrypted streams";
+ return false;
+ }
+ InitializeCdm();
+ } else {
+ StartSurfaceChooser();
+ }
+
+ // Fail / complete / defer initialization.
+ return state_ != ERROR;
+}
+
+void AndroidVideoDecodeAccelerator::StartSurfaceChooser() {
+ DCHECK_EQ(state_, BEFORE_OVERLAY_INIT);
+
+ // If we're trying to defer surface creation, then don't notify the chooser
+ // that it may start getting surfaces yet. We'll do that later.
+ if (defer_surface_creation_) {
+ if (deferred_initialization_pending_)
+ NotifyInitializationSucceeded();
+ return;
+ }
+
+ surface_chooser_state_.is_fullscreen = config_.overlay_info.is_fullscreen;
+
+ surface_chooser_->SetClientCallbacks(
+ base::Bind(&AndroidVideoDecodeAccelerator::OnSurfaceTransition,
+ weak_this_factory_.GetWeakPtr()),
+ base::Bind(&AndroidVideoDecodeAccelerator::OnSurfaceTransition,
+ weak_this_factory_.GetWeakPtr(), nullptr));
+
+ // Handle the sync path, which must use SurfaceTexture anyway. Note that we
+ // check both |during_initialize_| and |deferred_initialization_pending_|,
+ // since we might get here during deferred surface creation. In that case,
+ // Decode will call us (after clearing |defer_surface_creation_|), but
+ // deferred init will have already been signaled optimistically as success.
+ //
+ // Also note that we might choose to defer surface creation for the sync path,
+ // which won't get here. We'll exit above, successfully, during init, and
+ // will fall through to the below when Decode calls us back. That's okay.
+ // We only handle this case specially since |surface_chooser_| is allowed to
+ // post callbacks to us. Here, we guarantee that the sync case is actually
+ // resolved synchronously. The only exception will be if we need to defer
+ // surface creation for other reasons, in which case the sync path with just
+ // signal success optimistically.
+ if (during_initialize_ && !deferred_initialization_pending_) {
+ DCHECK(!config_.overlay_info.HasValidSurfaceId());
+ DCHECK(!config_.overlay_info.HasValidRoutingToken());
+ // Note that we might still send feedback to |surface_chooser_|, which might
+ // call us back. However, it will only ever tell us to use SurfaceTexture,
+ // since we have no overlay factory anyway.
+ OnSurfaceTransition(nullptr);
+ return;
+ }
+
+ // If we have a surface, then notify |surface_chooser_| about it. If we were
+ // told not to use an overlay (kNoSurfaceID or a null routing token), then we
+ // leave the factory blank.
+ AndroidOverlayFactoryCB factory;
+ if (config_.overlay_info.HasValidSurfaceId()) {
+ factory = base::Bind(&ContentVideoViewOverlay::Create,
+ config_.overlay_info.surface_id);
+ } else if (config_.overlay_info.HasValidRoutingToken() &&
+ overlay_factory_cb_) {
+ factory = base::Bind(overlay_factory_cb_, nullptr,
+ *config_.overlay_info.routing_token);
+ }
+
+ // Notify |surface_chooser_| that we've started. This guarantees that we'll
+ // get a callback. It might not be a synchronous callback, but we're not in
+ // the synchronous case. It will be soon, though. For pre-M, we rely on the
+ // fact that |surface_chooser_| won't tell us to use a SurfaceTexture while
+ // waiting for an overlay to become ready, for example.
+ surface_chooser_->UpdateState(std::move(factory), surface_chooser_state_);
+}
+
+void AndroidVideoDecodeAccelerator::OnSurfaceTransition(
+ std::unique_ptr<AndroidOverlay> overlay) {
+ if (overlay) {
+ overlay->AddSurfaceDestroyedCallback(base::Bind(
+ &AndroidVideoDecodeAccelerator::OnStopUsingOverlayImmediately,
+ weak_this_factory_.GetWeakPtr()));
+ }
+
+ // If we're waiting for a surface (e.g., during startup), then proceed
+ // immediately. Otherwise, wait for Dequeue to handle it. This can probably
+ // be merged with UpdateSurface.
+ if (state_ == BEFORE_OVERLAY_INIT) {
+ DCHECK(!incoming_overlay_);
+ incoming_bundle_ = new AVDASurfaceBundle(std::move(overlay));
+ InitializePictureBufferManager();
+ return;
+ }
+
+ // If, for some reason, |surface_chooser_| decides that we really should
+ // change our output surface pre-M, ignore it. For example, if the
+ // compositor tells us that it can't use an overlay, well, there's not much
+ // that we can do here unless we start falling forward to keyframes.
+ if (!device_info_->IsSetOutputSurfaceSupported())
+ return;
+
+ // If we're using a SurfaceTexture and are told to switch to one, then just
+ // do nothing. |surface_chooser_| doesn't really know if we've switched to
+ // SurfaceTexture or not. Note that it can't ask us to switch to the same
+ // overlay we're using, since it's unique_ptr.
+ if (!overlay && codec_config_->surface_bundle &&
+ !codec_config_->surface_bundle->overlay) {
+ // Also stop transitioning to an overlay, if we were doing so.
+ incoming_overlay_.reset();
+ return;
+ }
+
+ incoming_overlay_ = std::move(overlay);
+}
+
+void AndroidVideoDecodeAccelerator::InitializePictureBufferManager() {
+ DCHECK(!defer_surface_creation_);
+ DCHECK(incoming_bundle_);
+
+ if (!make_context_current_cb_.Run()) {
+ NOTIFY_ERROR(PLATFORM_FAILURE,
+ "Failed to make this decoder's GL context current");
+ incoming_bundle_ = nullptr;
+ return;
+ }
+
+ // Move |incoming_bundle_| to |codec_config_|. Our caller must set up an
+ // incoming bundle properly, since we don't want to accidentally overwrite
+ // |surface_bundle| for a codec that's being released elsewhere.
+ // TODO(liberato): it doesn't make sense anymore for the PictureBufferManager
+ // to create the surface texture. We can probably make an overlay impl out
+ // of it, and provide the surface texture to |picture_buffer_manager_|.
+ if (!picture_buffer_manager_.Initialize(incoming_bundle_)) {
+ NOTIFY_ERROR(PLATFORM_FAILURE, "Could not allocate surface texture");
+ incoming_bundle_ = nullptr;
+ return;
+ }
+
+ // If we have a media codec, then SetSurface. If that doesn't work, then we
+ // do not try to allocate a new codec; we might not be at a keyframe, etc.
+ // If we get here with a codec, then we must setSurface.
+ if (media_codec_) {
+ // TODO(liberato): fail on api check?
+ if (!media_codec_->SetSurface(incoming_bundle_->GetJavaSurface())) {
+ NOTIFY_ERROR(PLATFORM_FAILURE, "MediaCodec failed to switch surfaces.");
+ // We're not going to use |incoming_bundle_|.
+ } else {
+ // We've switched surfaces, so replace |surface_bundle|.
+ codec_config_->surface_bundle = incoming_bundle_;
+ // We could be in BEFORE_OVERLAY_INIT, but we're not anymore.
+ state_ = NO_ERROR;
+ }
+ incoming_bundle_ = nullptr;
+ CacheFrameInformation();
+ return;
+ }
+
+ // We're going to create a codec with |incoming_bundle_|. It might fail, but
+ // either way, we're done with any previous bundle. Note that, since we
+ // never get here after init (i.e., we never change surfaces without using
+ // SetSurface), there shouldn't be any previous bundle. However, this is the
+ // right thing to do even if we can switch.
+ codec_config_->surface_bundle = incoming_bundle_;
+ incoming_bundle_ = nullptr;
+ CacheFrameInformation();
+
+ // If the client doesn't support deferred initialization (WebRTC), then we
+ // should complete it now and return a meaningful result. Note that it would
+ // be nice if we didn't have to worry about starting codec configuration at
+ // all (::Initialize or the wrapper can do it), but then they have to remember
+ // not to start codec config if we have to wait for the cdm. It's somewhat
+ // clearer for us to handle both cases.
+ // For this to be a case for sync configuration, we must be called from
+ // Initialize(), and the client must not want deferred init. Note that having
+ // |deferred_initialization_pending_| false by itself isn't enough; if we're
+ // deferring surface creation, then we'll finish deferred init before asking
+ // for the surface. We'll be called via Decode.
+ if (during_initialize_ && !deferred_initialization_pending_) {
+ ConfigureMediaCodecSynchronously();
+ return;
+ }
+
+ // In all other cases, we don't have to wait for the codec.
+ ConfigureMediaCodecAsynchronously();
+}
+
+void AndroidVideoDecodeAccelerator::DoIOTask(bool start_timer) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ TRACE_EVENT0("media", "AVDA::DoIOTask");
+ if (state_ == ERROR || state_ == WAITING_FOR_CODEC ||
+ state_ == SURFACE_DESTROYED || state_ == BEFORE_OVERLAY_INIT) {
+ return;
+ }
+
+ picture_buffer_manager_.MaybeRenderEarly();
+ bool did_work = false, did_input = false, did_output = false;
+ do {
+ did_input = QueueInput();
+ did_output = DequeueOutput();
+ if (did_input || did_output)
+ did_work = true;
+ } while (did_input || did_output);
+
+ ManageTimer(did_work || start_timer);
+}
+
+bool AndroidVideoDecodeAccelerator::QueueInput() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ TRACE_EVENT0("media", "AVDA::QueueInput");
+ if (state_ == ERROR || state_ == WAITING_FOR_CODEC ||
+ state_ == WAITING_FOR_KEY || state_ == BEFORE_OVERLAY_INIT) {
+ return false;
+ }
+ if (bitstreams_notified_in_advance_.size() > kMaxBitstreamsNotifiedInAdvance)
+ return false;
+ if (pending_bitstream_records_.empty())
+ return false;
+
+ int input_buf_index = pending_input_buf_index_;
+
+ // Do not dequeue a new input buffer if we failed with MEDIA_CODEC_NO_KEY.
+ // That status does not return this buffer back to the pool of
+ // available input buffers. We have to reuse it in QueueSecureInputBuffer().
+ if (input_buf_index == -1) {
+ MediaCodecStatus status =
+ media_codec_->DequeueInputBuffer(NoWaitTimeOut, &input_buf_index);
+ switch (status) {
+ case MEDIA_CODEC_TRY_AGAIN_LATER:
+ return false;
+ case MEDIA_CODEC_ERROR:
+ NOTIFY_ERROR(PLATFORM_FAILURE, "DequeueInputBuffer failed");
+ return false;
+ case MEDIA_CODEC_OK:
+ break;
+ default:
+ NOTREACHED();
+ return false;
+ }
+ }
+
+ DCHECK_NE(input_buf_index, -1);
+
+ BitstreamBuffer bitstream_buffer = pending_bitstream_records_.front().buffer;
+
+ if (bitstream_buffer.id() == -1) {
+ pending_bitstream_records_.pop();
+ TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
+ pending_bitstream_records_.size());
+
+ media_codec_->QueueEOS(input_buf_index);
+ return true;
+ }
+
+ std::unique_ptr<SharedMemoryRegion> shm;
+
+ if (pending_input_buf_index_ == -1) {
+ // When |pending_input_buf_index_| is not -1, the buffer is already dequeued
+ // from MediaCodec, filled with data and bitstream_buffer.handle() is
+ // closed.
+ shm = std::move(pending_bitstream_records_.front().memory);
+
+ if (!shm->Map()) {
+ NOTIFY_ERROR(UNREADABLE_INPUT, "SharedMemoryRegion::Map() failed");
+ return false;
+ }
+ }
+
+ const base::TimeDelta presentation_timestamp =
+ bitstream_buffer.presentation_timestamp();
+ DCHECK(presentation_timestamp != kNoTimestamp)
+ << "Bitstream buffers must have valid presentation timestamps";
+
+ // There may already be a bitstream buffer with this timestamp, e.g., VP9 alt
+ // ref frames, but it's OK to overwrite it because we only expect a single
+ // output frame to have that timestamp. AVDA clients only use the bitstream
+ // buffer id in the returned Pictures to map a bitstream buffer back to a
+ // timestamp on their side, so either one of the bitstream buffer ids will
+ // result in them finding the right timestamp.
+ bitstream_buffers_in_decoder_[presentation_timestamp] = bitstream_buffer.id();
+
+ // Notice that |memory| will be null if we repeatedly enqueue the same buffer,
+ // this happens after MEDIA_CODEC_NO_KEY.
+ const uint8_t* memory =
+ shm ? static_cast<const uint8_t*>(shm->memory()) : nullptr;
+ const std::string& key_id = bitstream_buffer.key_id();
+ const std::string& iv = bitstream_buffer.iv();
+ const std::vector<SubsampleEntry>& subsamples = bitstream_buffer.subsamples();
+
+ MediaCodecStatus status;
+ if (key_id.empty() || iv.empty()) {
+ status = media_codec_->QueueInputBuffer(input_buf_index, memory,
+ bitstream_buffer.size(),
+ presentation_timestamp);
+ } else {
+ status = media_codec_->QueueSecureInputBuffer(
+ input_buf_index, memory, bitstream_buffer.size(), key_id, iv,
+ subsamples, config_.encryption_scheme, presentation_timestamp);
+ }
+
+ DVLOG(2) << __func__
+ << ": Queue(Secure)InputBuffer: pts:" << presentation_timestamp
+ << " status:" << status;
+
+ if (status == MEDIA_CODEC_NO_KEY) {
+ // Keep trying to enqueue the same input buffer.
+ // The buffer is owned by us (not the MediaCodec) and is filled with data.
+ DVLOG(1) << "QueueSecureInputBuffer failed: NO_KEY";
+ pending_input_buf_index_ = input_buf_index;
+ state_ = WAITING_FOR_KEY;
+ return false;
+ }
+
+ pending_input_buf_index_ = -1;
+ pending_bitstream_records_.pop();
+ TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
+ pending_bitstream_records_.size());
+ // We should call NotifyEndOfBitstreamBuffer(), when no more decoded output
+ // will be returned from the bitstream buffer. However, MediaCodec API is
+ // not enough to guarantee it.
+ // So, here, we calls NotifyEndOfBitstreamBuffer() in advance in order to
+ // keep getting more bitstreams from the client, and throttle them by using
+ // |bitstreams_notified_in_advance_|.
+ // TODO(dwkang): check if there is a way to remove this workaround.
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::Bind(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer,
+ weak_this_factory_.GetWeakPtr(), bitstream_buffer.id()));
+ bitstreams_notified_in_advance_.push_back(bitstream_buffer.id());
+
+ if (status != MEDIA_CODEC_OK) {
+ NOTIFY_ERROR(PLATFORM_FAILURE, "QueueInputBuffer failed:" << status);
+ return false;
+ }
+
+ return true;
+}
+
+// Pulls decoded output from MediaCodec and forwards at most one decoded
+// frame to the client. Returns true when the caller should immediately try
+// another dequeue, false when we must wait (no output ready, no free
+// picture buffers, draining finished, or error).
+bool AndroidVideoDecodeAccelerator::DequeueOutput() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ TRACE_EVENT0("media", "AVDA::DequeueOutput");
+ if (state_ == ERROR || state_ == WAITING_FOR_CODEC ||
+ state_ == BEFORE_OVERLAY_INIT) {
+ return false;
+ }
+ // If we're draining for reset or destroy, then we don't need picture buffers
+ // since we won't send any decoded frames anyway. There might not be any,
+ // since the pipeline might not be sending them back and / or they don't
+ // exist anymore. From the pipeline's point of view, for Destroy at least,
+ // the VDA is already gone.
+ if (picturebuffers_requested_ && output_picture_buffers_.empty() &&
+ !IsDrainingForResetOrDestroy()) {
+ return false;
+ }
+ if (!output_picture_buffers_.empty() && free_picture_ids_.empty() &&
+ !IsDrainingForResetOrDestroy()) {
+ // Don't have any picture buffer to send. Need to wait.
+ return false;
+ }
+
+ // If we're waiting to switch surfaces pause output release until we have all
+ // picture buffers returned. This is so we can ensure the right flags are set
+ // on the picture buffers returned to the client.
+ if (incoming_overlay_) {
+ if (picture_buffer_manager_.HasUnrenderedPictures())
+ return false;
+ if (!UpdateSurface())
+ return false;
+
+ // UpdateSurface should fail if we've transitioned to the error state.
+ DCHECK(state_ == NO_ERROR);
+ }
+
+ // Loop until MediaCodec hands back a real output buffer (buf_index >= 0);
+ // format/buffer-change notifications are handled inside the switch.
+ bool eos = false;
+ base::TimeDelta presentation_timestamp;
+ int32_t buf_index = 0;
+ do {
+ size_t offset = 0;
+ size_t size = 0;
+
+ TRACE_EVENT_BEGIN0("media", "AVDA::DequeueOutput");
+ MediaCodecStatus status = media_codec_->DequeueOutputBuffer(
+ NoWaitTimeOut, &buf_index, &offset, &size, &presentation_timestamp,
+ &eos, NULL);
+ TRACE_EVENT_END2("media", "AVDA::DequeueOutput", "status", status,
+ "presentation_timestamp (ms)",
+ presentation_timestamp.InMilliseconds());
+
+ switch (status) {
+ case MEDIA_CODEC_ERROR:
+ // Do not post an error if we are draining for reset and destroy.
+ // Instead, signal completion of the drain.
+ if (IsDrainingForResetOrDestroy()) {
+ DVLOG(1) << __func__ << ": error while draining";
+ state_ = ERROR;
+ OnDrainCompleted();
+ } else {
+ NOTIFY_ERROR(PLATFORM_FAILURE, "DequeueOutputBuffer failed.");
+ }
+ return false;
+
+ case MEDIA_CODEC_TRY_AGAIN_LATER:
+ return false;
+
+ case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED: {
+ // An OUTPUT_FORMAT_CHANGED is not reported after flush() if the frame
+ // size does not change. Therefore we have to keep track on the format
+ // even if draining, unless we are draining for destroy.
+ if (drain_type_ == DRAIN_FOR_DESTROY)
+ return true; // ignore
+
+ if (media_codec_->GetOutputSize(&size_) != MEDIA_CODEC_OK) {
+ NOTIFY_ERROR(PLATFORM_FAILURE, "GetOutputSize failed.");
+ return false;
+ }
+
+ DVLOG(3) << __func__
+ << " OUTPUT_FORMAT_CHANGED, new size: " << size_.ToString();
+
+ // Don't request picture buffers if we already have some. This avoids
+ // having to dismiss the existing buffers which may actively reference
+ // decoded images. Breaking their connection to the decoded image will
+ // cause rendering of black frames. Instead, we let the existing
+ // PictureBuffers live on and we simply update their size the next time
+ // they're attached to an image of the new resolution. See the
+ // size update in |SendDecodedFrameToClient| and https://crbug/587994.
+ if (output_picture_buffers_.empty() && !picturebuffers_requested_) {
+ picturebuffers_requested_ = true;
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::Bind(&AndroidVideoDecodeAccelerator::RequestPictureBuffers,
+ weak_this_factory_.GetWeakPtr()));
+ return false;
+ }
+
+ return true;
+ }
+
+ case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
+ break;
+
+ case MEDIA_CODEC_OK:
+ DCHECK_GE(buf_index, 0);
+ DVLOG(3) << __func__ << ": pts:" << presentation_timestamp
+ << " buf_index:" << buf_index << " offset:" << offset
+ << " size:" << size << " eos:" << eos;
+ break;
+
+ default:
+ NOTREACHED();
+ break;
+ }
+ } while (buf_index < 0);
+
+ // EOS means a previously queued drain request has completed.
+ if (eos) {
+ OnDrainCompleted();
+ return false;
+ }
+
+ // While draining for reset/destroy the frame is dropped, not rendered.
+ if (IsDrainingForResetOrDestroy()) {
+ media_codec_->ReleaseOutputBuffer(buf_index, false);
+ return true;
+ }
+
+ if (!picturebuffers_requested_) {
+ // In 0.01% of playbacks MediaCodec returns a frame before FORMAT_CHANGED.
+ // Occurs on JB and M. (See the Media.AVDA.MissingFormatChanged histogram.)
+ media_codec_->ReleaseOutputBuffer(buf_index, false);
+ NOTIFY_ERROR(PLATFORM_FAILURE, "Dequeued buffers before FORMAT_CHANGED.");
+ return false;
+ }
+
+ // Get the bitstream buffer id from the timestamp.
+ auto it = bitstream_buffers_in_decoder_.find(presentation_timestamp);
+
+ if (it != bitstream_buffers_in_decoder_.end()) {
+ const int32_t bitstream_buffer_id = it->second;
+ bitstream_buffers_in_decoder_.erase(bitstream_buffers_in_decoder_.begin(),
+ ++it);
+ SendDecodedFrameToClient(buf_index, bitstream_buffer_id);
+
+ // Removes ids former or equal than the id from decoder. Note that
+ // |bitstreams_notified_in_advance_| does not mean bitstream ids in decoder
+ // because of frame reordering issue. We just maintain this roughly and use
+ // it for throttling.
+ for (auto bitstream_it = bitstreams_notified_in_advance_.begin();
+ bitstream_it != bitstreams_notified_in_advance_.end();
+ ++bitstream_it) {
+ if (*bitstream_it == bitstream_buffer_id) {
+ bitstreams_notified_in_advance_.erase(
+ bitstreams_notified_in_advance_.begin(), ++bitstream_it);
+ break;
+ }
+ }
+ } else {
+ // Normally we assume that the decoder makes at most one output frame for
+ // each distinct input timestamp. However MediaCodecBridge uses timestamp
+ // correction and provides a non-decreasing timestamp sequence, which might
+ // result in timestamp duplicates. Discard the frame if we cannot get the
+ // corresponding buffer id.
+ DVLOG(3) << __func__ << ": Releasing buffer with unexpected PTS: "
+ << presentation_timestamp;
+ media_codec_->ReleaseOutputBuffer(buf_index, false);
+ }
+
+ // We got a decoded frame, so try for another.
+ return true;
+}
+
+// Binds codec output buffer |codec_buffer_index| to the next free
+// PictureBuffer and notifies the client that the picture for |bitstream_id|
+// is ready. Requires a free picture id (checked) and a current GL context.
+void AndroidVideoDecodeAccelerator::SendDecodedFrameToClient(
+ int32_t codec_buffer_index,
+ int32_t bitstream_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_NE(bitstream_id, -1);
+ DCHECK(!free_picture_ids_.empty());
+ TRACE_EVENT0("media", "AVDA::SendDecodedFrameToClient");
+
+ if (!make_context_current_cb_.Run()) {
+ NOTIFY_ERROR(PLATFORM_FAILURE, "Failed to make the GL context current.");
+ return;
+ }
+
+ int32_t picture_buffer_id = free_picture_ids_.front();
+ free_picture_ids_.pop();
+ TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
+
+ const auto it = output_picture_buffers_.find(picture_buffer_id);
+ if (it == output_picture_buffers_.end()) {
+ NOTIFY_ERROR(PLATFORM_FAILURE,
+ "Can't find PictureBuffer id: " << picture_buffer_id);
+ return;
+ }
+
+ // Propagate a resolution change (see DequeueOutput's FORMAT_CHANGED
+ // handling) by updating the buffer's size lazily here.
+ PictureBuffer& picture_buffer = it->second;
+ const bool size_changed = picture_buffer.size() != size_;
+ if (size_changed)
+ picture_buffer.set_size(size_);
+
+ // Only ask for promotion hints if we can actually switch surfaces.
+ const bool want_promotion_hint = device_info_->IsSetOutputSurfaceSupported();
+ const bool allow_overlay = picture_buffer_manager_.ArePicturesOverlayable();
+
+ // TODO(liberato): remove in M63, if FrameInformation is clearly working.
+ UMA_HISTOGRAM_BOOLEAN("Media.AVDA.FrameSentAsOverlay", allow_overlay);
+
+ // Record the frame type that we're sending and some information about why.
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.AVDA.FrameInformation", cached_frame_information_,
+ static_cast<int>(FrameInformation::FRAME_INFORMATION_MAX) + 1);
+
+ // We unconditionally mark the picture as overlayable, even if
+ // |!allow_overlay|, if we want to get hints. It's required, else we won't
+ // get hints.
+ // TODO(hubbe): Insert the correct color space. http://crbug.com/647725
+ Picture picture(picture_buffer_id, bitstream_id, gfx::Rect(size_),
+ gfx::ColorSpace(),
+ want_promotion_hint ? true : allow_overlay);
+ picture.set_size_changed(size_changed);
+ if (want_promotion_hint) {
+ picture.set_wants_promotion_hint(true);
+ // This will prevent it from actually being promoted if it shouldn't be.
+ picture.set_surface_texture(!allow_overlay);
+ }
+
+ // Notify picture ready before calling UseCodecBufferForPictureBuffer() since
+ // that process may be slow and shouldn't delay delivery of the frame to the
+ // renderer. The picture is only used on the same thread as this method is
+ // called, so it is safe to do this.
+ NotifyPictureReady(picture);
+
+ // Connect the PictureBuffer to the decoded frame.
+ picture_buffer_manager_.UseCodecBufferForPictureBuffer(codec_buffer_index,
+ picture_buffer);
+}
+
+// Entry point for a client-submitted bitstream buffer. Performs any
+// deferred surface creation and deferred codec reset first, then queues the
+// buffer for decode. An empty buffer with a valid id is acked immediately;
+// a negative id (not coming from StartCodecDrain) is an error.
+void AndroidVideoDecodeAccelerator::Decode(
+ const BitstreamBuffer& bitstream_buffer) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // If we deferred getting a surface, then start getting one now.
+ if (defer_surface_creation_) {
+ // We should still be in BEFORE_OVERLAY_INIT, since we've deferred doing it
+ // until now.
+ DCHECK_EQ(state_, BEFORE_OVERLAY_INIT);
+ defer_surface_creation_ = false;
+ StartSurfaceChooser();
+ if (state_ == ERROR) {
+ DLOG(ERROR) << "Failed deferred surface and MediaCodec initialization.";
+ return;
+ }
+ }
+
+ // If we previously deferred a codec restart, take care of it now. This can
+ // happen on older devices where configuration changes require a codec reset.
+ if (codec_needs_reset_) {
+ DCHECK(!drain_type_);
+ ResetCodecState();
+ }
+
+ if (bitstream_buffer.id() >= 0 && bitstream_buffer.size() > 0) {
+ DecodeBuffer(bitstream_buffer);
+ return;
+ }
+
+ // We are not going to decode this buffer, so release its memory now.
+ if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+ base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+
+ if (bitstream_buffer.id() < 0) {
+ NOTIFY_ERROR(INVALID_ARGUMENT,
+ "Invalid bitstream_buffer, id: " << bitstream_buffer.id());
+ } else {
+ // Zero-sized buffer with a valid id: ack it so the client keeps going.
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::Bind(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer,
+ weak_this_factory_.GetWeakPtr(), bitstream_buffer.id()));
+ }
+}
+
+// Appends |bitstream_buffer| to the pending-input queue and immediately
+// kicks the I/O task so submission/dequeue can make progress.
+void AndroidVideoDecodeAccelerator::DecodeBuffer(
+ const BitstreamBuffer& bitstream_buffer) {
+ pending_bitstream_records_.push(BitstreamRecord(bitstream_buffer));
+ const size_t pending_count = pending_bitstream_records_.size();
+ TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount", pending_count);
+
+ DoIOTask(true);
+}
+
+// Asks the client for kNumPictureBuffers picture buffers at the current
+// coded size. Does nothing after the client has been detached.
+void AndroidVideoDecodeAccelerator::RequestPictureBuffers() {
+ if (!client_)
+ return;
+
+ // Allocate a picture buffer that is the actual frame size. Note that it
+ // will be an external texture anyway, so it doesn't allocate an image of
+ // that size. It's important to get the coded size right, so that
+ // VideoLayerImpl doesn't try to scale the texture when building the quad
+ // for it.
+ client_->ProvidePictureBuffers(kNumPictureBuffers, PIXEL_FORMAT_UNKNOWN, 1,
+ size_, AVDAPictureBufferManager::kTextureTarget);
+}
+
+// Receives the picture buffers the client allocated in response to
+// RequestPictureBuffers(), records them as free, hands each one to the
+// picture buffer manager, then resumes I/O.
+void AndroidVideoDecodeAccelerator::AssignPictureBuffers(
+ const std::vector<PictureBuffer>& buffers) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(output_picture_buffers_.empty());
+ DCHECK(free_picture_ids_.empty());
+
+ if (buffers.size() < kNumPictureBuffers) {
+ NOTIFY_ERROR(INVALID_ARGUMENT, "Not enough picture buffers assigned.");
+ return;
+ }
+
+ // A current GL context is preferred but not strictly required here.
+ const bool have_context = make_context_current_cb_.Run();
+ LOG_IF(WARNING, !have_context)
+ << "Failed to make GL context current for Assign, continuing.";
+
+ for (const auto& picture_buffer : buffers) {
+ DCHECK(picture_buffer.size() == size_);
+ const int32_t buffer_id = picture_buffer.id();
+ output_picture_buffers_.insert(std::make_pair(buffer_id, picture_buffer));
+ free_picture_ids_.push(buffer_id);
+
+ picture_buffer_manager_.AssignOnePictureBuffer(picture_buffer,
+ have_context);
+ }
+ TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
+ DoIOTask(true);
+}
+
+// Returns a picture buffer to the free pool after the client is done
+// displaying it, then resumes I/O. Note the id is re-queued before the
+// lookup; an unknown id is reported as a platform failure.
+void AndroidVideoDecodeAccelerator::ReusePictureBuffer(
+ int32_t picture_buffer_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ free_picture_ids_.push(picture_buffer_id);
+ TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
+
+ auto it = output_picture_buffers_.find(picture_buffer_id);
+ if (it == output_picture_buffers_.end()) {
+ NOTIFY_ERROR(PLATFORM_FAILURE,
+ "Can't find PictureBuffer id " << picture_buffer_id);
+ return;
+ }
+
+ picture_buffer_manager_.ReuseOnePictureBuffer(it->second);
+ DoIOTask(true);
+}
+
+// Client-requested flush: drain the codec; NotifyFlushDone is posted from
+// OnDrainCompleted when the drain finishes.
+void AndroidVideoDecodeAccelerator::Flush() {
+ DVLOG(1) << __func__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ StartCodecDrain(DRAIN_FOR_FLUSH);
+}
+
+// Starts asynchronous codec creation via the allocator; completion arrives
+// in OnCodecConfigured(). |state_| is WAITING_FOR_CODEC until then.
+void AndroidVideoDecodeAccelerator::ConfigureMediaCodecAsynchronously() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!media_codec_);
+ DCHECK_NE(state_, WAITING_FOR_CODEC);
+ state_ = WAITING_FOR_CODEC;
+
+ codec_allocator_->CreateMediaCodecAsync(weak_this_factory_.GetWeakPtr(),
+ codec_config_);
+}
+
+// Creates the codec synchronously and feeds the result through the same
+// OnCodecConfigured() path used by the asynchronous variant.
+void AndroidVideoDecodeAccelerator::ConfigureMediaCodecSynchronously() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!media_codec_);
+ DCHECK_NE(state_, WAITING_FOR_CODEC);
+ state_ = WAITING_FOR_CODEC;
+
+ std::unique_ptr<MediaCodecBridge> media_codec =
+ codec_allocator_->CreateMediaCodecSync(codec_config_);
+ OnCodecConfigured(std::move(media_codec), codec_config_->surface_bundle);
+}
+
+// Completion callback for codec creation (sync or async). Takes ownership
+// of |media_codec| (which may be null on failure), handles the case where
+// the surface died during configuration, and signals deferred
+// initialization success/failure as appropriate.
+void AndroidVideoDecodeAccelerator::OnCodecConfigured(
+ std::unique_ptr<MediaCodecBridge> media_codec,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(state_ == WAITING_FOR_CODEC || state_ == SURFACE_DESTROYED);
+ // If we are supposed to notify that initialization is complete, then do so
+ // before returning. Otherwise, this is a reconfiguration.
+
+ DCHECK(!media_codec_);
+ media_codec_ = std::move(media_codec);
+
+ // If |state_| changed to SURFACE_DESTROYED while we were configuring a codec,
+ // then the codec is already invalid so we return early and drop it.
+ if (state_ == SURFACE_DESTROYED) {
+ if (deferred_initialization_pending_) {
+ // Losing the output surface is not considered an error state, so notify
+ // success. The client will destroy |this| soon.
+ NotifyInitializationSucceeded();
+ }
+
+ // Post it to the right thread.
+ ReleaseCodecAndBundle();
+ return;
+ }
+
+ // Tell the picture buffer manager about the (possibly null) codec before
+ // checking for failure, so its codec pointer never goes stale.
+ picture_buffer_manager_.CodecChanged(media_codec_.get());
+ if (!media_codec_) {
+ NOTIFY_ERROR(PLATFORM_FAILURE, "Failed to create MediaCodec");
+ return;
+ }
+
+ if (deferred_initialization_pending_)
+ NotifyInitializationSucceeded();
+
+ state_ = NO_ERROR;
+
+ ManageTimer(true);
+}
+
+// Begins draining the codec for |drain_type| (flush, reset, or destroy) by
+// queueing an EOS buffer. Completes immediately via OnDrainCompleted()
+// when there is nothing to drain.
+void AndroidVideoDecodeAccelerator::StartCodecDrain(DrainType drain_type) {
+ DVLOG(2) << __func__ << " drain_type:" << drain_type;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ auto previous_drain_type = drain_type_;
+ drain_type_ = drain_type;
+
+ // Only DRAIN_FOR_DESTROY is allowed while a drain is already in progress.
+ DCHECK(!previous_drain_type || drain_type == DRAIN_FOR_DESTROY)
+ << "StartCodecDrain(" << drain_type
+ << ") while already draining with type " << previous_drain_type.value();
+
+ // Skip the drain if:
+ // * There's no codec.
+ // * The codec is not currently decoding and we have no more inputs to submit.
+ // (Reset() and Destroy() should clear pending inputs before calling this).
+ // * The drain is for reset or destroy (where we can drop pending decodes) and
+ // the codec is not VP8. We still have to drain VP8 in this case because
+ // MediaCodec can hang in release() or flush() if we don't drain it.
+ // http://crbug.com/598963
+ if (!media_codec_ ||
+ (pending_bitstream_records_.empty() &&
+ bitstream_buffers_in_decoder_.empty()) ||
+ (drain_type != DRAIN_FOR_FLUSH && codec_config_->codec != kCodecVP8)) {
+ OnDrainCompleted();
+ return;
+ }
+
+ // Queue EOS if one is not already queued.
+ if (!previous_drain_type)
+ DecodeBuffer(BitstreamBuffer(-1, base::SharedMemoryHandle(), 0));
+}
+
+// True while a drain started by Reset() or Destroy() is in progress.
+bool AndroidVideoDecodeAccelerator::IsDrainingForResetOrDestroy() const {
+ const bool draining_for_reset = drain_type_ == DRAIN_FOR_RESET;
+ const bool draining_for_destroy = drain_type_ == DRAIN_FOR_DESTROY;
+ return draining_for_reset || draining_for_destroy;
+}
+
+// Called when the codec has delivered EOS (or when a drain is skipped).
+// Resets codec state as needed and posts the notification matching the
+// drain type; an EOS with no drain in progress is treated as an error.
+void AndroidVideoDecodeAccelerator::OnDrainCompleted() {
+ DVLOG(2) << __func__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Sometimes MediaCodec returns an EOS buffer even if we didn't queue one.
+ // Consider it an error. http://crbug.com/585959.
+ if (!drain_type_) {
+ NOTIFY_ERROR(PLATFORM_FAILURE, "Unexpected EOS");
+ return;
+ }
+
+ switch (*drain_type_) {
+ case DRAIN_FOR_FLUSH:
+ ResetCodecState();
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyFlushDone,
+ weak_this_factory_.GetWeakPtr()));
+ break;
+ case DRAIN_FOR_RESET:
+ ResetCodecState();
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyResetDone,
+ weak_this_factory_.GetWeakPtr()));
+ break;
+ case DRAIN_FOR_DESTROY:
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::ActualDestroy,
+ weak_this_factory_.GetWeakPtr()));
+ break;
+ }
+ drain_type_.reset();
+}
+
+// Returns the codec to a clean state after a flush/reset drain: clears
+// in-flight bookkeeping, then either defers the actual codec flush to the
+// next Decode() call, flushes in place, or recreates the codec when the
+// device needs the flush workaround.
+void AndroidVideoDecodeAccelerator::ResetCodecState() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // If there is already a reset in flight, then that counts. This can really
+ // only happen if somebody calls Reset.
+ // If the surface is destroyed or we're in an error state there's nothing to
+ // do. Note that BEFORE_OVERLAY_INIT implies that we have no codec, but it's
+ // included for completeness.
+ if (state_ == WAITING_FOR_CODEC || state_ == SURFACE_DESTROYED ||
+ state_ == BEFORE_OVERLAY_INIT || state_ == ERROR || !media_codec_) {
+ return;
+ }
+
+ bitstream_buffers_in_decoder_.clear();
+
+ if (pending_input_buf_index_ != -1) {
+ // The data for that index exists in the input buffer, but corresponding
+ // shm block been deleted. Check that it is safe to flush the codec, i.e.
+ // |pending_bitstream_records_| is empty.
+ // TODO(timav): keep shm block for that buffer and remove this restriction.
+ DCHECK(pending_bitstream_records_.empty());
+ pending_input_buf_index_ = -1;
+ }
+
+ // If we've just completed a flush don't reset the codec yet. Instead defer
+ // until the next decode call. This prevents us from unbacking frames that
+ // might be out for display at end of stream.
+ codec_needs_reset_ =
+ (drain_type_ == DRAIN_FOR_FLUSH) || (drain_type_ == DRAIN_FOR_RESET);
+ if (codec_needs_reset_)
+ return;
+
+ // Flush the codec if possible, or create a new one if not.
+ if (!MediaCodecUtil::CodecNeedsFlushWorkaround(media_codec_.get())) {
+ DVLOG(3) << __func__ << " Flushing MediaCodec.";
+ media_codec_->Flush();
+ // Since we just flushed all the output buffers, make sure that nothing is
+ // using them.
+ picture_buffer_manager_.CodecChanged(media_codec_.get());
+ } else {
+ DVLOG(3) << __func__ << " Deleting the MediaCodec and creating a new one.";
+ GetManager()->StopTimer(this);
+ // Release the codec, retain the bundle, and allocate a new codec. It will
+ // not wait for the old one to finish up with the bundle, which is bad. It
+ // works (usually) because it ends up allocating the codec on the same
+ // thread as is used to release the old one, so it's serialized anyway.
+ ReleaseCodec();
+ ConfigureMediaCodecAsynchronously();
+ }
+}
+
+// Client-requested reset: drops all pending input (acking each buffer),
+// releases outstanding codec buffers, and drains the codec.
+// NotifyResetDone is posted when the drain completes.
+void AndroidVideoDecodeAccelerator::Reset() {
+ DVLOG(1) << __func__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ TRACE_EVENT0("media", "AVDA::Reset");
+
+ // Nothing has been decoded yet if surface creation is still deferred, so
+ // there is nothing to drain; just report completion.
+ if (defer_surface_creation_) {
+ DCHECK(!media_codec_);
+ DCHECK(pending_bitstream_records_.empty());
+ DCHECK_EQ(state_, BEFORE_OVERLAY_INIT);
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyResetDone,
+ weak_this_factory_.GetWeakPtr()));
+ return;
+ }
+
+ while (!pending_bitstream_records_.empty()) {
+ int32_t bitstream_buffer_id =
+ pending_bitstream_records_.front().buffer.id();
+ pending_bitstream_records_.pop();
+
+ // Id -1 marks a queued EOS buffer; the client is not waiting for it.
+ if (bitstream_buffer_id != -1) {
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::Bind(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer,
+ weak_this_factory_.GetWeakPtr(), bitstream_buffer_id));
+ }
+ }
+ TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount", 0);
+ bitstreams_notified_in_advance_.clear();
+
+ picture_buffer_manager_.ReleaseCodecBuffers(output_picture_buffers_);
+ StartCodecDrain(DRAIN_FOR_RESET);
+}
+
+// Receives updated overlay information (surface id / routing token,
+// fullscreen and hidden flags) from the client, updates the surface chooser
+// state, and hands the chooser a new overlay factory only when the
+// surface/token actually changed.
+void AndroidVideoDecodeAccelerator::SetOverlayInfo(
+ const OverlayInfo& overlay_info) {
+ DVLOG(1) << __func__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (state_ == ERROR)
+ return;
+
+ // Update |config_| to contain the most recent info. Also save a copy, so
+ // that we can check for duplicate info later.
+ OverlayInfo previous_info = config_.overlay_info;
+ config_.overlay_info = overlay_info;
+
+ // It's possible that we'll receive SetSurface before initializing the surface
+ // chooser. For example, if we defer surface creation, then we'll signal
+ // success to WMPI before initializing it. WMPI is then free to change
+ // |surface_id|. In this case, take no additional action, since |config_| is
+ // up to date. We'll use it later.
+ if (state_ == BEFORE_OVERLAY_INIT)
+ return;
+
+ // Release any overlay immediately when hiding a frame. Otherwise, it will
+ // stick around as long as the VideoFrame does, which can be a long time.
+ if (overlay_info.is_frame_hidden)
+ picture_buffer_manager_.ImmediatelyForgetOverlay(output_picture_buffers_);
+
+ surface_chooser_state_.is_frame_hidden = overlay_info.is_frame_hidden;
+
+ if (overlay_info.is_fullscreen && !surface_chooser_state_.is_fullscreen) {
+ // It would be nice if we could just delay until we get a hint from an
+ // overlay that's "in fullscreen" in the sense that the CompositorFrame it
+ // came from had some flag set to indicate that the renderer was in
+ // fullscreen mode when it was generated. However, even that's hard, since
+ // there's no real connection between "renderer finds out about fullscreen"
+ // and "blink has completed layouts for it". The latter is what we really
+ // want to know.
+ surface_chooser_state_.is_expecting_relayout = true;
+ hints_until_clear_relayout_flag_ = kFrameDelayForFullscreenLayout;
+ }
+
+ // Notify the chooser about the fullscreen state.
+ surface_chooser_state_.is_fullscreen = overlay_info.is_fullscreen;
+
+ // Note that these might be kNoSurfaceID / empty. In that case, we will
+ // revoke the factory.
+ int32_t surface_id = overlay_info.surface_id;
+ OverlayInfo::RoutingToken routing_token = overlay_info.routing_token;
+
+ // We don't want to change the factory unless this info has actually changed.
+ // We'll get the same info many times if some other part of the config is now
+ // different, such as fullscreen state.
+ base::Optional<AndroidOverlayFactoryCB> new_factory;
+ if (surface_id != previous_info.surface_id ||
+ routing_token != previous_info.routing_token) {
+ if (routing_token && overlay_factory_cb_)
+ new_factory = base::Bind(overlay_factory_cb_, nullptr, *routing_token);
+ else if (surface_id != SurfaceManager::kNoSurfaceID)
+ new_factory = base::Bind(&ContentVideoViewOverlay::Create, surface_id);
+ }
+
+ surface_chooser_->UpdateState(new_factory, surface_chooser_state_);
+}
+
+// Client-requested teardown. Detaches the client, drops pending input,
+// and drains the codec; ActualDestroy() (which deletes |this|) is posted
+// from OnDrainCompleted().
+void AndroidVideoDecodeAccelerator::Destroy() {
+ DVLOG(1) << __func__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ picture_buffer_manager_.Destroy(output_picture_buffers_);
+ client_ = nullptr;
+
+ // We don't want to queue more inputs while draining.
+ base::queue<BitstreamRecord>().swap(pending_bitstream_records_);
+ StartCodecDrain(DRAIN_FOR_DESTROY);
+}
+
+// Final stage of Destroy(), run after the destroy-drain completes:
+// invalidates weak pointers, stops the timer, releases the codec, and
+// deletes |this|.
+void AndroidVideoDecodeAccelerator::ActualDestroy() {
+ DVLOG(1) << __func__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Note that async codec construction might still be in progress. In that
+ // case, the codec will be deleted when it completes once we invalidate all
+ // our weak refs.
+ weak_this_factory_.InvalidateWeakPtrs();
+ GetManager()->StopTimer(this);
+ // We only release the codec here, in case codec allocation is in progress.
+ // We don't want to modify |codec_config_|. Note that the ref will still be
+ // dropped when it completes, or when we delete |this|.
+ ReleaseCodec();
+
+ delete this;
+}
+
+// This VDA does not support decoding on a separate thread; always declines.
+bool AndroidVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+ return false;
+}
+
+// Returns the current coded size reported by the codec.
+const gfx::Size& AndroidVideoDecodeAccelerator::GetSize() const {
+ return size_;
+}
+
+// Returns the GLES2 decoder via the callback supplied at construction.
+base::WeakPtr<gpu::gles2::GLES2Decoder>
+AndroidVideoDecodeAccelerator::GetGlDecoder() const {
+ return get_gles2_decoder_cb_.Run();
+}
+
+// Called when |overlay| is about to be destroyed and must no longer be
+// used. Depending on state, this either cancels a pending transition,
+// switches the codec to a SurfaceTexture (when SetOutputSurface is
+// supported), or marks the surface destroyed and drops the codec.
+void AndroidVideoDecodeAccelerator::OnStopUsingOverlayImmediately(
+ AndroidOverlay* overlay) {
+ DVLOG(1) << __func__;
+ TRACE_EVENT0("media", "AVDA::OnStopUsingOverlayImmediately");
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // We cannot get here if we're before surface allocation, since we transition
+ // to WAITING_FOR_CODEC (or NO_ERROR, if sync) when we get the surface without
+ // posting. If we do ever lose the surface before starting codec allocation,
+ // then we could just update the config to use a SurfaceTexture and return
+ // without changing state.
+ DCHECK_NE(state_, BEFORE_OVERLAY_INIT);
+
+ // If we're transitioning to |overlay|, then just stop here. We're not also
+ // using the overlay if we're transitioning to it.
+ if (!!incoming_overlay_ && incoming_overlay_->get() == overlay) {
+ incoming_overlay_.reset();
+ return;
+ }
+
+ // If we have no codec, or if our current config doesn't refer to |overlay|,
+ // then do nothing. |overlay| might be for some overlay that's waiting for
+ // codec destruction on some other thread.
+ if (!codec_config_->surface_bundle ||
+ codec_config_->surface_bundle->overlay.get() != overlay) {
+ return;
+ }
+
+ // If we have a codec, or if codec allocation is in flight, then it's using an
+ // overlay that was destroyed.
+ if (state_ == WAITING_FOR_CODEC) {
+ // What we should do here is to set |incoming_overlay_| to nullptr, to start
+ // a transition to SurfaceTexture. OnCodecConfigured could notice that
+ // there's an incoming overlay, and then immediately transition the codec /
+ // drop and re-allocate the codec using it. However, for CVV, that won't
+ // work, since CVV-based overlays block the main thread waiting for the
+ // overlay to be dropped, so OnCodecConfigured won't run. For DS, it's the
+ // right thing.
+ // So, for now, we just fail, and let OnCodecConfigured drop the codec.
+ // Note that this case really can only happen on pre-M anyway, unless it's
+ // during initial construction. This will result in the overlay being
+ // destroyed after timeout, since OnCodecConfigured can't run until the
+ // synchronous CVV destruction quits.
+ state_ = SURFACE_DESTROYED;
+ return;
+ }
+
+ // If the API is available avoid having to restart the decoder in order to
+ // leave fullscreen. If we don't clear the surface immediately during this
+ // callback, the MediaCodec will throw an error as the surface is destroyed.
+ if (device_info_->IsSetOutputSurfaceSupported()) {
+ // Since we can't wait for a transition, we must invalidate all outstanding
+ // picture buffers to avoid putting the GL system in a broken state.
+ picture_buffer_manager_.ReleaseCodecBuffers(output_picture_buffers_);
+
+ // If we aren't transitioning to some other surface, then transition to a
+ // SurfaceTexture. Remember that, if |incoming_overlay_| is an overlay,
+ // then it's already ready and can be transitioned to immediately. We were
+ // just waiting for codec buffers to come back, but we just dropped them.
+ // Note that we want |incoming_overlay_| to have a value (has_value()), but
+ // that value should be a nullptr to indicate that we should switch to
+ // SurfaceTexture.
+ if (!incoming_overlay_)
+ incoming_overlay_ = std::unique_ptr<AndroidOverlay>();
+
+ UpdateSurface();
+ // Switching to a SurfaceTexture should never need to wait. If it does,
+ // then the codec might still be using the destroyed surface, which is bad.
+ return;
+ }
+
+ // If we're currently asynchronously configuring a codec, it will be destroyed
+ // when configuration completes and it notices that |state_| has changed to
+ // SURFACE_DESTROYED. It's safe to modify |codec_config_| here, since we
+ // checked above for WAITING_FOR_CODEC.
+ state_ = SURFACE_DESTROYED;
+ ReleaseCodecAndBundle();
+
+ // If we're draining, signal completion now because the drain can no longer
+ // proceed.
+ if (drain_type_)
+ OnDrainCompleted();
+}
+
+// Looks up the CDM identified by |config_.cdm_id|, registers key/crypto
+// callbacks with its MediaDrmBridgeCdmContext, and arranges for deferred
+// initialization to continue in OnMediaCryptoReady(). Requires mojo media
+// in the GPU process.
+void AndroidVideoDecodeAccelerator::InitializeCdm() {
+ DVLOG(2) << __func__ << ": " << config_.cdm_id;
+
+#if !BUILDFLAG(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
+ NOTIMPLEMENTED();
+ NOTIFY_ERROR(PLATFORM_FAILURE, "Cdm support needs mojo in the gpu process");
+#else
+ // Store the CDM to hold a reference to it.
+ cdm_for_reference_holding_only_ =
+ CdmManager::GetInstance()->GetCdm(config_.cdm_id);
+ DCHECK(cdm_for_reference_holding_only_);
+
+ // On Android platform the CdmContext must be a MediaDrmBridgeCdmContext.
+ media_drm_bridge_cdm_context_ = static_cast<MediaDrmBridgeCdmContext*>(
+ cdm_for_reference_holding_only_->GetCdmContext());
+ DCHECK(media_drm_bridge_cdm_context_);
+
+ // Register CDM callbacks. The callbacks registered will be posted back to
+ // this thread via BindToCurrentLoop.
+
+ // Since |this| holds a reference to the |cdm_|, by the time the CDM is
+ // destructed, UnregisterPlayer() must have been called and |this| has been
+ // destructed as well. So the |cdm_unset_cb| will never have a chance to be
+ // called.
+ // TODO(xhwang): Remove |cdm_unset_cb| after it's not used on all platforms.
+ cdm_registration_id_ = media_drm_bridge_cdm_context_->RegisterPlayer(
+ BindToCurrentLoop(base::Bind(&AndroidVideoDecodeAccelerator::OnKeyAdded,
+ weak_this_factory_.GetWeakPtr())),
+ base::Bind(&base::DoNothing));
+
+ // Deferred initialization will continue in OnMediaCryptoReady().
+ media_drm_bridge_cdm_context_->SetMediaCryptoReadyCB(BindToCurrentLoop(
+ base::Bind(&AndroidVideoDecodeAccelerator::OnMediaCryptoReady,
+ weak_this_factory_.GetWeakPtr())));
+#endif // !BUILDFLAG(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
+}
+
+// CDM callback delivering the MediaCrypto object. Stores it in the codec
+// config, marks the chooser state secure, and proceeds to surface
+// selection; a null MediaCrypto aborts deferred initialization.
+void AndroidVideoDecodeAccelerator::OnMediaCryptoReady(
+ JavaObjectPtr media_crypto,
+ bool requires_secure_video_codec) {
+ DVLOG(1) << __func__;
+
+ DCHECK(media_crypto);
+
+ if (media_crypto->is_null()) {
+ LOG(ERROR) << "MediaCrypto is not available, can't play encrypted stream.";
+ cdm_for_reference_holding_only_ = nullptr;
+ media_drm_bridge_cdm_context_ = nullptr;
+ NOTIFY_ERROR(PLATFORM_FAILURE, "MediaCrypto is not available");
+ return;
+ }
+
+ // We assume this is a part of the initialization process, thus MediaCodec
+ // is not created yet.
+ DCHECK(!media_codec_);
+ DCHECK(deferred_initialization_pending_);
+
+ codec_config_->media_crypto = std::move(media_crypto);
+ codec_config_->requires_secure_codec = requires_secure_video_codec;
+ // Request a secure surface in all cases. For L3, it's okay if we fall back
+ // to SurfaceTexture rather than fail composition. For L1, it's required.
+ // It's also required if the command line says so.
+ surface_chooser_state_.is_secure = true;
+ surface_chooser_state_.is_required =
+ requires_secure_video_codec || is_overlay_required_;
+
+ // After receiving |media_crypto_| we can start with surface creation.
+ StartSurfaceChooser();
+}
+
+// CDM callback fired when a new decryption key becomes available. Leaves
+// WAITING_FOR_KEY (if we were in it) and retries pending I/O.
+void AndroidVideoDecodeAccelerator::OnKeyAdded() {
+ DVLOG(1) << __func__;
+
+ // This can also be called before initial surface allocation has completed,
+ // so we might not have a surface / codec yet. In that case, we'll never
+ // transition to WAITING_FOR_KEY, which is fine.
+ if (state_ == WAITING_FOR_KEY)
+ state_ = NO_ERROR;
+
+ DoIOTask(true);
+}
+
+// Reports successful deferred initialization to the client (if still
+// attached) and clears the pending flag.
+void AndroidVideoDecodeAccelerator::NotifyInitializationSucceeded() {
+ DCHECK(deferred_initialization_pending_);
+
+ if (client_)
+ client_->NotifyInitializationComplete(true);
+ deferred_initialization_pending_ = false;
+}
+
+// Forwards a ready picture to the client, if one is still attached.
+void AndroidVideoDecodeAccelerator::NotifyPictureReady(const Picture& picture) {
+ if (client_)
+ client_->PictureReady(picture);
+}
+
+void AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
+ int input_buffer_id) {
+ if (client_)
+ client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
+}
+
+void AndroidVideoDecodeAccelerator::NotifyFlushDone() {
+ if (client_)
+ client_->NotifyFlushDone();
+}
+
+void AndroidVideoDecodeAccelerator::NotifyResetDone() {
+ if (client_)
+ client_->NotifyResetDone();
+}
+
+void AndroidVideoDecodeAccelerator::NotifyError(Error error) {
+ state_ = ERROR;
+
+ // If we're in the middle of Initialize, then stop. It will notice |state_|.
+ if (during_initialize_)
+ return;
+
+ // If deferred init is pending, then notify the client that it failed.
+ if (deferred_initialization_pending_) {
+ if (client_)
+ client_->NotifyInitializationComplete(false);
+ deferred_initialization_pending_ = false;
+ return;
+ }
+
+ // We're after all init. Just signal an error.
+ if (client_)
+ client_->NotifyError(error);
+}
+
+PromotionHintAggregator::NotifyPromotionHintCB
+AndroidVideoDecodeAccelerator::GetPromotionHintCB() {
+ return base::Bind(&AndroidVideoDecodeAccelerator::NotifyPromotionHint,
+ weak_this_factory_.GetWeakPtr());
+}
+
+void AndroidVideoDecodeAccelerator::NotifyPromotionHint(
+ PromotionHintAggregator::Hint hint) {
+ bool update_state = false;
+
+ promotion_hint_aggregator_->NotifyPromotionHint(hint);
+
+ // If we're expecting a full screen relayout, then also use this hint as a
+ // notification that another frame has happened.
+ if (hints_until_clear_relayout_flag_ > 0) {
+ hints_until_clear_relayout_flag_--;
+ if (hints_until_clear_relayout_flag_ == 0) {
+ surface_chooser_state_.is_expecting_relayout = false;
+ update_state = true;
+ }
+ }
+
+ surface_chooser_state_.initial_position = hint.screen_rect;
+ bool promotable = promotion_hint_aggregator_->IsSafeToPromote();
+ if (promotable != surface_chooser_state_.is_compositor_promotable) {
+ surface_chooser_state_.is_compositor_promotable = promotable;
+ update_state = true;
+ }
+
+ // If we've been provided with enough new frames, then update the state even
+ // if it hasn't changed. This lets |surface_chooser_| retry for an overlay.
+ // It's especially helpful for power-efficient overlays, since we don't know
+ // when an overlay becomes power efficient. It also helps retry any failure
+ // that's not accompanied by a state change, such as if android destroys the
+ // overlay asynchronously for a transient reason.
+ //
+ // If we're already using an overlay, then there's no need to do this.
+ base::TimeTicks now = base::TimeTicks::Now();
+ if (codec_config_->surface_bundle &&
+ !codec_config_->surface_bundle->overlay &&
+ now - most_recent_chooser_retry_ >= RetryChooserTimeout) {
+ update_state = true;
+ }
+
+ if (update_state) {
+ most_recent_chooser_retry_ = now;
+ surface_chooser_->UpdateState(base::Optional<AndroidOverlayFactoryCB>(),
+ surface_chooser_state_);
+ }
+}
+
+void AndroidVideoDecodeAccelerator::ManageTimer(bool did_work) {
+ bool should_be_running = true;
+
+ base::TimeTicks now = base::TimeTicks::Now();
+ if (!did_work && !most_recent_work_.is_null()) {
+ // Make sure that we have done work recently enough, else stop the timer.
+ if (now - most_recent_work_ > IdleTimerTimeOut) {
+ most_recent_work_ = base::TimeTicks();
+ should_be_running = false;
+ }
+ } else {
+ most_recent_work_ = now;
+ }
+
+ if (should_be_running)
+ GetManager()->StartTimer(this);
+ else
+ GetManager()->StopTimer(this);
+}
+
+// static
+VideoDecodeAccelerator::Capabilities
+AndroidVideoDecodeAccelerator::GetCapabilities(
+ const gpu::GpuPreferences& gpu_preferences) {
+ Capabilities capabilities;
+ SupportedProfiles& profiles = capabilities.supported_profiles;
+
+ if (MediaCodecUtil::IsVp8DecoderAvailable()) {
+ SupportedProfile profile;
+ profile.profile = VP8PROFILE_ANY;
+ // Since there is little to no power benefit below 360p, don't advertise
+ // support for it. Let libvpx decode it, and save a MediaCodec instance.
+ // Note that we allow it anyway for encrypted content, since we push a
+ // separate profile for that.
+ profile.min_resolution.SetSize(480, 360);
+ profile.max_resolution.SetSize(3840, 2160);
+ // If we know MediaCodec will just create a software codec, prefer our
+ // internal software decoder instead. It's more up to date and secured
+ // within the renderer sandbox. However if the content is encrypted, we
+ // must use MediaCodec anyways since MediaDrm offers no way to decrypt
+ // the buffers and let us use our internal software decoders.
+ profile.encrypted_only = MediaCodecUtil::IsKnownUnaccelerated(
+ kCodecVP8, MediaCodecDirection::DECODER);
+ profiles.push_back(profile);
+
+ // Always allow encrypted content, even at low resolutions.
+ profile.min_resolution.SetSize(0, 0);
+ profile.encrypted_only = true;
+ profiles.push_back(profile);
+ }
+
+ if (MediaCodecUtil::IsVp9DecoderAvailable()) {
+ const VideoCodecProfile profile_types[] = {
+ VP9PROFILE_PROFILE0, VP9PROFILE_PROFILE1, VP9PROFILE_PROFILE2,
+ VP9PROFILE_PROFILE3, VIDEO_CODEC_PROFILE_UNKNOWN};
+ const bool is_known_unaccelerated = MediaCodecUtil::IsKnownUnaccelerated(
+ kCodecVP9, MediaCodecDirection::DECODER);
+ for (int i = 0; profile_types[i] != VIDEO_CODEC_PROFILE_UNKNOWN; i++) {
+ SupportedProfile profile;
+ // Limit to 360p, like we do for vp8. See above.
+ profile.min_resolution.SetSize(480, 360);
+ profile.max_resolution.SetSize(3840, 2160);
+ // If we know MediaCodec will just create a software codec, prefer our
+ // internal software decoder instead. It's more up to date and secured
+ // within the renderer sandbox. However if the content is encrypted, we
+ // must use MediaCodec anyways since MediaDrm offers no way to decrypt
+ // the buffers and let us use our internal software decoders.
+ profile.encrypted_only = is_known_unaccelerated;
+ profile.profile = profile_types[i];
+ profiles.push_back(profile);
+
+ // Always allow encrypted content.
+ profile.min_resolution.SetSize(0, 0);
+ profile.encrypted_only = true;
+ profiles.push_back(profile);
+ }
+ }
+
+ for (const auto& supported_profile : kSupportedH264Profiles) {
+ SupportedProfile profile;
+ profile.profile = supported_profile;
+ profile.min_resolution.SetSize(0, 0);
+ // Advertise support for 4k and let the MediaCodec fail when decoding if it
+ // doesn't support the resolution. It's assumed that consumers won't have
+ // software fallback for H264 on Android anyway.
+ profile.max_resolution.SetSize(3840, 2160);
+ profiles.push_back(profile);
+ }
+
+ capabilities.flags = Capabilities::SUPPORTS_DEFERRED_INITIALIZATION |
+ Capabilities::NEEDS_ALL_PICTURE_BUFFERS_TO_DECODE |
+ Capabilities::SUPPORTS_ENCRYPTED_STREAMS;
+
+ // If we're using threaded texture mailboxes the COPY_REQUIRED flag must be
+ // set on the video frames (http://crbug.com/582170), and SurfaceView output
+ // is disabled (http://crbug.com/582170).
+ if (gpu_preferences.enable_threaded_texture_mailboxes) {
+ capabilities.flags |= Capabilities::REQUIRES_TEXTURE_COPY;
+ } else if (MediaCodecUtil::IsSurfaceViewOutputSupported()) {
+ capabilities.flags |= Capabilities::SUPPORTS_EXTERNAL_OUTPUT_SURFACE;
+ if (MediaCodecUtil::IsSetOutputSurfaceSupported())
+ capabilities.flags |= Capabilities::SUPPORTS_SET_EXTERNAL_OUTPUT_SURFACE;
+ }
+
+#if BUILDFLAG(ENABLE_HEVC_DEMUXING)
+ for (const auto& supported_profile : kSupportedHevcProfiles) {
+ SupportedProfile profile;
+ profile.profile = supported_profile;
+ profile.min_resolution.SetSize(0, 0);
+ profile.max_resolution.SetSize(3840, 2160);
+ profiles.push_back(profile);
+ }
+#endif
+
+ return capabilities;
+}
+
+bool AndroidVideoDecodeAccelerator::IsMediaCodecSoftwareDecodingForbidden()
+ const {
+ // Prevent MediaCodec from using its internal software decoders when we have
+ // more secure and up to date versions in the renderer process.
+ return !config_.is_encrypted() && (codec_config_->codec == kCodecVP8 ||
+ codec_config_->codec == kCodecVP9);
+}
+
+bool AndroidVideoDecodeAccelerator::UpdateSurface() {
+ DCHECK(incoming_overlay_);
+ DCHECK_NE(state_, WAITING_FOR_CODEC);
+
+ // Start surface creation. Note that if we're called via surfaceDestroyed,
+ // then this must complete synchronously or it will DCHECK. Otherwise, we
+ // might still be using the destroyed surface. We don't enforce this, but
+ // it's worth remembering that there are cases where it's required.
+ // Note that we don't re-use |surface_bundle|, since the codec is using it!
+ incoming_bundle_ =
+ new AVDASurfaceBundle(std::move(incoming_overlay_.value()));
+ incoming_overlay_.reset();
+ InitializePictureBufferManager();
+ if (state_ == ERROR) {
+ // This might be called from OnSurfaceDestroyed(), so we have to release the
+ // MediaCodec if we failed to switch the surface. We reset the surface ID
+ // to the previous one, since failures never result in the codec using the
+ // new surface. This is only guaranteed because of how OnCodecConfigured
+ // works. If it could fail after getting a codec, then this assumption
+ // wouldn't be necessarily true anymore.
+ // Also note that we might not have switched surfaces yet, which is also bad
+ // for OnSurfaceDestroyed, because of BEFORE_OVERLAY_INIT. Shouldn't
+ // happen with SurfaceTexture, and OnSurfaceDestroyed checks for it. In
+ // either case, we definitely should not still have an incoming bundle; it
+ // should have been dropped.
+ DCHECK(!incoming_bundle_);
+ ReleaseCodecAndBundle();
+ }
+
+ return state_ != ERROR;
+}
+
+void AndroidVideoDecodeAccelerator::ReleaseCodec() {
+ if (!media_codec_)
+ return;
+
+ picture_buffer_manager_.CodecChanged(nullptr);
+ codec_allocator_->ReleaseMediaCodec(std::move(media_codec_),
+ codec_config_->surface_bundle);
+}
+
+void AndroidVideoDecodeAccelerator::ReleaseCodecAndBundle() {
+ ReleaseCodec();
+ codec_config_->surface_bundle = nullptr;
+}
+
+void AndroidVideoDecodeAccelerator::CacheFrameInformation() {
+ if (!codec_config_->surface_bundle ||
+ !codec_config_->surface_bundle->overlay) {
+ // Not an overlay.
+ cached_frame_information_ = surface_chooser_state_.is_secure
+ ? FrameInformation::SURFACETEXTURE_L3
+ : FrameInformation::SURFACETEXTURE_INSECURE;
+ return;
+ }
+
+ // Overlay.
+ if (surface_chooser_state_.is_secure) {
+ cached_frame_information_ = surface_chooser_state_.is_required
+ ? FrameInformation::OVERLAY_L1
+ : FrameInformation::OVERLAY_L3;
+ return;
+ }
+
+ cached_frame_information_ =
+ surface_chooser_state_.is_fullscreen
+ ? FrameInformation::OVERLAY_INSECURE_PLAYER_ELEMENT_FULLSCREEN
+ : FrameInformation::OVERLAY_INSECURE_NON_PLAYER_ELEMENT_FULLSCREEN;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/android_video_decode_accelerator.h b/chromium/media/gpu/android/android_video_decode_accelerator.h
new file mode 100644
index 00000000000..f6b2c2db29d
--- /dev/null
+++ b/chromium/media/gpu/android/android_video_decode_accelerator.h
@@ -0,0 +1,446 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_ANDROID_VIDEO_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_ANDROID_ANDROID_VIDEO_DECODE_ACCELERATOR_H_
+
+#include <stdint.h>
+
+#include <list>
+#include <map>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/containers/queue.h"
+#include "base/optional.h"
+#include "base/threading/thread_checker.h"
+#include "base/timer/timer.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "media/base/android/media_codec_bridge_impl.h"
+#include "media/base/android/media_drm_bridge_cdm_context.h"
+#include "media/base/android_overlay_mojo_factory.h"
+#include "media/base/content_decryption_module.h"
+#include "media/gpu/android/android_video_surface_chooser.h"
+#include "media/gpu/android/avda_codec_allocator.h"
+#include "media/gpu/android/avda_picture_buffer_manager.h"
+#include "media/gpu/android/avda_state_provider.h"
+#include "media/gpu/android/device_info.h"
+#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/video/video_decode_accelerator.h"
+#include "ui/gl/android/scoped_java_surface.h"
+#include "ui/gl/android/surface_texture.h"
+
+namespace media {
+class SharedMemoryRegion;
+class PromotionHintAggregator;
+
+// A VideoDecodeAccelerator implementation for Android. This class decodes the
+// encoded input stream using Android's MediaCodec. It handles the work of
+// transferring data to and from MediaCodec, and delegates attaching MediaCodec
+// output buffers to PictureBuffers to AVDAPictureBufferManager.
+class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
+ : public VideoDecodeAccelerator,
+ public AVDAStateProvider,
+ public AVDACodecAllocatorClient {
+ public:
+ static VideoDecodeAccelerator::Capabilities GetCapabilities(
+ const gpu::GpuPreferences& gpu_preferences);
+
+ AndroidVideoDecodeAccelerator(
+ AVDACodecAllocator* codec_allocator,
+ std::unique_ptr<AndroidVideoSurfaceChooser> surface_chooser,
+ const MakeGLContextCurrentCallback& make_context_current_cb,
+ const GetGLES2DecoderCallback& get_gles2_decoder_cb,
+ const AndroidOverlayMojoFactoryCB& overlay_factory_cb,
+ DeviceInfo* device_info);
+
+ ~AndroidVideoDecodeAccelerator() override;
+
+ // VideoDecodeAccelerator implementation:
+ bool Initialize(const Config& config, Client* client) override;
+ void Decode(const BitstreamBuffer& bitstream_buffer) override;
+ void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
+ void ReusePictureBuffer(int32_t picture_buffer_id) override;
+ void Flush() override;
+ void Reset() override;
+ void SetOverlayInfo(const OverlayInfo& overlay_info) override;
+ void Destroy() override;
+ bool TryToSetupDecodeOnSeparateThread(
+ const base::WeakPtr<Client>& decode_client,
+ const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+ override;
+
+ // AVDAStateProvider implementation:
+ const gfx::Size& GetSize() const override;
+ base::WeakPtr<gpu::gles2::GLES2Decoder> GetGlDecoder() const override;
+ // Notifies the client about the error and sets |state_| to |ERROR|. If we're
+ // in the middle of Initialize, we guarantee that Initialize will return
+ // failure. If deferred init is pending, then we'll fail deferred init.
+ // Otherwise, we'll signal errors normally.
+ void NotifyError(Error error) override;
+ PromotionHintAggregator::NotifyPromotionHintCB GetPromotionHintCB() override;
+
+ // AVDACodecAllocatorClient implementation:
+ void OnCodecConfigured(
+ std::unique_ptr<MediaCodecBridge> media_codec,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle) override;
+
+ private:
+ friend class AVDAManager;
+
+ // TODO(timav): evaluate the need for more states in the AVDA state machine.
+ enum State {
+ NO_ERROR,
+ ERROR,
+ // We haven't initialized |surface_chooser_| yet, so we don't have a surface
+ // or a codec. After we initialize |surface_chooser_|, we'll transition to
+ // WAITING_FOR_CODEC, NO_ERROR, or ERROR.
+ BEFORE_OVERLAY_INIT,
+ // Set when we are asynchronously constructing the codec. Will transition
+ // to NO_ERROR or ERROR depending on success.
+ WAITING_FOR_CODEC,
+ // Set when we have a codec, but it doesn't yet have a key.
+ WAITING_FOR_KEY,
+ // The output surface was destroyed. We must not configure a new MediaCodec
+ // with the destroyed surface.
+ SURFACE_DESTROYED,
+ };
+
+ enum DrainType {
+ DRAIN_FOR_FLUSH,
+ DRAIN_FOR_RESET,
+ DRAIN_FOR_DESTROY,
+ };
+
+ // Called once before (possibly deferred) initialization succeeds, to set up
+ // |surface_chooser_| with our initial factory from VDA::Config.
+ void StartSurfaceChooser();
+
+ // Start a transition to an overlay, or, if |!overlay|, SurfaceTexture. The
+ // transition doesn't have to be immediate; we'll favor not dropping frames.
+ void OnSurfaceTransition(std::unique_ptr<AndroidOverlay> overlay);
+
+ // Called by AndroidOverlay when a surface is lost. We will discard pending
+ // frames, as needed, to switch away from |overlay| if we're using it. Before
+ // we return, we will have either dropped |overlay| if we own it, or posted
+ // it for async release with the codec that's using it. We also handle the
+ // case where we're not using |overlay| at all, since that can happen too
+ // while async codec release is pending.
+ void OnStopUsingOverlayImmediately(AndroidOverlay* overlay);
+
+ // Initializes the picture buffer manager to use the current surface, once
+ // it is available. This is not normally called directly, but rather via
+ // StartSurfaceCreation. If we have a media codec already, then this will
+ // attempt to setSurface the new surface. Otherwise, it will start codec
+ // config using the new surface. In that case, there might not be a codec
+ // ready even if this succeeds, but async config will be started. If
+ // setSurface fails, this will not replace the codec. On failure, this will
+ // transition |state_| to ERROR.
+ // Note that this assumes that there is an |incoming_bundle_| that we'll use.
+ // On success, we'll replace the bundle in |codec_config_|. On failure, we'll
+ // delete the incoming bundle.
+ void InitializePictureBufferManager();
+
+ // A part of destruction process that is sometimes postponed after the drain.
+ void ActualDestroy();
+
+ // Configures |media_codec_| with the given codec parameters from the client.
+ // This configuration will (probably) not be complete before this call
+ // returns. Multiple calls before completion will be ignored. |state_|
+ // must be NO_ERROR or WAITING_FOR_CODEC. Note that, once you call this,
+ // you should be careful to avoid modifying members of |codec_config_| until
+ // |state_| is no longer WAITING_FOR_CODEC.
+ void ConfigureMediaCodecAsynchronously();
+
+ // Like ConfigureMediaCodecAsynchronously, but synchronous. Will NotifyError
+ // on failure. Since all configuration is done synchronously, there is no
+ // concern with modifying |codec_config_| after this returns.
+ void ConfigureMediaCodecSynchronously();
+
+ // Sends the decoded frame specified by |codec_buffer_index| to the client.
+ void SendDecodedFrameToClient(int32_t codec_buffer_index,
+ int32_t bitstream_id);
+
+ // Does pending IO tasks if any. Once this is called, it polls |media_codec_|
+ // until it finishes pending tasks. For the polling, |kDecodePollDelay| is
+ // used.
+ void DoIOTask(bool start_timer);
+
+ // Feeds buffers in |pending_bitstream_records_| to |media_codec_|. Returns
+ // true if one was queued.
+ bool QueueInput();
+
+ // Dequeues output from |media_codec_| and feeds the decoded frame to the
+ // client. Returns a hint about whether calling again might produce
+ // more output.
+ bool DequeueOutput();
+
+ // Requests picture buffers from the client.
+ void RequestPictureBuffers();
+
+ // Decode the content in the |bitstream_buffer|. Note that a
+ // |bitstream_buffer| of id as -1 indicates a flush command.
+ void DecodeBuffer(const BitstreamBuffer& bitstream_buffer);
+
+ // Called during Initialize() for encrypted streams to set up the CDM.
+ void InitializeCdm();
+
+ // Called after the CDM obtains a MediaCrypto object.
+ void OnMediaCryptoReady(JavaObjectPtr media_crypto,
+ bool requires_secure_video_codec);
+
+ // Called when a new key is added to the CDM.
+ void OnKeyAdded();
+
+ // Notifies the client that deferred initialization succeeded. If it fails,
+ // then call NotifyError instead.
+ void NotifyInitializationSucceeded();
+
+ // Notifies the client about the availability of a picture.
+ void NotifyPictureReady(const Picture& picture);
+
+  // Notifies the client that the input buffer identified by input_buffer_id has
+ // been processed.
+ void NotifyEndOfBitstreamBuffer(int input_buffer_id);
+
+ // Notifies the client that the decoder was flushed.
+ void NotifyFlushDone();
+
+ // Notifies the client that the decoder was reset.
+ void NotifyResetDone();
+
+ // Start or stop our work-polling timer based on whether we did any work, and
+ // how long it has been since we've done work. Calling this with true will
+ // start the timer. Calling it with false may stop the timer.
+ void ManageTimer(bool did_work);
+
+ // Start the MediaCodec drain process by adding end_of_stream() buffer to the
+ // encoded buffers queue. When we receive EOS from the output buffer the drain
+ // process completes and we perform the action depending on the |drain_type|.
+ void StartCodecDrain(DrainType drain_type);
+
+ // Returns true if we are currently draining the codec and doing that as part
+ // of Reset() or Destroy() VP8 workaround. (http://crbug.com/598963). We won't
+ // display any frames and disable normal errors handling.
+ bool IsDrainingForResetOrDestroy() const;
+
+ // A helper method that performs the operation required after the drain
+ // completion (usually when we receive EOS in the output). The operation
+ // itself depends on the |drain_type_|.
+ void OnDrainCompleted();
+
+ // Resets MediaCodec and buffers/containers used for storing output. These
+ // components need to be reset upon EOS to decode a later stream. Input state
+ // (e.g. queued BitstreamBuffers) is not reset, as input following an EOS
+ // is still valid and should be processed.
+ void ResetCodecState();
+
+ // Indicates if MediaCodec should not be used for software decoding since we
+ // have safer versions elsewhere.
+ bool IsMediaCodecSoftwareDecodingForbidden() const;
+
+ // On platforms which support seamless surface changes, this will reinitialize
+ // the picture buffer manager with the new surface. This function reads and
+ // clears the surface id from |pending_surface_id_|. It will issue a decode
+ // error if the surface change fails. Returns false on failure.
+ bool UpdateSurface();
+
+ // Release |media_codec_| if it's not null, and notify
+ // |picture_buffer_manager_|.
+ void ReleaseCodec();
+
+  // ReleaseCodec(), and also drop our ref to its surface bundle. This is
+ // the right thing to do unless you're planning to re-use the bundle with
+ // another codec. Normally, one doesn't.
+ void ReleaseCodecAndBundle();
+
+ // Send a |hint| to |promotion_hint_aggregator_|.
+ void NotifyPromotionHint(PromotionHintAggregator::Hint hint);
+
+ // Used to DCHECK that we are called on the correct thread.
+ base::ThreadChecker thread_checker_;
+
+ // To expose client callbacks from VideoDecodeAccelerator.
+ Client* client_;
+
+ AVDACodecAllocator* codec_allocator_;
+
+ // Callback to set the correct gl context.
+ MakeGLContextCurrentCallback make_context_current_cb_;
+
+ // Callback to get the GLES2Decoder instance.
+ GetGLES2DecoderCallback get_gles2_decoder_cb_;
+
+ // The current state of this class. For now, this is used only for setting
+ // error state.
+ State state_;
+
+ // The assigned picture buffers by picture buffer id.
+ AVDAPictureBufferManager::PictureBufferMap output_picture_buffers_;
+
+ // This keeps the free picture buffer ids which can be used for sending
+ // decoded frames to the client.
+ base::queue<int32_t> free_picture_ids_;
+
+ // The low-level decoder which Android SDK provides.
+ std::unique_ptr<MediaCodecBridge> media_codec_;
+
+ // Set to true after requesting picture buffers to the client.
+ bool picturebuffers_requested_;
+
+ // The resolution of the stream.
+ gfx::Size size_;
+
+ // Handy structure to remember a BitstreamBuffer and also its shared memory,
+ // if any. The goal is to prevent leaving a BitstreamBuffer's shared memory
+ // handle open.
+ struct BitstreamRecord {
+ BitstreamRecord(const BitstreamBuffer&);
+ BitstreamRecord(BitstreamRecord&& other);
+ ~BitstreamRecord();
+
+ BitstreamBuffer buffer;
+
+ // |memory| is not mapped, and may be null if buffer has no data.
+ std::unique_ptr<SharedMemoryRegion> memory;
+ };
+
+ // Encoded bitstream buffers to be passed to media codec, queued until an
+ // input buffer is available.
+ base::queue<BitstreamRecord> pending_bitstream_records_;
+
+ // A map of presentation timestamp to bitstream buffer id for the bitstream
+ // buffers that have been submitted to the decoder but haven't yet produced an
+ // output frame with the same timestamp. Note: there will only be one entry
+ // for multiple bitstream buffers that have the same presentation timestamp.
+ std::map<base::TimeDelta, int32_t> bitstream_buffers_in_decoder_;
+
+ // Keeps track of bitstream ids notified to the client with
+ // NotifyEndOfBitstreamBuffer() before getting output from the bitstream.
+ std::list<int32_t> bitstreams_notified_in_advance_;
+
+ AVDAPictureBufferManager picture_buffer_manager_;
+
+ // Time at which we last did useful work on io_timer_.
+ base::TimeTicks most_recent_work_;
+
+ // The ongoing drain operation, if any.
+ base::Optional<DrainType> drain_type_;
+
+ // Holds a ref-count to the CDM to avoid using the CDM after it's destroyed.
+ scoped_refptr<ContentDecryptionModule> cdm_for_reference_holding_only_;
+
+ MediaDrmBridgeCdmContext* media_drm_bridge_cdm_context_;
+
+ // MediaDrmBridge requires registration/unregistration of the player, this
+ // registration id is used for this.
+ int cdm_registration_id_;
+
+ // Configuration that we use for MediaCodec.
+ // Do not update any of its members while |state_| is WAITING_FOR_CODEC.
+ scoped_refptr<CodecConfig> codec_config_;
+
+ // Index of the dequeued and filled buffer that we keep trying to enqueue.
+ // Such buffer appears in MEDIA_CODEC_NO_KEY processing.
+ int pending_input_buf_index_;
+
+ // Monotonically increasing value that is used to prevent old, delayed errors
+ // from being sent after a reset.
+ int error_sequence_token_;
+
+ // Are we currently processing a call to Initialize()? Please don't use this
+ // unless you're NotifyError.
+ bool during_initialize_;
+
+ // True if and only if VDA initialization is deferred, and we have not yet
+ // called NotifyInitializationComplete.
+ bool deferred_initialization_pending_;
+
+ // Indicates if ResetCodecState() should be called upon the next call to
+ // Decode(). Allows us to avoid trashing the last few frames of a playback
+ // when the EOS buffer is received.
+ bool codec_needs_reset_;
+
+ // True if surface creation and |picture_buffer_manager_| initialization has
+ // been defered until the first Decode() call.
+ bool defer_surface_creation_;
+
+ // Has a value if a SetSurface() call has occurred and a new surface should be
+ // switched to when possible. Cleared during OnSurfaceDestroyed() and if all
+ // pictures have been rendered in DequeueOutput().
+ base::Optional<int32_t> pending_surface_id_;
+
+ // Copy of the VDA::Config we were given.
+ Config config_;
+
+ // SurfaceBundle that we're going to use for StartSurfaceCreation. This is
+  // separate from the bundle in |codec_config_|, since we can start surface
+ // creation while another codec is using the old surface. For example, if
+ // we're going to SetSurface, then the current codec will depend on the
+ // current bundle until then.
+ scoped_refptr<AVDASurfaceBundle> incoming_bundle_;
+
+ // If we have been given an overlay to use, then this is it. If we've been
+ // told to move to SurfaceTexture, then this will be value() == nullptr.
+ base::Optional<std::unique_ptr<AndroidOverlay>> incoming_overlay_;
+
+ std::unique_ptr<AndroidVideoSurfaceChooser> surface_chooser_;
+
+ DeviceInfo* device_info_;
+
+ bool force_defer_surface_creation_for_testing_;
+
+ AndroidVideoSurfaceChooser::State surface_chooser_state_;
+
+ // Number of promotion hints that we need to receive before clearing the
+ // "delay overlay promotion" flag in |surface_chooser_state_|. We do this so
+ // that the transition looks better, since it gives blink time to stabilize.
+ // Since overlay positioning isn't synchronous, it's good to make sure that
+ // blink isn't moving the quad around too.
+ int hints_until_clear_relayout_flag_ = 0;
+
+ // Optional factory to produce mojo AndroidOverlay instances.
+ AndroidOverlayMojoFactoryCB overlay_factory_cb_;
+
+ std::unique_ptr<PromotionHintAggregator> promotion_hint_aggregator_;
+
+ // Are overlays required by command-line options?
+ bool is_overlay_required_ = false;
+
+ // Must match AVDAFrameInformation UMA enum. Please do not remove or re-order
+ // values, only append new ones.
+ enum class FrameInformation {
+ SURFACETEXTURE_INSECURE = 0,
+ SURFACETEXTURE_L3 = 1,
+ OVERLAY_L3 = 2,
+ OVERLAY_L1 = 3,
+ OVERLAY_INSECURE_PLAYER_ELEMENT_FULLSCREEN = 4,
+ OVERLAY_INSECURE_NON_PLAYER_ELEMENT_FULLSCREEN = 5,
+
+ // Max enum value.
+ FRAME_INFORMATION_MAX = OVERLAY_INSECURE_NON_PLAYER_ELEMENT_FULLSCREEN
+ };
+
+ // Update |cached_frame_information_|.
+ void CacheFrameInformation();
+
+ // Most recently cached frame information, so that we can dispatch it without
+ // recomputing it on every frame. It changes very rarely.
+ FrameInformation cached_frame_information_ =
+ FrameInformation::SURFACETEXTURE_INSECURE;
+
+ // Time since we last updated the chooser state.
+ base::TimeTicks most_recent_chooser_retry_;
+
+ // WeakPtrFactory for posting tasks back to |this|.
+ base::WeakPtrFactory<AndroidVideoDecodeAccelerator> weak_this_factory_;
+
+ friend class AndroidVideoDecodeAcceleratorTest;
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_ANDROID_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/chromium/media/gpu/android/android_video_decode_accelerator_unittest.cc b/chromium/media/gpu/android/android_video_decode_accelerator_unittest.cc
new file mode 100644
index 00000000000..4fcfb62eb29
--- /dev/null
+++ b/chromium/media/gpu/android/android_video_decode_accelerator_unittest.cc
@@ -0,0 +1,542 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/android_video_decode_accelerator.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "media/base/android/media_codec_util.h"
+#include "media/base/android/mock_android_overlay.h"
+#include "media/base/android/mock_media_codec_bridge.h"
+#include "media/gpu/android/android_video_decode_accelerator.h"
+#include "media/gpu/android/android_video_surface_chooser.h"
+#include "media/gpu/android/avda_codec_allocator.h"
+#include "media/gpu/android/fake_android_video_surface_chooser.h"
+#include "media/gpu/android/fake_codec_allocator.h"
+#include "media/gpu/android/mock_device_info.h"
+#include "media/video/picture.h"
+#include "media/video/video_decode_accelerator.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/init/gl_factory.h"
+
+using ::testing::NiceMock;
+using ::testing::NotNull;
+using ::testing::Return;
+using ::testing::_;
+
+namespace media {
+namespace {
+
+#define SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE() \
+ do { \
+ if (!MediaCodecUtil::IsMediaCodecAvailable()) \
+ return; \
+ } while (false)
+
+bool MakeContextCurrent() {
+ return true;
+}
+
+base::WeakPtr<gpu::gles2::GLES2Decoder> GetGLES2Decoder(
+ const base::WeakPtr<gpu::gles2::GLES2Decoder>& decoder) {
+ return decoder;
+}
+
+class MockVDAClient : public VideoDecodeAccelerator::Client {
+ public:
+ MockVDAClient() {}
+
+ MOCK_METHOD1(NotifyInitializationComplete, void(bool));
+ MOCK_METHOD5(
+ ProvidePictureBuffers,
+ void(uint32_t, VideoPixelFormat, uint32_t, const gfx::Size&, uint32_t));
+ MOCK_METHOD1(DismissPictureBuffer, void(int32_t));
+ MOCK_METHOD1(PictureReady, void(const Picture&));
+ MOCK_METHOD1(NotifyEndOfBitstreamBuffer, void(int32_t));
+ MOCK_METHOD0(NotifyFlushDone, void());
+ MOCK_METHOD0(NotifyResetDone, void());
+ MOCK_METHOD1(NotifyError, void(VideoDecodeAccelerator::Error));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockVDAClient);
+};
+
+} // namespace
+
+class AndroidVideoDecodeAcceleratorTest : public testing::Test {
+ public:
+ // Default to baseline H264 because it's always supported.
+ AndroidVideoDecodeAcceleratorTest()
+ : gl_decoder_(&command_buffer_service_, &outputter_),
+ config_(H264PROFILE_BASELINE) {}
+
+ void SetUp() override {
+ ASSERT_TRUE(gl::init::InitializeGLOneOff());
+ surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size(16, 16));
+ context_ = gl::init::CreateGLContext(nullptr, surface_.get(),
+ gl::GLContextAttribs());
+ context_->MakeCurrent(surface_.get());
+
+ codec_allocator_ = base::MakeUnique<FakeCodecAllocator>(
+ base::SequencedTaskRunnerHandle::Get());
+ device_info_ = base::MakeUnique<NiceMock<MockDeviceInfo>>();
+
+ chooser_that_is_usually_null_ =
+ base::MakeUnique<NiceMock<FakeSurfaceChooser>>();
+ chooser_ = chooser_that_is_usually_null_.get();
+
+ // By default, allow deferred init.
+ config_.is_deferred_initialization_allowed = true;
+ }
+
+ ~AndroidVideoDecodeAcceleratorTest() override {
+ // ~AVDASurfaceBundle() might rely on GL being available, so we have to
+ // explicitly drop references to them before tearing down GL.
+ vda_ = nullptr;
+ codec_allocator_ = nullptr;
+ context_ = nullptr;
+ surface_ = nullptr;
+ gl::init::ShutdownGL();
+ }
+
+ // Create and initialize AVDA with |config_|, and return the result.
+ bool InitializeAVDA(bool force_defer_surface_creation = false) {
+ // Because VDA has a custom deleter, we must assign it to |vda_| carefully.
+ AndroidVideoDecodeAccelerator* avda = new AndroidVideoDecodeAccelerator(
+ codec_allocator_.get(), std::move(chooser_that_is_usually_null_),
+ base::Bind(&MakeContextCurrent),
+ base::Bind(&GetGLES2Decoder, gl_decoder_.AsWeakPtr()),
+ AndroidOverlayMojoFactoryCB(), device_info_.get());
+ vda_.reset(avda);
+ avda->force_defer_surface_creation_for_testing_ =
+ force_defer_surface_creation;
+
+ bool result = vda_->Initialize(config_, &client_);
+ base::RunLoop().RunUntilIdle();
+ return result;
+ }
+
+ // Initialize |vda_|, providing a new surface for it. You may get the surface
+ // by asking |codec_allocator_|.
+ void InitializeAVDAWithOverlay() {
+ config_.overlay_info.surface_id = 123;
+ ASSERT_TRUE(InitializeAVDA());
+ base::RunLoop().RunUntilIdle();
+ ASSERT_TRUE(chooser_->factory_);
+
+ // Have the factory provide an overlay, and verify that codec creation is
+ // provided with that overlay.
+ std::unique_ptr<MockAndroidOverlay> overlay =
+ base::MakeUnique<MockAndroidOverlay>();
+ overlay_callbacks_ = overlay->GetCallbacks();
+
+ // Set the expectations first, since ProvideOverlay might cause callbacks.
+ EXPECT_CALL(*codec_allocator_,
+ MockCreateMediaCodecAsync(overlay.get(), nullptr));
+ chooser_->ProvideOverlay(std::move(overlay));
+
+ // Provide the codec so that we can check if it's freed properly.
+ EXPECT_CALL(client_, NotifyInitializationComplete(true));
+ codec_allocator_->ProvideMockCodecAsync();
+ base::RunLoop().RunUntilIdle();
+ }
+
+ void InitializeAVDAWithSurfaceTexture() {
+ ASSERT_TRUE(InitializeAVDA());
+ base::RunLoop().RunUntilIdle();
+ // We do not expect a factory, since we are using SurfaceTexture.
+ ASSERT_FALSE(chooser_->factory_);
+
+ // Set the expectations first, since ProvideOverlay might cause callbacks.
+ EXPECT_CALL(*codec_allocator_,
+ MockCreateMediaCodecAsync(nullptr, NotNull()));
+ chooser_->ProvideSurfaceTexture();
+
+ // Provide the codec so that we can check if it's freed properly.
+ EXPECT_CALL(client_, NotifyInitializationComplete(true));
+ codec_allocator_->ProvideMockCodecAsync();
+ base::RunLoop().RunUntilIdle();
+ }
+
+  // Set whether HasUnrenderedPictureBuffers will return true or false.
+ // TODO(liberato): We can't actually do this yet. It turns out to be okay,
+ // because AVDA doesn't actually SetSurface before DequeueOutput. It could do
+ // so, though, if there aren't unrendered buffers. Should AVDA ever start
+ // switching surfaces immediately upon receiving them, rather than waiting for
+ // DequeueOutput, then we'll want to be able to indicate that it has
+ // unrendered pictures to prevent that behavior.
+ void SetHasUnrenderedPictureBuffers(bool flag) {}
+
+ // Tell |avda_| to switch surfaces to its incoming surface. This is a method
+ // since we're a friend of AVDA, and the tests are subclasses. It's also
+ // somewhat hacky, but much less hacky than trying to run it via a timer.
+ void LetAVDAUpdateSurface() {
+ SetHasUnrenderedPictureBuffers(false);
+ avda()->DequeueOutput();
+ }
+
+ // So that SequencedTaskRunnerHandle::Get() works.
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
+
+ scoped_refptr<gl::GLSurface> surface_;
+ scoped_refptr<gl::GLContext> context_;
+ gpu::FakeCommandBufferServiceBase command_buffer_service_;
+ gpu::gles2::TraceOutputter outputter_;
+ NiceMock<gpu::gles2::MockGLES2Decoder> gl_decoder_;
+ NiceMock<MockVDAClient> client_;
+ std::unique_ptr<FakeCodecAllocator> codec_allocator_;
+
+ // Only set until InitializeAVDA() is called.
+ std::unique_ptr<FakeSurfaceChooser> chooser_that_is_usually_null_;
+ FakeSurfaceChooser* chooser_;
+ VideoDecodeAccelerator::Config config_;
+ std::unique_ptr<MockDeviceInfo> device_info_;
+
+ // Set by InitializeAVDAWithOverlay()
+ MockAndroidOverlay::Callbacks overlay_callbacks_;
+
+  // This must be a unique pointer to a VDA, not an AVDA, to ensure that
+ // the default_delete specialization that calls Destroy() will be used.
+ std::unique_ptr<VideoDecodeAccelerator> vda_;
+
+ AndroidVideoDecodeAccelerator* avda() {
+ return reinterpret_cast<AndroidVideoDecodeAccelerator*>(vda_.get());
+ }
+};
+
+TEST_F(AndroidVideoDecodeAcceleratorTest, ConfigureUnsupportedCodec) {
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ config_ = VideoDecodeAccelerator::Config(VIDEO_CODEC_PROFILE_UNKNOWN);
+ ASSERT_FALSE(InitializeAVDA());
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest,
+ ConfigureSupportedCodecSynchronously) {
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ config_.is_deferred_initialization_allowed = false;
+
+ EXPECT_CALL(*codec_allocator_, MockCreateMediaCodecSync(_, _));
+ // AVDA must set client callbacks even in sync mode, so that the chooser is
+ // in a sane state. crbug.com/772899 .
+ EXPECT_CALL(*chooser_, MockSetClientCallbacks());
+ ASSERT_TRUE(InitializeAVDA());
+ testing::Mock::VerifyAndClearExpectations(chooser_);
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest, FailingToCreateACodecSyncIsAnError) {
+  // Failure to create a codec during sync init should cause Initialize to fail.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ config_.is_deferred_initialization_allowed = false;
+ codec_allocator_->allow_sync_creation = false;
+
+ EXPECT_CALL(*codec_allocator_, MockCreateMediaCodecSync(nullptr, NotNull()));
+ ASSERT_FALSE(InitializeAVDA());
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest, FailingToCreateACodecAsyncIsAnError) {
+ // Verify that a null codec signals error for async init when it doesn't get a
+ // mediacodec instance.
+ //
+ // Also assert that there's only one call to CreateMediaCodecAsync. And since
+ // it replies with a null codec, AVDA will be in an error state when it shuts
+ // down. Since we know that it's constructed before we destroy the VDA, we
+  // verify that AVDA doesn't create codecs during destruction.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ // Note that if we somehow end up deferring surface creation, then this would
+ // no longer be expected to fail. It would signal success before asking for a
+ // surface or codec.
+ EXPECT_CALL(*codec_allocator_, MockCreateMediaCodecAsync(_, NotNull()));
+ EXPECT_CALL(client_, NotifyInitializationComplete(false));
+
+ ASSERT_TRUE(InitializeAVDA());
+ chooser_->ProvideSurfaceTexture();
+ codec_allocator_->ProvideNullCodecAsync();
+
+ // Make sure that codec allocation has happened before destroying the VDA.
+ testing::Mock::VerifyAndClearExpectations(codec_allocator_.get());
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest,
+ LowEndDevicesSucceedInitWithoutASurface) {
+ // If AVDA decides that we should defer surface creation, then it should
+ // signal success before we provide a surface. It should still ask for a
+ // surface, though.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ config_.overlay_info.surface_id = SurfaceManager::kNoSurfaceID;
+
+ EXPECT_CALL(*chooser_, MockUpdateState()).Times(0);
+ EXPECT_CALL(client_, NotifyInitializationComplete(true));
+
+ // It would be nicer if we didn't just force this on, since we might do so
+ // in a state that AVDA isn't supposed to handle (e.g., if we give it a
+ // surface, then it would never decide to defer surface creation).
+ bool force_defer_surface_creation = true;
+ InitializeAVDA(force_defer_surface_creation);
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest,
+ AsyncInitWithSurfaceTextureAndDelete) {
+ // When configuring with a SurfaceTexture and deferred init, we should be
+ // asked for a codec, and be notified of init success if we provide one. When
+ // AVDA is destroyed, it should release the codec and surface texture.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ InitializeAVDAWithSurfaceTexture();
+
+ // Delete the VDA, and make sure that it tries to free the codec and the right
+ // surface texture.
+ EXPECT_CALL(
+ *codec_allocator_,
+ MockReleaseMediaCodec(codec_allocator_->most_recent_codec,
+ codec_allocator_->most_recent_overlay,
+ codec_allocator_->most_recent_surface_texture));
+ codec_allocator_->most_recent_codec_destruction_observer->ExpectDestruction();
+ vda_ = nullptr;
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest, AsyncInitWithSurfaceAndDelete) {
+ // When |config_| specifies a surface, we should be given a factory during
+ // startup for it. When |chooser_| provides an overlay, the codec should be
+ // allocated using it. Shutdown should provide the overlay when releasing the
+ // media codec.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ InitializeAVDAWithOverlay();
+
+ // Delete the VDA, and make sure that it tries to free the codec and the
+ // overlay that it provided to us.
+ EXPECT_CALL(
+ *codec_allocator_,
+ MockReleaseMediaCodec(codec_allocator_->most_recent_codec,
+ codec_allocator_->most_recent_overlay,
+ codec_allocator_->most_recent_surface_texture));
+ codec_allocator_->most_recent_codec_destruction_observer->ExpectDestruction();
+ vda_ = nullptr;
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest,
+ SwitchesToSurfaceTextureWhenSurfaceDestroyed) {
+ // Provide a surface, and a codec, then destroy the surface. AVDA should use
+ // SetSurface to switch to SurfaceTexture.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ InitializeAVDAWithOverlay();
+
+ // It would be nice if we knew that this was a surface texture. As it is, we
+ // just destroy the VDA and expect that we're provided with one. Hopefully,
+ // AVDA is actually calling SetSurface properly.
+ EXPECT_CALL(*codec_allocator_->most_recent_codec, SetSurface(_))
+ .WillOnce(Return(true));
+ codec_allocator_->most_recent_codec_destruction_observer
+ ->VerifyAndClearExpectations();
+ overlay_callbacks_.SurfaceDestroyed.Run();
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_CALL(*codec_allocator_,
+ MockReleaseMediaCodec(codec_allocator_->most_recent_codec,
+ nullptr, NotNull()));
+ vda_ = nullptr;
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest, SwitchesToSurfaceTextureEventually) {
+ // Provide a surface, and a codec, then request that AVDA switches to a
+ // surface texture. Verify that it does.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ InitializeAVDAWithOverlay();
+
+ EXPECT_CALL(*codec_allocator_->most_recent_codec, SetSurface(_))
+ .WillOnce(Return(true));
+
+ // Note that it's okay if |avda_| switches before ProvideSurfaceTexture
+ // returns, since it has no queued output anyway.
+ chooser_->ProvideSurfaceTexture();
+ LetAVDAUpdateSurface();
+
+ // Verify that we're now using some surface texture.
+ EXPECT_CALL(*codec_allocator_,
+ MockReleaseMediaCodec(codec_allocator_->most_recent_codec,
+ nullptr, NotNull()));
+ codec_allocator_->most_recent_codec_destruction_observer->ExpectDestruction();
+ vda_ = nullptr;
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest,
+ SetSurfaceFailureDoesntSwitchSurfaces) {
+ // Initialize AVDA with a surface, then request that AVDA switches to a
+ // surface texture. When it tries to UpdateSurface, pretend to fail. AVDA
+ // should notify error, and also release the original surface.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ InitializeAVDAWithOverlay();
+
+ EXPECT_CALL(*codec_allocator_->most_recent_codec, SetSurface(_))
+ .WillOnce(Return(false));
+ EXPECT_CALL(client_,
+ NotifyError(AndroidVideoDecodeAccelerator::PLATFORM_FAILURE))
+ .Times(1);
+ codec_allocator_->most_recent_codec_destruction_observer
+ ->VerifyAndClearExpectations();
+ chooser_->ProvideSurfaceTexture();
+ LetAVDAUpdateSurface();
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest,
+ SwitchToSurfaceAndBackBeforeSetSurface) {
+ // Ask AVDA to switch from ST to overlay, then back to ST before it has a
+ // chance to do the first switch. It should simply drop the overlay.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ InitializeAVDAWithSurfaceTexture();
+
+ // Don't let AVDA switch immediately, else it could choose to SetSurface when
+ // it first gets the overlay.
+ SetHasUnrenderedPictureBuffers(true);
+ EXPECT_CALL(*codec_allocator_->most_recent_codec, SetSurface(_)).Times(0);
+ std::unique_ptr<MockAndroidOverlay> overlay =
+ base::MakeUnique<MockAndroidOverlay>();
+ // Make sure that the overlay is not destroyed too soon.
+ std::unique_ptr<DestructionObserver> observer =
+ overlay->CreateDestructionObserver();
+ observer->DoNotAllowDestruction();
+
+ chooser_->ProvideOverlay(std::move(overlay));
+
+ // Now it is expected to drop the overlay.
+ observer->ExpectDestruction();
+
+ // While the incoming surface is pending, switch back to SurfaceTexture.
+ chooser_->ProvideSurfaceTexture();
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest,
+ ChangingOutputSurfaceVoluntarilyWithoutSetSurfaceIsIgnored) {
+  // Asking AVDA to change to SurfaceTexture should be ignored on platforms
+ // that don't support SetSurface (pre-M or blacklisted). It should also
+ // ignore SurfaceTexture => overlay, but we don't check that.
+ //
+ // Also note that there are other probably reasonable things to do (like
+ // signal an error), but we want to be sure that it doesn't try to SetSurface.
+ // We also want to be sure that, if it doesn't signal an error, that it also
+ // doesn't get confused about which surface is in use. So, we assume that it
+ // doesn't signal an error, and we check that it releases the right surface
+ // with the codec.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+ EXPECT_CALL(client_, NotifyError(_)).Times(0);
+
+ ON_CALL(*device_info_, IsSetOutputSurfaceSupported())
+ .WillByDefault(Return(false));
+ InitializeAVDAWithOverlay();
+ EXPECT_CALL(*codec_allocator_->most_recent_codec, SetSurface(_)).Times(0);
+
+ // This should not switch to SurfaceTexture.
+ chooser_->ProvideSurfaceTexture();
+ LetAVDAUpdateSurface();
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest,
+ OnSurfaceDestroyedWithoutSetSurfaceFreesTheCodec) {
+ // If AVDA receives OnSurfaceDestroyed without support for SetSurface, then it
+ // should free the codec.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+ ON_CALL(*device_info_, IsSetOutputSurfaceSupported())
+ .WillByDefault(Return(false));
+ InitializeAVDAWithOverlay();
+ EXPECT_CALL(*codec_allocator_->most_recent_codec, SetSurface(_)).Times(0);
+
+ // This should free the codec.
+ EXPECT_CALL(
+ *codec_allocator_,
+ MockReleaseMediaCodec(codec_allocator_->most_recent_codec,
+ codec_allocator_->most_recent_overlay, nullptr));
+ codec_allocator_->most_recent_codec_destruction_observer->ExpectDestruction();
+ overlay_callbacks_.SurfaceDestroyed.Run();
+ base::RunLoop().RunUntilIdle();
+
+ // Verify that the codec has been released, since |vda_| will be destroyed
+ // soon. The expectations must be met before that.
+ testing::Mock::VerifyAndClearExpectations(&codec_allocator_);
+ codec_allocator_->most_recent_codec_destruction_observer
+ ->VerifyAndClearExpectations();
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest,
+ MultipleSurfaceTextureCallbacksAreIgnored) {
+ // Ask AVDA to switch to ST when it's already using ST, nothing should happen.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+
+ InitializeAVDAWithSurfaceTexture();
+
+ // This should do nothing.
+ EXPECT_CALL(*codec_allocator_->most_recent_codec, SetSurface(_)).Times(0);
+ chooser_->ProvideSurfaceTexture();
+
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest,
+ OverlayInfoWithDuplicateSurfaceIDDoesntChangeTheFactory) {
+ // Send OverlayInfo with duplicate info, and verify that it doesn't change
+ // the factory.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+ InitializeAVDAWithOverlay();
+
+ EXPECT_CALL(*chooser_, MockUpdateState()).Times(1);
+ EXPECT_CALL(*chooser_, MockReplaceOverlayFactory(_)).Times(0);
+ OverlayInfo overlay_info = config_.overlay_info;
+ avda()->SetOverlayInfo(overlay_info);
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest,
+ OverlayInfoWithNewSurfaceIDDoesChangeTheFactory) {
+ // Send OverlayInfo with new surface info, and verify that it does change the
+ // overlay factory.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+ InitializeAVDAWithOverlay();
+
+ EXPECT_CALL(*chooser_, MockUpdateState()).Times(1);
+ OverlayInfo overlay_info = config_.overlay_info;
+ overlay_info.surface_id++;
+ avda()->SetOverlayInfo(overlay_info);
+}
+
+TEST_F(AndroidVideoDecodeAcceleratorTest, FullscreenSignalIsSentToChooser) {
+ // Send OverlayInfo that has |is_fullscreen| set, and verify that the chooser
+ // is notified about it.
+ SKIP_IF_MEDIACODEC_IS_NOT_AVAILABLE();
+ InitializeAVDAWithOverlay();
+ OverlayInfo overlay_info = config_.overlay_info;
+ overlay_info.is_fullscreen = !config_.overlay_info.is_fullscreen;
+ avda()->SetOverlayInfo(overlay_info);
+ ASSERT_EQ(chooser_->current_state_.is_fullscreen, overlay_info.is_fullscreen);
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/android_video_encode_accelerator.cc b/chromium/media/gpu/android/android_video_encode_accelerator.cc
new file mode 100644
index 00000000000..a3341698027
--- /dev/null
+++ b/chromium/media/gpu/android/android_video_encode_accelerator.cc
@@ -0,0 +1,433 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/android_video_encode_accelerator.h"
+
+#include <memory>
+#include <set>
+#include <tuple>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "media/base/android/media_codec_util.h"
+#include "media/base/bitstream_buffer.h"
+#include "media/base/limits.h"
+#include "media/gpu/shared_memory_region.h"
+#include "media/video/picture.h"
+#include "third_party/libyuv/include/libyuv/convert_from.h"
+#include "ui/gl/android/scoped_java_surface.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace media {
+
+// Limit default max video codec size for Android to avoid
+// HW codec initialization failure for resolution higher than 720p.
+// Default values are from Libjingle "jsepsessiondescription.cc".
+const int kMaxEncodeFrameWidth = 1280;
+const int kMaxEncodeFrameHeight = 720;
+const int kMaxFramerateNumerator = 30;
+const int kMaxFramerateDenominator = 1;
+
+enum PixelFormat {
+ // Subset of MediaCodecInfo.CodecCapabilities.
+ COLOR_FORMAT_YUV420_PLANAR = 19,
+ COLOR_FORMAT_YUV420_SEMIPLANAR = 21,
+};
+
+// Helper macros for dealing with failure. If |result| evaluates false, emit
+// |log| to DLOG(ERROR), register |error| with the client, and return.
+#define RETURN_ON_FAILURE(result, log, error) \
+ do { \
+ if (!(result)) { \
+ DLOG(ERROR) << log; \
+ if (!error_occurred_) { \
+ client_ptr_factory_->GetWeakPtr()->NotifyError(error); \
+ error_occurred_ = true; \
+ } \
+ return; \
+ } \
+ } while (0)
+
+// Because MediaCodec is thread-hostile (must be poked on a single thread) and
+// has no callback mechanism (b/11990118), we must drive it by polling for
+// complete frames (and available input buffers, when the codec is fully
+// saturated). This function defines the polling delay. The value used is an
+// arbitrary choice that trades off CPU utilization (spinning) against latency.
+// Mirrors android_video_decode_accelerator.cc::DecodePollDelay().
+static inline const base::TimeDelta EncodePollDelay() {
+ // An alternative to this polling scheme could be to dedicate a new thread
+ // (instead of using the ChildThread) to run the MediaCodec, and make that
+ // thread use the timeout-based flavor of MediaCodec's dequeue methods when it
+ // believes the codec should complete "soon" (e.g. waiting for an input
+ // buffer, or waiting for a picture when it knows enough complete input
+ // pictures have been fed to saturate any internal buffering). This is
+ // speculative and it's unclear that this would be a win (nor that there's a
+ // reasonably device-agnostic way to fill in the "believes" above).
+ return base::TimeDelta::FromMilliseconds(10);
+}
+
+static inline const base::TimeDelta NoWaitTimeOut() {
+ return base::TimeDelta::FromMicroseconds(0);
+}
+
+static bool GetSupportedColorFormatForMime(const std::string& mime,
+ PixelFormat* pixel_format) {
+ if (mime.empty())
+ return false;
+
+ std::set<int> formats = MediaCodecUtil::GetEncoderColorFormats(mime);
+ if (formats.count(COLOR_FORMAT_YUV420_SEMIPLANAR) > 0)
+ *pixel_format = COLOR_FORMAT_YUV420_SEMIPLANAR;
+ else if (formats.count(COLOR_FORMAT_YUV420_PLANAR) > 0)
+ *pixel_format = COLOR_FORMAT_YUV420_PLANAR;
+ else
+ return false;
+
+ return true;
+}
+
+AndroidVideoEncodeAccelerator::AndroidVideoEncodeAccelerator()
+ : num_buffers_at_codec_(0), last_set_bitrate_(0), error_occurred_(false) {}
+
+AndroidVideoEncodeAccelerator::~AndroidVideoEncodeAccelerator() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+VideoEncodeAccelerator::SupportedProfiles
+AndroidVideoEncodeAccelerator::GetSupportedProfiles() {
+ SupportedProfiles profiles;
+
+ const struct {
+ const VideoCodec codec;
+ const VideoCodecProfile profile;
+ } kSupportedCodecs[] = {{kCodecVP8, VP8PROFILE_ANY},
+ {kCodecH264, H264PROFILE_BASELINE}};
+
+ for (const auto& supported_codec : kSupportedCodecs) {
+ if (supported_codec.codec == kCodecVP8 &&
+ !MediaCodecUtil::IsVp8EncoderAvailable()) {
+ continue;
+ }
+
+ if (supported_codec.codec == kCodecH264 &&
+ !MediaCodecUtil::IsH264EncoderAvailable()) {
+ continue;
+ }
+
+ if (MediaCodecUtil::IsKnownUnaccelerated(supported_codec.codec,
+ MediaCodecDirection::ENCODER)) {
+ continue;
+ }
+
+ SupportedProfile profile;
+ profile.profile = supported_codec.profile;
+ // It would be nice if MediaCodec exposes the maximum capabilities of
+ // the encoder. Hard-code some reasonable defaults as workaround.
+ profile.max_resolution.SetSize(kMaxEncodeFrameWidth, kMaxEncodeFrameHeight);
+ profile.max_framerate_numerator = kMaxFramerateNumerator;
+ profile.max_framerate_denominator = kMaxFramerateDenominator;
+ profiles.push_back(profile);
+ }
+ return profiles;
+}
+
+bool AndroidVideoEncodeAccelerator::Initialize(
+ VideoPixelFormat format,
+ const gfx::Size& input_visible_size,
+ VideoCodecProfile output_profile,
+ uint32_t initial_bitrate,
+ Client* client) {
+ DVLOG(3) << __func__ << " format: " << VideoPixelFormatToString(format)
+ << ", input_visible_size: " << input_visible_size.ToString()
+ << ", output_profile: " << GetProfileName(output_profile)
+ << ", initial_bitrate: " << initial_bitrate;
+ DCHECK(!media_codec_);
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(client);
+
+ client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
+
+ if (!(MediaCodecUtil::SupportsSetParameters() &&
+ format == PIXEL_FORMAT_I420)) {
+ DLOG(ERROR) << "Unexpected combo: " << format << ", "
+ << GetProfileName(output_profile);
+ return false;
+ }
+
+ std::string mime_type;
+ VideoCodec codec;
+ // The client should be prepared to feed at least this many frames into the
+ // encoder before being returned any output frames, since the encoder may
+ // need to hold onto some subset of inputs as reference pictures.
+ uint32_t frame_input_count;
+ uint32_t i_frame_interval;
+ if (output_profile == VP8PROFILE_ANY) {
+ codec = kCodecVP8;
+ mime_type = "video/x-vnd.on2.vp8";
+ frame_input_count = 1;
+ i_frame_interval = IFRAME_INTERVAL_VPX;
+ } else if (output_profile == H264PROFILE_BASELINE ||
+ output_profile == H264PROFILE_MAIN) {
+ codec = kCodecH264;
+ mime_type = "video/avc";
+ frame_input_count = 30;
+ i_frame_interval = IFRAME_INTERVAL_H264;
+ } else {
+ return false;
+ }
+
+ frame_size_ = input_visible_size;
+ last_set_bitrate_ = initial_bitrate;
+
+ // Only consider using MediaCodec if it's likely backed by hardware.
+ if (MediaCodecUtil::IsKnownUnaccelerated(codec,
+ MediaCodecDirection::ENCODER)) {
+ DLOG(ERROR) << "No HW support";
+ return false;
+ }
+
+ PixelFormat pixel_format = COLOR_FORMAT_YUV420_SEMIPLANAR;
+ if (!GetSupportedColorFormatForMime(mime_type, &pixel_format)) {
+ DLOG(ERROR) << "No color format support.";
+ return false;
+ }
+ media_codec_ = MediaCodecBridgeImpl::CreateVideoEncoder(
+ codec, input_visible_size, initial_bitrate, INITIAL_FRAMERATE,
+ i_frame_interval, pixel_format);
+
+ if (!media_codec_) {
+ DLOG(ERROR) << "Failed to create/start the codec: "
+ << input_visible_size.ToString();
+ return false;
+ }
+
+ // Conservative upper bound for output buffer size: decoded size + 2KB.
+ const size_t output_buffer_capacity =
+ VideoFrame::AllocationSize(format, input_visible_size) + 2048;
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoEncodeAccelerator::Client::RequireBitstreamBuffers,
+ client_ptr_factory_->GetWeakPtr(), frame_input_count,
+ input_visible_size, output_buffer_capacity));
+ return true;
+}
+
+void AndroidVideoEncodeAccelerator::MaybeStartIOTimer() {
+ if (!io_timer_.IsRunning() &&
+ (num_buffers_at_codec_ > 0 || !pending_frames_.empty())) {
+ io_timer_.Start(FROM_HERE, EncodePollDelay(), this,
+ &AndroidVideoEncodeAccelerator::DoIOTask);
+ }
+}
+
+void AndroidVideoEncodeAccelerator::MaybeStopIOTimer() {
+ if (io_timer_.IsRunning() &&
+ (num_buffers_at_codec_ == 0 && pending_frames_.empty())) {
+ io_timer_.Stop();
+ }
+}
+
+void AndroidVideoEncodeAccelerator::Encode(
+ const scoped_refptr<VideoFrame>& frame,
+ bool force_keyframe) {
+ DVLOG(3) << __PRETTY_FUNCTION__ << ": " << force_keyframe;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ RETURN_ON_FAILURE(frame->format() == PIXEL_FORMAT_I420, "Unexpected format",
+ kInvalidArgumentError);
+ RETURN_ON_FAILURE(frame->visible_rect().size() == frame_size_,
+ "Unexpected resolution", kInvalidArgumentError);
+ // MediaCodec doesn't have a way to specify stride for non-Packed formats, so
+ // we insist on being called with packed frames and no cropping :(
+ RETURN_ON_FAILURE(frame->row_bytes(VideoFrame::kYPlane) ==
+ frame->stride(VideoFrame::kYPlane) &&
+ frame->row_bytes(VideoFrame::kUPlane) ==
+ frame->stride(VideoFrame::kUPlane) &&
+ frame->row_bytes(VideoFrame::kVPlane) ==
+ frame->stride(VideoFrame::kVPlane) &&
+ frame->coded_size() == frame->visible_rect().size(),
+ "Non-packed frame, or visible_rect != coded_size",
+ kInvalidArgumentError);
+
+ pending_frames_.push(
+ std::make_tuple(frame, force_keyframe, base::Time::Now()));
+ DoIOTask();
+}
+
+void AndroidVideoEncodeAccelerator::UseOutputBitstreamBuffer(
+ const BitstreamBuffer& buffer) {
+ DVLOG(3) << __PRETTY_FUNCTION__ << ": bitstream_buffer_id=" << buffer.id();
+ DCHECK(thread_checker_.CalledOnValidThread());
+ available_bitstream_buffers_.push_back(buffer);
+ DoIOTask();
+}
+
+void AndroidVideoEncodeAccelerator::RequestEncodingParametersChange(
+ uint32_t bitrate,
+ uint32_t framerate) {
+ DVLOG(3) << __PRETTY_FUNCTION__ << ": bitrate: " << bitrate
+ << ", framerate: " << framerate;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (bitrate != last_set_bitrate_) {
+ last_set_bitrate_ = bitrate;
+ media_codec_->SetVideoBitrate(bitrate, framerate);
+ }
+ // Note: Android's MediaCodec doesn't allow mid-stream adjustments to
+ // framerate, so we ignore that here. This is OK because Android only uses
+ // the framerate value from MediaFormat during configure() as a proxy for
+ // bitrate, and we set that explicitly.
+}
+
+void AndroidVideoEncodeAccelerator::Destroy() {
+ DVLOG(3) << __PRETTY_FUNCTION__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ client_ptr_factory_.reset();
+ if (media_codec_) {
+ if (io_timer_.IsRunning())
+ io_timer_.Stop();
+ media_codec_->Stop();
+ }
+ delete this;
+}
+
+void AndroidVideoEncodeAccelerator::DoIOTask() {
+ QueueInput();
+ DequeueOutput();
+ MaybeStartIOTimer();
+ MaybeStopIOTimer();
+}
+
+void AndroidVideoEncodeAccelerator::QueueInput() {
+ if (error_occurred_ || pending_frames_.empty())
+ return;
+
+ int input_buf_index = 0;
+ MediaCodecStatus status =
+ media_codec_->DequeueInputBuffer(NoWaitTimeOut(), &input_buf_index);
+ if (status != MEDIA_CODEC_OK) {
+ DCHECK(status == MEDIA_CODEC_TRY_AGAIN_LATER ||
+ status == MEDIA_CODEC_ERROR);
+ RETURN_ON_FAILURE(status != MEDIA_CODEC_ERROR, "MediaCodec error",
+ kPlatformFailureError);
+ return;
+ }
+
+ const PendingFrames::value_type& input = pending_frames_.front();
+ bool is_key_frame = std::get<1>(input);
+ if (is_key_frame) {
+ // Ideally MediaCodec would honor BUFFER_FLAG_SYNC_FRAME so we could
+ // indicate this in the QueueInputBuffer() call below and guarantee _this_
+ // frame be encoded as a key frame, but sadly that flag is ignored.
+ // Instead, we request a key frame "soon".
+ media_codec_->RequestKeyFrameSoon();
+ }
+ scoped_refptr<VideoFrame> frame = std::get<0>(input);
+
+ uint8_t* buffer = nullptr;
+ size_t capacity = 0;
+ status = media_codec_->GetInputBuffer(input_buf_index, &buffer, &capacity);
+ RETURN_ON_FAILURE(status == MEDIA_CODEC_OK, "GetInputBuffer failed.",
+ kPlatformFailureError);
+
+ size_t queued_size =
+ VideoFrame::AllocationSize(PIXEL_FORMAT_I420, frame->coded_size());
+ RETURN_ON_FAILURE(capacity >= queued_size,
+ "Failed to get input buffer: " << input_buf_index,
+ kPlatformFailureError);
+
+ uint8_t* dst_y = buffer;
+ int dst_stride_y = frame->stride(VideoFrame::kYPlane);
+ uint8_t* dst_uv = buffer + frame->stride(VideoFrame::kYPlane) *
+ frame->rows(VideoFrame::kYPlane);
+ int dst_stride_uv = frame->stride(VideoFrame::kUPlane) * 2;
+ // Why NV12? Because COLOR_FORMAT_YUV420_SEMIPLANAR. See comment at other
+ // mention of that constant.
+ bool converted = !libyuv::I420ToNV12(
+ frame->data(VideoFrame::kYPlane), frame->stride(VideoFrame::kYPlane),
+ frame->data(VideoFrame::kUPlane), frame->stride(VideoFrame::kUPlane),
+ frame->data(VideoFrame::kVPlane), frame->stride(VideoFrame::kVPlane),
+ dst_y, dst_stride_y, dst_uv, dst_stride_uv, frame->coded_size().width(),
+ frame->coded_size().height());
+ RETURN_ON_FAILURE(converted, "Failed to I420ToNV12!", kPlatformFailureError);
+
+ input_timestamp_ += base::TimeDelta::FromMicroseconds(
+ base::Time::kMicrosecondsPerSecond / INITIAL_FRAMERATE);
+ status = media_codec_->QueueInputBuffer(input_buf_index, nullptr, queued_size,
+ input_timestamp_);
+ UMA_HISTOGRAM_TIMES("Media.AVDA.InputQueueTime",
+ base::Time::Now() - std::get<2>(input));
+ RETURN_ON_FAILURE(status == MEDIA_CODEC_OK,
+ "Failed to QueueInputBuffer: " << status,
+ kPlatformFailureError);
+ ++num_buffers_at_codec_;
+ pending_frames_.pop();
+}
+
+void AndroidVideoEncodeAccelerator::DequeueOutput() {
+ if (error_occurred_ || available_bitstream_buffers_.empty() ||
+ num_buffers_at_codec_ == 0) {
+ return;
+ }
+
+ int32_t buf_index = 0;
+ size_t offset = 0;
+ size_t size = 0;
+ bool key_frame = false;
+
+ MediaCodecStatus status =
+ media_codec_->DequeueOutputBuffer(NoWaitTimeOut(), &buf_index, &offset,
+ &size, nullptr, nullptr, &key_frame);
+ switch (status) {
+ case MEDIA_CODEC_TRY_AGAIN_LATER:
+ return;
+
+ case MEDIA_CODEC_ERROR:
+ RETURN_ON_FAILURE(false, "Codec error", kPlatformFailureError);
+ // Unreachable because of previous statement, but included for clarity.
+ return;
+
+ case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
+ return;
+
+ case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
+ return;
+
+ case MEDIA_CODEC_OK:
+ DCHECK_GE(buf_index, 0);
+ break;
+
+ default:
+ NOTREACHED();
+ break;
+ }
+
+ BitstreamBuffer bitstream_buffer = available_bitstream_buffers_.back();
+ available_bitstream_buffers_.pop_back();
+ std::unique_ptr<SharedMemoryRegion> shm(
+ new SharedMemoryRegion(bitstream_buffer, false));
+ RETURN_ON_FAILURE(shm->Map(), "Failed to map SHM", kPlatformFailureError);
+ RETURN_ON_FAILURE(size <= shm->size(),
+ "Encoded buffer too large: " << size << ">" << shm->size(),
+ kPlatformFailureError);
+
+ status = media_codec_->CopyFromOutputBuffer(buf_index, offset, shm->memory(),
+ size);
+ RETURN_ON_FAILURE(status == MEDIA_CODEC_OK, "CopyFromOutputBuffer failed",
+ kPlatformFailureError);
+ media_codec_->ReleaseOutputBuffer(buf_index, false);
+ --num_buffers_at_codec_;
+
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::Bind(&VideoEncodeAccelerator::Client::BitstreamBufferReady,
+ client_ptr_factory_->GetWeakPtr(), bitstream_buffer.id(), size,
+ key_frame, base::TimeDelta()));
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/android_video_encode_accelerator.h b/chromium/media/gpu/android/android_video_encode_accelerator.h
new file mode 100644
index 00000000000..b9624217312
--- /dev/null
+++ b/chromium/media/gpu/android/android_video_encode_accelerator.h
@@ -0,0 +1,115 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_ANDROID_VIDEO_ENCODE_ACCELERATOR_H_
+#define MEDIA_GPU_ANDROID_ANDROID_VIDEO_ENCODE_ACCELERATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include "base/containers/queue.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "base/timer/timer.h"
+#include "media/base/android/media_codec_bridge_impl.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/video/video_encode_accelerator.h"
+
+namespace media {
+
+class BitstreamBuffer;
+
+// Android-specific implementation of VideoEncodeAccelerator, enabling
+// hardware-acceleration of video encoding, based on Android's MediaCodec class
+// (http://developer.android.com/reference/android/media/MediaCodec.html). This
+// class expects to live and be called on a single thread (the GPU process'
+// ChildThread).
+class MEDIA_GPU_EXPORT AndroidVideoEncodeAccelerator
+ : public VideoEncodeAccelerator {
+ public:
+ AndroidVideoEncodeAccelerator();
+ ~AndroidVideoEncodeAccelerator() override;
+
+ // VideoEncodeAccelerator implementation.
+ VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
+ bool Initialize(VideoPixelFormat format,
+ const gfx::Size& input_visible_size,
+ VideoCodecProfile output_profile,
+ uint32_t initial_bitrate,
+ Client* client) override;
+ void Encode(const scoped_refptr<VideoFrame>& frame,
+ bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
+ void RequestEncodingParametersChange(uint32_t bitrate,
+ uint32_t framerate) override;
+ void Destroy() override;
+
+ private:
+ enum {
+ // Arbitrary choice.
+ INITIAL_FRAMERATE = 30,
+ // Default I-Frame interval in seconds.
+ IFRAME_INTERVAL_H264 = 20,
+ IFRAME_INTERVAL_VPX = 100,
+ IFRAME_INTERVAL = INT32_MAX,
+ };
+
+ // Impedance-mismatch fixers: MediaCodec is a poll-based API but VEA is a
+ // push-based API; these methods turn the crank to make the two work together.
+ void DoIOTask();
+ void QueueInput();
+ void DequeueOutput();
+
+ // Start & stop |io_timer_| if the time seems right.
+ void MaybeStartIOTimer();
+ void MaybeStopIOTimer();
+
+ // Used to DCHECK that we are called on the correct thread.
+ base::ThreadChecker thread_checker_;
+
+  // VideoEncodeAccelerator::Client callbacks go here.  Invalidated once any
+ // error triggers.
+ std::unique_ptr<base::WeakPtrFactory<Client>> client_ptr_factory_;
+
+ std::unique_ptr<MediaCodecBridge> media_codec_;
+
+ // Bitstream buffers waiting to be populated & returned to the client.
+ std::vector<BitstreamBuffer> available_bitstream_buffers_;
+
+ // Frames waiting to be passed to the codec, queued until an input buffer is
+ // available. Each element is a tuple of <Frame, key_frame, enqueue_time>.
+ using PendingFrames =
+ base::queue<std::tuple<scoped_refptr<VideoFrame>, bool, base::Time>>;
+ PendingFrames pending_frames_;
+
+ // Repeating timer responsible for draining pending IO to the codec.
+ base::RepeatingTimer io_timer_;
+
+ // The difference between number of buffers queued & dequeued at the codec.
+ int32_t num_buffers_at_codec_;
+
+ // A monotonically-growing value.
+ base::TimeDelta input_timestamp_;
+
+ // Resolution of input stream. Set once in initialization and not allowed to
+ // change after.
+ gfx::Size frame_size_;
+
+ uint32_t last_set_bitrate_; // In bps.
+
+ // True if there is encoder error.
+ bool error_occurred_;
+
+ DISALLOW_COPY_AND_ASSIGN(AndroidVideoEncodeAccelerator);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_ANDROID_VIDEO_ENCODE_ACCELERATOR_H_
diff --git a/chromium/media/gpu/android/android_video_surface_chooser.h b/chromium/media/gpu/android/android_video_surface_chooser.h
new file mode 100644
index 00000000000..dc738138828
--- /dev/null
+++ b/chromium/media/gpu/android/android_video_surface_chooser.h
@@ -0,0 +1,81 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_ANDROID_VIDEO_SURFACE_CHOOSER_H_
+#define MEDIA_GPU_ANDROID_ANDROID_VIDEO_SURFACE_CHOOSER_H_
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/optional.h"
+#include "media/base/android/android_overlay.h"
+#include "media/gpu/media_gpu_export.h"
+#include "ui/gfx/geometry/rect.h"
+
+namespace media {
+
+// Manage details of which surface to use for video playback.
+class MEDIA_GPU_EXPORT AndroidVideoSurfaceChooser {
+ public:
+ // Input state used for choosing the surface type.
+ struct State {
+ // Is an overlay required?
+ bool is_required = false;
+
+ // Is the player currently in fullscreen?
+ bool is_fullscreen = false;
+
+ // Should the overlay be marked as secure?
+ bool is_secure = false;
+
+ // Is the player's frame hidden / closed?
+ bool is_frame_hidden = false;
+
+ // Is the compositor willing to promote this?
+ bool is_compositor_promotable = false;
+
+ // Are we expecting a relayout soon?
+ bool is_expecting_relayout = false;
+
+ // If true, then we will default to promoting to overlay if it's power-
+ // efficient even if not otherwise required. Otherwise, we'll require other
+ // signals, like fs or secure, before we promote.
+ bool promote_aggressively = false;
+
+ // Hint to use for the initial position when transitioning to an overlay.
+ gfx::Rect initial_position;
+ };
+
+ // Notify the client that |overlay| is ready for use. The client may get
+ // the surface immediately.
+ using UseOverlayCB =
+ base::RepeatingCallback<void(std::unique_ptr<AndroidOverlay> overlay)>;
+
+ // Notify the client that the most recently provided overlay should be
+ // discarded. The overlay is still valid, but we recommend against
+ // using it soon, in favor of a SurfaceTexture.
+ using UseSurfaceTextureCB = base::RepeatingCallback<void(void)>;
+
+ AndroidVideoSurfaceChooser() {}
+ virtual ~AndroidVideoSurfaceChooser() {}
+
+ // Sets the client callbacks to be called when a new surface choice is made.
+  // Must be called before UpdateState().
+ virtual void SetClientCallbacks(
+ UseOverlayCB use_overlay_cb,
+ UseSurfaceTextureCB use_surface_texture_cb) = 0;
+
+ // Updates the current state and makes a new surface choice with the new
+ // state. If |new_factory| is empty, the factory is left as-is. Otherwise,
+ // the factory is updated to |*new_factory|.
+ virtual void UpdateState(base::Optional<AndroidOverlayFactoryCB> new_factory,
+ const State& new_state) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AndroidVideoSurfaceChooser);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_ANDROID_VIDEO_SURFACE_CHOOSER_H_
diff --git a/chromium/media/gpu/android/android_video_surface_chooser_impl.cc b/chromium/media/gpu/android/android_video_surface_chooser_impl.cc
new file mode 100644
index 00000000000..d1882075e72
--- /dev/null
+++ b/chromium/media/gpu/android/android_video_surface_chooser_impl.cc
@@ -0,0 +1,294 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/android_video_surface_chooser_impl.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/time/default_tick_clock.h"
+
+namespace media {
+
+// Minimum time that we require after a failed overlay attempt before we'll try
+// again for an overlay.
+constexpr base::TimeDelta MinimumDelayAfterFailedOverlay =
+ base::TimeDelta::FromSeconds(5);
+
+AndroidVideoSurfaceChooserImpl::AndroidVideoSurfaceChooserImpl(
+ bool allow_dynamic,
+ base::TickClock* tick_clock)
+ : allow_dynamic_(allow_dynamic),
+ tick_clock_(tick_clock),
+ weak_factory_(this) {
+ // Use a DefaultTickClock if one wasn't provided.
+ if (!tick_clock_) {
+ optional_tick_clock_ = base::MakeUnique<base::DefaultTickClock>();
+ tick_clock_ = optional_tick_clock_.get();
+ }
+}
+
+AndroidVideoSurfaceChooserImpl::~AndroidVideoSurfaceChooserImpl() {}
+
+void AndroidVideoSurfaceChooserImpl::SetClientCallbacks(
+ UseOverlayCB use_overlay_cb,
+ UseSurfaceTextureCB use_surface_texture_cb) {
+ DCHECK(use_overlay_cb && use_surface_texture_cb);
+ use_overlay_cb_ = std::move(use_overlay_cb);
+ use_surface_texture_cb_ = std::move(use_surface_texture_cb);
+}
+
+void AndroidVideoSurfaceChooserImpl::UpdateState(
+ base::Optional<AndroidOverlayFactoryCB> new_factory,
+ const State& new_state) {
+ DCHECK(use_overlay_cb_);
+ bool entered_fullscreen =
+ !current_state_.is_fullscreen && new_state.is_fullscreen;
+ current_state_ = new_state;
+
+ bool factory_changed = new_factory.has_value();
+ if (factory_changed)
+ overlay_factory_ = std::move(*new_factory);
+
+ if (!allow_dynamic_) {
+ if (!initial_state_received_) {
+ initial_state_received_ = true;
+ // Choose here so that Choose() doesn't have to handle non-dynamic.
+ if (overlay_factory_ &&
+ (current_state_.is_fullscreen || current_state_.is_secure ||
+ current_state_.is_required)) {
+ SwitchToOverlay(false);
+ } else {
+ SwitchToSurfaceTexture();
+ }
+ }
+ return;
+ }
+
+ // If we're entering fullscreen, clear any previous failure attempt. It's
+ // likely that any previous failure was due to a lack of power efficiency,
+ // but entering fs likely changes that anyway.
+ if (entered_fullscreen)
+ most_recent_overlay_failure_ = base::TimeTicks();
+
+ // If the factory changed, we should cancel pending overlay requests and
+ // set the client state back to Unknown if they're using an old overlay.
+ if (factory_changed) {
+ overlay_ = nullptr;
+ if (client_overlay_state_ == kUsingOverlay)
+ client_overlay_state_ = kUnknown;
+ }
+
+ Choose();
+}
+
+void AndroidVideoSurfaceChooserImpl::Choose() {
+ // Pre-M we shouldn't be called.
+ DCHECK(allow_dynamic_);
+
+ // TODO(liberato): should this depend on resolution?
+ OverlayState new_overlay_state = current_state_.promote_aggressively
+ ? kUsingOverlay
+ : kUsingSurfaceTexture;
+ // Do we require a power-efficient overlay?
+ bool needs_power_efficient = current_state_.promote_aggressively;
+
+ // In player element fullscreen, we want to use overlays if we can. Note that
+ // this does nothing if |promote_aggressively|, which is fine since switching
+  // from "want power efficient" to "don't care" is problematic.
+ if (current_state_.is_fullscreen)
+ new_overlay_state = kUsingOverlay;
+
+ // Try to use an overlay if possible for protected content. If the compositor
+ // won't promote, though, it's okay if we switch out. Set |is_required| in
+ // addition, if you don't want this behavior.
+ if (current_state_.is_secure) {
+ new_overlay_state = kUsingOverlay;
+ // Don't un-promote if not power efficient. If we did, then inline playback
+ // would likely not promote.
+ needs_power_efficient = false;
+ }
+
+ // If the compositor won't promote, then don't.
+ if (!current_state_.is_compositor_promotable)
+ new_overlay_state = kUsingSurfaceTexture;
+
+ // If we're expecting a relayout, then don't transition to overlay if we're
+ // not already in one. We don't want to transition out, though. This lets us
+ // delay entering on a fullscreen transition until blink relayout is complete.
+ // TODO(liberato): Detect this more directly.
+ if (current_state_.is_expecting_relayout &&
+ client_overlay_state_ != kUsingOverlay)
+ new_overlay_state = kUsingSurfaceTexture;
+
+ // If we're requesting an overlay, check that we haven't asked too recently
+ // since the last failure. This includes L1. We don't bother to check for
+ // our current state, since using an overlay would imply that our most recent
+ // failure was long ago enough to pass this check earlier.
+ if (new_overlay_state == kUsingOverlay) {
+ base::TimeDelta time_since_last_failure =
+ tick_clock_->NowTicks() - most_recent_overlay_failure_;
+ if (time_since_last_failure < MinimumDelayAfterFailedOverlay)
+ new_overlay_state = kUsingSurfaceTexture;
+ }
+
+ // If our frame is hidden, then don't use overlays.
+ if (current_state_.is_frame_hidden)
+ new_overlay_state = kUsingSurfaceTexture;
+
+ // If an overlay is required, then choose one. The only way we won't is if we
+ // don't have a factory or our request fails.
+ if (current_state_.is_required) {
+ new_overlay_state = kUsingOverlay;
+ // Required overlays don't need to be power efficient.
+ needs_power_efficient = false;
+ }
+
+ // If we have no factory, then we definitely don't want to use overlays.
+ if (!overlay_factory_)
+ new_overlay_state = kUsingSurfaceTexture;
+
+ // Make sure that we're in |new_overlay_state_|.
+ if (new_overlay_state == kUsingSurfaceTexture)
+ SwitchToSurfaceTexture();
+ else
+ SwitchToOverlay(needs_power_efficient);
+}
+
+void AndroidVideoSurfaceChooserImpl::SwitchToSurfaceTexture() {
+ // Invalidate any outstanding deletion callbacks for any overlays that we've
+ // provided to the client already. We assume that it will eventually drop
+ // them in response to the callback. Ready / failed callbacks aren't affected
+ // by this, since we own the overlay until those occur. We're about to
+ // drop |overlay_|, if we have one, which cancels them.
+ weak_factory_.InvalidateWeakPtrs();
+
+ // Cancel any outstanding overlay request, in case we're switching to overlay.
+ if (overlay_)
+ overlay_ = nullptr;
+
+ // Notify the client to switch if it's in the wrong state.
+ if (client_overlay_state_ != kUsingSurfaceTexture) {
+ DCHECK(use_surface_texture_cb_);
+
+ client_overlay_state_ = kUsingSurfaceTexture;
+ use_surface_texture_cb_.Run();
+ }
+}
+
+void AndroidVideoSurfaceChooserImpl::SwitchToOverlay(
+ bool needs_power_efficient) {
+ // If there's already an overlay request outstanding, then do nothing. We'll
+ // finish switching when it completes.
+ // TODO(liberato): If the power efficient flag for |overlay_| doesn't match
+ // |needs_power_efficient|, then we should cancel it anyway. In practice,
+ // this doesn't happen, so we ignore it.
+ if (overlay_)
+ return;
+
+ // Do nothing if the client is already using an overlay. Note that if one
+ // changes overlay factories, then this might not be true; an overlay from the
+ // old factory is not the same as an overlay from the new factory. However,
+ // we assume that ReplaceOverlayFactory handles that.
+ if (client_overlay_state_ == kUsingOverlay)
+ return;
+
+ // We don't modify |client_overlay_state_| yet, since we don't call the client
+ // back yet.
+
+ // Invalidate any outstanding callbacks. This is needed for the deletion
+ // callback, since for ready/failed callbacks, we still have ownership of the
+ // object. If we delete the object, then callbacks are cancelled anyway.
+ // We also don't want to receive the power efficient callback.
+ weak_factory_.InvalidateWeakPtrs();
+
+ AndroidOverlayConfig config;
+ // We bind all of our callbacks with weak ptrs, since we don't know how long
+ // the client will hold on to overlays. They could, in principle, show up
+ // long after the client is destroyed too, if codec destruction hangs.
+ config.ready_cb = base::Bind(&AndroidVideoSurfaceChooserImpl::OnOverlayReady,
+ weak_factory_.GetWeakPtr());
+ config.failed_cb =
+ base::Bind(&AndroidVideoSurfaceChooserImpl::OnOverlayFailed,
+ weak_factory_.GetWeakPtr());
+ config.rect = current_state_.initial_position;
+ config.secure = current_state_.is_secure;
+
+ // Request power efficient overlays and callbacks if we're supposed to.
+ config.power_efficient = needs_power_efficient;
+ config.power_cb =
+ base::Bind(&AndroidVideoSurfaceChooserImpl::OnPowerEfficientState,
+ weak_factory_.GetWeakPtr());
+
+ overlay_ = overlay_factory_.Run(std::move(config));
+ if (!overlay_)
+ SwitchToSurfaceTexture();
+}
+
+void AndroidVideoSurfaceChooserImpl::OnOverlayReady(AndroidOverlay* overlay) {
+ // |overlay_| is the only overlay for which we haven't gotten a ready callback
+ // back yet.
+ DCHECK_EQ(overlay, overlay_.get());
+
+ // Notify the overlay that we'd like to know if it's destroyed, so that we can
+ // update our internal state if the client drops it without being told.
+ overlay_->AddOverlayDeletedCallback(
+ base::Bind(&AndroidVideoSurfaceChooserImpl::OnOverlayDeleted,
+ weak_factory_.GetWeakPtr()));
+
+ client_overlay_state_ = kUsingOverlay;
+ use_overlay_cb_.Run(std::move(overlay_));
+}
+
+void AndroidVideoSurfaceChooserImpl::OnOverlayFailed(AndroidOverlay* overlay) {
+ // We shouldn't get a failure for any overlay except the incoming one.
+ DCHECK_EQ(overlay, overlay_.get());
+
+ overlay_ = nullptr;
+ most_recent_overlay_failure_ = tick_clock_->NowTicks();
+
+ // If the client isn't already using a SurfaceTexture, then switch to it.
+ // Note that this covers the case of kUnknown, when we might not have told the
+ // client anything yet. That's important for Initialize, so that a failed
+ // overlay request still results in some callback to the client to know what
+ // surface to start with.
+ SwitchToSurfaceTexture();
+}
+
+void AndroidVideoSurfaceChooserImpl::OnOverlayDeleted(AndroidOverlay* overlay) {
+ client_overlay_state_ = kUsingSurfaceTexture;
+ // We don't call SwitchToSurfaceTexture since the client dropped the overlay.
+ // It's already using SurfaceTexture.
+}
+
+void AndroidVideoSurfaceChooserImpl::OnPowerEfficientState(
+ AndroidOverlay* overlay,
+ bool is_power_efficient) {
+  // We cannot receive this before OnOverlayReady, since that is the first
+ // callback if it arrives. Getting a new overlay clears any previous cbs.
+ DCHECK(!overlay_);
+
+ // We cannot receive it after switching to SurfaceTexture, since that also
+ // clears all callbacks.
+ DCHECK(client_overlay_state_ == kUsingOverlay);
+
+ // If the overlay has become power efficient, then take no action.
+ if (is_power_efficient)
+ return;
+
+ // If the overlay is now required, then keep it. It might have become
+ // required since we requested it.
+ if (current_state_.is_required)
+ return;
+
+ // If we're not able to switch dynamically, then keep the overlay.
+ if (!allow_dynamic_)
+ return;
+
+ // We could set the failure timer here, but we don't mostly for fullscreen.
+ // We don't want to delay transitioning to an overlay if the user re-enters
+ // fullscreen. TODO(liberato): Perhaps we should just clear the failure timer
+ // if we detect a transition into fs when we get new state from the client.
+ SwitchToSurfaceTexture();
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/android_video_surface_chooser_impl.h b/chromium/media/gpu/android/android_video_surface_chooser_impl.h
new file mode 100644
index 00000000000..fedc2dfbccc
--- /dev/null
+++ b/chromium/media/gpu/android/android_video_surface_chooser_impl.h
@@ -0,0 +1,102 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_ANDROID_VIDEO_SURFACE_CHOOSER_IMPL_H_
+#define MEDIA_GPU_ANDROID_ANDROID_VIDEO_SURFACE_CHOOSER_IMPL_H_
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/base/android/android_overlay.h"
+#include "media/gpu/android/android_video_surface_chooser.h"
+#include "media/gpu/media_gpu_export.h"
+
+namespace media {
+
+// Implementation of AndroidVideoSurfaceChooser.
+class MEDIA_GPU_EXPORT AndroidVideoSurfaceChooserImpl
+ : public AndroidVideoSurfaceChooser {
+ public:
+ // |allow_dynamic| should be true if and only if we are allowed to change the
+ // surface selection after the initial callback. |tick_clock|, if provided,
+ // will be used as our time source. Otherwise, we'll use wall clock. If
+ // provided, then it must outlast |this|.
+ AndroidVideoSurfaceChooserImpl(bool allow_dynamic,
+ base::TickClock* tick_clock = nullptr);
+ ~AndroidVideoSurfaceChooserImpl() override;
+
+ // AndroidVideoSurfaceChooser
+ void SetClientCallbacks(UseOverlayCB use_overlay_cb,
+ UseSurfaceTextureCB use_surface_texture_cb) override;
+ void UpdateState(base::Optional<AndroidOverlayFactoryCB> new_factory,
+ const State& new_state) override;
+
+ private:
+ // Choose whether we should be using a SurfaceTexture or overlay, and issue
+ // the right callbacks if we're changing between them. This should only be
+ // called if |allow_dynamic_|.
+ void Choose();
+
+ // Start switching to SurfaceTexture or overlay, as needed. These will call
+ // the client callbacks if we're changing state, though those callbacks might
+ // happen after this returns.
+ void SwitchToSurfaceTexture();
+ // If |overlay_| has an in-flight request, then this will do nothing. If
+ // |power_efficient|, then we will require a power-efficient overlay, and
+ // cancel it if it becomes not power efficient.
+ void SwitchToOverlay(bool power_efficient);
+
+ // AndroidOverlay callbacks.
+ void OnOverlayReady(AndroidOverlay*);
+ void OnOverlayFailed(AndroidOverlay*);
+ void OnOverlayDeleted(AndroidOverlay*);
+ void OnPowerEfficientState(AndroidOverlay* overlay, bool is_power_efficient);
+
+ // Client callbacks.
+ UseOverlayCB use_overlay_cb_;
+ UseSurfaceTextureCB use_surface_texture_cb_;
+
+ // Current overlay that we've constructed but haven't received ready / failed
+ // callbacks yet. Will be nullptr if we haven't constructed one, or if we
+ // sent it to the client already once it became ready to use.
+ std::unique_ptr<AndroidOverlay> overlay_;
+
+ AndroidOverlayFactoryCB overlay_factory_;
+
+ // Do we allow dynamic surface switches. Usually this means "Are we running
+ // on M or later?".
+ bool allow_dynamic_;
+
+ enum OverlayState {
+ kUnknown,
+ kUsingSurfaceTexture,
+ kUsingOverlay,
+ };
+
+ // What was the last signal that the client received?
+ OverlayState client_overlay_state_ = kUnknown;
+
+ State current_state_;
+
+ bool initial_state_received_ = false;
+
+ // Not owned by us.
+ base::TickClock* tick_clock_;
+
+ // Owned copy of |tick_clock_|, or nullptr if one was provided to us.
+ std::unique_ptr<base::TickClock> optional_tick_clock_;
+
+ // Time at which we most recently got a failed overlay request.
+ base::TimeTicks most_recent_overlay_failure_;
+
+ base::WeakPtrFactory<AndroidVideoSurfaceChooserImpl> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(AndroidVideoSurfaceChooserImpl);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_ANDROID_VIDEO_SURFACE_CHOOSER_IMPL_H_
diff --git a/chromium/media/gpu/android/android_video_surface_chooser_impl_unittest.cc b/chromium/media/gpu/android/android_video_surface_chooser_impl_unittest.cc
new file mode 100644
index 00000000000..a7aeaeae781
--- /dev/null
+++ b/chromium/media/gpu/android/android_video_surface_chooser_impl_unittest.cc
@@ -0,0 +1,574 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/android_video_surface_chooser_impl.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/base/android/mock_android_overlay.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::AnyNumber;
+using ::testing::Bool;
+using ::testing::Combine;
+using ::testing::NotNull;
+using ::testing::Return;
+using ::testing::StrictMock;
+using ::testing::Values;
+using ::testing::_;
+
+namespace {
+using ::media::AndroidOverlay;
+using ::media::MockAndroidOverlay;
+
+class MockClient {
+ public:
+ MOCK_METHOD1(UseOverlay, void(AndroidOverlay*));
+
+ void UseOverlayImpl(std::unique_ptr<AndroidOverlay> overlay) {
+ UseOverlay(overlay.get());
+
+ // Also take ownership of the overlay, so that it's not destroyed.
+ overlay_ = std::move(overlay);
+ }
+
+ // Note that this won't clear |overlay_|, which is helpful.
+ MOCK_METHOD0(UseSurfaceTexture, void(void));
+
+ // Let the test have the overlay.
+ std::unique_ptr<AndroidOverlay> ReleaseOverlay() {
+ return std::move(overlay_);
+ }
+
+ private:
+ std::unique_ptr<AndroidOverlay> overlay_;
+};
+
+// Strongly-typed enums for TestParams. It would be nice if Values() didn't
+// do something that causes these to work anyway if you mis-match them. Maybe
+// it'll work better in a future gtest. At the very least, it's a lot more
+// readable than 'true' and 'false' in the test instantiations.
+//
+// Outputs from the chooser.
+enum class ShouldUseOverlay { No, Yes };
+enum class ShouldBePowerEfficient { No, Yes, Ignored /* for clarity */ };
+// Inputs to the chooser.
+enum class AllowDynamic { No, Yes };
+enum class IsFullscreen { No, Yes };
+enum class IsRequired { No, Yes };
+enum class IsSecure { No, Yes };
+enum class IsFrameHidden { No, Yes };
+enum class IsCCPromotable { No, Yes };
+enum class IsExpectingRelayout { No, Yes };
+enum class PromoteAggressively { No, Yes };
+
+using TestParams = std::tuple<ShouldUseOverlay,
+ ShouldBePowerEfficient,
+ AllowDynamic,
+ IsRequired,
+ IsFullscreen,
+ IsSecure,
+ IsFrameHidden,
+ IsCCPromotable,
+ IsExpectingRelayout,
+ PromoteAggressively>;
+
+// Useful macro for instantiating tests.
+#define Either(x) Values(x::No, x::Yes)
+
+// Check if a parameter of type |type| is Yes. |n| is the location of the
+// parameter of that type.
+// c++14 can remove |n|, and std::get() by type.
+#define IsYes(type, n) (::testing::get<n>(GetParam()) == type::Yes);
+#define IsIgnored(type, n) (::testing::get<n>(GetParam()) == type::Ignored);
+
+} // namespace
+
+namespace media {
+
+// Unit tests for AndroidVideoSurfaceChooserImpl
+class AndroidVideoSurfaceChooserImplTest
+ : public testing::TestWithParam<TestParams> {
+ public:
+ ~AndroidVideoSurfaceChooserImplTest() override {}
+
+ void SetUp() override {
+ overlay_ = base::MakeUnique<MockAndroidOverlay>();
+
+ // Advance the clock just so we're not at 0.
+ tick_clock_.Advance(base::TimeDelta::FromSeconds(10));
+
+ // Don't prevent promotions because of the compositor.
+ chooser_state_.is_compositor_promotable = true;
+
+ // We create a destruction observer. By default, the overlay must not be
+ // destroyed until the test completes. Of course, the test may ask the
+ // observer to expect something else.
+ destruction_observer_ = overlay_->CreateDestructionObserver();
+ destruction_observer_->DoNotAllowDestruction();
+ overlay_callbacks_ = overlay_->GetCallbacks();
+ }
+
+ void TearDown() override {
+ // If we get this far, then assume that whatever |destruction_observer_|
+ // was looking for should have already happened. We don't want the
+ // lifetime of the observer to matter with respect to the overlay when
+ // checking expectations.
+ // Note that it might already be null.
+ destruction_observer_ = nullptr;
+ }
+
+ // Start the chooser, providing |factory| as the initial factory.
+ void StartChooser(AndroidOverlayFactoryCB factory) {
+ chooser_ = base::MakeUnique<AndroidVideoSurfaceChooserImpl>(allow_dynamic_,
+ &tick_clock_);
+ chooser_->SetClientCallbacks(
+ base::Bind(&MockClient::UseOverlayImpl, base::Unretained(&client_)),
+ base::Bind(&MockClient::UseSurfaceTexture, base::Unretained(&client_)));
+ chooser_->UpdateState(
+ factory ? base::make_optional(std::move(factory)) : base::nullopt,
+ chooser_state_);
+ }
+
+ // Start the chooser with |overlay_|, and verify that the client is told to
+ // use it. As a convenience, return the overlay raw ptr.
+ MockAndroidOverlay* StartChooserAndProvideOverlay() {
+ MockAndroidOverlay* overlay = overlay_.get();
+
+ EXPECT_CALL(*this, MockOnOverlayCreated());
+ StartChooser(FactoryFor(std::move(overlay_)));
+ testing::Mock::VerifyAndClearExpectations(&client_);
+ testing::Mock::VerifyAndClearExpectations(this);
+ EXPECT_CALL(client_, UseOverlay(NotNull()));
+ overlay_callbacks_.OverlayReady.Run();
+
+ return overlay;
+ }
+
+ // AndroidOverlayFactoryCB is a RepeatingCallback, so we can't just bind
+ // something that uses unique_ptr. RepeatingCallback needs to copy it.
+ class Factory {
+ public:
+ Factory(std::unique_ptr<MockAndroidOverlay> overlay,
+ base::RepeatingCallback<void()> create_overlay_cb)
+ : overlay_(std::move(overlay)),
+ create_overlay_cb_(std::move(create_overlay_cb)) {}
+
+ // Return whatever overlay we're given. This is used to construct factory
+ // callbacks for the chooser.
+ std::unique_ptr<AndroidOverlay> ReturnOverlay(AndroidOverlayConfig config) {
+ // Notify the mock.
+ create_overlay_cb_.Run();
+ if (overlay_)
+ overlay_->SetConfig(std::move(config));
+ return std::move(overlay_);
+ }
+
+ private:
+ std::unique_ptr<MockAndroidOverlay> overlay_;
+ base::RepeatingCallback<void()> create_overlay_cb_;
+ };
+
+ // Create a factory that will return |overlay| when run.
+ AndroidOverlayFactoryCB FactoryFor(
+ std::unique_ptr<MockAndroidOverlay> overlay) {
+ Factory* factory = new Factory(
+ std::move(overlay),
+ base::Bind(&AndroidVideoSurfaceChooserImplTest::MockOnOverlayCreated,
+ base::Unretained(this)));
+
+ // Leaky!
+ return base::Bind(&Factory::ReturnOverlay, base::Unretained(factory));
+ }
+
+ // Called by the factory when it's run.
+ MOCK_METHOD0(MockOnOverlayCreated, void());
+
+ std::unique_ptr<AndroidVideoSurfaceChooserImpl> chooser_;
+ StrictMock<MockClient> client_;
+ std::unique_ptr<MockAndroidOverlay> overlay_;
+
+ // Callbacks to control the overlay that the test's factory will vend.
+ MockAndroidOverlay::Callbacks overlay_callbacks_;
+
+ std::unique_ptr<DestructionObserver> destruction_observer_;
+
+ // Will the chooser created by StartChooser() support dynamic surface changes?
+ bool allow_dynamic_ = true;
+
+ base::SimpleTestTickClock tick_clock_;
+
+ AndroidVideoSurfaceChooser::State chooser_state_;
+};
+
+TEST_F(AndroidVideoSurfaceChooserImplTest,
+ InitializeWithoutFactoryUsesSurfaceTexture) {
+ // Calling Initialize() with no factory should result in a callback to use
+ // surface texture.
+ EXPECT_CALL(client_, UseSurfaceTexture());
+ StartChooser(AndroidOverlayFactoryCB());
+}
+
+TEST_F(AndroidVideoSurfaceChooserImplTest,
+ NullInitialOverlayUsesSurfaceTexture) {
+ // If we provide a factory, but it fails to create an overlay, then |client_|
+ // should be notified to use a surface texture.
+
+ chooser_state_.is_fullscreen = true;
+ EXPECT_CALL(*this, MockOnOverlayCreated());
+ EXPECT_CALL(client_, UseSurfaceTexture());
+ StartChooser(FactoryFor(nullptr));
+}
+
+TEST_F(AndroidVideoSurfaceChooserImplTest,
+ FailedInitialOverlayUsesSurfaceTexture) {
+ // If we provide a factory, but the overlay that it provides returns 'failed',
+ // then |client_| should use surface texture. Also check that it won't retry
+ // after a failed overlay too soon.
+ chooser_state_.is_fullscreen = true;
+ EXPECT_CALL(*this, MockOnOverlayCreated());
+ StartChooser(FactoryFor(std::move(overlay_)));
+
+ testing::Mock::VerifyAndClearExpectations(&client_);
+ testing::Mock::VerifyAndClearExpectations(this);
+
+ // The overlay may be destroyed at any time after we send OverlayFailed. It
+ // doesn't have to be destroyed. We just care that it hasn't been destroyed
+ // before now.
+ destruction_observer_ = nullptr;
+ EXPECT_CALL(client_, UseSurfaceTexture());
+ overlay_callbacks_.OverlayFailed.Run();
+ testing::Mock::VerifyAndClearExpectations(&client_);
+ testing::Mock::VerifyAndClearExpectations(this);
+
+ // Try to get it to choose again, which shouldn't do anything.
+ tick_clock_.Advance(base::TimeDelta::FromSeconds(2));
+ EXPECT_CALL(*this, MockOnOverlayCreated()).Times(0);
+ chooser_->UpdateState(FactoryFor(nullptr), chooser_state_);
+ testing::Mock::VerifyAndClearExpectations(&client_);
+ testing::Mock::VerifyAndClearExpectations(this);
+
+ // Advance some more and try again. This time, it should request an overlay
+ // from the factory.
+ tick_clock_.Advance(base::TimeDelta::FromSeconds(100));
+ EXPECT_CALL(*this, MockOnOverlayCreated()).Times(1);
+ chooser_->UpdateState(FactoryFor(nullptr), chooser_state_);
+ testing::Mock::VerifyAndClearExpectations(&client_);
+ testing::Mock::VerifyAndClearExpectations(this);
+}
+
+TEST_F(AndroidVideoSurfaceChooserImplTest, NullLaterOverlayUsesSurfaceTexture) {
+ // If an overlay factory is provided after startup that returns a null overlay
+ // from CreateOverlay, |chooser_| should do nothing beyond possibly notifying
+ // |client_| to use SurfaceTexture (zero or more times).
+
+ // Start with SurfaceTexture.
+ chooser_state_.is_fullscreen = true;
+ EXPECT_CALL(client_, UseSurfaceTexture());
+ allow_dynamic_ = true;
+ StartChooser(AndroidOverlayFactoryCB());
+ testing::Mock::VerifyAndClearExpectations(&client_);
+
+ // Provide a factory that will return a null overlay.
+ EXPECT_CALL(*this, MockOnOverlayCreated());
+ EXPECT_CALL(client_, UseSurfaceTexture()).Times(AnyNumber());
+ chooser_->UpdateState(FactoryFor(nullptr), chooser_state_);
+}
+
+TEST_F(AndroidVideoSurfaceChooserImplTest, FailedLaterOverlayDoesNothing) {
+ // If we send an overlay factory that returns an overlay, and that overlay
+ // fails, then the client should not be notified except for zero or more
+ // callbacks to switch to surface texture.
+
+ // Start with SurfaceTexture.
+ chooser_state_.is_fullscreen = true;
+ EXPECT_CALL(client_, UseSurfaceTexture());
+ StartChooser(AndroidOverlayFactoryCB());
+ testing::Mock::VerifyAndClearExpectations(&client_);
+
+ // Provide a factory.
+ EXPECT_CALL(*this, MockOnOverlayCreated());
+ EXPECT_CALL(client_, UseSurfaceTexture()).Times(AnyNumber());
+ chooser_->UpdateState(FactoryFor(std::move(overlay_)), chooser_state_);
+ testing::Mock::VerifyAndClearExpectations(&client_);
+
+ // Fail the overlay. We don't care if it's destroyed after that, as long as
+ // it hasn't been destroyed yet.
+ destruction_observer_ = nullptr;
+ overlay_callbacks_.OverlayFailed.Run();
+}
+
+TEST_F(AndroidVideoSurfaceChooserImplTest,
+ SuccessfulLaterOverlayNotifiesClient) {
+ // |client_| is notified if we provide a factory that gets an overlay.
+
+ // Start with SurfaceTexture.
+ chooser_state_.is_fullscreen = true;
+ EXPECT_CALL(client_, UseSurfaceTexture());
+ StartChooser(AndroidOverlayFactoryCB());
+ testing::Mock::VerifyAndClearExpectations(&client_);
+
+ // Provide a factory. |chooser_| should try to create an overlay. We don't
+ // care if a call to UseSurfaceTexture is elided or not. Note that AVDA will
+ // ignore duplicate calls anyway (MultipleSurfaceTextureCallbacksAreIgnored).
+ EXPECT_CALL(*this, MockOnOverlayCreated());
+ EXPECT_CALL(client_, UseSurfaceTexture()).Times(AnyNumber());
+ chooser_->UpdateState(FactoryFor(std::move(overlay_)), chooser_state_);
+ testing::Mock::VerifyAndClearExpectations(&client_);
+ testing::Mock::VerifyAndClearExpectations(this);
+
+ // Notify |chooser_| that the overlay is ready.
+ EXPECT_CALL(client_, UseOverlay(NotNull()));
+ overlay_callbacks_.OverlayReady.Run();
+}
+
+TEST_F(AndroidVideoSurfaceChooserImplTest,
+ UpdateStateAfterDeleteRetriesOverlay) {
+ // Make sure that SurfaceChooser notices that we delete the overlay, and have
+ // switched back to SurfaceTexture mode.
+
+ chooser_state_.is_fullscreen = true;
+ StartChooserAndProvideOverlay();
+
+ // Delete the overlay.
+ destruction_observer_ = nullptr;
+ client_.ReleaseOverlay();
+
+ // Force chooser to choose again. We expect that it will retry the overlay,
+ // since the delete should have informed it that we've switched back to
+ // SurfaceTexture without a callback from SurfaceChooser. If it didn't know
+ // this, then it would think that the client is still using an overlay, and
+ // take no action.
+
+ // Note that if it enforces a delay here before retrying, that might be okay
+ // too. For now, we assume that it doesn't.
+ EXPECT_CALL(*this, MockOnOverlayCreated());
+ chooser_->UpdateState(base::Optional<AndroidOverlayFactoryCB>(),
+ chooser_state_);
+}
+
+TEST_F(AndroidVideoSurfaceChooserImplTest,
+ PowerEffcientOverlayCancelsIfNotPowerEfficient) {
+ // If we request a power efficient overlay that later becomes not power
+ // efficient, then the client should switch to SurfaceTexture.
+
+ chooser_state_.promote_aggressively = true;
+ MockAndroidOverlay* overlay = StartChooserAndProvideOverlay();
+
+ // Verify that this results in a power efficient overlay. If not, then we've
+ // picked the wrong flags, since we're just assuming what state will make the
+ // chooser care about power-efficiency.
+ ASSERT_TRUE(overlay->config()->power_efficient);
+
+ // Notify the chooser that it's not power efficient anymore.
+ EXPECT_CALL(client_, UseSurfaceTexture());
+ overlay_callbacks_.PowerEfficientState.Run(false);
+}
+
+TEST_P(AndroidVideoSurfaceChooserImplTest, OverlayIsUsedOrNotBasedOnState) {
+ // Provide a factory, and verify that it is used when the state says that it
+ // should be. If the overlay is used, then we also verify that it does not
+ // switch to SurfaceTexture first, since pre-M requires it.
+
+ const bool should_use_overlay = IsYes(ShouldUseOverlay, 0);
+ const bool should_be_power_efficient = IsYes(ShouldBePowerEfficient, 1);
+ const bool ignore_power_efficient = IsIgnored(ShouldBePowerEfficient, 1);
+ allow_dynamic_ = IsYes(AllowDynamic, 2);
+ chooser_state_.is_required = IsYes(IsRequired, 3);
+ chooser_state_.is_fullscreen = IsYes(IsFullscreen, 4);
+ chooser_state_.is_secure = IsYes(IsSecure, 5);
+ chooser_state_.is_frame_hidden = IsYes(IsFrameHidden, 6);
+ chooser_state_.is_compositor_promotable = IsYes(IsCCPromotable, 7);
+ chooser_state_.is_expecting_relayout = IsYes(IsExpectingRelayout, 8);
+ chooser_state_.promote_aggressively = IsYes(PromoteAggressively, 9);
+
+ MockAndroidOverlay* overlay = overlay_.get();
+
+ if (should_use_overlay) {
+ EXPECT_CALL(client_, UseSurfaceTexture()).Times(0);
+ EXPECT_CALL(*this, MockOnOverlayCreated());
+ } else {
+ EXPECT_CALL(client_, UseSurfaceTexture());
+ EXPECT_CALL(*this, MockOnOverlayCreated()).Times(0);
+ }
+
+ StartChooser(FactoryFor(std::move(overlay_)));
+
+ // Check that the overlay config has the right power-efficient state set.
+ if (should_use_overlay && !ignore_power_efficient)
+ ASSERT_EQ(should_be_power_efficient, overlay->config()->power_efficient);
+
+ // Verify that the overlay is provided when it becomes ready.
+ if (should_use_overlay) {
+ EXPECT_CALL(client_, UseOverlay(NotNull()));
+ overlay_callbacks_.OverlayReady.Run();
+ }
+}
+
+// Unless we're promoting aggressively, we should default to SurfaceTexture.
+INSTANTIATE_TEST_CASE_P(NoFullscreenUsesSurfaceTexture,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::No),
+ Values(ShouldBePowerEfficient::Ignored),
+ Either(AllowDynamic),
+ Values(IsRequired::No),
+ Values(IsFullscreen::No),
+ Values(IsSecure::No),
+ Either(IsFrameHidden),
+ Either(IsCCPromotable),
+ Either(IsExpectingRelayout),
+ Values(PromoteAggressively::No)));
+
+INSTANTIATE_TEST_CASE_P(FullscreenUsesOverlay,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::Yes),
+ Values(ShouldBePowerEfficient::Ignored),
+ Either(AllowDynamic),
+ Either(IsRequired),
+ Values(IsFullscreen::Yes),
+ Values(IsSecure::No),
+ Values(IsFrameHidden::No),
+ Values(IsCCPromotable::Yes),
+ Values(IsExpectingRelayout::No),
+ Either(PromoteAggressively)));
+
+INSTANTIATE_TEST_CASE_P(RequiredUsesOverlay,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::Yes),
+ Values(ShouldBePowerEfficient::No),
+ Values(AllowDynamic::Yes),
+ Values(IsRequired::Yes),
+ Either(IsFullscreen),
+ Either(IsSecure),
+ Either(IsFrameHidden),
+ Either(IsCCPromotable),
+ Either(IsExpectingRelayout),
+ Either(PromoteAggressively)));
+
+// Secure textures should use an overlay if the compositor will promote them.
+// We don't care about relayout, since it's transient; either behavior is okay
+// if a relayout is expected. Similarly, hidden frames are fine either way.
+INSTANTIATE_TEST_CASE_P(SecureUsesOverlayIfPromotable,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::Yes),
+ Values(ShouldBePowerEfficient::No),
+ Either(AllowDynamic),
+ Either(IsRequired),
+ Either(IsFullscreen),
+ Values(IsSecure::Yes),
+ Values(IsFrameHidden::No),
+ Values(IsCCPromotable::Yes),
+ Values(IsExpectingRelayout::No),
+ Either(PromoteAggressively)));
+
+INSTANTIATE_TEST_CASE_P(HiddenFramesUseSurfaceTexture,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::No),
+ Values(ShouldBePowerEfficient::No),
+ Values(AllowDynamic::Yes),
+ Values(IsRequired::No),
+ Either(IsFullscreen),
+ Either(IsSecure),
+ Values(IsFrameHidden::Yes),
+ Either(IsCCPromotable),
+ Either(IsExpectingRelayout),
+ Either(PromoteAggressively)));
+
+// For all dynamic cases, we shouldn't use an overlay if the compositor won't
+// promote it, unless it's marked as required. This includes secure surfaces,
+// so that L3 will fall back to SurfaceTexture. Non-dynamic is excluded, since
+// we don't get (or use) compositor feedback before the first frame. At that
+// point, we've already chosen the output surface and can't switch it.
+INSTANTIATE_TEST_CASE_P(NotCCPromotableNotRequiredUsesSurfaceTexture,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::No),
+ Values(ShouldBePowerEfficient::No),
+ Values(AllowDynamic::Yes),
+ Values(IsRequired::No),
+ Either(IsFullscreen),
+ Either(IsSecure),
+ Values(IsFrameHidden::No),
+ Values(IsCCPromotable::No),
+ Either(IsExpectingRelayout),
+ Either(PromoteAggressively)));
+
+// If we're expecting a relayout, then we should never use an overlay unless
+// it's required.
+INSTANTIATE_TEST_CASE_P(InsecureExpectingRelayoutUsesSurfaceTexture,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::No),
+ Values(ShouldBePowerEfficient::No),
+ Values(AllowDynamic::Yes),
+ Values(IsRequired::No),
+ Either(IsFullscreen),
+ Either(IsSecure),
+ Either(IsFrameHidden),
+ Either(IsCCPromotable),
+ Values(IsExpectingRelayout::Yes),
+ Either(PromoteAggressively)));
+
+// "is_fullscreen" should be enough to trigger an overlay pre-M.
+INSTANTIATE_TEST_CASE_P(NotDynamicInFullscreenUsesOverlay,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::Yes),
+ Values(ShouldBePowerEfficient::No),
+ Values(AllowDynamic::No),
+ Either(IsRequired),
+ Values(IsFullscreen::Yes),
+ Either(IsSecure),
+ Either(IsFrameHidden),
+ Either(IsCCPromotable),
+ Either(IsExpectingRelayout),
+ Either(PromoteAggressively)));
+
+// "is_secure" should be enough to trigger an overlay pre-M.
+INSTANTIATE_TEST_CASE_P(NotDynamicSecureUsesOverlay,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::Yes),
+ Values(ShouldBePowerEfficient::No),
+ Values(AllowDynamic::No),
+ Either(IsRequired),
+ Either(IsFullscreen),
+ Values(IsSecure::Yes),
+ Either(IsFrameHidden),
+ Either(IsCCPromotable),
+ Either(IsExpectingRelayout),
+ Either(PromoteAggressively)));
+
+// "is_required" should be enough to trigger an overlay pre-M.
+INSTANTIATE_TEST_CASE_P(NotDynamicRequiredUsesOverlay,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::Yes),
+ Values(ShouldBePowerEfficient::No),
+ Values(AllowDynamic::No),
+ Values(IsRequired::Yes),
+ Either(IsFullscreen),
+ Either(IsSecure),
+ Either(IsFrameHidden),
+ Either(IsCCPromotable),
+ Either(IsExpectingRelayout),
+ Either(PromoteAggressively)));
+
+// If we're promoting aggressively, then we should request power efficient.
+INSTANTIATE_TEST_CASE_P(AggressiveOverlayIsPowerEfficient,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::Yes),
+ Values(ShouldBePowerEfficient::Yes),
+ Values(AllowDynamic::Yes),
+ Values(IsRequired::No),
+ Values(IsFullscreen::No),
+ Values(IsSecure::No),
+ Values(IsFrameHidden::No),
+ Values(IsCCPromotable::Yes),
+ Values(IsExpectingRelayout::No),
+ Values(PromoteAggressively::Yes)));
+
+} // namespace media
diff --git a/chromium/media/gpu/android/avda_codec_allocator.cc b/chromium/media/gpu/android/avda_codec_allocator.cc
new file mode 100644
index 00000000000..9942a61198f
--- /dev/null
+++ b/chromium/media/gpu/android/avda_codec_allocator.cc
@@ -0,0 +1,482 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/avda_codec_allocator.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "base/sys_info.h"
+#include "base/task_runner_util.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/default_tick_clock.h"
+#include "base/trace_event/trace_event.h"
+#include "media/base/android/media_codec_bridge_impl.h"
+#include "media/base/limits.h"
+#include "media/base/media.h"
+#include "media/base/timestamp_constants.h"
+#include "media/gpu/android/android_video_decode_accelerator.h"
+
+namespace media {
+
+namespace {
+
+// Give tasks 800ms before considering them hung. MediaCodec.configure() calls
+// typically take 100-200ms on a N5, so 800ms is expected to very rarely result
+// in false positives. Also, false positives have low impact because we resume
+// using the thread when the task completes.
+constexpr base::TimeDelta kHungTaskDetectionTimeout =
+ base::TimeDelta::FromMilliseconds(800);
+
+// This must be safe to call on any thread. Returns nullptr on failure.
+std::unique_ptr<MediaCodecBridge> CreateMediaCodecInternal(
+ AVDACodecAllocator::CodecFactoryCB factory_cb,
+ scoped_refptr<CodecConfig> codec_config,
+ bool requires_software_codec) {
+ TRACE_EVENT0("media", "CreateMediaCodecInternal");
+
+ const base::android::JavaRef<jobject>& media_crypto =
+ codec_config->media_crypto ? *codec_config->media_crypto : nullptr;
+
+ // |requires_secure_codec| implies that it's an encrypted stream.
+ DCHECK(!codec_config->requires_secure_codec || !media_crypto.is_null());
+
+ CodecType codec_type = CodecType::kAny;
+ if (codec_config->requires_secure_codec && requires_software_codec) {
+ DVLOG(1) << "Secure software codec doesn't exist.";
+ return nullptr;
+ } else if (codec_config->requires_secure_codec) {
+ codec_type = CodecType::kSecure;
+ } else if (requires_software_codec) {
+ codec_type = CodecType::kSoftware;
+ }
+
+ std::unique_ptr<MediaCodecBridge> codec(factory_cb.Run(
+ codec_config->codec, codec_type,
+ codec_config->initial_expected_coded_size,
+ codec_config->surface_bundle->GetJavaSurface(), media_crypto,
+ codec_config->csd0, codec_config->csd1, true));
+
+ return codec;
+}
+
+// Delete |codec| and signal |done_event| if it's not null.
+void DeleteMediaCodecAndSignal(std::unique_ptr<MediaCodecBridge> codec,
+ base::WaitableEvent* done_event) {
+ codec.reset();
+ if (done_event)
+ done_event->Signal();
+}
+
+} // namespace
+
+CodecConfig::CodecConfig() {}
+CodecConfig::~CodecConfig() {}
+
+AVDACodecAllocator::HangDetector::HangDetector(base::TickClock* tick_clock)
+ : tick_clock_(tick_clock) {}
+
+void AVDACodecAllocator::HangDetector::WillProcessTask(
+ const base::PendingTask& pending_task) {
+ base::AutoLock l(lock_);
+ task_start_time_ = tick_clock_->NowTicks();
+}
+
+void AVDACodecAllocator::HangDetector::DidProcessTask(
+ const base::PendingTask& pending_task) {
+ base::AutoLock l(lock_);
+ task_start_time_ = base::TimeTicks();
+}
+
+bool AVDACodecAllocator::HangDetector::IsThreadLikelyHung() {
+ base::AutoLock l(lock_);
+ if (task_start_time_.is_null())
+ return false;
+
+ return (tick_clock_->NowTicks() - task_start_time_) >
+ kHungTaskDetectionTimeout;
+}
+
+// static
+AVDACodecAllocator* AVDACodecAllocator::GetInstance(
+ scoped_refptr<base::SequencedTaskRunner> task_runner) {
+ static AVDACodecAllocator* allocator = new AVDACodecAllocator(
+ base::BindRepeating(&MediaCodecBridgeImpl::CreateVideoDecoder),
+ task_runner);
+
+ // Verify that this caller agrees on the task runner, if one was specified.
+ DCHECK(!task_runner || allocator->task_runner_ == task_runner);
+
+ return allocator;
+}
+
+void AVDACodecAllocator::StartThread(AVDACodecAllocatorClient* client) {
+ if (!task_runner_->RunsTasksInCurrentSequence()) {
+ task_runner_->PostTask(FROM_HERE,
+ base::Bind(&AVDACodecAllocator::StartThread,
+ base::Unretained(this), client));
+ return;
+ }
+
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
+
+ // NOTE: |client| might not be a valid pointer anymore. All we know is that
+ // no other client is aliased to it, as long as |client| called StopThread
+ // before it was destroyed. The reason is that any re-use of |client| would
+ // have to also post StartThread to this thread. Since the re-use must be
+ // ordered later with respect to deleting the original |client|, the post must
+ // also be ordered later. So, there might be an aliased client posted, but it
+ // won't have started yet.
+
+ // Cancel any pending StopThreadTask()s because we need the threads now.
+ weak_this_factory_.InvalidateWeakPtrs();
+
+ // Try to start the threads if they haven't been started.
+ for (auto* thread : threads_) {
+ if (thread->thread.IsRunning())
+ continue;
+
+ if (!thread->thread.Start())
+ return;
+
+ // Register the hang detector to observe the thread's MessageLoop.
+ thread->thread.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&base::MessageLoop::AddTaskObserver,
+ base::Unretained(thread->thread.message_loop()),
+ &thread->hang_detector));
+ }
+
+ clients_.insert(client);
+ return;
+}
+
+void AVDACodecAllocator::StopThread(AVDACodecAllocatorClient* client) {
+ if (!task_runner_->RunsTasksInCurrentSequence()) {
+ task_runner_->PostTask(FROM_HERE,
+ base::Bind(&AVDACodecAllocator::StopThread,
+ base::Unretained(this), client));
+ return;
+ }
+
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
+
+ clients_.erase(client);
+ if (!clients_.empty()) {
+ // If we aren't stopping, then signal immediately.
+ if (stop_event_for_testing_)
+ stop_event_for_testing_->Signal();
+ return;
+ }
+
+ // Post a task to stop each thread through its task runner and back to this
+ // thread. This ensures that all pending tasks are run first. If a new AVDA
+ // calls StartThread() before StopThreadTask() runs, it's canceled by
+ // invalidating its weak pointer. As a result we're guaranteed to only call
+ // Thread::Stop() while there are no tasks on its queue. We don't try to stop
+ // hung threads. But if it recovers it will be stopped the next time a client
+ // calls this.
+ for (size_t i = 0; i < threads_.size(); i++) {
+ if (threads_[i]->thread.IsRunning() &&
+ !threads_[i]->hang_detector.IsThreadLikelyHung()) {
+ threads_[i]->thread.task_runner()->PostTaskAndReply(
+ FROM_HERE, base::Bind(&base::DoNothing),
+ base::Bind(&AVDACodecAllocator::StopThreadTask,
+ weak_this_factory_.GetWeakPtr(), i));
+ }
+ }
+}
+
+// Return the task runner for tasks of type |task_type|.
+scoped_refptr<base::SingleThreadTaskRunner> AVDACodecAllocator::TaskRunnerFor(
+ TaskType task_type) {
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
+ return threads_[task_type]->thread.task_runner();
+}
+
+std::unique_ptr<MediaCodecBridge> AVDACodecAllocator::CreateMediaCodecSync(
+ scoped_refptr<CodecConfig> codec_config) {
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
+
+ auto task_type =
+ TaskTypeForAllocation(codec_config->software_codec_forbidden);
+ if (!task_type)
+ return nullptr;
+
+ auto codec = CreateMediaCodecInternal(factory_cb_, codec_config,
+ task_type == SW_CODEC);
+ if (codec)
+ codec_task_types_[codec.get()] = *task_type;
+ return codec;
+}
+
+void AVDACodecAllocator::CreateMediaCodecAsync(
+ base::WeakPtr<AVDACodecAllocatorClient> client,
+ scoped_refptr<CodecConfig> codec_config) {
+ if (!task_runner_->RunsTasksInCurrentSequence()) {
+ // We need to be ordered with respect to any Start/StopThread from this
+ // client. Otherwise, we might post work to the worker thread before the
+ // posted task to start the worker threads (on |task_runner_|) has run yet.
+ // We also need to avoid data races, since our member variables are all
+ // supposed to be accessed from the main thread only.
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&AVDACodecAllocator::CreateMediaCodecAsyncInternal,
+ base::Unretained(this), base::ThreadTaskRunnerHandle::Get(),
+ client, codec_config));
+ return;
+ }
+
+ // We're on the right thread, so just send in |task_runner_|.
+ CreateMediaCodecAsyncInternal(task_runner_, client, codec_config);
+}
+
+void AVDACodecAllocator::CreateMediaCodecAsyncInternal(
+ scoped_refptr<base::SequencedTaskRunner> client_task_runner,
+ base::WeakPtr<AVDACodecAllocatorClient> client,
+ scoped_refptr<CodecConfig> codec_config) {
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
+ DCHECK(client_task_runner);
+
+ // TODO(liberato): BindOnce more often if possible.
+
+ // Allocate the codec on the appropriate thread, and reply to this one with
+ // the result. If |client| is gone by then, we handle cleanup.
+ auto task_type =
+ TaskTypeForAllocation(codec_config->software_codec_forbidden);
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner =
+ task_type ? TaskRunnerFor(*task_type) : nullptr;
+ if (!task_type || !task_runner) {
+ // The allocator threads didn't start or are stuck.
+ // Post even if it's the current thread, to avoid re-entrancy.
+ client_task_runner->PostTask(
+ FROM_HERE, base::Bind(&AVDACodecAllocatorClient::OnCodecConfigured,
+ client, nullptr, codec_config->surface_bundle));
+ return;
+ }
+
+ base::PostTaskAndReplyWithResult(
+ task_runner.get(), FROM_HERE,
+ base::BindOnce(&CreateMediaCodecInternal, factory_cb_, codec_config,
+ task_type == SW_CODEC),
+ base::BindOnce(&AVDACodecAllocator::ForwardOrDropCodec,
+ base::Unretained(this), client_task_runner, client,
+ *task_type, codec_config->surface_bundle));
+}
+
+void AVDACodecAllocator::ForwardOrDropCodec(
+ scoped_refptr<base::SequencedTaskRunner> client_task_runner,
+ base::WeakPtr<AVDACodecAllocatorClient> client,
+ TaskType task_type,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle,
+ std::unique_ptr<MediaCodecBridge> media_codec) {
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
+
+ // Remember: we are not necessarily on the right thread to use |client|.
+
+ if (media_codec)
+ codec_task_types_[media_codec.get()] = task_type;
+
+ // We could call directly if |task_runner_| is the current thread. Also note
+ // that there's no guarantee that |client_task_runner|'s thread is still
+ // running. That's okay; MediaCodecAndSurface will handle it.
+ client_task_runner->PostTask(
+ FROM_HERE,
+ base::BindOnce(&AVDACodecAllocator::ForwardOrDropCodecOnClientThread,
+ base::Unretained(this), client,
+ base::MakeUnique<MediaCodecAndSurface>(
+ std::move(media_codec), std::move(surface_bundle))));
+}
+
+void AVDACodecAllocator::ForwardOrDropCodecOnClientThread(
+ base::WeakPtr<AVDACodecAllocatorClient> client,
+ std::unique_ptr<MediaCodecAndSurface> codec_and_surface) {
+ // Note that if |client| has been destroyed, MediaCodecAndSurface will clean
+ // up properly on the correct thread. Also note that |surface_bundle| will be
+ // preserved at least as long as the codec.
+ if (!client)
+ return;
+
+ client->OnCodecConfigured(std::move(codec_and_surface->media_codec),
+ std::move(codec_and_surface->surface_bundle));
+}
+
+AVDACodecAllocator::MediaCodecAndSurface::MediaCodecAndSurface(
+ std::unique_ptr<MediaCodecBridge> codec,
+ scoped_refptr<AVDASurfaceBundle> surface)
+ : media_codec(std::move(codec)), surface_bundle(std::move(surface)) {}
+
+AVDACodecAllocator::MediaCodecAndSurface::~MediaCodecAndSurface() {
+ // This code may be run on any thread.
+
+ if (!media_codec)
+ return;
+
+ // If there are no registered clients, then the threads are stopped or are
+ // stopping. We must restart them / cancel any pending stop requests before
+ // we can post codec destruction to them. In the "restart them" case, the
+ // threads aren't running. In the "cancel...requests" case, the threads are
+ // running, but we're trying to clear them out via a DoNothing task posted
+ // there. Once that completes, there will be a join on the main thread. If
+ // we post, then it will be ordered after the DoNothing, but before the join
+ // on the main thread (this thread). If the destruction task hangs, then so
+ // will the join.
+ //
+ // We register a fake client to make sure that the threads are ready.
+ //
+ // If we can't start the thread, then ReleaseMediaCodec will free it on the
+ // current thread.
+ AVDACodecAllocator* allocator = GetInstance(nullptr);
+ allocator->StartThread(nullptr);
+ allocator->ReleaseMediaCodec(std::move(media_codec),
+ std::move(surface_bundle));
+
+ // We can stop the threads immediately. If other clients are around, then
+ // this will do nothing. Otherwise, this will order the join after the
+ // release completes successfully.
+ allocator->StopThread(nullptr);
+}
+
+void AVDACodecAllocator::ReleaseMediaCodec(
+ std::unique_ptr<MediaCodecBridge> media_codec,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle) {
+ DCHECK(media_codec);
+
+ if (!task_runner_->RunsTasksInCurrentSequence()) {
+ // See CreateMediaCodecAsync
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&AVDACodecAllocator::ReleaseMediaCodec,
+ base::Unretained(this), std::move(media_codec),
+ std::move(surface_bundle)));
+ return;
+ }
+
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
+
+ auto task_type = codec_task_types_[media_codec.get()];
+ int erased = codec_task_types_.erase(media_codec.get());
+ DCHECK(erased);
+
+ // Save a waitable event for the release if the codec is attached to an
+ // overlay so we can block on it in WaitForPendingRelease().
+ base::WaitableEvent* released_event = nullptr;
+ if (surface_bundle->overlay) {
+ pending_codec_releases_.emplace(
+ std::piecewise_construct,
+ std::forward_as_tuple(surface_bundle->overlay.get()),
+ std::forward_as_tuple(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED));
+ released_event =
+ &pending_codec_releases_.find(surface_bundle->overlay.get())->second;
+ }
+
+ // Note that we forward |surface_bundle|, too, so that the surface outlasts
+ // the codec.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner =
+ TaskRunnerFor(task_type);
+ if (!task_runner) {
+ // Thread isn't running, so just delete it now and hope for the best.
+ media_codec.reset();
+ OnMediaCodecReleased(std::move(surface_bundle));
+ return;
+ }
+
+ task_runner->PostTaskAndReply(
+ FROM_HERE,
+ base::Bind(&DeleteMediaCodecAndSignal,
+ base::Passed(std::move(media_codec)), released_event),
+ base::Bind(&AVDACodecAllocator::OnMediaCodecReleased,
+ base::Unretained(this), std::move(surface_bundle)));
+}
+
+void AVDACodecAllocator::OnMediaCodecReleased(
+ scoped_refptr<AVDASurfaceBundle> surface_bundle) {
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
+
+ // This is a no-op if it's a non overlay bundle.
+ pending_codec_releases_.erase(surface_bundle->overlay.get());
+}
+
+bool AVDACodecAllocator::IsAnyRegisteredAVDA() {
+ return !clients_.empty();
+}
+
+base::Optional<TaskType> AVDACodecAllocator::TaskTypeForAllocation(
+ bool software_codec_forbidden) {
+ if (!threads_[AUTO_CODEC]->hang_detector.IsThreadLikelyHung())
+ return AUTO_CODEC;
+
+ if (!threads_[SW_CODEC]->hang_detector.IsThreadLikelyHung() &&
+ !software_codec_forbidden) {
+ return SW_CODEC;
+ }
+
+ return base::nullopt;
+}
+
+base::Thread& AVDACodecAllocator::GetThreadForTesting(TaskType task_type) {
+ return threads_[task_type]->thread;
+}
+
+bool AVDACodecAllocator::WaitForPendingRelease(AndroidOverlay* overlay) {
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
+ if (!pending_codec_releases_.count(overlay))
+ return true;
+
+ // The codec is being released so we have to wait for it here. It's a
+ // TimedWait() because the MediaCodec release may hang due to framework bugs.
+ // And in that case we don't want to hang the browser UI thread. Android ANRs
+ // occur when the UI thread is blocked for 5 seconds, so waiting for 2 seconds
+ // gives us leeway to avoid an ANR. Verified no ANR on a Nexus 7.
+ base::WaitableEvent& released = pending_codec_releases_.find(overlay)->second;
+ released.TimedWait(base::TimeDelta::FromSeconds(2));
+ if (released.IsSignaled())
+ return true;
+
+ DLOG(WARNING) << __func__ << ": timed out waiting for MediaCodec#release()";
+ return false;
+}
+
+AVDACodecAllocator::AVDACodecAllocator(
+ AVDACodecAllocator::CodecFactoryCB factory_cb,
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ base::TickClock* tick_clock,
+ base::WaitableEvent* stop_event)
+ : task_runner_(task_runner),
+ stop_event_for_testing_(stop_event),
+ factory_cb_(std::move(factory_cb)),
+ weak_this_factory_(this) {
+ // We leak the clock we create, but that's okay because we're a singleton.
+ auto* clock = tick_clock ? tick_clock : new base::DefaultTickClock();
+
+ // Create threads with names and indices that match up with TaskType.
+ threads_.push_back(new ThreadAndHangDetector("AVDAAutoThread", clock));
+ threads_.push_back(new ThreadAndHangDetector("AVDASWThread", clock));
+ static_assert(AUTO_CODEC == 0 && SW_CODEC == 1,
+ "TaskType values are not ordered correctly.");
+}
+
+AVDACodecAllocator::~AVDACodecAllocator() {
+ // Only tests should reach here. Shut down threads so that we guarantee that
+ // nothing will use the threads.
+ for (auto* thread : threads_)
+ thread->thread.Stop();
+}
+
+void AVDACodecAllocator::StopThreadTask(size_t index) {
+ threads_[index]->thread.Stop();
+ // Signal the stop event after both threads are stopped.
+ if (stop_event_for_testing_ && !threads_[AUTO_CODEC]->thread.IsRunning() &&
+ !threads_[SW_CODEC]->thread.IsRunning()) {
+ stop_event_for_testing_->Signal();
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/avda_codec_allocator.h b/chromium/media/gpu/android/avda_codec_allocator.h
new file mode 100644
index 00000000000..fc684496e31
--- /dev/null
+++ b/chromium/media/gpu/android/avda_codec_allocator.h
@@ -0,0 +1,296 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_AVDA_CODEC_ALLOCATOR_H_
+#define MEDIA_GPU_ANDROID_AVDA_CODEC_ALLOCATOR_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/android/build_info.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/optional.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/sys_info.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/threading/thread.h"
+#include "base/time/tick_clock.h"
+#include "base/trace_event/trace_event.h"
+#include "media/base/android/android_overlay.h"
+#include "media/base/android/media_codec_bridge_impl.h"
+#include "media/base/android/media_drm_bridge_cdm_context.h"
+#include "media/base/media.h"
+#include "media/base/surface_manager.h"
+#include "media/base/video_codecs.h"
+#include "media/gpu/android/avda_surface_bundle.h"
+#include "media/gpu/media_gpu_export.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gl/android/scoped_java_surface.h"
+
+namespace media {
+
+// For TaskRunnerFor. These are used as vector indices, so please update
+// AVDACodecAllocator's constructor if you add / change them.
+enum TaskType {
+ // Task for an autodetected MediaCodec instance.
+ AUTO_CODEC = 0,
+
+ // Task for a software-codec-required MediaCodec.
+ SW_CODEC = 1,
+};
+
+// Configuration info for MediaCodec.
+// This is used to shuttle configuration info between threads without needing
+// to worry about the lifetime of the AVDA instance.
+class MEDIA_GPU_EXPORT CodecConfig
+ : public base::RefCountedThreadSafe<CodecConfig> {
+ public:
+ CodecConfig();
+
+ VideoCodec codec = kUnknownVideoCodec;
+
+ // The surface that MediaCodec is configured to output to.
+ scoped_refptr<AVDASurfaceBundle> surface_bundle;
+
+ // The MediaCrypto that MediaCodec is configured with for an encrypted stream.
+ JavaObjectPtr media_crypto;
+
+ // Whether MediaCrypto requires a secure codec.
+ bool requires_secure_codec = false;
+
+ // The initial coded size. The actual size might change at any time, so this
+ // is only a hint.
+ gfx::Size initial_expected_coded_size;
+
+ // Whether creating a software decoder backed MediaCodec is forbidden.
+ bool software_codec_forbidden = false;
+
+ // Codec specific data (SPS and PPS for H264).
+ std::vector<uint8_t> csd0;
+ std::vector<uint8_t> csd1;
+
+ protected:
+ friend class base::RefCountedThreadSafe<CodecConfig>;
+ virtual ~CodecConfig();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CodecConfig);
+};
+
+class AVDASurfaceAllocatorClient {
+ public:
+ // Called when the requested SurfaceView becomes available after a call to
+ // AllocateSurface()
+ virtual void OnSurfaceAvailable(bool success) = 0;
+
+ // Called when the allocated surface is being destroyed. This must either
+ // replace the surface with MediaCodec#setSurface, or release the MediaCodec
+ // it's attached to. The client no longer owns the surface and doesn't
+ // need to call DeallocateSurface();
+ virtual void OnSurfaceDestroyed() = 0;
+
+ protected:
+ ~AVDASurfaceAllocatorClient() {}
+};
+
+class AVDACodecAllocatorClient {
+ public:
+ // Called on the main thread when a new MediaCodec is configured.
+ // |media_codec| will be null if configuration failed.
+ virtual void OnCodecConfigured(
+ std::unique_ptr<MediaCodecBridge> media_codec,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle) = 0;
+
+ protected:
+ ~AVDACodecAllocatorClient() {}
+};
+
+// AVDACodecAllocator manages threads for allocating and releasing MediaCodec
+// instances. These activities can hang, depending on android version, due
+// to mediaserver bugs. AVDACodecAllocator detects these cases, and reports
+// on them to allow software fallback if the HW path is hung up.
+class MEDIA_GPU_EXPORT AVDACodecAllocator {
+ public:
+ static AVDACodecAllocator* GetInstance(
+ scoped_refptr<base::SequencedTaskRunner> task_runner);
+
+ using CodecFactoryCB =
+ base::RepeatingCallback<std::unique_ptr<MediaCodecBridge>(
+ VideoCodec codec,
+ CodecType codec_type,
+ const gfx::Size& size, // Output frame size.
+ const base::android::JavaRef<jobject>& surface,
+ const base::android::JavaRef<jobject>& media_crypto,
+ const std::vector<uint8_t>& csd0,
+ const std::vector<uint8_t>& csd1,
+ bool allow_adaptive_playback)>;
+
+ // Make sure the construction threads are started for |client|. If the
+ // threads fail to start, then codec allocation may fail.
+ virtual void StartThread(AVDACodecAllocatorClient* client);
+ virtual void StopThread(AVDACodecAllocatorClient* client);
+
+ // Create and configure a MediaCodec synchronously.
+ virtual std::unique_ptr<MediaCodecBridge> CreateMediaCodecSync(
+ scoped_refptr<CodecConfig> codec_config);
+
+ // Create and configure a MediaCodec asynchronously. The result is delivered
+ // via OnCodecConfigured().
+ virtual void CreateMediaCodecAsync(
+ base::WeakPtr<AVDACodecAllocatorClient> client,
+ scoped_refptr<CodecConfig> codec_config);
+
+ // Asynchronously release |media_codec| with the attached surface. We will
+ // drop our reference to |surface_bundle| on the main thread after the codec
+ // is deallocated, since the codec isn't using it anymore. We will not take
+ // other action on it (e.g., calling ReleaseSurfaceTexture if it has one),
+ // since some other codec might be going to use it. We just want to be sure
+ // that it outlives |media_codec|.
+ virtual void ReleaseMediaCodec(
+ std::unique_ptr<MediaCodecBridge> media_codec,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle);
+
+ // Return true if and only if there is any AVDA registered.
+ bool IsAnyRegisteredAVDA();
+
+ // Return a reference to the thread for unit tests.
+ base::Thread& GetThreadForTesting(TaskType task_type);
+
+ // Wait for a bounded amount of time for |overlay| to be freed, if it's
+ // in use pending release of a codec. Returns true on success, or false if
+ // the wait times out.
+ bool WaitForPendingRelease(AndroidOverlay* overlay);
+
+ protected:
+ // |tick_clock| and |stop_event| are for tests only.
+ AVDACodecAllocator(AVDACodecAllocator::CodecFactoryCB factory_cb,
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ base::TickClock* tick_clock = nullptr,
+ base::WaitableEvent* stop_event = nullptr);
+ virtual ~AVDACodecAllocator();
+
+ // Struct to own a codec and surface bundle, with a custom deleter to post
+ // destruction to the right thread.
+ struct MediaCodecAndSurface {
+ MediaCodecAndSurface(std::unique_ptr<MediaCodecBridge> media_codec,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle);
+ ~MediaCodecAndSurface();
+ std::unique_ptr<MediaCodecBridge> media_codec;
+ scoped_refptr<AVDASurfaceBundle> surface_bundle;
+ };
+
+ // Forward |media_codec|, which is configured to output to |surface_bundle|,
+ // to |client| if |client| is still around. Otherwise, release the codec and
+ // then drop our ref to |surface_bundle|. This is called on |task_runner_|.
+ // It may only reference |client| from |client_task_runner|.
+ void ForwardOrDropCodec(
+ scoped_refptr<base::SequencedTaskRunner> client_task_runner,
+ base::WeakPtr<AVDACodecAllocatorClient> client,
+ TaskType task_type,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle,
+ std::unique_ptr<MediaCodecBridge> media_codec);
+
+ // Forward |surface_bundle| and |media_codec| to |client| on the right thread
+ // to access |client|.
+ void ForwardOrDropCodecOnClientThread(
+ base::WeakPtr<AVDACodecAllocatorClient> client,
+ std::unique_ptr<MediaCodecAndSurface> codec_and_surface);
+
+ private:
+ friend class AVDACodecAllocatorTest;
+
+ struct OwnerRecord {
+ AVDASurfaceAllocatorClient* owner = nullptr;
+ AVDASurfaceAllocatorClient* waiter = nullptr;
+ };
+
+ class HangDetector : public base::MessageLoop::TaskObserver {
+ public:
+ HangDetector(base::TickClock* tick_clock);
+ void WillProcessTask(const base::PendingTask& pending_task) override;
+ void DidProcessTask(const base::PendingTask& pending_task) override;
+ bool IsThreadLikelyHung();
+
+ private:
+ base::Lock lock_;
+
+ // Non-null when a task is currently running.
+ base::TimeTicks task_start_time_;
+
+ base::TickClock* tick_clock_;
+
+ DISALLOW_COPY_AND_ASSIGN(HangDetector);
+ };
+
+ // Handy combination of a thread and hang detector for it.
+ struct ThreadAndHangDetector {
+ ThreadAndHangDetector(const std::string& name, base::TickClock* tick_clock)
+ : thread(name), hang_detector(tick_clock) {}
+ base::Thread thread;
+ HangDetector hang_detector;
+ };
+
+ // Helper function for CreateMediaCodecAsync which takes the task runner on
+ // which it should post the reply to |client|.
+ void CreateMediaCodecAsyncInternal(
+ scoped_refptr<base::SequencedTaskRunner> client_task_runner,
+ base::WeakPtr<AVDACodecAllocatorClient> client,
+ scoped_refptr<CodecConfig> codec_config);
+
+ // Return the task type to use for a new codec allocation, or nullopt if
+ // both threads are hung.
+ base::Optional<TaskType> TaskTypeForAllocation(bool software_codec_forbidden);
+
+ // Return the task runner for tasks of type |type|.
+ scoped_refptr<base::SingleThreadTaskRunner> TaskRunnerFor(TaskType task_type);
+
+ // Called on the gpu main thread when a codec is freed on a codec thread.
+ // |surface_bundle| is the surface bundle that the codec was using. It's
+ // important to pass this through to ensure a) it outlives the codec, and b)
+ // it's deleted on the right thread.
+ void OnMediaCodecReleased(scoped_refptr<AVDASurfaceBundle> surface_bundle);
+
+ // Stop the thread indicated by |index|. This signals stop_event_for_testing_
+ // after both threads are stopped.
+ void StopThreadTask(size_t index);
+
+ // Task runner on which we do all our work. All members should be accessed
+ // only from this task runner. |task_runner_| itself may be referenced from
+ // any thread (hence const).
+ const scoped_refptr<base::SequencedTaskRunner> task_runner_;
+
+ // All registered AVDAs.
+ std::set<AVDACodecAllocatorClient*> clients_;
+
+ // Waitable events for ongoing release tasks indexed by overlay so we can
+ // wait on the codec release if the surface attached to it is being destroyed.
+ // This really is needed only for ContentVideoViewOverlay, since it requires
+ // synchronous releases with respect to the main thread.
+ std::map<AndroidOverlay*, base::WaitableEvent> pending_codec_releases_;
+
+ // Threads for each of TaskType. They are started / stopped as avda instances
+  // show up and request them. The vector indices must match TaskType.
+ std::vector<ThreadAndHangDetector*> threads_;
+
+ base::WaitableEvent* stop_event_for_testing_;
+
+ // Saves the TaskType used to create a given codec so it can later be released
+ // on the same thread.
+ std::map<MediaCodecBridge*, TaskType> codec_task_types_;
+
+ // Low-level codec factory, for testing.
+ CodecFactoryCB factory_cb_;
+
+ // For canceling pending StopThreadTask()s.
+ base::WeakPtrFactory<AVDACodecAllocator> weak_this_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(AVDACodecAllocator);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_AVDA_CODEC_ALLOCATOR_H_
diff --git a/chromium/media/gpu/android/avda_codec_allocator_unittest.cc b/chromium/media/gpu/android/avda_codec_allocator_unittest.cc
new file mode 100644
index 00000000000..6e7fe85599f
--- /dev/null
+++ b/chromium/media/gpu/android/avda_codec_allocator_unittest.cc
@@ -0,0 +1,384 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/avda_codec_allocator.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/time/tick_clock.h"
+#include "media/base/android/mock_android_overlay.h"
+#include "media/base/android/mock_media_codec_bridge.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::Invoke;
+using testing::NiceMock;
+using testing::ReturnRef;
+using testing::_;
+
+namespace media {
+namespace {
+template <typename ReturnType>
+void RunAndSignalTask(base::WaitableEvent* event,
+ ReturnType* return_value,
+ const base::Callback<ReturnType(void)>& cb) {
+ *return_value = cb.Run();
+ event->Signal();
+}
+
+void WaitUntilRestarted(base::WaitableEvent* about_to_wait_event,
+ base::WaitableEvent* wait_event) {
+ // Notify somebody that we've started.
+ if (about_to_wait_event)
+ about_to_wait_event->Signal();
+ wait_event->Wait();
+}
+
+void SignalImmediately(base::WaitableEvent* event) {
+ event->Signal();
+}
+} // namespace
+
+class MockClient : public AVDACodecAllocatorClient {
+ public:
+ MockClient()
+ : codec_arrived_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED),
+ weak_factory_(this) {}
+
+ // Gmock doesn't let us mock methods taking move-only types.
+ MOCK_METHOD1(OnCodecConfiguredMock, void(MediaCodecBridge* media_codec));
+ void OnCodecConfigured(
+ std::unique_ptr<MediaCodecBridge> media_codec,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle) override {
+ media_codec_ = std::move(media_codec);
+ OnCodecConfiguredMock(media_codec.get());
+ codec_arrived_event_.Signal();
+ }
+
+ base::WeakPtr<AVDACodecAllocatorClient> GetWeakPtr() {
+ return weak_factory_.GetWeakPtr();
+ }
+
+ // Most recently provided codec.
+ std::unique_ptr<MediaCodecBridge> media_codec_;
+
+ base::WaitableEvent codec_arrived_event_;
+
+ base::WeakPtrFactory<AVDACodecAllocatorClient> weak_factory_;
+};
+
+class AVDACodecAllocatorTest : public testing::Test {
+ public:
+ AVDACodecAllocatorTest()
+ : allocator_thread_("AllocatorThread"),
+ stop_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED) {
+ // Don't start the clock at null.
+ tick_clock_.Advance(base::TimeDelta::FromSeconds(1));
+ }
+
+ ~AVDACodecAllocatorTest() override {}
+
+ // Utility fn to test out threading.
+ void AllocateCodec() {
+ allocator_->StartThread(avda1_);
+ scoped_refptr<CodecConfig> codec_config(new CodecConfig);
+ codec_config->surface_bundle = surface_bundle_;
+ EXPECT_CALL(*avda1_, OnCodecConfiguredMock(_));
+ allocator_->CreateMediaCodecAsync(avda1_->GetWeakPtr(), codec_config);
+ }
+
+ void DestroyCodec() {
+ // Make sure that we got a codec.
+ ASSERT_NE(avda1_->media_codec_, nullptr);
+ base::WaitableEvent destruction_event(
+ base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ static_cast<MockMediaCodecBridge*>(avda1_->media_codec_.get())
+ ->SetCodecDestroyedEvent(&destruction_event);
+ allocator_->ReleaseMediaCodec(std::move(avda1_->media_codec_),
+ surface_bundle_);
+
+ // This won't wait for the threads to stop, which means that the release
+ // might not have completed yet. Even once we are signalled that the codec
+ // has been destroyed, we can't be sure that OnMediaCodecReleased has run
+ // on the allocator thread. To get around this, one should wait on
+ // |stop_event_|, but not here. If we're run on the allocator's thread,
+ // then that's where |stop_event_| will be signalled from.
+ allocator_->StopThread(avda1_);
+ // The codec destruction should be async with respect to us.
+ destruction_event.Wait();
+
+ // Important: we don't know that OnMediaCodecReleased has completed.
+ // If we clean up the test and post the allocator's destruction to the
+ // allocator thread, before the "and reply" posts the codec release, then
+ // the codec release will be run on a destructed allocator. Either we
+ // should synchronize on that, or quit using base::Unretained().
+ // Waiting for |stop_event_| and then for |allocator_thread_| should be
+    // sufficient to avoid this.
+ // Waiting for the overlay to be released is probably also enough, since
+ // that happens to be run on OnMediaCodecReleased also.
+ }
+
+ void WaitForSurfaceDestruction() {
+ // This may be called from any thread.
+ PostAndWait(FROM_HERE,
+ base::Bind(
+ [](AVDACodecAllocator* allocator, AndroidOverlay* overlay) {
+ allocator->WaitForPendingRelease(overlay);
+ return true;
+ },
+ allocator_, surface_bundle_->overlay.get()));
+ }
+
+ protected:
+ void SetUp() override {
+ // Start the main thread for the allocator. This would normally be the GPU
+ // main thread.
+ ASSERT_TRUE(allocator_thread_.Start());
+
+ AVDACodecAllocator::CodecFactoryCB factory_cb(
+ base::BindRepeating(&MockMediaCodecBridge::CreateVideoDecoder));
+
+ // Create the first allocator on the allocator thread.
+ allocator_ = PostAndWait(
+ FROM_HERE, base::Bind(
+ [](AVDACodecAllocator::CodecFactoryCB factory_cb,
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ base::TickClock* clock, base::WaitableEvent* event) {
+ return new AVDACodecAllocator(factory_cb, task_runner,
+ clock, event);
+ },
+ factory_cb, allocator_thread_.task_runner(),
+ &tick_clock_, &stop_event_));
+ allocator2_ = new AVDACodecAllocator(
+ factory_cb, base::SequencedTaskRunnerHandle::Get());
+
+ // Create a SurfaceBundle that provides an overlay. It will provide a null
+ // java ref if requested.
+ std::unique_ptr<MockAndroidOverlay> overlay =
+ base::MakeUnique<NiceMock<MockAndroidOverlay>>();
+ scoped_refptr<CodecConfig> codec_config(new CodecConfig);
+ ON_CALL(*overlay, GetJavaSurface())
+ .WillByDefault(ReturnRef(null_java_ref_));
+ surface_bundle_ = new AVDASurfaceBundle(std::move(overlay));
+ }
+
+ void TearDown() override {
+ // Don't leave any threads hung, or this will hang too.
+ // It would be nice if we could let a unique ptr handle this, but the
+ // destructor is private. We also have to destroy it on the right thread.
+ PostAndWait(FROM_HERE, base::Bind(
+ [](AVDACodecAllocator* allocator) {
+ delete allocator;
+ return true;
+ },
+ allocator_));
+
+ allocator_thread_.Stop();
+ delete allocator2_;
+ }
+
+ protected:
+ // Start / stop the threads for |avda| on the right thread.
+ void StartThread(AVDACodecAllocatorClient* avda) {
+ PostAndWait(FROM_HERE, base::Bind(
+ [](AVDACodecAllocator* allocator,
+ AVDACodecAllocatorClient* avda) {
+ allocator->StartThread(avda);
+ return true; // void won't work.
+ },
+ allocator_, avda));
+ }
+
+ void StopThread(AVDACodecAllocatorClient* avda) {
+ // Note that we also wait for the stop event, so that we know that the
+ // stop has completed. It's async with respect to the allocator thread.
+ PostAndWait(FROM_HERE, base::Bind(
+ [](AVDACodecAllocator* allocator,
+ AVDACodecAllocatorClient* avda) {
+ allocator->StopThread(avda);
+ return true;
+ },
+ allocator_, avda));
+ // Note that we don't do this on the allocator thread, since that's the
+ // thread that will signal it.
+ stop_event_.Wait();
+ }
+
+ // Return the running state of |task_type|, doing the necessary thread hops.
+ bool IsThreadRunning(TaskType task_type) {
+ return PostAndWait(
+ FROM_HERE,
+ base::Bind(
+ [](AVDACodecAllocator* allocator, TaskType task_type) {
+ return allocator->GetThreadForTesting(task_type).IsRunning();
+ },
+ allocator_, task_type));
+ }
+
+ base::Optional<TaskType> TaskTypeForAllocation(
+ bool software_codec_forbidden) {
+ return PostAndWait(
+ FROM_HERE,
+ base::Bind(&AVDACodecAllocator::TaskTypeForAllocation,
+ base::Unretained(allocator_), software_codec_forbidden));
+ }
+
+ scoped_refptr<base::SingleThreadTaskRunner> TaskRunnerFor(
+ TaskType task_type) {
+ return PostAndWait(FROM_HERE,
+ base::Bind(&AVDACodecAllocator::TaskRunnerFor,
+ base::Unretained(allocator_), task_type));
+ }
+
+ // Post |cb| to the allocator thread, and wait for a response. Note that we
+ // don't have a specialization for void, and void won't work as written. So,
+ // be sure to return something.
+ template <typename ReturnType>
+ ReturnType PostAndWait(const base::Location& from_here,
+ const base::Callback<ReturnType(void)>& cb) {
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ ReturnType return_value = ReturnType();
+ allocator_thread_.task_runner()->PostTask(
+ from_here,
+ base::Bind(&RunAndSignalTask<ReturnType>, &event, &return_value, cb));
+ event.Wait();
+ return return_value;
+ }
+
+ // So that we can get the thread's task runner.
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
+
+ base::Thread allocator_thread_;
+
+ // The test params for |allocator_|.
+ base::SimpleTestTickClock tick_clock_;
+ base::WaitableEvent stop_event_;
+
+  // Allocators that we own. The first is initialized to be used on the allocator
+ // thread and the second one is initialized on the test thread. Each test
+ // should only be using one of the two. They are not unique_ptrs because the
+ // destructor is private and they need to be destructed on the right thread.
+ AVDACodecAllocator* allocator_ = nullptr;
+ AVDACodecAllocator* allocator2_ = nullptr;
+
+ NiceMock<MockClient> client1_, client2_, client3_;
+ NiceMock<MockClient>* avda1_ = &client1_;
+ NiceMock<MockClient>* avda2_ = &client2_;
+ NiceMock<MockClient>* avda3_ = &client3_;
+
+ // Surface bundle that has an overlay.
+ scoped_refptr<AVDASurfaceBundle> surface_bundle_;
+ base::android::JavaRef<jobject> null_java_ref_;
+};
+
+TEST_F(AVDACodecAllocatorTest, ThreadsStartWhenClientsStart) {
+ ASSERT_FALSE(IsThreadRunning(AUTO_CODEC));
+ ASSERT_FALSE(IsThreadRunning(SW_CODEC));
+ StartThread(avda1_);
+ ASSERT_TRUE(IsThreadRunning(AUTO_CODEC));
+ ASSERT_TRUE(IsThreadRunning(SW_CODEC));
+}
+
+TEST_F(AVDACodecAllocatorTest, ThreadsStopAfterAllClientsStop) {
+ StartThread(avda1_);
+ StartThread(avda2_);
+ StopThread(avda1_);
+ ASSERT_TRUE(IsThreadRunning(AUTO_CODEC));
+ StopThread(avda2_);
+ ASSERT_FALSE(IsThreadRunning(AUTO_CODEC));
+ ASSERT_FALSE(IsThreadRunning(SW_CODEC));
+}
+
+TEST_F(AVDACodecAllocatorTest, TestHangThread) {
+ StartThread(avda1_);
+ ASSERT_EQ(AUTO_CODEC, TaskTypeForAllocation(false));
+
+ // Hang the AUTO_CODEC thread.
+ base::WaitableEvent about_to_wait_event(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ base::WaitableEvent wait_event(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ TaskRunnerFor(AUTO_CODEC)
+ ->PostTask(FROM_HERE, base::Bind(&WaitUntilRestarted,
+ &about_to_wait_event, &wait_event));
+ // Wait until the task starts, so that |allocator_| starts the hang timer.
+ about_to_wait_event.Wait();
+
+ // Verify that we've failed over after a long time has passed.
+ tick_clock_.Advance(base::TimeDelta::FromSeconds(1));
+ ASSERT_EQ(SW_CODEC, TaskTypeForAllocation(false));
+
+ // Un-hang the thread and wait for it to let another task run. This will
+ // notify |allocator_| that the thread is no longer hung.
+ base::WaitableEvent done_waiting_event(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ TaskRunnerFor(AUTO_CODEC)
+ ->PostTask(FROM_HERE,
+ base::Bind(&SignalImmediately, &done_waiting_event));
+ wait_event.Signal();
+ done_waiting_event.Wait();
+
+ // Verify that we've un-failed over.
+ ASSERT_EQ(AUTO_CODEC, TaskTypeForAllocation(false));
+}
+
+TEST_F(AVDACodecAllocatorTest, AllocateAndDestroyCodecOnAllocatorThread) {
+ // Make sure that allocating / freeing a codec on the allocator's thread
+ // completes, and doesn't DCHECK.
+ allocator_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&AVDACodecAllocatorTest::AllocateCodec,
+ base::Unretained(this)));
+
+ // Wait for the codec on this thread, rather than the allocator thread, since
+ // that's where the codec will be posted.
+ avda1_->codec_arrived_event_.Wait();
+
+ allocator_thread_.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&AVDACodecAllocatorTest::DestroyCodec,
+ base::Unretained(this)));
+
+ // Note that TearDown will join |allocator_thread_|.
+ WaitForSurfaceDestruction();
+
+ // Wait for threads to stop, now that we're not on the allocator thread.
+ stop_event_.Wait();
+}
+
+TEST_F(AVDACodecAllocatorTest, AllocateAndDestroyCodecOnNewThread) {
+ // Make sure that allocating / freeing a codec on a random thread completes,
+ // and doesn't DCHECK.
+ base::Thread new_thread("NewThreadForTesting");
+ ASSERT_TRUE(new_thread.Start());
+ new_thread.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&AVDACodecAllocatorTest::AllocateCodec,
+ base::Unretained(this)));
+
+ // Wait for the codec on this thread, rather than |new_thread|, since that's
+ // where the codec will be posted.
+ avda1_->codec_arrived_event_.Wait();
+
+ new_thread.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&AVDACodecAllocatorTest::DestroyCodec,
+ base::Unretained(this)));
+ new_thread.Stop();
+ WaitForSurfaceDestruction();
+ stop_event_.Wait();
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/avda_codec_image.cc b/chromium/media/gpu/android/avda_codec_image.cc
new file mode 100644
index 00000000000..5d2d4da056d
--- /dev/null
+++ b/chromium/media/gpu/android/avda_codec_image.cc
@@ -0,0 +1,252 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/avda_codec_image.h"
+
+#include <string.h>
+
+#include <memory>
+
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "media/base/android/media_codec_bridge_impl.h"
+#include "media/gpu/android/avda_shared_state.h"
+#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/scoped_make_current.h"
+
+namespace media {
+
+// |codec| may be null (see |media_codec_| in the header). No codec buffer is
+// attached until SetBufferMetadata() supplies one.
+AVDACodecImage::AVDACodecImage(
+    const scoped_refptr<AVDASharedState>& shared_state,
+    MediaCodecBridge* codec)
+    : shared_state_(shared_state),
+      codec_buffer_index_(kInvalidCodecBufferIndex),
+      media_codec_(codec),
+      has_surface_texture_(false),
+      texture_(0) {}
+
+AVDACodecImage::~AVDACodecImage() {}
+
+// Returns the size recorded by the most recent SetBufferMetadata() call.
+gfx::Size AVDACodecImage::GetSize() {
+  return size_;
+}
+
+// Always reports GL_RGBA regardless of the underlying buffer format.
+unsigned AVDACodecImage::GetInternalFormat() {
+  return GL_RGBA;
+}
+
+// Binding is not supported for this image; drawing goes through
+// CopyTexImage() (SurfaceTexture) or ScheduleOverlayPlane() (SurfaceView).
+bool AVDACodecImage::BindTexImage(unsigned target) {
+  return false;
+}
+
+void AVDACodecImage::ReleaseTexImage(unsigned target) {}
+
+// Ensures the SurfaceTexture's front buffer holds the current frame before
+// the bound external texture is sampled. Only valid for SurfaceTexture-backed
+// images with the surface texture's own service id currently bound.
+bool AVDACodecImage::CopyTexImage(unsigned target) {
+  if (!has_surface_texture_ || target != GL_TEXTURE_EXTERNAL_OES)
+    return false;
+
+  GLint bound_service_id = 0;
+  glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &bound_service_id);
+  // We insist that the currently bound texture is the right one.
+  if (bound_service_id !=
+      static_cast<GLint>(shared_state_->surface_texture_service_id())) {
+    return false;
+  }
+
+  // Make sure that we have the right image in the front buffer. Note that the
+  // bound_service_id is guaranteed to be equal to the surface texture's client
+  // texture id, so we can skip preserving it if the right context is current.
+  UpdateSurfaceInternal(UpdateMode::RENDER_TO_FRONT_BUFFER,
+                        kDontRestoreBindings);
+
+  // By setting image state to UNBOUND instead of COPIED we ensure that
+  // CopyTexImage() is called each time the surface texture is used for drawing.
+  // It would be nice if we could do this via asking for the currently bound
+  // Texture, but the active unit never seems to change.
+  texture_->SetLevelImageState(GL_TEXTURE_EXTERNAL_OES, 0,
+                               gpu::gles2::Texture::UNBOUND);
+
+  return true;
+}
+
+// Partial copies are not supported.
+bool AVDACodecImage::CopyTexSubImage(unsigned target,
+                                     const gfx::Point& offset,
+                                     const gfx::Rect& rect) {
+  return false;
+}
+
+// Releases the codec buffer straight to the overlay (SurfaceView) for
+// display, repositioning the overlay first if |bounds_rect| changed.
+bool AVDACodecImage::ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
+                                          int z_order,
+                                          gfx::OverlayTransform transform,
+                                          const gfx::Rect& bounds_rect,
+                                          const gfx::RectF& crop_rect) {
+  // This should only be called when we're rendering to a SurfaceView.
+  if (has_surface_texture_) {
+    DVLOG(1) << "Invalid call to ScheduleOverlayPlane; this image is "
+                "SurfaceTexture backed.";
+    return false;
+  }
+
+  // Move the overlay if needed.
+  if (shared_state_->overlay() && most_recent_bounds_ != bounds_rect) {
+    most_recent_bounds_ = bounds_rect;
+    shared_state_->overlay()->ScheduleLayout(bounds_rect);
+  }
+
+  UpdateSurface(UpdateMode::RENDER_TO_FRONT_BUFFER);
+  return true;
+}
+
+// Intentionally a no-op; this image reports no memory of its own.
+void AVDACodecImage::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
+                                  uint64_t process_tracing_id,
+                                  const std::string& dump_name) {}
+
+// Promotes a buffer that was previously released to the back buffer
+// (|codec_buffer_index_| == kUpdateOnly) to the front buffer via
+// UpdateTexImage(), making the shared context current if needed and
+// optionally restoring the caller's external-texture binding.
+void AVDACodecImage::UpdateSurfaceTexture(RestoreBindingsMode mode) {
+  DCHECK(has_surface_texture_);
+  DCHECK_EQ(codec_buffer_index_, kUpdateOnly);
+  codec_buffer_index_ = kRendered;
+
+  // Swap the rendered image to the front.
+  std::unique_ptr<ui::ScopedMakeCurrent> scoped_make_current =
+      MakeCurrentIfNeeded();
+
+  // If we changed contexts, then we always want to restore it, since the caller
+  // doesn't know that we're switching contexts.
+  if (scoped_make_current)
+    mode = kDoRestoreBindings;
+
+  // Save the current binding if requested.
+  GLint bound_service_id = 0;
+  if (mode == kDoRestoreBindings)
+    glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &bound_service_id);
+
+  shared_state_->UpdateTexImage();
+  if (mode == kDoRestoreBindings)
+    glBindTexture(GL_TEXTURE_EXTERNAL_OES, bound_service_id);
+}
+
+// Public entry point; always restores texture bindings for the caller.
+void AVDACodecImage::UpdateSurface(UpdateMode update_mode) {
+  UpdateSurfaceInternal(update_mode, kDoRestoreBindings);
+}
+
+// Swaps in the new codec (may be null) and invalidates any outstanding
+// buffer index, since it referred to the old codec instance.
+void AVDACodecImage::CodecChanged(MediaCodecBridge* codec) {
+  media_codec_ = codec;
+  codec_buffer_index_ = kInvalidCodecBufferIndex;
+}
+
+// Records which codec output buffer this image represents, the rendering
+// path (SurfaceTexture vs. SurfaceView), and the frame size.
+void AVDACodecImage::SetBufferMetadata(int buffer_index,
+                                       bool has_surface_texture,
+                                       const gfx::Size& size) {
+  has_surface_texture_ = has_surface_texture;
+  codec_buffer_index_ = buffer_index;
+  size_ = size;
+}
+
+// Returns true if the shared state actually changed; callers use this to
+// re-attach the image to its texture (see
+// AVDAPictureBufferManager::UseCodecBufferForPictureBuffer). Also resets the
+// cached overlay bounds so the next ScheduleOverlayPlane() re-lays-out.
+bool AVDACodecImage::SetSharedState(
+    scoped_refptr<AVDASharedState> shared_state) {
+  if (shared_state == shared_state_)
+    return false;
+  shared_state_ = shared_state;
+  most_recent_bounds_ = gfx::Rect();
+  return true;
+}
+
+// Releases the outstanding codec buffer (if any) per |update_mode|, then, for
+// SurfaceTexture-backed front-buffer renders, latches the frame via
+// UpdateSurfaceTexture(). No-op when no codec buffer is outstanding.
+void AVDACodecImage::UpdateSurfaceInternal(
+    UpdateMode update_mode,
+    RestoreBindingsMode attached_bindings_mode) {
+  if (!IsCodecBufferOutstanding())
+    return;
+
+  ReleaseOutputBuffer(update_mode);
+
+  // SurfaceViews are updated implicitly, so no further steps are necessary.
+  if (!has_surface_texture_) {
+    DCHECK(update_mode != UpdateMode::RENDER_TO_BACK_BUFFER);
+    return;
+  }
+
+  // If front buffer rendering hasn't been requested, exit early.
+  if (update_mode != UpdateMode::RENDER_TO_FRONT_BUFFER)
+    return;
+
+  UpdateSurfaceTexture(attached_bindings_mode);
+}
+
+// Releases |codec_buffer_index_| back to MediaCodec according to
+// |update_mode|, advancing the index through the kUpdateOnly / kRendered /
+// kInvalidCodecBufferIndex state machine. Never touches GL state.
+void AVDACodecImage::ReleaseOutputBuffer(UpdateMode update_mode) {
+  DCHECK(IsCodecBufferOutstanding());
+
+  // In case of discard, simply discard and clear our codec buffer index.
+  if (update_mode == UpdateMode::DISCARD_CODEC_BUFFER) {
+    if (codec_buffer_index_ != kUpdateOnly)
+      media_codec_->ReleaseOutputBuffer(codec_buffer_index_, false);
+
+    // Note: No need to wait for the frame to be available in the kUpdateOnly
+    // case since it will be or has been waited on by another release call.
+    codec_buffer_index_ = kInvalidCodecBufferIndex;
+    return;
+  }
+
+  DCHECK(update_mode == UpdateMode::RENDER_TO_BACK_BUFFER ||
+         update_mode == UpdateMode::RENDER_TO_FRONT_BUFFER);
+
+  // SurfaceView path: releasing with render=true displays the frame directly.
+  if (!has_surface_texture_) {
+    DCHECK(update_mode == UpdateMode::RENDER_TO_FRONT_BUFFER);
+    DCHECK_GE(codec_buffer_index_, 0);
+    media_codec_->ReleaseOutputBuffer(codec_buffer_index_, true);
+    codec_buffer_index_ = kRendered;
+    return;
+  }
+
+  // If we've already released to the back buffer, there's nothing left to do,
+  // but wait for the previously released buffer if necessary.
+  if (codec_buffer_index_ != kUpdateOnly) {
+    DCHECK(has_surface_texture_);
+    DCHECK_GE(codec_buffer_index_, 0);
+    shared_state_->RenderCodecBufferToSurfaceTexture(media_codec_,
+                                                     codec_buffer_index_);
+    codec_buffer_index_ = kUpdateOnly;
+  }
+
+  // Only wait for the SurfaceTexture update if we're rendering to the front.
+  if (update_mode == UpdateMode::RENDER_TO_FRONT_BUFFER)
+    shared_state_->WaitForFrameAvailable();
+}
+
+// Makes shared_state_->context() current unless it already is; returns null
+// when no context switch was needed (so the caller can tell whether one
+// happened).
+std::unique_ptr<ui::ScopedMakeCurrent> AVDACodecImage::MakeCurrentIfNeeded() {
+  DCHECK(shared_state_->context());
+  // Remember: virtual contexts return true if and only if their shared context
+  // is current, regardless of which virtual context it is.
+  return std::unique_ptr<ui::ScopedMakeCurrent>(
+      shared_state_->context()->IsCurrent(nullptr)
+          ? nullptr
+          : new ui::ScopedMakeCurrent(shared_state_->context(),
+                                      shared_state_->surface()));
+}
+
+// Copies the (possibly just-refreshed) transform matrix from the shared
+// state and Y-flips it for the compositor.
+void AVDACodecImage::GetTextureMatrix(float matrix[16]) {
+  // Our current matrix may be stale.  Update it if possible.
+  if (has_surface_texture_)
+    UpdateSurface(UpdateMode::RENDER_TO_FRONT_BUFFER);
+  shared_state_->GetTransformMatrix(matrix);
+  YInvertMatrix(matrix);
+}
+
+// Forwards the compositor's overlay-promotion hint, with the on-screen rect,
+// through the shared state's callback.
+void AVDACodecImage::NotifyPromotionHint(bool promotion_hint,
+                                         int display_x,
+                                         int display_y,
+                                         int display_width,
+                                         int display_height) {
+  shared_state_->GetPromotionHintCB().Run(PromotionHintAggregator::Hint(
+      gfx::Rect(display_x, display_y, display_width, display_height),
+      promotion_hint));
+}
+
+// True when |codec_buffer_index_| refers to a real buffer (>= 0) or a
+// back-buffer-only release (kUpdateOnly), and we still have a codec.
+bool AVDACodecImage::IsCodecBufferOutstanding() const {
+  static_assert(kUpdateOnly < 0 && kUpdateOnly > kRendered &&
+                    kRendered > kInvalidCodecBufferIndex,
+                "Codec buffer index enum values are not ordered correctly.");
+  return codec_buffer_index_ > kRendered && media_codec_;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/avda_codec_image.h b/chromium/media/gpu/android/avda_codec_image.h
new file mode 100644
index 00000000000..cff49ad2086
--- /dev/null
+++ b/chromium/media/gpu/android/avda_codec_image.h
@@ -0,0 +1,164 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_AVDA_CODEC_IMAGE_H_
+#define MEDIA_GPU_ANDROID_AVDA_CODEC_IMAGE_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "gpu/command_buffer/service/gl_stream_texture_image.h"
+#include "media/gpu/android/avda_shared_state.h"
+
+namespace ui {
+class ScopedMakeCurrent;
+}
+
+namespace media {
+
+class MediaCodecBridge;
+
+// GLImage that renders MediaCodec buffers to a SurfaceTexture or SurfaceView as
+// needed in order to draw them.
+class AVDACodecImage : public gpu::gles2::GLStreamTextureImage {
+ public:
+  AVDACodecImage(const scoped_refptr<AVDASharedState>& shared_state,
+                 MediaCodecBridge* codec);
+
+  // gl::GLImage implementation
+  gfx::Size GetSize() override;
+  unsigned GetInternalFormat() override;
+  bool BindTexImage(unsigned target) override;
+  void ReleaseTexImage(unsigned target) override;
+  bool CopyTexImage(unsigned target) override;
+  bool CopyTexSubImage(unsigned target,
+                       const gfx::Point& offset,
+                       const gfx::Rect& rect) override;
+  bool ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
+                            int z_order,
+                            gfx::OverlayTransform transform,
+                            const gfx::Rect& bounds_rect,
+                            const gfx::RectF& crop_rect) override;
+  void Flush() override {}
+  void OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
+                    uint64_t process_tracing_id,
+                    const std::string& dump_name) override;
+  // gpu::gles2::GLStreamTextureMatrix implementation
+  void GetTextureMatrix(float xform[16]) override;
+  void NotifyPromotionHint(bool promotion_hint,
+                           int display_x,
+                           int display_y,
+                           int display_width,
+                           int display_height) override;
+
+  enum class UpdateMode {
+    // Discards the codec buffer, no UpdateTexImage().
+    DISCARD_CODEC_BUFFER,
+
+    // Renders to back buffer, no UpdateTexImage(); can only be used with a
+    // valid |surface_texture_|.
+    RENDER_TO_BACK_BUFFER,
+
+    // Renders to the back buffer. When used with a SurfaceView, promotion to
+    // the front buffer is automatic. When using a |surface_texture_|,
+    // UpdateTexImage() is called to promote the back buffer into the front.
+    RENDER_TO_FRONT_BUFFER
+  };
+
+  // Releases the attached codec buffer (if not already released) indicated by
+  // |codec_buffer_index_| and updates the surface if specified by the given
+  // |update_mode|. See UpdateMode documentation for details.
+  void UpdateSurface(UpdateMode update_mode);
+
+  // Updates the MediaCodec for this image; clears |codec_buffer_index_|.
+  void CodecChanged(MediaCodecBridge* codec);
+
+  // Sets the Texture this image is attached to; CopyTexImage() uses it to
+  // reset the level image state after each draw.
+  void set_texture(gpu::gles2::Texture* texture) { texture_ = texture; }
+
+  // Sets up the properties necessary for the image to render. |buffer_index| is
+  // supplied to ReleaseOutputBuffer(), |has_surface_texture| controls which
+  // rendering path is used, and |size| is used by the compositor.
+  void SetBufferMetadata(int buffer_index,
+                         bool has_surface_texture,
+                         const gfx::Size& size);
+
+  // Replaces |shared_state_|; returns true if it actually changed, in which
+  // case the caller should re-attach this image to its texture.
+  bool SetSharedState(scoped_refptr<AVDASharedState> shared_state);
+
+  // Indicates if the codec buffer has been released to the back buffer.
+  bool was_rendered_to_back_buffer() const {
+    return codec_buffer_index_ == kUpdateOnly;
+  }
+
+  // Indicates if the codec buffer has been released to the front buffer.
+  bool was_rendered_to_front_buffer() const {
+    return codec_buffer_index_ == kRendered;
+  }
+
+  // True while the buffer has not yet reached the front buffer (it is still
+  // a real index or has only been released to the back buffer).
+  bool is_unrendered() const { return codec_buffer_index_ >= kUpdateOnly; }
+
+ protected:
+  // Protected: this class is reference counted (held via scoped_refptr), so
+  // it must not be deleted directly.
+  ~AVDACodecImage() override;
+
+ private:
+  // Make sure that the surface texture's front buffer is current.  This will
+  // save / restore the current context.  It will optionally restore the texture
+  // bindings in the surface texture's context, based on |mode|.  This is
+  // intended as a hint if we don't need to change contexts.  If we do need to
+  // change contexts, then we'll always preserve the texture bindings in the
+  // both contexts.  In other words, the caller is telling us whether it's
+  // okay to change the binding in the current context.
+  enum RestoreBindingsMode { kDontRestoreBindings, kDoRestoreBindings };
+  void UpdateSurfaceTexture(RestoreBindingsMode mode);
+
+  // Internal helper for UpdateSurface() that allows callers to specify the
+  // RestoreBindingsMode when a SurfaceTexture is already attached prior to
+  // calling this method.
+  void UpdateSurfaceInternal(UpdateMode update_mode,
+                             RestoreBindingsMode attached_bindings_mode);
+
+  // Releases the attached codec buffer (if not already released) indicated by
+  // |codec_buffer_index_|. Never updates the actual surface. See UpdateMode
+  // documentation for details. For the purposes of this function the values
+  // RENDER_TO_FRONT_BUFFER and RENDER_TO_BACK_BUFFER do the same thing.
+  void ReleaseOutputBuffer(UpdateMode update_mode);
+
+  // Make shared_state_->context() current if it isn't already.
+  std::unique_ptr<ui::ScopedMakeCurrent> MakeCurrentIfNeeded();
+
+  // Return whether there is a codec buffer that we haven't rendered yet.  Will
+  // return false also if there's no codec or we otherwise can't update.
+  bool IsCodecBufferOutstanding() const;
+
+  // Shared state between the AVDA and all AVDACodecImages.
+  scoped_refptr<AVDASharedState> shared_state_;
+
+  // The MediaCodec buffer index that we should render. Must be >= 0 or one of
+  // the enum values below.
+  enum { kUpdateOnly = -1, kRendered = -2, kInvalidCodecBufferIndex = -3 };
+  int codec_buffer_index_;
+
+  // Our image size.
+  gfx::Size size_;
+
+  // May be null.
+  MediaCodecBridge* media_codec_;
+
+  // Indicates if we're rendering to a SurfaceTexture or not. Set during the
+  // call to SetBufferMetadata().
+  bool has_surface_texture_;
+
+  // The texture that we're attached to.
+  gpu::gles2::Texture* texture_;
+
+  // Bounds that we last sent to our overlay.
+  gfx::Rect most_recent_bounds_;
+
+  DISALLOW_COPY_AND_ASSIGN(AVDACodecImage);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_AVDA_CODEC_IMAGE_H_
diff --git a/chromium/media/gpu/android/avda_picture_buffer_manager.cc b/chromium/media/gpu/android/avda_picture_buffer_manager.cc
new file mode 100644
index 00000000000..a452cfacede
--- /dev/null
+++ b/chromium/media/gpu/android/avda_picture_buffer_manager.cc
@@ -0,0 +1,278 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/avda_picture_buffer_manager.h"
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+
+#include "base/android/build_info.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/gl_stream_texture_image.h"
+#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/ipc/service/gpu_channel.h"
+#include "media/base/android/media_codec_bridge_impl.h"
+#include "media/gpu/android/avda_codec_image.h"
+#include "media/gpu/android/avda_shared_state.h"
+#include "ui/gl/android/scoped_java_surface.h"
+#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/egl_util.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_surface_egl.h"
+#include "ui/gl/scoped_binders.h"
+#include "ui/gl/scoped_make_current.h"
+
+// If !|ptr|, log a message, notify |state_provider_| of the error, and
+// return an optional value. Only usable inside AVDAPictureBufferManager
+// member functions, where |state_provider_| is in scope.
+#define RETURN_IF_NULL(ptr, ...)                                           \
+  do {                                                                     \
+    if (!(ptr)) {                                                          \
+      DLOG(ERROR) << "Got null for " << #ptr;                              \
+      state_provider_->NotifyError(VideoDecodeAccelerator::ILLEGAL_STATE); \
+      return __VA_ARGS__;                                                  \
+    }                                                                      \
+  } while (0)
+
+namespace media {
+
+// |state_provider| is not owned; it must outlive this object.
+AVDAPictureBufferManager::AVDAPictureBufferManager(
+    AVDAStateProvider* state_provider)
+    : state_provider_(state_provider), media_codec_(nullptr) {}
+
+AVDAPictureBufferManager::~AVDAPictureBufferManager() {}
+
+// (Re)initializes for the surface in |surface_bundle|. Without an overlay, a
+// SurfaceTexture is created and added to the bundle. Returns false if
+// SurfaceTexture creation fails; |shared_state_| is null in that case.
+bool AVDAPictureBufferManager::Initialize(
+    scoped_refptr<AVDASurfaceBundle> surface_bundle) {
+  shared_state_ = nullptr;
+  surface_texture_ = nullptr;
+
+  if (!surface_bundle->overlay) {
+    // Create the surface texture.
+    surface_texture_ = SurfaceTextureGLOwnerImpl::Create();
+    if (!surface_texture_)
+      return false;
+
+    surface_bundle->surface_texture_surface =
+        surface_texture_->CreateJavaSurface();
+    surface_bundle->surface_texture = surface_texture_;
+  }
+
+  // Only do this once the surface texture is filled in, since the constructor
+  // assumes that it will be.
+  shared_state_ = new AVDASharedState(surface_bundle);
+  shared_state_->SetPromotionHintCB(state_provider_->GetPromotionHintCB());
+
+  return true;
+}
+
+// Releases all outstanding codec buffers and drops the codec and surface
+// texture references. Safe to call before Initialize().
+void AVDAPictureBufferManager::Destroy(const PictureBufferMap& buffers) {
+  // Do nothing if Initialize() has not been called.
+  if (!shared_state_)
+    return;
+
+  ReleaseCodecBuffers(buffers);
+  CodecChanged(nullptr);
+  surface_texture_ = nullptr;
+}
+
+// Attaches |image| (or clears the image when null) to the texture backing
+// |picture_buffer|, overriding the texture's service id with the surface
+// texture's. On any missing GL object, notifies ILLEGAL_STATE and returns
+// early (via RETURN_IF_NULL).
+void AVDAPictureBufferManager::SetImageForPicture(
+    const PictureBuffer& picture_buffer,
+    gpu::gles2::GLStreamTextureImage* image) {
+  auto gles_decoder = state_provider_->GetGlDecoder();
+  RETURN_IF_NULL(gles_decoder);
+  auto* context_group = gles_decoder->GetContextGroup();
+  RETURN_IF_NULL(context_group);
+  auto* texture_manager = context_group->texture_manager();
+  RETURN_IF_NULL(texture_manager);
+
+  DCHECK_LE(1u, picture_buffer.client_texture_ids().size());
+  gpu::gles2::TextureRef* texture_ref =
+      texture_manager->GetTexture(picture_buffer.client_texture_ids()[0]);
+  RETURN_IF_NULL(texture_ref);
+
+  // Default to zero which will clear the stream texture service id if one was
+  // previously set.
+  GLuint stream_texture_service_id = 0;
+  if (image) {
+    // Override the Texture's service id, so that it will use the one that is
+    // attached to the SurfaceTexture.
+    stream_texture_service_id = shared_state_->surface_texture_service_id();
+
+    // Also set the parameters for the level if we're not clearing the image.
+    const gfx::Size size = state_provider_->GetSize();
+    texture_manager->SetLevelInfo(texture_ref, kTextureTarget, 0, GL_RGBA,
+                                  size.width(), size.height(), 1, 0, GL_RGBA,
+                                  GL_UNSIGNED_BYTE, gfx::Rect());
+
+    static_cast<AVDACodecImage*>(image)->set_texture(texture_ref->texture());
+  }
+
+  // If we're clearing the image, or setting a SurfaceTexture backed image, we
+  // set the state to UNBOUND. For SurfaceTexture images, this ensures that the
+  // implementation will call CopyTexImage, which is where AVDACodecImage
+  // updates the SurfaceTexture to the right frame.
+  auto image_state = gpu::gles2::Texture::UNBOUND;
+  // For SurfaceView we set the state to BOUND because ScheduleOverlayPlane
+  // requires it. If something tries to sample from this texture it won't work,
+  // but there's no way to sample from a SurfaceView anyway, so it doesn't
+  // matter.
+  if (image && !surface_texture_)
+    image_state = gpu::gles2::Texture::BOUND;
+  texture_manager->SetLevelStreamTextureImage(texture_ref, kTextureTarget, 0,
+                                              image, image_state,
+                                              stream_texture_service_id);
+  texture_manager->SetLevelCleared(texture_ref, kTextureTarget, 0, true);
+}
+
+// Looks up the image for |picture_buffer_id|; DCHECKs that one exists (the
+// image is created in AssignOnePictureBuffer()).
+AVDACodecImage* AVDAPictureBufferManager::GetImageForPicture(
+    int picture_buffer_id) const {
+  auto it = codec_images_.find(picture_buffer_id);
+  DCHECK(it != codec_images_.end());
+  return it->second.get();
+}
+
+// Associates decoded output buffer |codec_buf_index| with |picture_buffer|'s
+// image, marks the picture as out for display, and opportunistically renders
+// early.
+void AVDAPictureBufferManager::UseCodecBufferForPictureBuffer(
+    int32_t codec_buf_index,
+    const PictureBuffer& picture_buffer) {
+  // Notify the AVDACodecImage for picture_buffer that it should use the
+  // decoded buffer codec_buf_index to render this frame.
+  AVDACodecImage* avda_image = GetImageForPicture(picture_buffer.id());
+
+  // Note that this is not a race, since we do not re-use a PictureBuffer
+  // until after the CC is done drawing it.
+  pictures_out_for_display_.push_back(picture_buffer.id());
+  avda_image->SetBufferMetadata(codec_buf_index, !!surface_texture_,
+                                state_provider_->GetSize());
+
+  // If the shared state has changed for this image, retarget its texture.
+  if (avda_image->SetSharedState(shared_state_))
+    SetImageForPicture(picture_buffer, avda_image);
+
+  MaybeRenderEarly();
+}
+
+// Creates a new AVDACodecImage for |picture_buffer| and attaches it to the
+// picture buffer's texture. |have_context| is currently unused here.
+void AVDAPictureBufferManager::AssignOnePictureBuffer(
+    const PictureBuffer& picture_buffer,
+    bool have_context) {
+  // Attach a GLImage to each texture that will use the surface texture.
+  // The chained assignment stores the image in |codec_images_| and keeps a
+  // local ref for the SetImageForPicture() call below.
+  scoped_refptr<gpu::gles2::GLStreamTextureImage> gl_image =
+      codec_images_[picture_buffer.id()] =
+          new AVDACodecImage(shared_state_, media_codec_);
+  SetImageForPicture(picture_buffer, gl_image.get());
+}
+
+// Discards any codec buffer still attached to |picture_buffer|'s image
+// without rendering it.
+void AVDAPictureBufferManager::ReleaseCodecBufferForPicture(
+    const PictureBuffer& picture_buffer) {
+  GetImageForPicture(picture_buffer.id())
+      ->UpdateSurface(AVDACodecImage::UpdateMode::DISCARD_CODEC_BUFFER);
+}
+
+// Returns |picture_buffer| to the free pool: removes it from the
+// out-for-display list, discards its codec buffer, and tries to render early.
+void AVDAPictureBufferManager::ReuseOnePictureBuffer(
+    const PictureBuffer& picture_buffer) {
+  pictures_out_for_display_.erase(
+      std::remove(pictures_out_for_display_.begin(),
+                  pictures_out_for_display_.end(), picture_buffer.id()),
+      pictures_out_for_display_.end());
+
+  // At this point, the CC must be done with the picture.  We can't really
+  // check for that here directly.  it's guaranteed in gpu_video_decoder.cc,
+  // when it waits on the sync point before releasing the mailbox.  That sync
+  // point is inserted by destroying the resource in VideoLayerImpl::DidDraw.
+  ReleaseCodecBufferForPicture(picture_buffer);
+  MaybeRenderEarly();
+}
+
+// Discards any outstanding codec buffer for every picture in |buffers|.
+void AVDAPictureBufferManager::ReleaseCodecBuffers(
+    const PictureBufferMap& buffers) {
+  // Bind with const auto& so we reference the map's value_type,
+  // std::pair<const int32_t, PictureBuffer>, directly. The previous
+  // std::pair<int, PictureBuffer> spelling forced a converting temporary,
+  // copying each PictureBuffer once per iteration.
+  for (const auto& entry : buffers)
+    ReleaseCodecBufferForPicture(entry.second);
+}
+
+// Scans the out-for-display list (newest first) for a frame to promote to
+// the front buffer, and — on the SurfaceTexture path — additionally releases
+// the next frame to the back buffer so it is ready to latch.
+void AVDAPictureBufferManager::MaybeRenderEarly() {
+  if (pictures_out_for_display_.empty())
+    return;
+
+  // See if we can consume the front buffer / render to the SurfaceView. Iterate
+  // in reverse to find the most recent front buffer. If none is found, the
+  // |front_index| will point to the beginning of the array.
+  size_t front_index = pictures_out_for_display_.size() - 1;
+  AVDACodecImage* first_renderable_image = nullptr;
+  for (int i = front_index; i >= 0; --i) {
+    const int id = pictures_out_for_display_[i];
+    AVDACodecImage* avda_image = GetImageForPicture(id);
+
+    // Update the front buffer index as we move along to shorten the number of
+    // candidate images we look at for back buffer rendering.
+    front_index = i;
+    first_renderable_image = avda_image;
+
+    // If we find a front buffer, stop and indicate that front buffer rendering
+    // is not possible since another image is already in the front buffer.
+    if (avda_image->was_rendered_to_front_buffer()) {
+      first_renderable_image = nullptr;
+      break;
+    }
+  }
+
+  if (first_renderable_image) {
+    first_renderable_image->UpdateSurface(
+        AVDACodecImage::UpdateMode::RENDER_TO_FRONT_BUFFER);
+  }
+
+  // Back buffer rendering is only available for surface textures. We'll always
+  // have at least one front buffer, so the next buffer must be the backbuffer.
+  size_t backbuffer_index = front_index + 1;
+  if (!surface_texture_ || backbuffer_index >= pictures_out_for_display_.size())
+    return;
+
+  // See if the back buffer is free. If so, then render the frame adjacent to
+  // the front buffer. The listing is in render order, so we can just use the
+  // first unrendered frame if there is back buffer space.
+  first_renderable_image =
+      GetImageForPicture(pictures_out_for_display_[backbuffer_index]);
+  if (first_renderable_image->was_rendered_to_back_buffer())
+    return;
+
+  // Due to the loop in the beginning this should never be true.
+  DCHECK(!first_renderable_image->was_rendered_to_front_buffer());
+  first_renderable_image->UpdateSurface(
+      AVDACodecImage::UpdateMode::RENDER_TO_BACK_BUFFER);
+}
+
+// Propagates a codec change (or, with null, codec destruction) to every
+// image. Callers ensure |shared_state_| is non-null (see Destroy()'s guard).
+void AVDAPictureBufferManager::CodecChanged(MediaCodecBridge* codec) {
+  media_codec_ = codec;
+  for (auto& image_kv : codec_images_)
+    image_kv.second->CodecChanged(codec);
+  shared_state_->ClearReleaseTime();
+}
+
+bool AVDAPictureBufferManager::ArePicturesOverlayable() {
+  // SurfaceView frames are always overlayable because that's the only way to
+  // display them.
+  return !surface_texture_;
+}
+
+// True if any out-for-display picture still has an unrendered codec buffer.
+bool AVDAPictureBufferManager::HasUnrenderedPictures() const {
+  for (int id : pictures_out_for_display_) {
+    if (GetImageForPicture(id)->is_unrendered())
+      return true;
+  }
+  return false;
+}
+
+// When an overlay is in use, discards all codec buffers and drops the shared
+// state's reference to the overlay (and thus the surface bundle). No-op on
+// the SurfaceTexture path or before Initialize().
+void AVDAPictureBufferManager::ImmediatelyForgetOverlay(
+    const PictureBufferMap& buffers) {
+  if (!shared_state_ || !shared_state_->overlay())
+    return;
+
+  ReleaseCodecBuffers(buffers);
+  shared_state_->ClearOverlay(shared_state_->overlay());
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/avda_picture_buffer_manager.h b/chromium/media/gpu/android/avda_picture_buffer_manager.h
new file mode 100644
index 00000000000..8d71af1eebf
--- /dev/null
+++ b/chromium/media/gpu/android/avda_picture_buffer_manager.h
@@ -0,0 +1,137 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_AVDA_PICTURE_BUFFER_MANAGER_H_
+#define MEDIA_GPU_ANDROID_AVDA_PICTURE_BUFFER_MANAGER_H_
+
+#include <stdint.h>
+#include <vector>
+
+#include "base/macros.h"
+#include "media/gpu/android/avda_state_provider.h"
+#include "media/gpu/android/avda_surface_bundle.h"
+#include "media/gpu/android/surface_texture_gl_owner.h"
+#include "media/gpu/media_gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+class GLStreamTextureImage;
+}
+} // namespace gpu
+
+namespace media {
+class AVDACodecImage;
+class AVDASharedState;
+class MediaCodecBridge;
+
+// AVDAPictureBufferManager is used by AVDA to associate its PictureBuffers with
+// MediaCodec output buffers. It attaches AVDACodecImages to the PictureBuffer
+// textures so that when they're used to draw the AVDACodecImage can release the
+// MediaCodec buffer to the backing Surface. If the Surface is a SurfaceTexture,
+// the front buffer can then be used to draw without needing to copy the pixels.
+// If the Surface is a SurfaceView, the release causes the frame to be displayed
+// immediately.
+class MEDIA_GPU_EXPORT AVDAPictureBufferManager {
+ public:
+  using PictureBufferMap = std::map<int32_t, PictureBuffer>;
+
+  explicit AVDAPictureBufferManager(AVDAStateProvider* state_provider);
+  virtual ~AVDAPictureBufferManager();
+
+  // Call Initialize, providing the surface bundle that holds the surface that
+  // will back the frames.  If an overlay is present in the bundle, then this
+  // will set us up to render codec buffers at the appropriate time for display,
+  // but will assume that consuming the resulting buffers is handled elsewhere
+  // (e.g., SurfaceFlinger).  We will ensure that any reference to the bundle
+  // is dropped if the overlay sends OnSurfaceDestroyed.
+  //
+  // Without an overlay, we will create a SurfaceTexture and add it (and its
+  // surface) to |surface_bundle|.  We will arrange to consume the buffers at
+  // the right time, in addition to releasing the codec buffers for rendering.
+  //
+  // One may call these multiple times to change between overlay and ST.
+  //
+  // Picture buffers will be updated to reflect the new surface during the call
+  // to UseCodecBufferForPicture().
+  //
+  // Returns true on success.
+  bool Initialize(scoped_refptr<AVDASurfaceBundle> surface_bundle);
+
+  void Destroy(const PictureBufferMap& buffers);
+
+  // Sets up |picture_buffer| so that its texture will refer to the image that
+  // is represented by the decoded output buffer at codec_buffer_index.
+  void UseCodecBufferForPictureBuffer(int32_t codec_buffer_index,
+                                      const PictureBuffer& picture_buffer);
+
+  // Assigns a picture buffer and attaches an image to its texture.
+  void AssignOnePictureBuffer(const PictureBuffer& picture_buffer,
+                              bool have_context);
+
+  // Reuses a picture buffer to hold a new frame.
+  void ReuseOnePictureBuffer(const PictureBuffer& picture_buffer);
+
+  // Release MediaCodec buffers.
+  void ReleaseCodecBuffers(const PictureBufferMap& buffers);
+
+  // Attempts to free up codec output buffers by rendering early.
+  void MaybeRenderEarly();
+
+  // Called when the MediaCodec instance changes. If |codec| is nullptr the
+  // MediaCodec is being destroyed. Previously provided codecs should no longer
+  // be referenced.
+  void CodecChanged(MediaCodecBridge* codec);
+
+  // Whether the pictures buffers are overlayable.
+  bool ArePicturesOverlayable();
+
+  // Are there any unrendered picture buffers oustanding?
+  bool HasUnrenderedPictures() const;
+
+  // If we're using an overlay, then drop all codec buffers for it, and also
+  // drop any reference to the surface bundle.  If we're not using an overlay,
+  // then do nothing.
+  void ImmediatelyForgetOverlay(const PictureBufferMap& buffers);
+
+  // Returns the GL texture target that the PictureBuffer textures use.
+  // Always use OES textures even though this will cause flickering in dev tools
+  // when inspecting a fullscreen video.  See http://crbug.com/592798
+  static constexpr GLenum kTextureTarget = GL_TEXTURE_EXTERNAL_OES;
+
+ private:
+  // Release any codec buffer that is associated with the given picture buffer
+  // back to the codec.  It is okay if there is no such buffer.
+  void ReleaseCodecBufferForPicture(const PictureBuffer& picture_buffer);
+
+  // Sets up the texture references (as found by |picture_buffer|), for the
+  // specified |image|. If |image| is null, clears any ref on the texture
+  // associated with |picture_buffer|.
+  void SetImageForPicture(const PictureBuffer& picture_buffer,
+                          gpu::gles2::GLStreamTextureImage* image);
+
+  AVDACodecImage* GetImageForPicture(int picture_buffer_id) const;
+
+  // Shared state between us and our AVDACodecImages. Null until Initialize()
+  // succeeds.
+  scoped_refptr<AVDASharedState> shared_state_;
+
+  // Not owned; must outlive us.
+  AVDAStateProvider* const state_provider_;
+
+  // The SurfaceTexture to render to. Non-null after Initialize() if
+  // we're not rendering to a SurfaceView.
+  scoped_refptr<SurfaceTextureGLOwner> surface_texture_;
+
+  // Not owned; may be null after CodecChanged(nullptr).
+  MediaCodecBridge* media_codec_;
+
+  // Picture buffer IDs that are out for display. Stored in order of frames as
+  // they are returned from the decoder.
+  std::vector<int32_t> pictures_out_for_display_;
+
+  // Maps a picture buffer id to a AVDACodecImage.
+  std::map<int, scoped_refptr<AVDACodecImage>> codec_images_;
+
+  DISALLOW_COPY_AND_ASSIGN(AVDAPictureBufferManager);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_AVDA_PICTURE_BUFFER_MANAGER_H_
diff --git a/chromium/media/gpu/android/avda_shared_state.cc b/chromium/media/gpu/android/avda_shared_state.cc
new file mode 100644
index 00000000000..dbb035370b3
--- /dev/null
+++ b/chromium/media/gpu/android/avda_shared_state.cc
@@ -0,0 +1,79 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/avda_shared_state.h"
+
+#include "base/metrics/histogram_macros.h"
+#include "base/time/time.h"
+#include "media/gpu/android/avda_codec_image.h"
+#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/scoped_make_current.h"
+
+namespace media {
+
+AVDASharedState::AVDASharedState(
+ scoped_refptr<AVDASurfaceBundle> surface_bundle)
+ : gl_matrix_{
+ 1, 0, 0, 0, // Default to a sane guess just in case we can't get the
+ 0, 1, 0, 0, // matrix on the first call. Will be Y-flipped later.
+ 0, 0, 1, 0, //
+ 0, 0, 0, 1, // Comment preserves 4x4 formatting.
+ },
+ surface_bundle_(surface_bundle),
+ weak_this_factory_(this) {
+ // If we're holding a reference to an overlay, then register to drop it if the
+ // overlay's surface is destroyed.
+ if (overlay()) {
+ overlay()->AddSurfaceDestroyedCallback(base::Bind(
+ &AVDASharedState::ClearOverlay, weak_this_factory_.GetWeakPtr()));
+ }
+}
+
+AVDASharedState::~AVDASharedState() = default;
+
+void AVDASharedState::RenderCodecBufferToSurfaceTexture(
+ MediaCodecBridge* codec,
+ int codec_buffer_index) {
+ if (surface_texture()->IsExpectingFrameAvailable())
+ surface_texture()->WaitForFrameAvailable();
+ codec->ReleaseOutputBuffer(codec_buffer_index, true);
+ surface_texture()->SetReleaseTimeToNow();
+}
+
+void AVDASharedState::WaitForFrameAvailable() {
+ surface_texture()->WaitForFrameAvailable();
+}
+
+void AVDASharedState::UpdateTexImage() {
+ surface_texture()->UpdateTexImage();
+ // Helpfully, this is already column major.
+ surface_texture()->GetTransformMatrix(gl_matrix_);
+}
+
+void AVDASharedState::GetTransformMatrix(float matrix[16]) const {
+ memcpy(matrix, gl_matrix_, sizeof(gl_matrix_));
+}
+
+void AVDASharedState::ClearReleaseTime() {
+ if (surface_texture())
+ surface_texture()->IgnorePendingRelease();
+}
+
+void AVDASharedState::ClearOverlay(AndroidOverlay* overlay_raw) {
+ if (surface_bundle_ && overlay() == overlay_raw)
+ surface_bundle_ = nullptr;
+}
+
+void AVDASharedState::SetPromotionHintCB(
+ PromotionHintAggregator::NotifyPromotionHintCB cb) {
+ promotion_hint_cb_ = cb;
+}
+
+const PromotionHintAggregator::NotifyPromotionHintCB&
+AVDASharedState::GetPromotionHintCB() {
+ return promotion_hint_cb_;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/avda_shared_state.h b/chromium/media/gpu/android/avda_shared_state.h
new file mode 100644
index 00000000000..3110eb0dada
--- /dev/null
+++ b/chromium/media/gpu/android/avda_shared_state.h
@@ -0,0 +1,113 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_AVDA_SHARED_STATE_H_
+#define MEDIA_GPU_ANDROID_AVDA_SHARED_STATE_H_
+
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/waitable_event.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "media/base/android/android_overlay.h"
+#include "media/base/android/media_codec_bridge.h"
+#include "media/gpu/android/avda_shared_state.h"
+#include "media/gpu/android/avda_surface_bundle.h"
+#include "media/gpu/android/promotion_hint_aggregator.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_image.h"
+#include "ui/gl/gl_surface.h"
+
+namespace media {
+
+// State shared by AVDACodecImages. This holds a reference to the surface
+// bundle that's backing the frames. If it's an overlay, then we'll
+// automatically drop our reference to the bundle if the overlay's surface gets
+// an OnSurfaceDestroyed.
+// TODO(watk): This doesn't really do anything any more; we should delete it.
+class AVDASharedState : public base::RefCounted<AVDASharedState> {
+ public:
+ AVDASharedState(scoped_refptr<AVDASurfaceBundle> surface_bundle);
+
+ GLuint surface_texture_service_id() const {
+ return surface_texture() ? surface_texture()->GetTextureId() : 0;
+ }
+
+ SurfaceTextureGLOwner* surface_texture() const {
+ return surface_bundle_ ? surface_bundle_->surface_texture.get() : nullptr;
+ }
+
+ AndroidOverlay* overlay() const {
+ return surface_bundle_ ? surface_bundle_->overlay.get() : nullptr;
+ }
+
+ // Context and surface that |surface_texture_| is bound to, if
+ // |surface_texture_| is not null.
+ gl::GLContext* context() const {
+ return surface_texture() ? surface_texture()->GetContext() : nullptr;
+ }
+
+ gl::GLSurface* surface() const {
+ return surface_texture() ? surface_texture()->GetSurface() : nullptr;
+ }
+
+ // Helper method for coordinating the interactions between
+ // MediaCodec::ReleaseOutputBuffer() and WaitForFrameAvailable() when
+ // rendering to a SurfaceTexture; this method should never be called when
+ // rendering to a SurfaceView.
+ //
+ // The release of the codec buffer to the surface texture is asynchronous, by
+ // using this helper we can attempt to let this process complete in a non
+ // blocking fashion before the SurfaceTexture is used.
+ //
+ // Clients should call this method to release the codec buffer for rendering
+ // and then call WaitForFrameAvailable() before using the SurfaceTexture. In
+ // the ideal case the SurfaceTexture has already been updated, otherwise the
+ // method will wait for a pro-rated amount of time based on elapsed time up
+ // to a short deadline.
+ //
+ // Some devices do not reliably notify frame availability, so we use a very
+ // short deadline of only a few milliseconds to avoid indefinite stalls.
+ void RenderCodecBufferToSurfaceTexture(MediaCodecBridge* codec,
+ int codec_buffer_index);
+
+ void WaitForFrameAvailable();
+
+ // Helper methods for interacting with |surface_texture_|. See
+ // gl::SurfaceTexture for method details.
+ void UpdateTexImage();
+
+ // Returns a matrix that needs to be y flipped in order to match the
+ // StreamTextureMatrix contract. See GLStreamTextureImage::YInvertMatrix().
+ void GetTransformMatrix(float matrix[16]) const;
+
+ // Resets the last time for RenderCodecBufferToSurfaceTexture(). Should be
+ // called during codec changes.
+ void ClearReleaseTime();
+
+ void ClearOverlay(AndroidOverlay* overlay);
+
+ void SetPromotionHintCB(PromotionHintAggregator::NotifyPromotionHintCB cb);
+ const PromotionHintAggregator::NotifyPromotionHintCB& GetPromotionHintCB();
+
+ protected:
+ virtual ~AVDASharedState();
+
+ private:
+ friend class base::RefCounted<AVDASharedState>;
+
+ // Texture matrix of the front buffer of the surface texture.
+ float gl_matrix_[16];
+
+ scoped_refptr<AVDASurfaceBundle> surface_bundle_;
+
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb_;
+
+ base::WeakPtrFactory<AVDASharedState> weak_this_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(AVDASharedState);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_AVDA_SHARED_STATE_H_
diff --git a/chromium/media/gpu/android/avda_state_provider.h b/chromium/media/gpu/android/avda_state_provider.h
new file mode 100644
index 00000000000..86af2c41db2
--- /dev/null
+++ b/chromium/media/gpu/android/avda_state_provider.h
@@ -0,0 +1,44 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_AVDA_STATE_PROVIDER_H_
+#define MEDIA_GPU_ANDROID_AVDA_STATE_PROVIDER_H_
+
+#include "base/compiler_specific.h"
+#include "base/threading/thread_checker.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "media/gpu/android/promotion_hint_aggregator.h"
+#include "media/video/video_decode_accelerator.h"
+
+namespace gpu {
+namespace gles2 {
+class GLES2Decoder;
+}
+} // namespace gpu
+
+namespace media {
+
+// Helper class that provides AVDAPictureBufferManager with enough state
+// to do useful work.
+class AVDAStateProvider {
+ public:
+ // Various handy getters.
+ virtual const gfx::Size& GetSize() const = 0;
+ virtual base::WeakPtr<gpu::gles2::GLES2Decoder> GetGlDecoder() const = 0;
+
+ // Report a fatal error. This will post NotifyError(), and transition to the
+ // error state.
+ virtual void NotifyError(VideoDecodeAccelerator::Error error) = 0;
+
+ // Return a callback that may be used to signal promotion hint info.
+ virtual PromotionHintAggregator::NotifyPromotionHintCB
+ GetPromotionHintCB() = 0;
+
+ protected:
+ ~AVDAStateProvider() = default;
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_AVDA_STATE_PROVIDER_H_
diff --git a/chromium/media/gpu/android/avda_surface_bundle.cc b/chromium/media/gpu/android/avda_surface_bundle.cc
new file mode 100644
index 00000000000..d8db0cfaa60
--- /dev/null
+++ b/chromium/media/gpu/android/avda_surface_bundle.cc
@@ -0,0 +1,47 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/avda_surface_bundle.h"
+
+#include "media/base/android/android_overlay.h"
+
+namespace media {
+
+AVDASurfaceBundle::AVDASurfaceBundle() = default;
+
+AVDASurfaceBundle::AVDASurfaceBundle(std::unique_ptr<AndroidOverlay> overlay)
+ : overlay(std::move(overlay)) {}
+
+AVDASurfaceBundle::AVDASurfaceBundle(
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture_owner)
+ : surface_texture(std::move(surface_texture_owner)),
+ surface_texture_surface(surface_texture->CreateJavaSurface()) {}
+
+AVDASurfaceBundle::~AVDASurfaceBundle() {
+ // Explicitly free the surface first, just to be sure that it's deleted before
+ // the SurfaceTexture is.
+ surface_texture_surface = gl::ScopedJavaSurface();
+
+ // Also release the back buffers.
+ if (surface_texture) {
+ auto task_runner = surface_texture->task_runner();
+ if (task_runner->RunsTasksInCurrentSequence()) {
+ surface_texture->ReleaseBackBuffers();
+ } else {
+ task_runner->PostTask(
+ FROM_HERE, base::Bind(&SurfaceTextureGLOwner::ReleaseBackBuffers,
+ surface_texture));
+ }
+ }
+}
+
+const base::android::JavaRef<jobject>& AVDASurfaceBundle::GetJavaSurface()
+ const {
+ if (overlay)
+ return overlay->GetJavaSurface();
+ else
+ return surface_texture_surface.j_surface();
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/avda_surface_bundle.h b/chromium/media/gpu/android/avda_surface_bundle.h
new file mode 100644
index 00000000000..b2236726727
--- /dev/null
+++ b/chromium/media/gpu/android/avda_surface_bundle.h
@@ -0,0 +1,51 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_AVDA_SURFACE_BUNDLE_H_
+#define MEDIA_GPU_ANDROID_AVDA_SURFACE_BUNDLE_H_
+
+#include "base/memory/ref_counted.h"
+#include "media/base/android/android_overlay.h"
+#include "media/base/surface_manager.h"
+#include "media/gpu/android/surface_texture_gl_owner.h"
+#include "media/gpu/media_gpu_export.h"
+#include "ui/gl/android/scoped_java_surface.h"
+
+namespace media {
+
+// AVDASurfaceBundle is a Java surface, and the SurfaceTexture or Overlay that
+// backs it.
+//
+// Once a MediaCodec is configured with an output surface, the corresponding
+// AVDASurfaceBundle should be kept alive as long as the codec to prevent
+// crashes due to the codec losing its output surface.
+// TODO(watk): Remove AVDA from the name.
+struct MEDIA_GPU_EXPORT AVDASurfaceBundle
+ : public base::RefCountedThreadSafe<AVDASurfaceBundle> {
+ public:
+ // Create an empty bundle to be manually populated.
+ explicit AVDASurfaceBundle();
+ explicit AVDASurfaceBundle(std::unique_ptr<AndroidOverlay> overlay);
+ explicit AVDASurfaceBundle(
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture_owner);
+
+ const base::android::JavaRef<jobject>& GetJavaSurface() const;
+
+ // The Overlay or SurfaceTexture.
+ std::unique_ptr<AndroidOverlay> overlay;
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture;
+
+ // The Java surface for |surface_texture|.
+ gl::ScopedJavaSurface surface_texture_surface;
+
+ private:
+ ~AVDASurfaceBundle();
+ friend class base::RefCountedThreadSafe<AVDASurfaceBundle>;
+
+ DISALLOW_COPY_AND_ASSIGN(AVDASurfaceBundle);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_AVDA_SURFACE_BUNDLE_H_
diff --git a/chromium/media/gpu/android/codec_image.cc b/chromium/media/gpu/android/codec_image.cc
index 8227161ae45..e230f276664 100644
--- a/chromium/media/gpu/android/codec_image.cc
+++ b/chromium/media/gpu/android/codec_image.cc
@@ -10,7 +10,7 @@
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "gpu/command_buffer/service/texture_manager.h"
-#include "media/gpu/surface_texture_gl_owner.h"
+#include "media/gpu/android/surface_texture_gl_owner.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/scoped_make_current.h"
@@ -32,12 +32,15 @@ std::unique_ptr<ui::ScopedMakeCurrent> MakeCurrentIfNeeded(
} // namespace
-CodecImage::CodecImage(std::unique_ptr<CodecOutputBuffer> output_buffer,
- scoped_refptr<SurfaceTextureGLOwner> surface_texture,
- DestructionCb destruction_cb)
+CodecImage::CodecImage(
+ std::unique_ptr<CodecOutputBuffer> output_buffer,
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
+ DestructionCb destruction_cb)
: phase_(Phase::kInCodec),
output_buffer_(std::move(output_buffer)),
surface_texture_(std::move(surface_texture)),
+ promotion_hint_cb_(std::move(promotion_hint_cb)),
destruction_cb_(std::move(destruction_cb)) {}
CodecImage::~CodecImage() {
@@ -92,8 +95,10 @@ bool CodecImage::ScheduleOverlayPlane(gfx::AcceleratedWidget widget,
// Move the overlay if needed.
if (most_recent_bounds_ != bounds_rect) {
most_recent_bounds_ = bounds_rect;
- // TODO(watk): Implement overlay layout scheduling. Either post the call
- // or create a threadsafe wrapper.
+ // TODO(liberato): When we start getting promotion hints, then we should
+ // not send a hint from NotifyPromotionHint() if it's promotable and we
+ // don't have a surface texture. We'll handle it here.
+ promotion_hint_cb_.Run(PromotionHintAggregator::Hint(bounds_rect, true));
}
RenderToOverlay();
diff --git a/chromium/media/gpu/android/codec_image.h b/chromium/media/gpu/android/codec_image.h
index a4da8a30e00..3b9b7afb01e 100644
--- a/chromium/media/gpu/android/codec_image.h
+++ b/chromium/media/gpu/android/codec_image.h
@@ -13,8 +13,9 @@
#include "base/macros.h"
#include "gpu/command_buffer/service/gl_stream_texture_image.h"
#include "media/gpu/android/codec_wrapper.h"
+#include "media/gpu/android/promotion_hint_aggregator.h"
+#include "media/gpu/android/surface_texture_gl_owner.h"
#include "media/gpu/media_gpu_export.h"
-#include "media/gpu/surface_texture_gl_owner.h"
namespace media {
@@ -27,6 +28,7 @@ class MEDIA_GPU_EXPORT CodecImage : public gpu::gles2::GLStreamTextureImage {
CodecImage(std::unique_ptr<CodecOutputBuffer> output_buffer,
scoped_refptr<SurfaceTextureGLOwner> surface_texture,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
DestructionCb destruction_cb);
// gl::GLImage implementation
@@ -109,6 +111,9 @@ class MEDIA_GPU_EXPORT CodecImage : public gpu::gles2::GLStreamTextureImage {
// The bounds last sent to the overlay.
gfx::Rect most_recent_bounds_;
+ // Callback to notify about promotion hints and overlay position.
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb_;
+
DestructionCb destruction_cb_;
DISALLOW_COPY_AND_ASSIGN(CodecImage);
diff --git a/chromium/media/gpu/android/codec_image_unittest.cc b/chromium/media/gpu/android/codec_image_unittest.cc
index ebaa9eb0c4a..a9711bdb997 100644
--- a/chromium/media/gpu/android/codec_image_unittest.cc
+++ b/chromium/media/gpu/android/codec_image_unittest.cc
@@ -11,7 +11,7 @@
#include "gpu/command_buffer/service/texture_manager.h"
#include "media/base/android/media_codec_bridge.h"
#include "media/base/android/mock_media_codec_bridge.h"
-#include "media/gpu/mock_surface_texture_gl_owner.h"
+#include "media/gpu/android/mock_surface_texture_gl_owner.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/geometry/rect.h"
@@ -40,8 +40,9 @@ class CodecImageTest : public testing::Test {
void SetUp() override {
auto codec = base::MakeUnique<NiceMock<MockMediaCodecBridge>>();
codec_ = codec.get();
- wrapper_ = base::MakeUnique<CodecWrapper>(std::move(codec),
- base::Bind(&base::DoNothing));
+ wrapper_ = base::MakeUnique<CodecWrapper>(
+ CodecSurfacePair(std::move(codec), new AVDASurfaceBundle()),
+ base::Bind(&base::DoNothing));
ON_CALL(*codec_, DequeueOutputBuffer(_, _, _, _, _, _, _))
.WillByDefault(Return(MEDIA_CODEC_OK));
@@ -67,7 +68,7 @@ class CodecImageTest : public testing::Test {
share_group_ = nullptr;
surface_ = nullptr;
gl::init::ShutdownGL();
- wrapper_->TakeCodec();
+ wrapper_->TakeCodecSurfacePair();
}
enum ImageKind { kOverlay, kSurfaceTexture };
@@ -76,9 +77,11 @@ class CodecImageTest : public testing::Test {
CodecImage::DestructionCb destruction_cb = kNoop) {
std::unique_ptr<CodecOutputBuffer> buffer;
wrapper_->DequeueOutputBuffer(nullptr, nullptr, &buffer);
- return new CodecImage(std::move(buffer),
- kind == kSurfaceTexture ? surface_texture_ : nullptr,
- std::move(destruction_cb));
+ return new CodecImage(
+ std::move(buffer), kind == kSurfaceTexture ? surface_texture_ : nullptr,
+ base::BindRepeating(&PromotionHintReceiver::OnPromotionHint,
+ base::Unretained(&promotion_hint_receiver_)),
+ std::move(destruction_cb));
}
base::test::ScopedTaskEnvironment scoped_task_environment_;
@@ -88,6 +91,13 @@ class CodecImageTest : public testing::Test {
scoped_refptr<gl::GLContext> context_;
scoped_refptr<gl::GLShareGroup> share_group_;
scoped_refptr<gl::GLSurface> surface_;
+
+ class PromotionHintReceiver {
+ public:
+ MOCK_METHOD1(OnPromotionHint, void(PromotionHintAggregator::Hint));
+ };
+
+ PromotionHintReceiver promotion_hint_receiver_;
};
TEST_F(CodecImageTest, DestructionCbRuns) {
@@ -168,8 +178,12 @@ TEST_F(CodecImageTest, GetTextureMatrixReturnsIdentityForOverlayImages) {
TEST_F(CodecImageTest, ScheduleOverlayPlaneTriggersFrontBufferRendering) {
auto i = NewImage(kOverlay);
EXPECT_CALL(*codec_, ReleaseOutputBuffer(_, true));
+ // Also verify that it sends the appropriate promotion hint so that the
+ // overlay is positioned properly.
+ PromotionHintAggregator::Hint hint(gfx::Rect(1, 2, 3, 4), true);
+ EXPECT_CALL(promotion_hint_receiver_, OnPromotionHint(hint));
i->ScheduleOverlayPlane(gfx::AcceleratedWidget(), 0, gfx::OverlayTransform(),
- gfx::Rect(), gfx::RectF());
+ hint.screen_rect, gfx::RectF());
ASSERT_TRUE(i->was_rendered_to_front_buffer());
}
@@ -182,7 +196,7 @@ TEST_F(CodecImageTest, CanRenderSurfaceTextureImageToBackBuffer) {
TEST_F(CodecImageTest, CodecBufferInvalidationResultsInRenderingFailure) {
auto i = NewImage(kSurfaceTexture);
// Invalidate the backing codec buffer.
- wrapper_->TakeCodec();
+ wrapper_->TakeCodecSurfacePair();
ASSERT_FALSE(i->RenderToSurfaceTextureBackBuffer());
}
@@ -208,13 +222,13 @@ TEST_F(CodecImageTest, PromotingTheBackBufferAlwaysSucceeds) {
i->RenderToSurfaceTextureBackBuffer();
// Invalidating the codec buffer doesn't matter after it's rendered to the
// back buffer.
- wrapper_->TakeCodec();
+ wrapper_->TakeCodecSurfacePair();
ASSERT_TRUE(i->RenderToFrontBuffer());
}
TEST_F(CodecImageTest, FrontBufferRenderingFailsIfBackBufferRenderingFailed) {
auto i = NewImage(kSurfaceTexture);
- wrapper_->TakeCodec();
+ wrapper_->TakeCodecSurfacePair();
i->RenderToSurfaceTextureBackBuffer();
ASSERT_FALSE(i->RenderToFrontBuffer());
}
diff --git a/chromium/media/gpu/android/codec_wrapper.cc b/chromium/media/gpu/android/codec_wrapper.cc
index e3efc2836d7..c9df3802c8a 100644
--- a/chromium/media/gpu/android/codec_wrapper.cc
+++ b/chromium/media/gpu/android/codec_wrapper.cc
@@ -11,6 +11,7 @@
#include <vector>
#include "base/memory/ptr_util.h"
+#include "base/optional.h"
#include "base/stl_util.h"
#include "media/base/android/media_codec_util.h"
#include "media/base/bind_to_current_loop.h"
@@ -22,37 +23,28 @@ namespace media {
// CodecOutputBuffer are the only two things that hold references to it.
class CodecWrapperImpl : public base::RefCountedThreadSafe<CodecWrapperImpl> {
public:
- CodecWrapperImpl(std::unique_ptr<MediaCodecBridge> codec,
+ CodecWrapperImpl(CodecSurfacePair codec_surface_pair,
base::Closure output_buffer_release_cb);
- std::unique_ptr<MediaCodecBridge> TakeCodec();
- bool HasValidCodecOutputBuffers() const;
- void DiscardCodecOutputBuffers();
+ using DequeueStatus = CodecWrapper::DequeueStatus;
+ using QueueStatus = CodecWrapper::QueueStatus;
+
+ CodecSurfacePair TakeCodecSurfacePair();
+ bool HasUnreleasedOutputBuffers() const;
+ void DiscardOutputBuffers();
bool IsFlushed() const;
bool IsDraining() const;
bool IsDrained() const;
bool SupportsFlush(DeviceInfo* device_info) const;
bool Flush();
- MediaCodecStatus QueueInputBuffer(int index,
- const uint8_t* data,
- size_t data_size,
- base::TimeDelta presentation_time);
- MediaCodecStatus QueueSecureInputBuffer(
- int index,
- const uint8_t* data,
- size_t data_size,
- const std::string& key_id,
- const std::string& iv,
- const std::vector<SubsampleEntry>& subsamples,
- const EncryptionScheme& encryption_scheme,
- base::TimeDelta presentation_time);
- void QueueEOS(int input_buffer_index);
- MediaCodecStatus DequeueInputBuffer(int* index);
- MediaCodecStatus DequeueOutputBuffer(
+ bool SetSurface(scoped_refptr<AVDASurfaceBundle> surface_bundle);
+ scoped_refptr<AVDASurfaceBundle> SurfaceBundle();
+ QueueStatus QueueInputBuffer(const DecoderBuffer& buffer,
+ const EncryptionScheme& encryption_scheme);
+ DequeueStatus DequeueOutputBuffer(
base::TimeDelta* presentation_time,
bool* end_of_stream,
std::unique_ptr<CodecOutputBuffer>* codec_buffer);
- MediaCodecStatus SetSurface(const base::android::JavaRef<jobject>& surface);
// Releases the codec buffer and optionally renders it. This is a noop if
// the codec buffer is not valid. Can be called on any thread. Returns true if
@@ -71,18 +63,26 @@ class CodecWrapperImpl : public base::RefCountedThreadSafe<CodecWrapperImpl> {
friend base::RefCountedThreadSafe<CodecWrapperImpl>;
~CodecWrapperImpl();
- void DiscardCodecOutputBuffers_Locked();
+ void DiscardOutputBuffers_Locked();
// |lock_| protects access to all member variables.
mutable base::Lock lock_;
- std::unique_ptr<MediaCodecBridge> codec_;
State state_;
+ std::unique_ptr<MediaCodecBridge> codec_;
+
+ // The currently configured surface.
+ scoped_refptr<AVDASurfaceBundle> surface_bundle_;
// Buffer ids are unique for a given CodecWrapper and map to MediaCodec buffer
// indices.
int64_t next_buffer_id_;
base::flat_map<int64_t, int> buffer_ids_;
+ // An input buffer that was dequeued but subsequently rejected from
+ // QueueInputBuffer() because the codec didn't have the crypto key. We
+ // maintain ownership of it and reuse it next time.
+ base::Optional<int> owned_input_buffer_;
+
// The current output size. Updated when DequeueOutputBuffer() reports
// OUTPUT_FORMAT_CHANGED.
gfx::Size size_;
@@ -107,10 +107,11 @@ bool CodecOutputBuffer::ReleaseToSurface() {
return codec_->ReleaseCodecOutputBuffer(id_, true);
}
-CodecWrapperImpl::CodecWrapperImpl(std::unique_ptr<MediaCodecBridge> codec,
+CodecWrapperImpl::CodecWrapperImpl(CodecSurfacePair codec_surface_pair,
base::Closure output_buffer_release_cb)
- : codec_(std::move(codec)),
- state_(State::kFlushed),
+ : state_(State::kFlushed),
+ codec_(std::move(codec_surface_pair.first)),
+ surface_bundle_(std::move(codec_surface_pair.second)),
next_buffer_id_(0),
output_buffer_release_cb_(std::move(output_buffer_release_cb)) {
DVLOG(2) << __func__;
@@ -118,13 +119,13 @@ CodecWrapperImpl::CodecWrapperImpl(std::unique_ptr<MediaCodecBridge> codec,
CodecWrapperImpl::~CodecWrapperImpl() = default;
-std::unique_ptr<MediaCodecBridge> CodecWrapperImpl::TakeCodec() {
+CodecSurfacePair CodecWrapperImpl::TakeCodecSurfacePair() {
DVLOG(2) << __func__;
base::AutoLock l(lock_);
if (!codec_)
- return nullptr;
- DiscardCodecOutputBuffers_Locked();
- return std::move(codec_);
+ return {nullptr, nullptr};
+ DiscardOutputBuffers_Locked();
+ return {std::move(codec_), std::move(surface_bundle_)};
}
bool CodecWrapperImpl::IsFlushed() const {
@@ -142,18 +143,18 @@ bool CodecWrapperImpl::IsDrained() const {
return state_ == State::kDrained;
}
-bool CodecWrapperImpl::HasValidCodecOutputBuffers() const {
+bool CodecWrapperImpl::HasUnreleasedOutputBuffers() const {
base::AutoLock l(lock_);
return !buffer_ids_.empty();
}
-void CodecWrapperImpl::DiscardCodecOutputBuffers() {
+void CodecWrapperImpl::DiscardOutputBuffers() {
DVLOG(2) << __func__;
base::AutoLock l(lock_);
- DiscardCodecOutputBuffers_Locked();
+ DiscardOutputBuffers_Locked();
}
-void CodecWrapperImpl::DiscardCodecOutputBuffers_Locked() {
+void CodecWrapperImpl::DiscardOutputBuffers_Locked() {
DVLOG(2) << __func__;
lock_.AssertAcquired();
for (auto& kv : buffer_ids_)
@@ -172,8 +173,9 @@ bool CodecWrapperImpl::Flush() {
base::AutoLock l(lock_);
DCHECK(codec_ && state_ != State::kError);
- // Dequeued output buffers are invalidated by flushing.
+ // Dequeued buffers are invalidated by flushing.
buffer_ids_.clear();
+ owned_input_buffer_.reset();
auto status = codec_->Flush();
if (status == MEDIA_CODEC_ERROR) {
state_ = State::kError;
@@ -183,77 +185,85 @@ bool CodecWrapperImpl::Flush() {
return true;
}
-MediaCodecStatus CodecWrapperImpl::QueueInputBuffer(
- int index,
- const uint8_t* data,
- size_t data_size,
- base::TimeDelta presentation_time) {
+CodecWrapperImpl::QueueStatus CodecWrapperImpl::QueueInputBuffer(
+ const DecoderBuffer& buffer,
+ const EncryptionScheme& encryption_scheme) {
DVLOG(4) << __func__;
base::AutoLock l(lock_);
DCHECK(codec_ && state_ != State::kError);
- auto status =
- codec_->QueueInputBuffer(index, data, data_size, presentation_time);
- if (status == MEDIA_CODEC_ERROR)
- state_ = State::kError;
- else
- state_ = State::kRunning;
- return status;
-}
-
-MediaCodecStatus CodecWrapperImpl::QueueSecureInputBuffer(
- int index,
- const uint8_t* data,
- size_t data_size,
- const std::string& key_id,
- const std::string& iv,
- const std::vector<SubsampleEntry>& subsamples,
- const EncryptionScheme& encryption_scheme,
- base::TimeDelta presentation_time) {
- DVLOG(4) << __func__;
- base::AutoLock l(lock_);
- DCHECK(codec_ && state_ != State::kError);
+ // Dequeue an input buffer if we don't already own one.
+ int input_buffer;
+ if (owned_input_buffer_) {
+ input_buffer = *owned_input_buffer_;
+ owned_input_buffer_.reset();
+ } else {
+ MediaCodecStatus status =
+ codec_->DequeueInputBuffer(base::TimeDelta(), &input_buffer);
+ switch (status) {
+ case MEDIA_CODEC_ERROR:
+ state_ = State::kError;
+ return QueueStatus::kError;
+ case MEDIA_CODEC_TRY_AGAIN_LATER:
+ return QueueStatus::kTryAgainLater;
+ case MEDIA_CODEC_OK:
+ break;
+ default:
+ NOTREACHED();
+ return QueueStatus::kError;
+ }
+ }
- auto status = codec_->QueueSecureInputBuffer(
- index, data, data_size, key_id, iv, subsamples, encryption_scheme,
- presentation_time);
- if (status == MEDIA_CODEC_ERROR)
- state_ = State::kError;
- else
- state_ = State::kRunning;
- return status;
-}
+ // Queue EOS if it's an EOS buffer.
+ if (buffer.end_of_stream()) {
+ // Some MediaCodecs consider it an error to get an EOS as the first buffer
+ // (http://crbug.com/672268).
+ DCHECK_NE(state_, State::kFlushed);
+ codec_->QueueEOS(input_buffer);
+ state_ = State::kDraining;
+ return QueueStatus::kOk;
+ }
-void CodecWrapperImpl::QueueEOS(int input_buffer_index) {
- DVLOG(2) << __func__;
- base::AutoLock l(lock_);
- DCHECK(codec_ && state_ != State::kError);
- // Some MediaCodecs consider it an error to get an EOS as the first buffer
- // (http://crbug.com/672268).
- DCHECK_NE(state_, State::kFlushed);
- codec_->QueueEOS(input_buffer_index);
- state_ = State::kDraining;
-}
+ // Queue a buffer.
+ const DecryptConfig* decrypt_config = buffer.decrypt_config();
+ bool encrypted = decrypt_config && decrypt_config->is_encrypted();
+ MediaCodecStatus status;
+ if (encrypted) {
+ status = codec_->QueueSecureInputBuffer(
+ input_buffer, buffer.data(), buffer.data_size(),
+ decrypt_config->key_id(), decrypt_config->iv(),
+ decrypt_config->subsamples(), encryption_scheme, buffer.timestamp());
+ } else {
+ status = codec_->QueueInputBuffer(input_buffer, buffer.data(),
+ buffer.data_size(), buffer.timestamp());
+ }
-MediaCodecStatus CodecWrapperImpl::DequeueInputBuffer(int* index) {
- DVLOG(4) << __func__;
- base::AutoLock l(lock_);
- DCHECK(codec_ && state_ != State::kError);
- auto status = codec_->DequeueInputBuffer(base::TimeDelta(), index);
- if (status == MEDIA_CODEC_ERROR)
- state_ = State::kError;
- return status;
+ switch (status) {
+ case MEDIA_CODEC_OK:
+ state_ = State::kRunning;
+ return QueueStatus::kOk;
+ case MEDIA_CODEC_ERROR:
+ state_ = State::kError;
+ return QueueStatus::kError;
+ case MEDIA_CODEC_NO_KEY:
+ // The input buffer remains owned by us, so save it for reuse.
+ owned_input_buffer_ = input_buffer;
+ return QueueStatus::kNoKey;
+ default:
+ NOTREACHED();
+ return QueueStatus::kError;
+ }
}
-MediaCodecStatus CodecWrapperImpl::DequeueOutputBuffer(
+CodecWrapperImpl::DequeueStatus CodecWrapperImpl::DequeueOutputBuffer(
base::TimeDelta* presentation_time,
bool* end_of_stream,
std::unique_ptr<CodecOutputBuffer>* codec_buffer) {
DVLOG(4) << __func__;
base::AutoLock l(lock_);
DCHECK(codec_ && state_ != State::kError);
- // If |*codec_buffer| were not null, deleting it may deadlock when it
- // tries to release itself.
+ // If |*codec_buffer| were not null, deleting it would deadlock when its
+ // destructor calls ReleaseCodecOutputBuffer().
DCHECK(!*codec_buffer);
// Dequeue in a loop so we can avoid propagating the uninteresting
@@ -276,50 +286,61 @@ MediaCodecStatus CodecWrapperImpl::DequeueOutputBuffer(
codec_->ReleaseOutputBuffer(index, false);
if (end_of_stream)
*end_of_stream = true;
- return status;
+ return DequeueStatus::kOk;
}
int64_t buffer_id = next_buffer_id_++;
buffer_ids_[buffer_id] = index;
*codec_buffer =
base::WrapUnique(new CodecOutputBuffer(this, buffer_id, size_));
- return status;
+ return DequeueStatus::kOk;
+ }
+ case MEDIA_CODEC_TRY_AGAIN_LATER: {
+ return DequeueStatus::kTryAgainLater;
}
case MEDIA_CODEC_ERROR: {
state_ = State::kError;
- return status;
+ return DequeueStatus::kError;
}
case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED: {
- // An OUTPUT_FORMAT_CHANGED is not reported after Flush() if the frame
- // size does not change.
if (codec_->GetOutputSize(&size_) == MEDIA_CODEC_ERROR) {
state_ = State::kError;
- return MEDIA_CODEC_ERROR;
+ return DequeueStatus::kError;
}
continue;
}
- case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
+ case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED: {
continue;
- default:
- return status;
+ }
+ case MEDIA_CODEC_NO_KEY: {
+ NOTREACHED();
+ return DequeueStatus::kError;
+ }
}
}
state_ = State::kError;
- return MEDIA_CODEC_ERROR;
+ return DequeueStatus::kError;
}
-MediaCodecStatus CodecWrapperImpl::SetSurface(
- const base::android::JavaRef<jobject>& surface) {
+bool CodecWrapperImpl::SetSurface(
+ scoped_refptr<AVDASurfaceBundle> surface_bundle) {
DVLOG(2) << __func__;
base::AutoLock l(lock_);
+ DCHECK(surface_bundle);
DCHECK(codec_ && state_ != State::kError);
- if (!codec_->SetSurface(surface)) {
+ if (!codec_->SetSurface(surface_bundle->GetJavaSurface())) {
state_ = State::kError;
- return MEDIA_CODEC_ERROR;
+ return false;
}
- return MEDIA_CODEC_OK;
+ surface_bundle_ = std::move(surface_bundle);
+ return true;
+}
+
+scoped_refptr<AVDASurfaceBundle> CodecWrapperImpl::SurfaceBundle() {
+ base::AutoLock l(lock_);
+ return surface_bundle_;
}
bool CodecWrapperImpl::ReleaseCodecOutputBuffer(int64_t id, bool render) {
@@ -334,7 +355,8 @@ bool CodecWrapperImpl::ReleaseCodecOutputBuffer(int64_t id, bool render) {
if (!valid)
return false;
- // Discard the buffers preceding the one we're releasing.
+ // Discard the buffers preceding the one we're releasing. The buffers are in
+ // presentation order because the ids are generated in presentation order.
for (auto it = buffer_ids_.begin(); it < buffer_it; ++it) {
int index = it->second;
codec_->ReleaseOutputBuffer(index, false);
@@ -349,26 +371,26 @@ bool CodecWrapperImpl::ReleaseCodecOutputBuffer(int64_t id, bool render) {
return true;
}
-CodecWrapper::CodecWrapper(std::unique_ptr<MediaCodecBridge> codec,
+CodecWrapper::CodecWrapper(CodecSurfacePair codec_surface_pair,
base::Closure output_buffer_release_cb)
- : impl_(new CodecWrapperImpl(std::move(codec),
+ : impl_(new CodecWrapperImpl(std::move(codec_surface_pair),
std::move(output_buffer_release_cb))) {}
CodecWrapper::~CodecWrapper() {
// The codec must have already been taken.
- DCHECK(!impl_->TakeCodec());
+ DCHECK(!impl_->TakeCodecSurfacePair().first);
}
-std::unique_ptr<MediaCodecBridge> CodecWrapper::TakeCodec() {
- return impl_->TakeCodec();
+CodecSurfacePair CodecWrapper::TakeCodecSurfacePair() {
+ return impl_->TakeCodecSurfacePair();
}
-bool CodecWrapper::HasValidCodecOutputBuffers() const {
- return impl_->HasValidCodecOutputBuffers();
+bool CodecWrapper::HasUnreleasedOutputBuffers() const {
+ return impl_->HasUnreleasedOutputBuffers();
}
-void CodecWrapper::DiscardCodecOutputBuffers() {
- impl_->DiscardCodecOutputBuffers();
+void CodecWrapper::DiscardOutputBuffers() {
+ impl_->DiscardOutputBuffers();
}
bool CodecWrapper::SupportsFlush(DeviceInfo* device_info) const {
@@ -391,37 +413,13 @@ bool CodecWrapper::Flush() {
return impl_->Flush();
}
-MediaCodecStatus CodecWrapper::QueueInputBuffer(
- int index,
- const uint8_t* data,
- size_t data_size,
- base::TimeDelta presentation_time) {
- return impl_->QueueInputBuffer(index, data, data_size, presentation_time);
+CodecWrapper::QueueStatus CodecWrapper::QueueInputBuffer(
+ const DecoderBuffer& buffer,
+ const EncryptionScheme& encryption_scheme) {
+ return impl_->QueueInputBuffer(buffer, encryption_scheme);
}
-MediaCodecStatus CodecWrapper::QueueSecureInputBuffer(
- int index,
- const uint8_t* data,
- size_t data_size,
- const std::string& key_id,
- const std::string& iv,
- const std::vector<SubsampleEntry>& subsamples,
- const EncryptionScheme& encryption_scheme,
- base::TimeDelta presentation_time) {
- return impl_->QueueSecureInputBuffer(index, data, data_size, key_id, iv,
- subsamples, encryption_scheme,
- presentation_time);
-}
-
-void CodecWrapper::QueueEOS(int input_buffer_index) {
- impl_->QueueEOS(input_buffer_index);
-}
-
-MediaCodecStatus CodecWrapper::DequeueInputBuffer(int* index) {
- return impl_->DequeueInputBuffer(index);
-}
-
-MediaCodecStatus CodecWrapper::DequeueOutputBuffer(
+CodecWrapper::DequeueStatus CodecWrapper::DequeueOutputBuffer(
base::TimeDelta* presentation_time,
bool* end_of_stream,
std::unique_ptr<CodecOutputBuffer>* codec_buffer) {
@@ -429,9 +427,12 @@ MediaCodecStatus CodecWrapper::DequeueOutputBuffer(
codec_buffer);
}
-MediaCodecStatus CodecWrapper::SetSurface(
- const base::android::JavaRef<jobject>& surface) {
- return impl_->SetSurface(surface);
+bool CodecWrapper::SetSurface(scoped_refptr<AVDASurfaceBundle> surface_bundle) {
+ return impl_->SetSurface(std::move(surface_bundle));
+}
+
+scoped_refptr<AVDASurfaceBundle> CodecWrapper::SurfaceBundle() {
+ return impl_->SurfaceBundle();
}
} // namespace media
diff --git a/chromium/media/gpu/android/codec_wrapper.h b/chromium/media/gpu/android/codec_wrapper.h
index 87d0261c1a0..b316f114d40 100644
--- a/chromium/media/gpu/android/codec_wrapper.h
+++ b/chromium/media/gpu/android/codec_wrapper.h
@@ -16,13 +16,17 @@
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "media/base/android/media_codec_bridge.h"
+#include "media/base/decoder_buffer.h"
+#include "media/gpu/android/avda_surface_bundle.h"
#include "media/gpu/android/device_info.h"
#include "media/gpu/media_gpu_export.h"
-#include "media/gpu/surface_texture_gl_owner.h"
namespace media {
class CodecWrapperImpl;
+using CodecSurfacePair = std::pair<std::unique_ptr<MediaCodecBridge>,
+ scoped_refptr<AVDASurfaceBundle>>;
+
// A MediaCodec output buffer that can be released on any thread. Releasing a
// CodecOutputBuffer implicitly discards all CodecOutputBuffers that
// precede it in presentation order; i.e., the only supported use case is to
@@ -52,30 +56,26 @@ class MEDIA_GPU_EXPORT CodecOutputBuffer {
DISALLOW_COPY_AND_ASSIGN(CodecOutputBuffer);
};
-// This wraps a MediaCodecBridge and provides a pared down version of its
-// interface. It also adds the following features:
-// * It outputs CodecOutputBuffers from DequeueOutputBuffer() which can be
-// safely rendered on any thread, and that will release their buffers on
-// destruction. This lets us decode on one thread while rendering on another.
-// * It maintains codec specific state like whether an error has occurred.
-//
+// This wraps a MediaCodecBridge and provides higher level features and tracks
+// more state that is useful for video decoding.
// CodecWrapper is not threadsafe, but the CodecOutputBuffers it outputs
// can be released on any thread.
class MEDIA_GPU_EXPORT CodecWrapper {
public:
- // |codec| should be in the flushed state, i.e., freshly configured or after a
- // Flush(). |output_buffer_release_cb| will be run whenever an output buffer
- // is released back to the codec (whether it's rendered or not). This is a
- // signal that the codec might be ready to accept more input. It may be run on
- // any thread.
- CodecWrapper(std::unique_ptr<MediaCodecBridge> codec,
+ // The given codec should be in the flushed state, i.e., freshly configured or
+ // after a Flush(). The surface must be the one that the codec was configured
+ // with. |output_buffer_release_cb| will be run whenever an output buffer is
+ // released back to the codec (whether it's rendered or not). This is a signal
+ // that the codec might be ready to accept more input. It may be run on any
+ // thread.
+ CodecWrapper(CodecSurfacePair codec_surface_pair,
base::Closure output_buffer_release_cb);
~CodecWrapper();
- // Takes the backing codec and discards all outstanding codec buffers. This
- // lets you tear down the codec while there are still CodecOutputBuffers
- // referencing |this|.
- std::unique_ptr<MediaCodecBridge> TakeCodec();
+ // Takes the backing codec and surface, implicitly discarding all outstanding
+ // codec buffers. It's safe to use CodecOutputBuffers after this is called,
+ // but they can no longer be rendered.
+ CodecSurfacePair TakeCodecSurfacePair();
// Whether the codec is in the flushed state.
bool IsFlushed() const;
@@ -86,33 +86,29 @@ class MEDIA_GPU_EXPORT CodecWrapper {
// Whether an EOS has been dequeued but the codec hasn't been flushed yet.
bool IsDrained() const;
- // Whether there are any valid CodecOutputBuffers that have not been released.
- bool HasValidCodecOutputBuffers() const;
+ // Whether there are any dequeued output buffers that have not been released.
+ bool HasUnreleasedOutputBuffers() const;
- // Releases currently dequeued codec buffers back to the codec without
- // rendering.
- void DiscardCodecOutputBuffers();
+ // Releases all dequeued output buffers back to the codec without rendering.
+ void DiscardOutputBuffers();
// Whether the codec supports Flush().
bool SupportsFlush(DeviceInfo* device_info) const;
- // See MediaCodecBridge documentation for the following.
+ // Flushes the codec and discards all output buffers.
bool Flush();
- MediaCodecStatus QueueInputBuffer(int index,
- const uint8_t* data,
- size_t data_size,
- base::TimeDelta presentation_time);
- MediaCodecStatus QueueSecureInputBuffer(
- int index,
- const uint8_t* data,
- size_t data_size,
- const std::string& key_id,
- const std::string& iv,
- const std::vector<SubsampleEntry>& subsamples,
- const EncryptionScheme& encryption_scheme,
- base::TimeDelta presentation_time);
- void QueueEOS(int input_buffer_index);
- MediaCodecStatus DequeueInputBuffer(int* index);
+
+ // Sets the given surface and returns true on success.
+ bool SetSurface(scoped_refptr<AVDASurfaceBundle> surface_bundle);
+
+ // Returns the surface bundle that the codec is currently configured with.
+ // Returns null after TakeCodecSurfacePair() is called.
+ scoped_refptr<AVDASurfaceBundle> SurfaceBundle();
+
+ // Queues |buffer| if the codec has an available input buffer.
+ enum class QueueStatus { kOk, kError, kTryAgainLater, kNoKey };
+ QueueStatus QueueInputBuffer(const DecoderBuffer& buffer,
+ const EncryptionScheme& encryption_scheme);
// Like MediaCodecBridge::DequeueOutputBuffer() but it outputs a
// CodecOutputBuffer instead of an index. |*codec_buffer| must be null.
@@ -121,15 +117,12 @@ class MEDIA_GPU_EXPORT CodecWrapper {
// codec immediately. Unlike MediaCodecBridge, this does not return
// MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED or MEDIA_CODEC_OUTPUT_FORMAT_CHANGED. It
// tries to dequeue another buffer instead.
- MediaCodecStatus DequeueOutputBuffer(
+ enum class DequeueStatus { kOk, kError, kTryAgainLater };
+ DequeueStatus DequeueOutputBuffer(
base::TimeDelta* presentation_time,
bool* end_of_stream,
std::unique_ptr<CodecOutputBuffer>* codec_buffer);
- // Sets the given surface and returns MEDIA_CODEC_OK on success or
- // MEDIA_CODEC_ERROR on failure.
- MediaCodecStatus SetSurface(const base::android::JavaRef<jobject>& surface);
-
private:
scoped_refptr<CodecWrapperImpl> impl_;
DISALLOW_COPY_AND_ASSIGN(CodecWrapper);
diff --git a/chromium/media/gpu/android/codec_wrapper_unittest.cc b/chromium/media/gpu/android/codec_wrapper_unittest.cc
index 16ff9fdf034..711a01cab66 100644
--- a/chromium/media/gpu/android/codec_wrapper_unittest.cc
+++ b/chromium/media/gpu/android/codec_wrapper_unittest.cc
@@ -30,17 +30,24 @@ class CodecWrapperTest : public testing::Test {
CodecWrapperTest() {
auto codec = base::MakeUnique<NiceMock<MockMediaCodecBridge>>();
codec_ = codec.get();
- wrapper_ = base::MakeUnique<CodecWrapper>(std::move(codec),
- output_buffer_release_cb_.Get());
+ surface_bundle_ = base::MakeRefCounted<AVDASurfaceBundle>();
+ wrapper_ = base::MakeUnique<CodecWrapper>(
+ CodecSurfacePair(std::move(codec), surface_bundle_),
+ output_buffer_release_cb_.Get());
ON_CALL(*codec_, DequeueOutputBuffer(_, _, _, _, _, _, _))
.WillByDefault(Return(MEDIA_CODEC_OK));
+ ON_CALL(*codec_, DequeueInputBuffer(_, _))
+ .WillByDefault(DoAll(SetArgPointee<1>(12), Return(MEDIA_CODEC_OK)));
ON_CALL(*codec_, QueueInputBuffer(_, _, _, _))
.WillByDefault(Return(MEDIA_CODEC_OK));
+
+ uint8_t data = 0;
+ fake_decoder_buffer_ = DecoderBuffer::CopyFrom(&data, 1);
}
~CodecWrapperTest() override {
// ~CodecWrapper asserts that the codec was taken.
- wrapper_->TakeCodec();
+ wrapper_->TakeCodecSurfacePair();
}
std::unique_ptr<CodecOutputBuffer> DequeueCodecOutputBuffer() {
@@ -51,12 +58,14 @@ class CodecWrapperTest : public testing::Test {
NiceMock<MockMediaCodecBridge>* codec_;
std::unique_ptr<CodecWrapper> wrapper_;
+ scoped_refptr<AVDASurfaceBundle> surface_bundle_;
NiceMock<base::MockCallback<base::Closure>> output_buffer_release_cb_;
+ scoped_refptr<DecoderBuffer> fake_decoder_buffer_;
};
TEST_F(CodecWrapperTest, TakeCodecReturnsTheCodecFirstAndNullLater) {
- ASSERT_EQ(wrapper_->TakeCodec().get(), codec_);
- ASSERT_EQ(wrapper_->TakeCodec(), nullptr);
+ ASSERT_EQ(wrapper_->TakeCodecSurfacePair().first.get(), codec_);
+ ASSERT_EQ(wrapper_->TakeCodecSurfacePair().first, nullptr);
}
TEST_F(CodecWrapperTest, NoCodecOutputBufferReturnedIfDequeueFails) {
@@ -67,7 +76,7 @@ TEST_F(CodecWrapperTest, NoCodecOutputBufferReturnedIfDequeueFails) {
}
TEST_F(CodecWrapperTest, InitiallyThereAreNoValidCodecOutputBuffers) {
- ASSERT_FALSE(wrapper_->HasValidCodecOutputBuffers());
+ ASSERT_FALSE(wrapper_->HasUnreleasedOutputBuffers());
}
TEST_F(CodecWrapperTest, FlushInvalidatesCodecOutputBuffers) {
@@ -78,13 +87,13 @@ TEST_F(CodecWrapperTest, FlushInvalidatesCodecOutputBuffers) {
TEST_F(CodecWrapperTest, TakingTheCodecInvalidatesCodecOutputBuffers) {
auto codec_buffer = DequeueCodecOutputBuffer();
- wrapper_->TakeCodec();
+ wrapper_->TakeCodecSurfacePair();
ASSERT_FALSE(codec_buffer->ReleaseToSurface());
}
TEST_F(CodecWrapperTest, SetSurfaceInvalidatesCodecOutputBuffers) {
auto codec_buffer = DequeueCodecOutputBuffer();
- wrapper_->SetSurface(0);
+ wrapper_->SetSurface(base::MakeRefCounted<AVDASurfaceBundle>());
ASSERT_FALSE(codec_buffer->ReleaseToSurface());
}
@@ -94,7 +103,7 @@ TEST_F(CodecWrapperTest, CodecOutputBuffersAreAllInvalidatedTogether) {
wrapper_->Flush();
ASSERT_FALSE(codec_buffer1->ReleaseToSurface());
ASSERT_FALSE(codec_buffer2->ReleaseToSurface());
- ASSERT_FALSE(wrapper_->HasValidCodecOutputBuffers());
+ ASSERT_FALSE(wrapper_->HasUnreleasedOutputBuffers());
}
TEST_F(CodecWrapperTest, CodecOutputBuffersAfterFlushAreValid) {
@@ -134,13 +143,13 @@ TEST_F(CodecWrapperTest, CodecOutputBuffersDoNotReleaseIfAlreadyReleased) {
TEST_F(CodecWrapperTest, ReleasingCodecOutputBuffersAfterTheCodecIsSafe) {
auto codec_buffer = DequeueCodecOutputBuffer();
- wrapper_->TakeCodec();
+ wrapper_->TakeCodecSurfacePair();
codec_buffer->ReleaseToSurface();
}
TEST_F(CodecWrapperTest, DeletingCodecOutputBuffersAfterTheCodecIsSafe) {
auto codec_buffer = DequeueCodecOutputBuffer();
- wrapper_->TakeCodec();
+ wrapper_->TakeCodecSurfacePair();
// This test ensures the destructor doesn't crash.
codec_buffer = nullptr;
}
@@ -165,7 +174,7 @@ TEST_F(CodecWrapperTest, FormatChangedStatusIsSwallowed) {
.WillOnce(Return(MEDIA_CODEC_TRY_AGAIN_LATER));
std::unique_ptr<CodecOutputBuffer> codec_buffer;
auto status = wrapper_->DequeueOutputBuffer(nullptr, nullptr, &codec_buffer);
- ASSERT_EQ(status, MEDIA_CODEC_TRY_AGAIN_LATER);
+ ASSERT_EQ(status, CodecWrapper::DequeueStatus::kTryAgainLater);
}
TEST_F(CodecWrapperTest, BuffersChangedStatusIsSwallowed) {
@@ -174,7 +183,7 @@ TEST_F(CodecWrapperTest, BuffersChangedStatusIsSwallowed) {
.WillOnce(Return(MEDIA_CODEC_TRY_AGAIN_LATER));
std::unique_ptr<CodecOutputBuffer> codec_buffer;
auto status = wrapper_->DequeueOutputBuffer(nullptr, nullptr, &codec_buffer);
- ASSERT_EQ(status, MEDIA_CODEC_TRY_AGAIN_LATER);
+ ASSERT_EQ(status, CodecWrapper::DequeueStatus::kTryAgainLater);
}
TEST_F(CodecWrapperTest, MultipleFormatChangedStatusesIsAnError) {
@@ -182,7 +191,7 @@ TEST_F(CodecWrapperTest, MultipleFormatChangedStatusesIsAnError) {
.WillRepeatedly(Return(MEDIA_CODEC_OUTPUT_FORMAT_CHANGED));
std::unique_ptr<CodecOutputBuffer> codec_buffer;
auto status = wrapper_->DequeueOutputBuffer(nullptr, nullptr, &codec_buffer);
- ASSERT_EQ(status, MEDIA_CODEC_ERROR);
+ ASSERT_EQ(status, CodecWrapper::DequeueStatus::kError);
}
TEST_F(CodecWrapperTest, CodecOutputBuffersHaveTheCorrectSize) {
@@ -213,26 +222,27 @@ TEST_F(CodecWrapperTest, CodecStartsInFlushedState) {
ASSERT_FALSE(wrapper_->IsDrained());
}
-TEST_F(CodecWrapperTest, CodecIsNotFlushedAfterAnInputIsQueued) {
- wrapper_->QueueInputBuffer(0, nullptr, 0, base::TimeDelta());
+TEST_F(CodecWrapperTest, CodecIsNotInFlushedStateAfterAnInputIsQueued) {
+ wrapper_->QueueInputBuffer(*fake_decoder_buffer_, EncryptionScheme());
ASSERT_FALSE(wrapper_->IsFlushed());
ASSERT_FALSE(wrapper_->IsDraining());
ASSERT_FALSE(wrapper_->IsDrained());
}
-TEST_F(CodecWrapperTest, FlushReturnsCodecToFlushed) {
- wrapper_->QueueInputBuffer(0, nullptr, 0, base::TimeDelta());
+TEST_F(CodecWrapperTest, FlushTransitionsToFlushedState) {
+ wrapper_->QueueInputBuffer(*fake_decoder_buffer_, EncryptionScheme());
wrapper_->Flush();
ASSERT_TRUE(wrapper_->IsFlushed());
}
-TEST_F(CodecWrapperTest, EosTransitionsToStateDraining) {
- wrapper_->QueueInputBuffer(0, nullptr, 0, base::TimeDelta());
- wrapper_->QueueEOS(0);
+TEST_F(CodecWrapperTest, EosTransitionsToDrainingState) {
+ wrapper_->QueueInputBuffer(*fake_decoder_buffer_, EncryptionScheme());
+ auto eos = DecoderBuffer::CreateEOSBuffer();
+ wrapper_->QueueInputBuffer(*eos, EncryptionScheme());
ASSERT_TRUE(wrapper_->IsDraining());
}
-TEST_F(CodecWrapperTest, DequeuingEosTransitionsToStateDrained) {
+TEST_F(CodecWrapperTest, DequeuingEosTransitionsToDrainedState) {
// Set EOS on next dequeue.
codec_->ProduceOneOutput(MockMediaCodecBridge::kEos);
DequeueCodecOutputBuffer();
@@ -242,4 +252,34 @@ TEST_F(CodecWrapperTest, DequeuingEosTransitionsToStateDrained) {
ASSERT_FALSE(wrapper_->IsDrained());
}
+TEST_F(CodecWrapperTest, RejectedInputBuffersAreReused) {
+ // If we get a MEDIA_CODEC_NO_KEY status, the next time we try to queue a
+ // buffer the previous input buffer should be reused.
+ EXPECT_CALL(*codec_, DequeueInputBuffer(_, _))
+ .WillOnce(DoAll(SetArgPointee<1>(666), Return(MEDIA_CODEC_OK)));
+ EXPECT_CALL(*codec_, QueueInputBuffer(666, _, _, _))
+ .WillOnce(Return(MEDIA_CODEC_NO_KEY))
+ .WillOnce(Return(MEDIA_CODEC_OK));
+ auto status =
+ wrapper_->QueueInputBuffer(*fake_decoder_buffer_, EncryptionScheme());
+ ASSERT_EQ(status, CodecWrapper::QueueStatus::kNoKey);
+ wrapper_->QueueInputBuffer(*fake_decoder_buffer_, EncryptionScheme());
+}
+
+TEST_F(CodecWrapperTest, SurfaceBundleIsInitializedByConstructor) {
+ ASSERT_EQ(surface_bundle_.get(), wrapper_->SurfaceBundle());
+}
+
+TEST_F(CodecWrapperTest, SurfaceBundleIsUpdatedBySetSurface) {
+ auto new_bundle = base::MakeRefCounted<AVDASurfaceBundle>();
+ EXPECT_CALL(*codec_, SetSurface(_)).WillOnce(Return(true));
+ wrapper_->SetSurface(new_bundle);
+ ASSERT_EQ(new_bundle.get(), wrapper_->SurfaceBundle());
+}
+
+TEST_F(CodecWrapperTest, SurfaceBundleIsTaken) {
+ ASSERT_EQ(wrapper_->TakeCodecSurfacePair().second, surface_bundle_);
+ ASSERT_EQ(wrapper_->SurfaceBundle(), nullptr);
+}
+
} // namespace media
diff --git a/chromium/media/gpu/android/content_video_view_overlay.cc b/chromium/media/gpu/android/content_video_view_overlay.cc
new file mode 100644
index 00000000000..76d0a16f6f0
--- /dev/null
+++ b/chromium/media/gpu/android/content_video_view_overlay.cc
@@ -0,0 +1,78 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/content_video_view_overlay.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "gpu/ipc/common/gpu_surface_lookup.h"
+
+namespace media {
+
+// static
+std::unique_ptr<AndroidOverlay> ContentVideoViewOverlay::Create(
+ int surface_id,
+ AndroidOverlayConfig config) {
+ return base::MakeUnique<ContentVideoViewOverlay>(surface_id,
+ std::move(config));
+}
+
+ContentVideoViewOverlay::ContentVideoViewOverlay(int surface_id,
+ AndroidOverlayConfig config)
+ : surface_id_(surface_id), config_(std::move(config)), weak_factory_(this) {
+ if (ContentVideoViewOverlayAllocator::GetInstance()->AllocateSurface(this)) {
+ // We have the surface -- post a callback to our OnSurfaceAvailable.
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&ContentVideoViewOverlay::OnSurfaceAvailable,
+ weak_factory_.GetWeakPtr(), true));
+ }
+}
+
+ContentVideoViewOverlay::~ContentVideoViewOverlay() {
+ // Deallocate the surface. It's okay if we don't own it.
+ // Note that this only happens once any codec is done with us.
+ ContentVideoViewOverlayAllocator::GetInstance()->DeallocateSurface(this);
+}
+
+void ContentVideoViewOverlay::ScheduleLayout(const gfx::Rect& rect) {}
+
+const base::android::JavaRef<jobject>& ContentVideoViewOverlay::GetJavaSurface()
+ const {
+ return surface_.j_surface();
+}
+
+void ContentVideoViewOverlay::OnSurfaceAvailable(bool success) {
+ if (!success) {
+ // Notify that the surface won't be available.
+ config_.is_failed(this);
+ // |this| may be deleted.
+ return;
+ }
+
+ // Get the surface and notify our client.
+ surface_ =
+ gpu::GpuSurfaceLookup::GetInstance()->AcquireJavaSurface(surface_id_);
+
+ // If no surface was returned, then fail instead.
+ if (surface_.IsEmpty()) {
+ config_.is_failed(this);
+ // |this| may be deleted.
+ return;
+ }
+
+ config_.is_ready(this);
+}
+
+void ContentVideoViewOverlay::OnSurfaceDestroyed() {
+ RunSurfaceDestroyedCallbacks();
+ // |this| may be deleted, or deletion might be posted elsewhere.
+}
+
+int32_t ContentVideoViewOverlay::GetSurfaceId() {
+ return surface_id_;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/content_video_view_overlay.h b/chromium/media/gpu/android/content_video_view_overlay.h
new file mode 100644
index 00000000000..07f40328744
--- /dev/null
+++ b/chromium/media/gpu/android/content_video_view_overlay.h
@@ -0,0 +1,51 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_CONTENT_VIDEO_VIEW_OVERLAY_H_
+#define MEDIA_GPU_ANDROID_CONTENT_VIDEO_VIEW_OVERLAY_H_
+
+#include <memory>
+
+#include "base/memory/weak_ptr.h"
+#include "media/base/android/android_overlay.h"
+#include "media/gpu/android/content_video_view_overlay_allocator.h"
+#include "ui/gl/android/scoped_java_surface.h"
+
+namespace media {
+
+class ContentVideoViewOverlay
+ : public ContentVideoViewOverlayAllocator::Client {
+ public:
+ // This exists so we can bind construction into a callback returning
+ // std::unique_ptr<AndroidOverlay>.
+ static std::unique_ptr<AndroidOverlay> Create(int surface_id,
+ AndroidOverlayConfig config);
+
+ // |config| is ignored except for callbacks. Callbacks will not be called
+ // before this returns.
+ ContentVideoViewOverlay(int surface_id, AndroidOverlayConfig config);
+ ~ContentVideoViewOverlay() override;
+
+ // AndroidOverlay (via ContentVideoViewOverlayAllocator::Client)
+ // ContentVideoView ignores this, unfortunately.
+ void ScheduleLayout(const gfx::Rect& rect) override;
+ const base::android::JavaRef<jobject>& GetJavaSurface() const override;
+
+ // ContentVideoViewOverlayAllocator::Client
+ void OnSurfaceAvailable(bool success) override;
+ void OnSurfaceDestroyed() override;
+ int32_t GetSurfaceId() override;
+
+ private:
+ int surface_id_;
+ AndroidOverlayConfig config_;
+ gl::ScopedJavaSurface surface_;
+
+ base::WeakPtrFactory<ContentVideoViewOverlay> weak_factory_;
+ DISALLOW_COPY_AND_ASSIGN(ContentVideoViewOverlay);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_CONTENT_VIDEO_VIEW_OVERLAY_H_
diff --git a/chromium/media/gpu/android/content_video_view_overlay_allocator.cc b/chromium/media/gpu/android/content_video_view_overlay_allocator.cc
new file mode 100644
index 00000000000..a205ae783d3
--- /dev/null
+++ b/chromium/media/gpu/android/content_video_view_overlay_allocator.cc
@@ -0,0 +1,152 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/content_video_view_overlay_allocator.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+#include "media/gpu/android/avda_codec_allocator.h"
+
+namespace media {
+
+// static
+ContentVideoViewOverlayAllocator*
+ContentVideoViewOverlayAllocator::GetInstance() {
+ static ContentVideoViewOverlayAllocator* allocator =
+ new ContentVideoViewOverlayAllocator(
+ AVDACodecAllocator::GetInstance(base::ThreadTaskRunnerHandle::Get()));
+ return allocator;
+}
+
+ContentVideoViewOverlayAllocator::ContentVideoViewOverlayAllocator(
+ AVDACodecAllocator* allocator)
+ : allocator_(allocator) {}
+
+ContentVideoViewOverlayAllocator::~ContentVideoViewOverlayAllocator() {}
+
+bool ContentVideoViewOverlayAllocator::AllocateSurface(Client* client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ const int32_t surface_id = client->GetSurfaceId();
+ DVLOG(1) << __func__ << ": " << surface_id;
+ DCHECK_NE(surface_id, SurfaceManager::kNoSurfaceID);
+
+ // If it's not owned or being released, |client| now owns it.
+ // Note: it's owned until it's released, since AVDACodecAllocator does that.
+ // It keeps the bundle around (and also the overlay that's the current owner)
+ // until the codec is done with it. That's required to use AndroidOverlay.
+ // So, we don't need to check for 'being released'; the owner is good enough.
+ auto it = surface_owners_.find(surface_id);
+ if (it == surface_owners_.end()) {
+ OwnerRecord record;
+ record.owner = client;
+ surface_owners_.insert(OwnerMap::value_type(surface_id, record));
+ return true;
+ }
+
+ // Otherwise |client| replaces the previous waiter (if any).
+ OwnerRecord& record = it->second;
+ if (record.waiter)
+ record.waiter->OnSurfaceAvailable(false);
+ record.waiter = client;
+ return false;
+}
+
+void ContentVideoViewOverlayAllocator::DeallocateSurface(Client* client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ const int32_t surface_id = client->GetSurfaceId();
+ DCHECK_NE(surface_id, SurfaceManager::kNoSurfaceID);
+
+ // If we time out waiting for the surface to be destroyed, then we might have
+ // already removed |surface_id|. If it's now trying to deallocate, then
+ // maybe we just weren't patient enough, or mediaserver restarted.
+ auto it = surface_owners_.find(surface_id);
+ if (it == surface_owners_.end())
+ return;
+
+ OwnerRecord& record = it->second;
+ if (record.owner == client)
+ record.owner = nullptr;
+ else if (record.waiter == client)
+ record.waiter = nullptr;
+
+ // Promote the waiter if possible.
+ if (record.waiter && !record.owner) {
+ record.owner = record.waiter;
+ record.waiter = nullptr;
+ record.owner->OnSurfaceAvailable(true);
+ return;
+ }
+
+ // Remove the record if it's now unused.
+ if (!record.owner && !record.waiter)
+ surface_owners_.erase(it);
+}
+
+// During surface teardown we have to handle the following cases.
+// 1) No AVDA has acquired the surface, or the surface has already been
+// completely released.
+// This case is easy -- there's no owner or waiter, and we can return.
+//
+// 2) A MediaCodec is currently being configured with the surface on another
+// thread. Whether an AVDA owns the surface or has already deallocated it,
+// the MediaCodec should be dropped when configuration completes.
+// In this case, we'll find an owner. We'll notify it about the destruction.
+// Note that AVDA doesn't handle this case correctly right now, since it
+// doesn't know the state of codec creation on the codec thread. This is
+// only a problem because CVV has the 'wait on main thread' semantics.
+//
+// 3) An AVDA owns the surface and it responds to OnSurfaceDestroyed() by:
+// a) Replacing the destroyed surface by calling MediaCodec#setSurface().
+// b) Releasing the MediaCodec it's attached to.
+// In case a, the surface will be destroyed during OnSurfaceDestroyed.
+// In case b, we'll have to wait for the release to complete.
+//
+// 4) No AVDA owns the surface, but the MediaCodec it's attached to is currently
+// being destroyed on another thread.
+// This is the same as 3b.
+void ContentVideoViewOverlayAllocator::OnSurfaceDestroyed(int32_t surface_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(1) << __func__ << ": " << surface_id;
+
+ // If it isn't currently owned, then we're done. Rememeber that the overlay
+ // must outlive any user of it (MediaCodec!), and currently AVDACodecAllocator
+ // is responsible for making sure that happens for AVDA.
+ auto it = surface_owners_.find(surface_id);
+ if (it == surface_owners_.end())
+ return;
+
+ // Notify the owner and waiter (if any).
+ OwnerRecord& record = it->second;
+ if (record.waiter) {
+ record.waiter->OnSurfaceAvailable(false);
+ record.waiter = nullptr;
+ }
+
+ DCHECK(record.owner);
+
+ // |record| could be removed by the callback, if it deallocates the surface.
+ record.owner->OnSurfaceDestroyed();
+
+ // If owner deallocated the surface, then we don't need to wait. Note that
+ // the owner might have been deleted in that case. Since CVVOverlay only
+ // deallocates the surface during destruction, it's a safe bet.
+ it = surface_owners_.find(surface_id);
+ if (it == surface_owners_.end())
+ return;
+
+ // The surface is still in use, but should have been posted to another thread
+ // for destruction. Note that this isn't technically required for overlays
+ // in general, but CVV requires it. All of the pending release stuff should
+ // be moved here, or to custom deleters of CVVOverlay. However, in the
+ // interest of not changing too much at once, we let AVDACodecAllocator
+ // handle it. Since we're deprecating CVVOverlay anyway, all of this can be
+ // removed eventually.
+ // If the wait fails, then clean up |surface_owners_| anyway, since the codec
+ // release is probably hung up.
+ if (!allocator_->WaitForPendingRelease(record.owner))
+ surface_owners_.erase(it);
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/content_video_view_overlay_allocator.h b/chromium/media/gpu/android/content_video_view_overlay_allocator.h
new file mode 100644
index 00000000000..4e6d4db3390
--- /dev/null
+++ b/chromium/media/gpu/android/content_video_view_overlay_allocator.h
@@ -0,0 +1,86 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_CONTENT_VIDEO_VIEW_OVERLAY_ALLOCATOR_H_
+#define MEDIA_GPU_ANDROID_CONTENT_VIDEO_VIEW_OVERLAY_ALLOCATOR_H_
+
+#include <stddef.h>
+
+#include "base/containers/flat_map.h"
+#include "base/macros.h"
+#include "base/threading/thread_checker.h"
+#include "media/base/android/android_overlay.h"
+#include "media/gpu/media_gpu_export.h"
+
+namespace media {
+
+class AVDACodecAllocator;
+class ContentVideoViewOverlayAllocatorTest;
+
+// ContentVideoViewOverlayAllocator lets different instances of CVVOverlay that
+// share the same surface ID to be synchronized with respect to each other.
+// It also manages synchronous surface destruction.
+class MEDIA_GPU_EXPORT ContentVideoViewOverlayAllocator {
+ public:
+ class Client : public AndroidOverlay {
+ public:
+ // Called when the requested SurfaceView becomes available after a call to
+ // AllocateSurface()
+ virtual void OnSurfaceAvailable(bool success) = 0;
+
+ // Called when the allocated surface is being destroyed. This must either
+ // replace the surface with MediaCodec#setSurface, or release the MediaCodec
+ // it's attached to. The client no longer owns the surface and doesn't
+ // need to call DeallocateSurface();
+ virtual void OnSurfaceDestroyed() = 0;
+
+ // Return the surface id of the client's ContentVideoView.
+ virtual int32_t GetSurfaceId() = 0;
+
+ protected:
+ ~Client() override {}
+ };
+
+ static ContentVideoViewOverlayAllocator* GetInstance();
+
+ // Called synchronously when the given surface is being destroyed on the
+ // browser UI thread.
+ void OnSurfaceDestroyed(int32_t surface_id);
+
+ // Returns true if the caller now owns the surface, or false if someone else
+ // owns the surface. |client| will be notified when the surface is available
+ // via OnSurfaceAvailable().
+ bool AllocateSurface(Client* client);
+
+ // Relinquish ownership of the surface or stop waiting for it to be available.
+ // The caller must guarantee that when calling this the surface is either no
+ // longer attached to a MediaCodec, or the MediaCodec it was attached to is
+ // was released with ReleaseMediaCodec().
+ void DeallocateSurface(Client* client);
+
+ private:
+ friend class ContentVideoViewOverlayAllocatorTest;
+
+ ContentVideoViewOverlayAllocator(AVDACodecAllocator* allocator);
+ ~ContentVideoViewOverlayAllocator();
+
+ struct OwnerRecord {
+ Client* owner = nullptr;
+ Client* waiter = nullptr;
+ };
+
+ // Indexed by surface id.
+ using OwnerMap = base::flat_map<int32_t, OwnerRecord>;
+ OwnerMap surface_owners_;
+
+ AVDACodecAllocator* allocator_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(ContentVideoViewOverlayAllocator);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_CONTENT_VIDEO_VIEW_OVERLAY_ALLOCATOR_H_
diff --git a/chromium/media/gpu/android/content_video_view_overlay_allocator_unittest.cc b/chromium/media/gpu/android/content_video_view_overlay_allocator_unittest.cc
new file mode 100644
index 00000000000..977810125aa
--- /dev/null
+++ b/chromium/media/gpu/android/content_video_view_overlay_allocator_unittest.cc
@@ -0,0 +1,155 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/content_video_view_overlay_allocator.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/time/tick_clock.h"
+#include "media/base/surface_manager.h"
+#include "media/gpu/android/fake_codec_allocator.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::AnyNumber;
+using testing::Invoke;
+using testing::Return;
+using testing::StrictMock;
+using testing::_;
+
+namespace media {
+class ContentVideoViewOverlayAllocatorTest : public testing::Test {
+ public:
+ class MockClient
+ : public StrictMock<ContentVideoViewOverlayAllocator::Client> {
+ public:
+ MOCK_METHOD1(ScheduleLayout, void(const gfx::Rect&));
+ MOCK_CONST_METHOD0(GetJavaSurface,
+ const base::android::JavaRef<jobject>&());
+
+ MOCK_METHOD1(OnSurfaceAvailable, void(bool success));
+ MOCK_METHOD0(OnSurfaceDestroyed, void());
+ MOCK_METHOD0(GetSurfaceId, int32_t());
+ };
+
+ ContentVideoViewOverlayAllocatorTest() {}
+
+ ~ContentVideoViewOverlayAllocatorTest() override {}
+
+ protected:
+ void SetUp() override {
+ codec_allocator_ =
+ new FakeCodecAllocator(base::SequencedTaskRunnerHandle::Get());
+ allocator_ = new ContentVideoViewOverlayAllocator(codec_allocator_);
+
+ avda1_ = new MockClient();
+ avda2_ = new MockClient();
+ avda3_ = new MockClient();
+ // Default all |avda*| instances to surface ID 1.
+ SetSurfaceId(avda1_, 1);
+ SetSurfaceId(avda2_, 1);
+ SetSurfaceId(avda3_, 1);
+ }
+
+ void TearDown() override {
+ delete avda3_;
+ delete avda2_;
+ delete avda1_;
+ delete allocator_;
+ delete codec_allocator_;
+ }
+
+ void SetSurfaceId(MockClient* client, int32_t surface_id) {
+ ON_CALL(*client, GetSurfaceId()).WillByDefault(Return(surface_id));
+ EXPECT_CALL(*client, GetSurfaceId()).Times(AnyNumber());
+ }
+
+ protected:
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
+ ContentVideoViewOverlayAllocator* allocator_;
+ FakeCodecAllocator* codec_allocator_;
+
+ MockClient* avda1_;
+ MockClient* avda2_;
+ MockClient* avda3_;
+};
+
+TEST_F(ContentVideoViewOverlayAllocatorTest, AllocatingAnOwnedSurfaceFails) {
+ ASSERT_TRUE(allocator_->AllocateSurface(avda1_));
+ ASSERT_FALSE(allocator_->AllocateSurface(avda2_));
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ LaterWaitersReplaceEarlierWaiters) {
+ allocator_->AllocateSurface(avda1_);
+ allocator_->AllocateSurface(avda2_);
+ EXPECT_CALL(*avda2_, OnSurfaceAvailable(false));
+ allocator_->AllocateSurface(avda3_);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ WaitersBecomeOwnersWhenSurfacesAreReleased) {
+ allocator_->AllocateSurface(avda1_);
+ allocator_->AllocateSurface(avda2_);
+ EXPECT_CALL(*avda2_, OnSurfaceAvailable(true));
+ allocator_->DeallocateSurface(avda1_);
+ // The surface should still be owned.
+ ASSERT_FALSE(allocator_->AllocateSurface(avda1_));
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ DeallocatingUnownedSurfacesIsSafe) {
+ allocator_->DeallocateSurface(avda1_);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ WaitersAreRemovedIfTheyDeallocate) {
+ allocator_->AllocateSurface(avda1_);
+ allocator_->AllocateSurface(avda2_);
+ allocator_->DeallocateSurface(avda2_);
+  // |avda2_| should not receive a notification.
+ EXPECT_CALL(*avda2_, OnSurfaceAvailable(_)).Times(0);
+ allocator_->DeallocateSurface(avda1_);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest, OwnersAreNotifiedOnDestruction) {
+ allocator_->AllocateSurface(avda1_);
+ // Owner is notified for a surface it owns.
+ EXPECT_CALL(*avda1_, OnSurfaceDestroyed());
+ allocator_->OnSurfaceDestroyed(1);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ NonOwnersAreNotNotifiedOnDestruction) {
+ allocator_->AllocateSurface(avda1_);
+ // Not notified for a surface it doesn't own.
+ EXPECT_CALL(*avda1_, OnSurfaceDestroyed()).Times(0);
+ allocator_->OnSurfaceDestroyed(123);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest, WaitersAreNotifiedOnDestruction) {
+ allocator_->AllocateSurface(avda1_);
+ allocator_->AllocateSurface(avda2_);
+ EXPECT_CALL(*avda1_, OnSurfaceDestroyed());
+ EXPECT_CALL(*avda2_, OnSurfaceAvailable(false));
+ allocator_->OnSurfaceDestroyed(1);
+}
+
+TEST_F(ContentVideoViewOverlayAllocatorTest,
+ DeallocatingIsSafeDuringSurfaceDestroyed) {
+ allocator_->AllocateSurface(avda1_);
+ EXPECT_CALL(*avda1_, OnSurfaceDestroyed()).WillOnce(Invoke([=]() {
+ allocator_->DeallocateSurface(avda1_);
+ }));
+ allocator_->OnSurfaceDestroyed(1);
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/fake_android_video_surface_chooser.cc b/chromium/media/gpu/android/fake_android_video_surface_chooser.cc
new file mode 100644
index 00000000000..ec005ffbb50
--- /dev/null
+++ b/chromium/media/gpu/android/fake_android_video_surface_chooser.cc
@@ -0,0 +1,40 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/fake_android_video_surface_chooser.h"
+
+namespace media {
+
+FakeSurfaceChooser::FakeSurfaceChooser() = default;
+FakeSurfaceChooser::~FakeSurfaceChooser() = default;
+
+void FakeSurfaceChooser::SetClientCallbacks(
+ UseOverlayCB use_overlay_cb,
+ UseSurfaceTextureCB use_surface_texture_cb) {
+ MockSetClientCallbacks();
+ use_overlay_cb_ = std::move(use_overlay_cb);
+ use_surface_texture_cb_ = std::move(use_surface_texture_cb);
+}
+
+void FakeSurfaceChooser::UpdateState(
+ base::Optional<AndroidOverlayFactoryCB> factory,
+ const State& new_state) {
+ MockUpdateState();
+ if (factory) {
+ factory_ = std::move(*factory);
+ MockReplaceOverlayFactory(!factory_.is_null());
+ }
+ current_state_ = new_state;
+}
+
+void FakeSurfaceChooser::ProvideSurfaceTexture() {
+ use_surface_texture_cb_.Run();
+}
+
+void FakeSurfaceChooser::ProvideOverlay(
+ std::unique_ptr<AndroidOverlay> overlay) {
+ use_overlay_cb_.Run(std::move(overlay));
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/fake_android_video_surface_chooser.h b/chromium/media/gpu/android/fake_android_video_surface_chooser.h
new file mode 100644
index 00000000000..a7000ca5f57
--- /dev/null
+++ b/chromium/media/gpu/android/fake_android_video_surface_chooser.h
@@ -0,0 +1,49 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_FAKE_ANDROID_VIDEO_SURFACE_CHOOSER_H_
+#define MEDIA_GPU_ANDROID_FAKE_ANDROID_VIDEO_SURFACE_CHOOSER_H_
+
+#include "media/gpu/android/android_video_surface_chooser.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// A fake surface chooser that lets tests choose the surface with
+// ProvideOverlay() and ProvideSurfaceTexture().
+class FakeSurfaceChooser : public AndroidVideoSurfaceChooser {
+ public:
+ FakeSurfaceChooser();
+ ~FakeSurfaceChooser() override;
+
+ // Mocks that are called by the fakes below.
+ MOCK_METHOD0(MockSetClientCallbacks, void());
+ MOCK_METHOD0(MockUpdateState, void());
+
+ // Called by UpdateState if the factory is changed. It is called with true if
+ // and only if the replacement factory isn't null.
+ MOCK_METHOD1(MockReplaceOverlayFactory, void(bool));
+
+ void SetClientCallbacks(UseOverlayCB use_overlay_cb,
+ UseSurfaceTextureCB use_surface_texture_cb) override;
+ void UpdateState(base::Optional<AndroidOverlayFactoryCB> factory,
+ const State& new_state) override;
+
+ // Calls the corresponding callback to choose the surface.
+ void ProvideOverlay(std::unique_ptr<AndroidOverlay> overlay);
+ void ProvideSurfaceTexture();
+
+ UseOverlayCB use_overlay_cb_;
+ UseSurfaceTextureCB use_surface_texture_cb_;
+ AndroidOverlayFactoryCB factory_;
+ State current_state_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FakeSurfaceChooser);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_FAKE_ANDROID_VIDEO_SURFACE_CHOOSER_H_
diff --git a/chromium/media/gpu/android/fake_codec_allocator.cc b/chromium/media/gpu/android/fake_codec_allocator.cc
index d957b7c5268..500957d9026 100644
--- a/chromium/media/gpu/android/fake_codec_allocator.cc
+++ b/chromium/media/gpu/android/fake_codec_allocator.cc
@@ -8,41 +8,40 @@
#include "base/memory/ptr_util.h"
#include "base/memory/weak_ptr.h"
-#include "media/gpu/avda_codec_allocator.h"
+#include "media/base/android/mock_media_codec_bridge.h"
+#include "media/gpu/android/avda_codec_allocator.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
-FakeCodecAllocator::FakeCodecAllocator() = default;
+FakeCodecAllocator::FakeCodecAllocator(
+ scoped_refptr<base::SequencedTaskRunner> task_runner)
+ : testing::NiceMock<AVDACodecAllocator>(
+ base::BindRepeating(&MockMediaCodecBridge::CreateVideoDecoder),
+ task_runner) {}
FakeCodecAllocator::~FakeCodecAllocator() = default;
-bool FakeCodecAllocator::StartThread(AVDACodecAllocatorClient* client) {
- return true;
-}
+void FakeCodecAllocator::StartThread(AVDACodecAllocatorClient* client) {}
void FakeCodecAllocator::StopThread(AVDACodecAllocatorClient* client) {}
std::unique_ptr<MediaCodecBridge> FakeCodecAllocator::CreateMediaCodecSync(
- scoped_refptr<CodecConfig> codec_config) {
- MockCreateMediaCodecSync(codec_config->surface_bundle->overlay.get(),
- codec_config->surface_bundle->surface_texture.get());
-
- CopyCodecAllocParams(codec_config);
+ scoped_refptr<CodecConfig> config) {
+ most_recent_overlay = config->surface_bundle->overlay.get();
+ most_recent_surface_texture = config->surface_bundle->surface_texture.get();
+ MockCreateMediaCodecSync(most_recent_overlay, most_recent_surface_texture);
std::unique_ptr<MockMediaCodecBridge> codec;
- if (allow_sync_creation)
+ if (allow_sync_creation) {
codec = base::MakeUnique<MockMediaCodecBridge>();
-
- if (codec) {
- most_recent_codec_ = codec.get();
- most_recent_codec_destruction_observer_ =
- codec->CreateDestructionObserver();
- most_recent_codec_destruction_observer_->DoNotAllowDestruction();
+ most_recent_codec = codec.get();
+ most_recent_codec_destruction_observer = codec->CreateDestructionObserver();
+ most_recent_codec_destruction_observer->DoNotAllowDestruction();
} else {
- most_recent_codec_ = nullptr;
- most_recent_codec_destruction_observer_ = nullptr;
+ most_recent_codec = nullptr;
+ most_recent_codec_destruction_observer = nullptr;
}
return std::move(codec);
@@ -51,14 +50,16 @@ std::unique_ptr<MediaCodecBridge> FakeCodecAllocator::CreateMediaCodecSync(
void FakeCodecAllocator::CreateMediaCodecAsync(
base::WeakPtr<AVDACodecAllocatorClient> client,
scoped_refptr<CodecConfig> config) {
- // Clear |most_recent_codec_| until somebody calls Provide*CodecAsync().
- most_recent_codec_ = nullptr;
- most_recent_codec_destruction_observer_ = nullptr;
- CopyCodecAllocParams(config);
+ // Clear |most_recent_codec| until somebody calls Provide*CodecAsync().
+ most_recent_codec = nullptr;
+ most_recent_codec_destruction_observer = nullptr;
+ most_recent_overlay = config->surface_bundle->overlay.get();
+ most_recent_surface_texture = config->surface_bundle->surface_texture.get();
+ pending_surface_bundle_ = config->surface_bundle;
client_ = client;
+ codec_creation_pending_ = true;
- MockCreateMediaCodecAsync(most_recent_overlay(),
- most_recent_surface_texture());
+ MockCreateMediaCodecAsync(most_recent_overlay, most_recent_surface_texture);
}
void FakeCodecAllocator::ReleaseMediaCodec(
@@ -68,31 +69,31 @@ void FakeCodecAllocator::ReleaseMediaCodec(
surface_bundle->surface_texture.get());
}
-MockMediaCodecBridge* FakeCodecAllocator::ProvideMockCodecAsync() {
- // There must be a pending codec creation.
- DCHECK(client_);
-
- std::unique_ptr<MockMediaCodecBridge> codec =
- base::MakeUnique<NiceMock<MockMediaCodecBridge>>();
- auto* raw_codec = codec.get();
- most_recent_codec_ = raw_codec;
- most_recent_codec_destruction_observer_ = codec->CreateDestructionObserver();
- client_->OnCodecConfigured(std::move(codec));
+MockMediaCodecBridge* FakeCodecAllocator::ProvideMockCodecAsync(
+ std::unique_ptr<MockMediaCodecBridge> codec) {
+ DCHECK(codec_creation_pending_);
+ codec_creation_pending_ = false;
+
+ if (!client_)
+ return nullptr;
+
+ auto mock_codec = codec ? std::move(codec)
+ : base::MakeUnique<NiceMock<MockMediaCodecBridge>>();
+ auto* raw_codec = mock_codec.get();
+ most_recent_codec = raw_codec;
+ most_recent_codec_destruction_observer =
+ mock_codec->CreateDestructionObserver();
+ client_->OnCodecConfigured(std::move(mock_codec),
+ std::move(pending_surface_bundle_));
return raw_codec;
}
void FakeCodecAllocator::ProvideNullCodecAsync() {
- // There must be a pending codec creation.
- DCHECK(client_);
- most_recent_codec_ = nullptr;
- client_->OnCodecConfigured(nullptr);
-}
-
-void FakeCodecAllocator::CopyCodecAllocParams(
- scoped_refptr<CodecConfig> config) {
- config_ = config;
- most_recent_overlay_ = config->surface_bundle->overlay.get();
- most_recent_surface_texture_ = config->surface_bundle->surface_texture.get();
+ DCHECK(codec_creation_pending_);
+ codec_creation_pending_ = false;
+ most_recent_codec = nullptr;
+ if (client_)
+ client_->OnCodecConfigured(nullptr, std::move(pending_surface_bundle_));
}
} // namespace media
diff --git a/chromium/media/gpu/android/fake_codec_allocator.h b/chromium/media/gpu/android/fake_codec_allocator.h
index 79a380db3c8..8ff6bf29b06 100644
--- a/chromium/media/gpu/android/fake_codec_allocator.h
+++ b/chromium/media/gpu/android/fake_codec_allocator.h
@@ -5,8 +5,8 @@
#include <memory>
#include "media/base/android/mock_media_codec_bridge.h"
-#include "media/gpu/avda_codec_allocator.h"
-#include "media/gpu/avda_surface_bundle.h"
+#include "media/gpu/android/avda_codec_allocator.h"
+#include "media/gpu/android/avda_surface_bundle.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/android/surface_texture.h"
@@ -17,10 +17,10 @@ namespace media {
// and lets you set expecations on the "Mock*" methods.
class FakeCodecAllocator : public testing::NiceMock<AVDACodecAllocator> {
public:
- FakeCodecAllocator();
+ FakeCodecAllocator(scoped_refptr<base::SequencedTaskRunner> task_runner);
~FakeCodecAllocator() override;
- bool StartThread(AVDACodecAllocatorClient* client) override;
+ void StartThread(AVDACodecAllocatorClient* client) override;
void StopThread(AVDACodecAllocatorClient* client) override;
// These are called with some parameters of the codec config by our
@@ -40,62 +40,46 @@ class FakeCodecAllocator : public testing::NiceMock<AVDACodecAllocator> {
SurfaceTextureGLOwner*));
std::unique_ptr<MediaCodecBridge> CreateMediaCodecSync(
- scoped_refptr<CodecConfig> codec_config) override;
+ scoped_refptr<CodecConfig> config) override;
void CreateMediaCodecAsync(base::WeakPtr<AVDACodecAllocatorClient> client,
scoped_refptr<CodecConfig> config) override;
void ReleaseMediaCodec(
std::unique_ptr<MediaCodecBridge> media_codec,
scoped_refptr<AVDASurfaceBundle> surface_bundle) override;
- // Satisfies the pending codec creation with a mock codec and returns a raw
- // pointer to it.
- MockMediaCodecBridge* ProvideMockCodecAsync();
+ // Satisfies the pending codec creation with |codec| if given, or a new
+ // MockMediaCodecBridge if not. Returns a raw pointer to the codec, or nullptr
+ // if the client WeakPtr was invalidated.
+ MockMediaCodecBridge* ProvideMockCodecAsync(
+ std::unique_ptr<MockMediaCodecBridge> codec = nullptr);
// Satisfies the pending codec creation with a null codec.
void ProvideNullCodecAsync();
- // Returns the most recent codec that we provided, which might already have
- // been freed. By default, the destruction observer will fail the test
- // if this happens, unless the expectation is explicitly changed. If you
- // change it, then use this with caution.
- MockMediaCodecBridge* most_recent_codec() { return most_recent_codec_; }
-
- // Returns the destruction observer for the most recent codec. We retain
- // ownership of it.
- DestructionObserver* codec_destruction_observer() {
- return most_recent_codec_destruction_observer_.get();
- }
-
- // Returns the most recent overlay / etc. that we were given during codec
- // allocation (sync or async).
- AndroidOverlay* most_recent_overlay() { return most_recent_overlay_; }
- SurfaceTextureGLOwner* most_recent_surface_texture() {
- return most_recent_surface_texture_;
- }
-
// Most recent codec that we've created via CreateMockCodec, since we have
// to assign ownership. It may be freed already.
- MockMediaCodecBridge* most_recent_codec_;
+ MockMediaCodecBridge* most_recent_codec = nullptr;
- // DestructionObserver for |most_recent_codec_|.
- std::unique_ptr<DestructionObserver> most_recent_codec_destruction_observer_;
+ // The DestructionObserver for |most_recent_codec|.
+ std::unique_ptr<DestructionObserver> most_recent_codec_destruction_observer;
// The most recent overlay provided during codec allocation.
- AndroidOverlay* most_recent_overlay_ = nullptr;
+ AndroidOverlay* most_recent_overlay = nullptr;
// The most recent surface texture provided during codec allocation.
- SurfaceTextureGLOwner* most_recent_surface_texture_ = nullptr;
+ SurfaceTextureGLOwner* most_recent_surface_texture = nullptr;
// Whether CreateMediaCodecSync() is allowed to succeed.
bool allow_sync_creation = true;
private:
- // Saves a reference to |config| and copies out the fields that may
- // get modified by the client.
- void CopyCodecAllocParams(scoped_refptr<CodecConfig> config);
-
+ // Whether CreateMediaCodecAsync() has been called but a codec hasn't been
+ // provided yet.
+ bool codec_creation_pending_ = false;
base::WeakPtr<AVDACodecAllocatorClient> client_;
- scoped_refptr<CodecConfig> config_;
+
+ // The surface bundle of the pending codec creation.
+ scoped_refptr<AVDASurfaceBundle> pending_surface_bundle_;
DISALLOW_COPY_AND_ASSIGN(FakeCodecAllocator);
};
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.cc b/chromium/media/gpu/android/media_codec_video_decoder.cc
index 05da1c6a7af..efea07ab824 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.cc
+++ b/chromium/media/gpu/android/media_codec_video_decoder.cc
@@ -15,9 +15,8 @@
#include "media/base/decoder_buffer.h"
#include "media/base/video_codecs.h"
#include "media/base/video_decoder_config.h"
-#include "media/gpu/android_video_surface_chooser.h"
-#include "media/gpu/avda_codec_allocator.h"
-#include "media/gpu/content_video_view_overlay.h"
+#include "media/gpu/android/android_video_surface_chooser.h"
+#include "media/gpu/android/avda_codec_allocator.h"
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
#include "media/base/android/extract_sps_and_pps.h"
@@ -82,14 +81,6 @@ bool ConfigSupported(const VideoDecoderConfig& config,
} // namespace
-CodecAllocatorAdapter::CodecAllocatorAdapter() = default;
-CodecAllocatorAdapter::~CodecAllocatorAdapter() = default;
-
-void CodecAllocatorAdapter::OnCodecConfigured(
- std::unique_ptr<MediaCodecBridge> media_codec) {
- codec_created_cb.Run(std::move(media_codec));
-}
-
// static
PendingDecode PendingDecode::CreateEos() {
auto nop = [](DecodeStatus s) {};
@@ -103,41 +94,49 @@ PendingDecode::PendingDecode(PendingDecode&& other) = default;
PendingDecode::~PendingDecode() = default;
MediaCodecVideoDecoder::MediaCodecVideoDecoder(
- scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
- base::Callback<gpu::GpuCommandBufferStub*()> get_stub_cb,
+ const gpu::GpuPreferences& gpu_preferences,
VideoFrameFactory::OutputWithReleaseMailboxCB output_cb,
DeviceInfo* device_info,
AVDACodecAllocator* codec_allocator,
std::unique_ptr<AndroidVideoSurfaceChooser> surface_chooser,
+ AndroidOverlayMojoFactoryCB overlay_factory_cb,
+ RequestOverlayInfoCB request_overlay_info_cb,
std::unique_ptr<VideoFrameFactory> video_frame_factory,
std::unique_ptr<service_manager::ServiceContextRef> context_ref)
- : state_(State::kBeforeSurfaceInit),
- lazy_init_pending_(true),
- reset_generation_(0),
- output_cb_(output_cb),
- gpu_task_runner_(gpu_task_runner),
- get_stub_cb_(get_stub_cb),
+ : output_cb_(output_cb),
codec_allocator_(codec_allocator),
+ request_overlay_info_cb_(std::move(request_overlay_info_cb)),
surface_chooser_(std::move(surface_chooser)),
video_frame_factory_(std::move(video_frame_factory)),
+ overlay_factory_cb_(std::move(overlay_factory_cb)),
device_info_(device_info),
+ enable_threaded_texture_mailboxes_(
+ gpu_preferences.enable_threaded_texture_mailboxes),
context_ref_(std::move(context_ref)),
- weak_factory_(this) {
+ weak_factory_(this),
+ codec_allocator_weak_factory_(this) {
DVLOG(2) << __func__;
+ surface_chooser_->SetClientCallbacks(
+ base::Bind(&MediaCodecVideoDecoder::OnSurfaceChosen,
+ weak_factory_.GetWeakPtr()),
+ base::Bind(&MediaCodecVideoDecoder::OnSurfaceChosen,
+ weak_factory_.GetWeakPtr(), nullptr));
}
MediaCodecVideoDecoder::~MediaCodecVideoDecoder() {
DVLOG(2) << __func__;
ReleaseCodec();
- codec_allocator_->StopThread(&codec_allocator_adapter_);
+ codec_allocator_->StopThread(this);
}
void MediaCodecVideoDecoder::Destroy() {
DVLOG(2) << __func__;
// Mojo callbacks require that they're run before destruction.
if (reset_cb_)
- reset_cb_.Run();
- ClearPendingDecodes(DecodeStatus::ABORTED);
+ std::move(reset_cb_).Run();
+ // Cancel callbacks we no longer want.
+ codec_allocator_weak_factory_.InvalidateWeakPtrs();
+ CancelPendingDecodes(DecodeStatus::ABORTED);
StartDrainingCodec(DrainType::kForDestroy);
}
@@ -162,45 +161,30 @@ void MediaCodecVideoDecoder::Initialize(const VideoDecoderConfig& config,
bound_init_cb.Run(false);
return;
}
-
decoder_config_ = config;
- if (first_init) {
- if (!codec_allocator_->StartThread(&codec_allocator_adapter_)) {
- LOG(ERROR) << "Unable to start thread";
- bound_init_cb.Run(false);
- return;
- }
-
- codec_config_ = new CodecConfig();
- codec_config_->codec = config.codec();
- // TODO(watk): Set |requires_secure_codec| correctly using
- // MediaDrmBridgeCdmContext::MediaCryptoReadyCB.
- codec_config_->requires_secure_codec = config.is_encrypted();
- }
-
- codec_config_->initial_expected_coded_size = config.coded_size();
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
- // We pass the SPS and PPS because it makes MediaCodec initialization
- // more reliable (http://crbug.com/649185).
- if (config.codec() == kCodecH264) {
- ExtractSpsAndPps(config.extra_data(), &codec_config_->csd0,
- &codec_config_->csd1);
- }
+ if (config.codec() == kCodecH264)
+ ExtractSpsAndPps(config.extra_data(), &csd0_, &csd1_);
#endif
- // We defer initialization of the Surface and MediaCodec until we
- // receive a Decode() call to avoid consuming those resources in cases where
- // we'll be destructed before getting a Decode(). Failure to initialize those
- // resources will be reported as a decode error on the first decode.
- // TODO(watk): Initialize the CDM before calling init_cb.
+ // Do the rest of the initialization lazily on the first decode.
+ // TODO(watk): Add CDM Support.
+ DCHECK(!cdm_context);
init_cb.Run(true);
}
+void MediaCodecVideoDecoder::OnKeyAdded() {
+ DVLOG(2) << __func__;
+ waiting_for_key_ = false;
+ StartTimer();
+}
+
void MediaCodecVideoDecoder::StartLazyInit() {
DVLOG(2) << __func__;
+ lazy_init_pending_ = false;
+ codec_allocator_->StartThread(this);
video_frame_factory_->Initialize(
- gpu_task_runner_, get_stub_cb_,
base::Bind(&MediaCodecVideoDecoder::OnVideoFrameFactoryInitialized,
weak_factory_.GetWeakPtr()));
}
@@ -209,200 +193,183 @@ void MediaCodecVideoDecoder::OnVideoFrameFactoryInitialized(
scoped_refptr<SurfaceTextureGLOwner> surface_texture) {
DVLOG(2) << __func__;
if (!surface_texture) {
- HandleError();
+ EnterTerminalState(State::kError);
+ return;
+ }
+ surface_texture_bundle_ = new AVDASurfaceBundle(std::move(surface_texture));
+
+ // Overlays are disabled when |enable_threaded_texture_mailboxes| is true
+ // (http://crbug.com/582170).
+ if (enable_threaded_texture_mailboxes_ ||
+ !device_info_->SupportsOverlaySurfaces()) {
+ OnSurfaceChosen(nullptr);
return;
}
- surface_texture_ = std::move(surface_texture);
- InitializeSurfaceChooser();
+
+ // Request OverlayInfo updates. Initialization continues on the first one.
+ bool restart_for_transitions = !device_info_->IsSetOutputSurfaceSupported();
+ std::move(request_overlay_info_cb_)
+ .Run(restart_for_transitions,
+ base::Bind(&MediaCodecVideoDecoder::OnOverlayInfoChanged,
+ weak_factory_.GetWeakPtr()));
}
-void MediaCodecVideoDecoder::SetOverlayInfo(const OverlayInfo& overlay_info) {
+void MediaCodecVideoDecoder::OnOverlayInfoChanged(
+ const OverlayInfo& overlay_info) {
DVLOG(2) << __func__;
+ DCHECK(device_info_->SupportsOverlaySurfaces());
+ DCHECK(!enable_threaded_texture_mailboxes_);
+ if (InTerminalState())
+ return;
+
+ // TODO(watk): Handle frame_hidden like AVDA. Maybe even if in a terminal
+ // state.
+ // TODO(watk): Incorporate the other chooser_state_ signals.
+
bool overlay_changed = !overlay_info_.RefersToSameOverlayAs(overlay_info);
overlay_info_ = overlay_info;
- // Only update surface chooser if it's initialized and the overlay changed.
- if (state_ != State::kBeforeSurfaceInit && overlay_changed)
- surface_chooser_->UpdateState(CreateOverlayFactoryCb(), chooser_state_);
-}
-
-void MediaCodecVideoDecoder::InitializeSurfaceChooser() {
- DVLOG(2) << __func__;
- DCHECK_EQ(state_, State::kBeforeSurfaceInit);
- // Initialize |surface_chooser_| and wait for its decision. Note: the
- // callback may be reentrant.
- surface_chooser_->Initialize(
- base::Bind(&MediaCodecVideoDecoder::OnSurfaceChosen,
- weak_factory_.GetWeakPtr()),
- base::Bind(&MediaCodecVideoDecoder::OnSurfaceChosen,
- weak_factory_.GetWeakPtr(), nullptr),
- CreateOverlayFactoryCb(), chooser_state_);
+ chooser_state_.is_fullscreen = overlay_info_.is_fullscreen;
+ chooser_state_.is_frame_hidden = overlay_info_.is_frame_hidden;
+ surface_chooser_->UpdateState(
+ overlay_changed ? base::make_optional(CreateOverlayFactoryCb())
+ : base::nullopt,
+ chooser_state_);
}
void MediaCodecVideoDecoder::OnSurfaceChosen(
std::unique_ptr<AndroidOverlay> overlay) {
DVLOG(2) << __func__;
+ DCHECK(state_ == State::kInitializing ||
+ device_info_->IsSetOutputSurfaceSupported());
+
if (overlay) {
overlay->AddSurfaceDestroyedCallback(
base::Bind(&MediaCodecVideoDecoder::OnSurfaceDestroyed,
weak_factory_.GetWeakPtr()));
+ target_surface_bundle_ = new AVDASurfaceBundle(std::move(overlay));
+ } else {
+ target_surface_bundle_ = surface_texture_bundle_;
}
// If we were waiting for our first surface during initialization, then
- // proceed to create a codec with the chosen surface.
- if (state_ == State::kBeforeSurfaceInit) {
- codec_config_->surface_bundle =
- overlay ? new AVDASurfaceBundle(std::move(overlay))
- : new AVDASurfaceBundle(surface_texture_);
+ // proceed to create a codec.
+ if (state_ == State::kInitializing) {
+ state_ = State::kRunning;
CreateCodec();
- return;
- }
-
- // If setOutputSurface() is not supported we can't do the transition.
- if (!device_info_->IsSetOutputSurfaceSupported())
- return;
-
- // If we're told to switch to a SurfaceTexture but we're already using a
- // SurfaceTexture, we just cancel any pending transition.
- // (It's not possible for this to choose the overlay we're already using.)
- if (!overlay && codec_config_->surface_bundle &&
- !codec_config_->surface_bundle->overlay) {
- incoming_surface_.reset();
- return;
}
-
- incoming_surface_.emplace(overlay ? new AVDASurfaceBundle(std::move(overlay))
- : new AVDASurfaceBundle(surface_texture_));
}
void MediaCodecVideoDecoder::OnSurfaceDestroyed(AndroidOverlay* overlay) {
DVLOG(2) << __func__;
+ DCHECK_NE(state_, State::kInitializing);
- // If there is a pending transition to the overlay, cancel it.
- if (incoming_surface_ && (*incoming_surface_)->overlay.get() == overlay) {
- incoming_surface_.reset();
- return;
- }
-
- // If we've already stopped using the overlay, ignore it.
- if (!codec_config_->surface_bundle ||
- codec_config_->surface_bundle->overlay.get() != overlay) {
+ // If SetOutputSurface() is not supported we only ever observe destruction of
+ // a single overlay so this must be the one we're using. In this case it's
+ // the responsibility of our consumer to destroy us for surface transitions.
+ // TODO(liberato): This might not be true for L1 / L3, since our caller has
+ // no idea that this has happened. We should unback the frames here.
+ if (!device_info_->IsSetOutputSurfaceSupported()) {
+ EnterTerminalState(State::kSurfaceDestroyed);
return;
}
- // TODO(watk): If setOutputSurface() is available we're supposed to
- // transparently handle surface transitions, however we don't handle them
- // while codec creation is in progress. It should be handled gracefully
- // by allocating a new codec.
- if (state_ == State::kWaitingForCodec) {
- state_ = State::kSurfaceDestroyed;
- return;
- }
+ // Reset the target bundle if it is the one being destroyed.
+ if (target_surface_bundle_->overlay.get() == overlay)
+ target_surface_bundle_ = surface_texture_bundle_;
- if (!device_info_->IsSetOutputSurfaceSupported()) {
- state_ = State::kSurfaceDestroyed;
- ReleaseCodecAndBundle();
- if (drain_type_)
- OnCodecDrained();
- return;
- }
+ // Transition the codec away from the overlay if necessary.
+ if (SurfaceTransitionPending())
+ TransitionToTargetSurface();
+}
- // If there isn't a pending overlay then transition to a SurfaceTexture.
- if (!incoming_surface_)
- incoming_surface_.emplace(new AVDASurfaceBundle(surface_texture_));
- TransitionToIncomingSurface();
+bool MediaCodecVideoDecoder::SurfaceTransitionPending() {
+ return codec_ && codec_->SurfaceBundle() != target_surface_bundle_;
}
-void MediaCodecVideoDecoder::TransitionToIncomingSurface() {
+void MediaCodecVideoDecoder::TransitionToTargetSurface() {
DVLOG(2) << __func__;
- DCHECK(incoming_surface_);
- DCHECK(codec_);
- auto surface_bundle = std::move(*incoming_surface_);
- incoming_surface_.reset();
- if (codec_->SetSurface(surface_bundle->GetJavaSurface()) == MEDIA_CODEC_OK) {
- codec_config_->surface_bundle = std::move(surface_bundle);
- } else {
- ReleaseCodecAndBundle();
- HandleError();
- }
+ DCHECK(SurfaceTransitionPending());
+ DCHECK(device_info_->IsSetOutputSurfaceSupported());
+
+ if (!codec_->SetSurface(target_surface_bundle_))
+ EnterTerminalState(State::kError);
}
void MediaCodecVideoDecoder::CreateCodec() {
DCHECK(!codec_);
- state_ = State::kWaitingForCodec;
- codec_allocator_adapter_.codec_created_cb = base::Bind(
- &MediaCodecVideoDecoder::OnCodecCreated, weak_factory_.GetWeakPtr());
- codec_allocator_->CreateMediaCodecAsync(codec_allocator_adapter_.AsWeakPtr(),
- codec_config_);
-}
-
-void MediaCodecVideoDecoder::OnCodecCreated(
- std::unique_ptr<MediaCodecBridge> codec) {
- DCHECK(state_ == State::kWaitingForCodec ||
- state_ == State::kSurfaceDestroyed);
+ DCHECK(target_surface_bundle_);
+ DCHECK_EQ(state_, State::kRunning);
+
+ scoped_refptr<CodecConfig> config = new CodecConfig();
+ config->codec = decoder_config_.codec();
+ // TODO(watk): Set |requires_secure_codec| correctly using
+ // MediaDrmBridgeCdmContext::MediaCryptoReadyCB.
+ config->requires_secure_codec = decoder_config_.is_encrypted();
+ config->initial_expected_coded_size = decoder_config_.coded_size();
+ config->surface_bundle = target_surface_bundle_;
+ codec_allocator_->CreateMediaCodecAsync(
+ codec_allocator_weak_factory_.GetWeakPtr(), std::move(config));
+}
+
+void MediaCodecVideoDecoder::OnCodecConfigured(
+ std::unique_ptr<MediaCodecBridge> codec,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle) {
DCHECK(!codec_);
- if (codec) {
- codec_ = base::MakeUnique<CodecWrapper>(
- std::move(codec),
- BindToCurrentLoop(base::Bind(&MediaCodecVideoDecoder::ManageTimer,
- weak_factory_.GetWeakPtr(), true)));
- }
+ DCHECK_EQ(state_, State::kRunning);
- // If we entered state kSurfaceDestroyed while we were waiting for
- // the codec, then it's already invalid and we have to drop it.
- if (state_ == State::kSurfaceDestroyed) {
- ReleaseCodecAndBundle();
+ if (!codec) {
+ EnterTerminalState(State::kError);
return;
}
+ codec_ = base::MakeUnique<CodecWrapper>(
+ CodecSurfacePair(std::move(codec), std::move(surface_bundle)),
+ BindToCurrentLoop(base::Bind(&MediaCodecVideoDecoder::StartTimer,
+ weak_factory_.GetWeakPtr())));
- // Handle the failure case after the kSurfaceDestroyed case to avoid
- // transitioning from kSurfaceDestroyed to kError.
- if (!codec_) {
- HandleError();
- return;
- }
+ // If the target surface changed while codec creation was in progress,
+ // transition to it immediately.
+ // Note: this can only happen if we support SetOutputSurface() because if we
+ // don't OnSurfaceDestroyed() cancels codec creations, and
+ // |surface_chooser_| doesn't change the target surface.
+ if (SurfaceTransitionPending())
+ TransitionToTargetSurface();
- state_ = State::kOk;
- ManageTimer(true);
+ StartTimer();
}
void MediaCodecVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) {
- DVLOG(2) << __func__ << buffer->AsHumanReadableString();
+ DVLOG(2) << __func__ << ": " << buffer->AsHumanReadableString();
if (state_ == State::kError) {
decode_cb.Run(DecodeStatus::DECODE_ERROR);
return;
}
pending_decodes_.emplace_back(buffer, std::move(decode_cb));
- if (lazy_init_pending_) {
- lazy_init_pending_ = false;
- StartLazyInit();
+ if (state_ == State::kInitializing) {
+ if (lazy_init_pending_)
+ StartLazyInit();
return;
}
-
PumpCodec(true);
}
void MediaCodecVideoDecoder::FlushCodec() {
DVLOG(2) << __func__;
- if (!codec_ || codec_->IsFlushed() || state_ == State::kSurfaceDestroyed ||
- state_ == State::kError) {
+ if (!codec_ || codec_->IsFlushed())
return;
- }
if (codec_->SupportsFlush(device_info_)) {
DVLOG(2) << "Flushing codec";
- if (!codec_->Flush()) {
- HandleError();
- return;
- }
+ if (!codec_->Flush())
+ EnterTerminalState(State::kError);
} else {
DVLOG(2) << "flush() workaround: creating a new codec";
- // Release the codec and create a new one with the same surface bundle.
- // TODO(watk): We should guarantee that the new codec is created after the
- // current one is released so they aren't attached to the same surface at
- // the same time. This doesn't usually happen because the release and
- // creation usually post to the same thread, but it's not guaranteed.
+ // Release the codec and create a new one.
+ // Note: we may end up with two codecs attached to the same surface if the
+ // release hangs on one thread and create proceeds on another. This will
+ // result in an error, letting the user retry the playback. The alternative
+ // of waiting for the release risks hanging the playback forever.
ReleaseCodec();
CreateCodec();
}
@@ -418,178 +385,196 @@ void MediaCodecVideoDecoder::PumpCodec(bool force_start_timer) {
did_work = true;
} while (did_input || did_output);
- ManageTimer(did_work || force_start_timer);
+ if (did_work || force_start_timer)
+ StartTimer();
+ else
+ StopTimerIfIdle();
}
-void MediaCodecVideoDecoder::ManageTimer(bool start_timer) {
- const base::TimeDelta kPollingPeriod = base::TimeDelta::FromMilliseconds(10);
- const base::TimeDelta kIdleTimeout = base::TimeDelta::FromSeconds(1);
+void MediaCodecVideoDecoder::StartTimer() {
+ DVLOG(4) << __func__;
+ if (state_ != State::kRunning)
+ return;
- if (!idle_timer_ || start_timer)
- idle_timer_ = base::ElapsedTimer();
+ idle_timer_ = base::ElapsedTimer();
- if (!start_timer && idle_timer_->Elapsed() > kIdleTimeout) {
- DVLOG(2) << __func__ << " Stopping timer; idle timeout hit";
+ // Poll at 10ms somewhat arbitrarily.
+ // TODO: Don't poll on new devices; use the callback API.
+ // TODO: Experiment with this number to save power. Since we already pump the
+ // codec in response to receiving a decode and output buffer release, polling
+ // at this frequency is likely overkill in the steady state.
+ const auto kPollingPeriod = base::TimeDelta::FromMilliseconds(10);
+ if (!pump_codec_timer_.IsRunning()) {
+ pump_codec_timer_.Start(FROM_HERE, kPollingPeriod,
+ base::Bind(&MediaCodecVideoDecoder::PumpCodec,
+ base::Unretained(this), false));
+ }
+}
+
+void MediaCodecVideoDecoder::StopTimerIfIdle() {
+ DVLOG(4) << __func__;
+ // Stop the timer if we've been idle for one second. Chosen arbitrarily.
+ const auto kTimeout = base::TimeDelta::FromSeconds(1);
+ if (idle_timer_.Elapsed() > kTimeout) {
+ DVLOG(2) << "Stopping timer; idle timeout hit";
+ pump_codec_timer_.Stop();
// Draining for destroy can no longer proceed if the timer is stopping,
// because no more Decode() calls can be made, so complete it now to avoid
// leaking |this|.
if (drain_type_ == DrainType::kForDestroy)
OnCodecDrained();
- pump_codec_timer_.Stop();
- } else if (!pump_codec_timer_.IsRunning()) {
- pump_codec_timer_.Start(FROM_HERE, kPollingPeriod,
- base::Bind(&MediaCodecVideoDecoder::PumpCodec,
- base::Unretained(this), false));
}
}
bool MediaCodecVideoDecoder::QueueInput() {
DVLOG(4) << __func__;
- if (state_ == State::kError || state_ == State::kWaitingForCodec ||
- state_ == State::kWaitingForKey || state_ == State::kBeforeSurfaceInit ||
- state_ == State::kSurfaceDestroyed) {
+ if (!codec_ || waiting_for_key_)
return false;
- }
- // Flush the codec when there are no unrendered codec buffers, but decodes
- // pending. This lets us avoid unbacking any frames when we flush, but only
- // flush when we have more frames to decode. Without waiting for pending
- // decodes we would create a new codec at the end of playback on devices that
- // need the flush workaround.
+ // If the codec is drained, flush it when there is a pending decode and no
+ // unreleased output buffers. This lets us avoid both unbacking frames when we
+ // flush, and flushing unnecessarily, like at EOS.
if (codec_->IsDrained()) {
- if (!codec_->HasValidCodecOutputBuffers() && !pending_decodes_.empty())
+ if (!codec_->HasUnreleasedOutputBuffers() && !pending_decodes_.empty()) {
FlushCodec();
+ return true;
+ }
return false;
}
if (pending_decodes_.empty())
return false;
- int input_buffer = -1;
- MediaCodecStatus status = codec_->DequeueInputBuffer(&input_buffer);
- if (status == MEDIA_CODEC_ERROR) {
- DVLOG(1) << "DequeueInputBuffer() error";
- HandleError();
- return false;
- } else if (status == MEDIA_CODEC_TRY_AGAIN_LATER) {
- return false;
+ PendingDecode& pending_decode = pending_decodes_.front();
+ auto status = codec_->QueueInputBuffer(*pending_decode.buffer,
+ decoder_config_.encryption_scheme());
+ DVLOG((status == CodecWrapper::QueueStatus::kTryAgainLater ? 3 : 2))
+ << "QueueInput(" << pending_decode.buffer->AsHumanReadableString()
+ << ") status=" << static_cast<int>(status);
+
+ switch (status) {
+ case CodecWrapper::QueueStatus::kOk:
+ break;
+ case CodecWrapper::QueueStatus::kTryAgainLater:
+ return false;
+ case CodecWrapper::QueueStatus::kNoKey:
+ // Retry when a key is added.
+ waiting_for_key_ = true;
+ return false;
+ case CodecWrapper::QueueStatus::kError:
+ EnterTerminalState(State::kError);
+ return false;
}
- DCHECK(status == MEDIA_CODEC_OK);
- DCHECK_GE(input_buffer, 0);
-
- PendingDecode pending_decode = std::move(pending_decodes_.front());
- pending_decodes_.pop_front();
if (pending_decode.buffer->end_of_stream()) {
- DVLOG(2) << ": QueueEOS()";
- codec_->QueueEOS(input_buffer);
+ // The VideoDecoder interface requires that the EOS DecodeCB is called after
+ // all decodes before it are delivered, so we have to save it and call it
+ // when the EOS is dequeued.
+ DCHECK(!eos_decode_cb_);
eos_decode_cb_ = std::move(pending_decode.decode_cb);
- return true;
- }
-
- MediaCodecStatus queue_status = codec_->QueueInputBuffer(
- input_buffer, pending_decode.buffer->data(),
- pending_decode.buffer->data_size(), pending_decode.buffer->timestamp());
- DVLOG(2) << "QueueInputBuffer(pts="
- << pending_decode.buffer->timestamp().InMilliseconds()
- << ") status=" << queue_status;
-
- DCHECK_NE(queue_status, MEDIA_CODEC_NO_KEY)
- << "Encrypted support not yet implemented";
- if (queue_status != MEDIA_CODEC_OK) {
- pending_decode.decode_cb.Run(DecodeStatus::DECODE_ERROR);
- HandleError();
- return false;
+ } else {
+ pending_decode.decode_cb.Run(DecodeStatus::OK);
}
-
- pending_decode.decode_cb.Run(DecodeStatus::OK);
+ pending_decodes_.pop_front();
return true;
}
bool MediaCodecVideoDecoder::DequeueOutput() {
DVLOG(4) << __func__;
- if (state_ == State::kError || state_ == State::kWaitingForCodec ||
- state_ == State::kWaitingForKey || state_ == State::kBeforeSurfaceInit ||
- state_ == State::kSurfaceDestroyed) {
+ if (!codec_ || codec_->IsDrained() || waiting_for_key_)
return false;
- }
- // Transition to the incoming surface when there are no unrendered codec
- // buffers. This is so we can ensure we create the right kind of VideoFrame
- // for the current surface.
- if (incoming_surface_) {
- if (codec_->HasValidCodecOutputBuffers())
- return false;
- TransitionToIncomingSurface();
- return true;
+ // If a surface transition is pending, wait for all outstanding buffers to be
+ // released before doing the transition. This is necessary because the
+ // VideoFrames corresponding to these buffers have metadata flags specific to
+ // the surface type, and changing the surface before they're rendered would
+ // invalidate them.
+ if (SurfaceTransitionPending()) {
+ if (!codec_->HasUnreleasedOutputBuffers()) {
+ TransitionToTargetSurface();
+ return true;
+ }
+ return false;
}
base::TimeDelta presentation_time;
bool eos = false;
std::unique_ptr<CodecOutputBuffer> output_buffer;
- MediaCodecStatus status =
+ auto status =
codec_->DequeueOutputBuffer(&presentation_time, &eos, &output_buffer);
- if (status == MEDIA_CODEC_ERROR) {
- DVLOG(1) << "DequeueOutputBuffer() error";
- HandleError();
- if (drain_type_)
- OnCodecDrained();
- return false;
- } else if (status == MEDIA_CODEC_TRY_AGAIN_LATER) {
- return false;
+ switch (status) {
+ case CodecWrapper::DequeueStatus::kOk:
+ break;
+ case CodecWrapper::DequeueStatus::kTryAgainLater:
+ return false;
+ case CodecWrapper::DequeueStatus::kError:
+ DVLOG(1) << "DequeueOutputBuffer() error";
+ EnterTerminalState(State::kError);
+ return false;
}
- DCHECK(status == MEDIA_CODEC_OK);
+ DVLOG(2) << "DequeueOutputBuffer(): pts="
+ << (eos ? "EOS"
+ : std::to_string(presentation_time.InMilliseconds()));
if (eos) {
- DVLOG(2) << "DequeueOutputBuffer(): EOS";
if (eos_decode_cb_) {
- // Note: It's important to post |eos_decode_cb_| through the gpu task
- // runner to ensure it follows all previous outputs.
- gpu_task_runner_->PostTaskAndReply(
- FROM_HERE, base::Bind(&base::DoNothing),
- base::Bind(base::ResetAndReturn(&eos_decode_cb_), DecodeStatus::OK));
+ // Schedule the EOS DecodeCB to run after all previous frames.
+ video_frame_factory_->RunAfterPendingVideoFrames(
+ base::Bind(&MediaCodecVideoDecoder::RunEosDecodeCb,
+ weak_factory_.GetWeakPtr(), reset_generation_));
}
if (drain_type_)
OnCodecDrained();
- // We don't want to flush the drained codec immediately if !|drain_type_|
- // because it might be backing unrendered frames near EOS. Instead we'll
- // flush it after all outstanding buffers are released.
+ // We don't flush the drained codec immediately because it might be
+ // backing unrendered frames near EOS. It's flushed lazily in QueueInput().
return false;
}
- DVLOG(2) << "DequeueOutputBuffer(): pts="
- << presentation_time.InMilliseconds();
-
// If we're draining for reset or destroy we can discard |output_buffer|
// without rendering it.
if (drain_type_)
return true;
video_frame_factory_->CreateVideoFrame(
- std::move(output_buffer), surface_texture_, presentation_time,
- decoder_config_.natural_size(),
+ std::move(output_buffer),
+ codec_->SurfaceBundle()->overlay
+ ? nullptr
+ : surface_texture_bundle_->surface_texture,
+ presentation_time, decoder_config_.natural_size(),
+ CreatePromotionHintCB(),
base::Bind(&MediaCodecVideoDecoder::ForwardVideoFrame,
weak_factory_.GetWeakPtr(), reset_generation_));
return true;
}
+void MediaCodecVideoDecoder::RunEosDecodeCb(int reset_generation) {
+ // Both of the following conditions are necessary because:
+ // * In an error state, the reset generations will match but |eos_decode_cb_|
+ // will be aborted.
+ // * After a Reset(), the reset generations won't match, but we might already
+ // have a new |eos_decode_cb_| for the new generation.
+ if (reset_generation == reset_generation_ && eos_decode_cb_)
+ std::move(eos_decode_cb_).Run(DecodeStatus::OK);
+}
+
void MediaCodecVideoDecoder::ForwardVideoFrame(
int reset_generation,
VideoFrameFactory::ReleaseMailboxCB release_cb,
const scoped_refptr<VideoFrame>& frame) {
- if (reset_generation_ == reset_generation)
+ if (reset_generation == reset_generation_)
output_cb_.Run(std::move(release_cb), frame);
}
+// Our Reset() provides a slightly stronger guarantee than VideoDecoder does.
+// After |closure| runs:
+// 1) no VideoFrames from before the Reset() will be output, and
+// 2) no DecodeCBs (including EOS) from before the Reset() will be run.
void MediaCodecVideoDecoder::Reset(const base::Closure& closure) {
DVLOG(2) << __func__;
+ DCHECK(!reset_cb_);
reset_generation_++;
- ClearPendingDecodes(DecodeStatus::ABORTED);
- if (!codec_) {
- closure.Run();
- return;
- }
reset_cb_ = std::move(closure);
+ CancelPendingDecodes(DecodeStatus::ABORTED);
StartDrainingCodec(DrainType::kForReset);
}
@@ -603,7 +588,9 @@ void MediaCodecVideoDecoder::StartDrainingCodec(DrainType drain_type) {
// Skip the drain if possible. Only VP8 codecs need draining because
// they can hang in release() or flush() otherwise
// (http://crbug.com/598963).
- if (!codec_ || decoder_config_.codec() != kCodecVP8 || codec_->IsFlushed() ||
+ // TODO(watk): Strongly consider blacklisting VP8 (or specific MediaCodecs)
+ // instead. Draining is responsible for a lot of complexity.
+ if (decoder_config_.codec() != kCodecVP8 || !codec_ || codec_->IsFlushed() ||
codec_->IsDrained()) {
OnCodecDrained();
return;
@@ -612,9 +599,10 @@ void MediaCodecVideoDecoder::StartDrainingCodec(DrainType drain_type) {
// Queue EOS if the codec isn't already processing one.
if (!codec_->IsDraining())
pending_decodes_.push_back(PendingDecode::CreateEos());
+
// We can safely invalidate outstanding buffers for both types of drain, and
// doing so can only make the drain complete quicker.
- codec_->DiscardCodecOutputBuffers();
+ codec_->DiscardOutputBuffers();
PumpCodec(true);
}
@@ -629,45 +617,67 @@ void MediaCodecVideoDecoder::OnCodecDrained() {
return;
}
- base::ResetAndReturn(&reset_cb_).Run();
+ std::move(reset_cb_).Run();
FlushCodec();
}
-void MediaCodecVideoDecoder::HandleError() {
- DVLOG(2) << __func__;
- state_ = State::kError;
- ClearPendingDecodes(DecodeStatus::DECODE_ERROR);
+void MediaCodecVideoDecoder::EnterTerminalState(State state) {
+ DVLOG(2) << __func__ << " " << static_cast<int>(state);
+
+ state_ = state;
+ DCHECK(InTerminalState());
+
+ // Cancel pending codec creation.
+ codec_allocator_weak_factory_.InvalidateWeakPtrs();
+ pump_codec_timer_.Stop();
+ ReleaseCodec();
+ target_surface_bundle_ = nullptr;
+ surface_texture_bundle_ = nullptr;
+ if (state == State::kError)
+ CancelPendingDecodes(DecodeStatus::DECODE_ERROR);
+ if (drain_type_)
+ OnCodecDrained();
+}
+
+bool MediaCodecVideoDecoder::InTerminalState() {
+ return state_ == State::kSurfaceDestroyed || state_ == State::kError;
}
-void MediaCodecVideoDecoder::ClearPendingDecodes(DecodeStatus status) {
+void MediaCodecVideoDecoder::CancelPendingDecodes(DecodeStatus status) {
for (auto& pending_decode : pending_decodes_)
pending_decode.decode_cb.Run(status);
pending_decodes_.clear();
if (eos_decode_cb_)
- base::ResetAndReturn(&eos_decode_cb_).Run(status);
+ std::move(eos_decode_cb_).Run(status);
}
void MediaCodecVideoDecoder::ReleaseCodec() {
if (!codec_)
return;
- codec_allocator_->ReleaseMediaCodec(codec_->TakeCodec(),
- codec_config_->surface_bundle);
+ auto pair = codec_->TakeCodecSurfacePair();
codec_ = nullptr;
-}
-
-void MediaCodecVideoDecoder::ReleaseCodecAndBundle() {
- ReleaseCodec();
- codec_config_->surface_bundle = nullptr;
+ codec_allocator_->ReleaseMediaCodec(std::move(pair.first),
+ std::move(pair.second));
}
AndroidOverlayFactoryCB MediaCodecVideoDecoder::CreateOverlayFactoryCb() {
- if (overlay_info_.HasValidSurfaceId()) {
- return base::Bind(&ContentVideoViewOverlay::Create,
- overlay_info_.surface_id);
- } else if (overlay_info_.HasValidRoutingToken() && overlay_factory_cb_) {
- return base::Bind(overlay_factory_cb_, *overlay_info_.routing_token);
- }
- return AndroidOverlayFactoryCB();
+ DCHECK(!overlay_info_.HasValidSurfaceId());
+ if (!overlay_factory_cb_ || !overlay_info_.HasValidRoutingToken())
+ return AndroidOverlayFactoryCB();
+
+ // This wrapper forwards its arguments and clones a context ref on each call.
+ auto wrapper = [](AndroidOverlayMojoFactoryCB overlay_factory_cb,
+ service_manager::ServiceContextRef* context_ref,
+ base::UnguessableToken routing_token,
+ AndroidOverlayConfig config) {
+ return overlay_factory_cb.Run(context_ref->Clone(),
+ std::move(routing_token), std::move(config));
+ };
+
+ // Pass ownership of a new context ref into the callback.
+ return base::Bind(wrapper, overlay_factory_cb_,
+ base::Owned(context_ref_->Clone().release()),
+ *overlay_info_.routing_token);
}
std::string MediaCodecVideoDecoder::GetDisplayName() const {
@@ -696,4 +706,28 @@ int MediaCodecVideoDecoder::GetMaxDecodeRequests() const {
return 2;
}
+PromotionHintAggregator::NotifyPromotionHintCB
+MediaCodecVideoDecoder::CreatePromotionHintCB() const {
+ // Right now, we don't request promotion hints. This is only used by SOP.
+ // While we could simplify it a bit, this is the general form that we'll use
+ // when handling promotion hints.
+
+ // TODO(liberato): Keeping the surface bundle around as long as the images
+ // doesn't work so well if the surface is destroyed. In that case, the right
+ // thing to do is (a) wait for any codec to quit using the surface, and (b)
+ // clear |overlay| out of the surface bundle.
+ // Having the surface bundle register for destruction callbacks, instead of
+ // us, makes sense.
+ return BindToCurrentLoop(base::BindRepeating(
+ [](scoped_refptr<AVDASurfaceBundle> surface_bundle,
+ PromotionHintAggregator::Hint hint) {
+ // If we're promotable, and we have a surface bundle, then also
+ // position the overlay. We could do this even if the overlay is
+ // not promotable, but it wouldn't have any visible effect.
+ if (hint.is_promotable && surface_bundle)
+ surface_bundle->overlay->ScheduleLayout(hint.screen_rect);
+ },
+ codec_->SurfaceBundle()));
+}
+
} // namespace media
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.h b/chromium/media/gpu/android/media_codec_video_decoder.h
index eb0f89044ef..2175c953fb4 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.h
+++ b/chromium/media/gpu/android/media_codec_video_decoder.h
@@ -5,19 +5,20 @@
#ifndef MEDIA_GPU_ANDROID_MEDIA_CODEC_VIDEO_DECODER_H_
#define MEDIA_GPU_ANDROID_MEDIA_CODEC_VIDEO_DECODER_H_
+#include "base/containers/circular_deque.h"
#include "base/optional.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_checker.h"
#include "base/timer/elapsed_timer.h"
-#include "gpu/ipc/service/gpu_command_buffer_stub.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
#include "media/base/android_overlay_mojo_factory.h"
#include "media/base/overlay_info.h"
#include "media/base/video_decoder.h"
+#include "media/gpu/android/android_video_surface_chooser.h"
+#include "media/gpu/android/avda_codec_allocator.h"
#include "media/gpu/android/codec_wrapper.h"
#include "media/gpu/android/device_info.h"
#include "media/gpu/android/video_frame_factory.h"
-#include "media/gpu/android_video_surface_chooser.h"
-#include "media/gpu/avda_codec_allocator.h"
#include "media/gpu/media_gpu_export.h"
#include "services/service_manager/public/cpp/service_context_ref.h"
@@ -37,31 +38,29 @@ struct PendingDecode {
DISALLOW_COPY_AND_ASSIGN(PendingDecode);
};
-// TODO(watk): Simplify the interface to AVDACodecAllocator.
-struct CodecAllocatorAdapter
- : public AVDACodecAllocatorClient,
- public base::SupportsWeakPtr<CodecAllocatorAdapter> {
- using CodecCreatedCb =
- base::Callback<void(std::unique_ptr<MediaCodecBridge>)>;
-
- CodecAllocatorAdapter();
- ~CodecAllocatorAdapter();
- void OnCodecConfigured(
- std::unique_ptr<MediaCodecBridge> media_codec) override;
-
- CodecCreatedCb codec_created_cb;
-};
-
// An Android VideoDecoder that delegates to MediaCodec.
-class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
+//
+// This decoder initializes in two stages. Low overhead initialization is done
+// eagerly in Initialize(), but the rest is done lazily and is kicked off by the
+// first Decode() (see StartLazyInit()). We do this because there are cases in
+// our media pipeline where we'll initialize a decoder but never use it
+// (e.g., MSE with no media data appended), and if we eagerly allocate decoder
+// resources, like MediaCodecs and SurfaceTextures, we will block other
+// playbacks that need them.
+// TODO: Lazy initialization should be handled at a higher layer of the media
+// stack for both simplicity and cross platform support.
+class MEDIA_GPU_EXPORT MediaCodecVideoDecoder
+ : public VideoDecoder,
+ public AVDACodecAllocatorClient {
public:
MediaCodecVideoDecoder(
- scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
- base::Callback<gpu::GpuCommandBufferStub*()> get_stub_cb,
+ const gpu::GpuPreferences& gpu_preferences,
VideoFrameFactory::OutputWithReleaseMailboxCB output_cb,
DeviceInfo* device_info,
AVDACodecAllocator* codec_allocator,
std::unique_ptr<AndroidVideoSurfaceChooser> surface_chooser,
+ AndroidOverlayMojoFactoryCB overlay_factory_cb,
+ RequestOverlayInfoCB request_overlay_info_cb,
std::unique_ptr<VideoFrameFactory> video_frame_factory,
std::unique_ptr<service_manager::ServiceContextRef> connection_ref);
@@ -79,10 +78,6 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
bool CanReadWithoutStalling() const override;
int GetMaxDecodeRequests() const override;
- // Sets the overlay info to use. This can be called before Initialize() to
- // set the first overlay.
- void SetOverlayInfo(const OverlayInfo& overlay_info);
-
protected:
// Protected for testing.
~MediaCodecVideoDecoder() override;
@@ -93,24 +88,21 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
friend class base::DeleteHelper<MediaCodecVideoDecoder>;
enum class State {
- kOk,
+ // Initializing resources required to create a codec.
+ kInitializing,
+ // Initialization has completed and we're running. This is the only state
+ // in which |codec_| might be non-null. If |codec_| is null, a codec
+ // creation is pending.
+ kRunning,
+ // A fatal error occurred. A terminal state.
kError,
- // We haven't initialized |surface_chooser_| yet, so we don't have a surface
- // or a codec.
- kBeforeSurfaceInit,
- // Set when we are waiting for a codec to be created.
- kWaitingForCodec,
- // Set when we have a codec, but it doesn't yet have a key.
- kWaitingForKey,
- // The output surface was destroyed. This is a terminal state like kError,
- // but it isn't reported as a decode error.
- kSurfaceDestroyed,
+ // The output surface was destroyed, but SetOutputSurface() is not supported
+ // by the device. In this case the consumer is responsible for destroying us
+ // soon, so this is terminal state but not a decode error.
+ kSurfaceDestroyed
};
- enum class DrainType {
- kForReset,
- kForDestroy,
- };
+ enum class DrainType { kForReset, kForDestroy };
// Starts teardown.
void Destroy() override;
@@ -120,30 +112,50 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
void OnVideoFrameFactoryInitialized(
scoped_refptr<SurfaceTextureGLOwner> surface_texture);
- // Initializes |surface_chooser_|.
- void InitializeSurfaceChooser();
+ // Resets |waiting_for_key_| to false, indicating that MediaCodec might now
+ // accept buffers.
+ void OnKeyAdded();
+
+ // Updates |surface_chooser_| with the new overlay info.
+ void OnOverlayInfoChanged(const OverlayInfo& overlay_info);
void OnSurfaceChosen(std::unique_ptr<AndroidOverlay> overlay);
void OnSurfaceDestroyed(AndroidOverlay* overlay);
- // Sets |codecs_|'s output surface to |incoming_surface_|. Releases the codec
- // and both the current and incoming bundles on failure.
- void TransitionToIncomingSurface();
+ // Whether we have a codec and its surface is not equal to
+ // |target_surface_bundle_|.
+ bool SurfaceTransitionPending();
+
+  // Sets |codec_|'s output surface to |target_surface_bundle_|.
+ void TransitionToTargetSurface();
+
+ // Creates a codec asynchronously.
void CreateCodec();
- void OnCodecCreated(std::unique_ptr<MediaCodecBridge> codec);
+
+ // AVDACodecAllocatorClient implementation.
+ void OnCodecConfigured(
+ std::unique_ptr<MediaCodecBridge> media_codec,
+ scoped_refptr<AVDASurfaceBundle> surface_bundle) override;
// Flushes the codec, or if flush() is not supported, releases it and creates
// a new one.
void FlushCodec();
- // Attempts to queue input and dequeue output from the codec. If
- // |force_start_timer| is true the timer idle timeout is reset.
+ // Attempts to queue input and dequeue output from the codec. Calls
+  // StartTimer() even if the codec is idle when |force_start_timer| is true.
void PumpCodec(bool force_start_timer);
- void ManageTimer(bool start_timer);
bool QueueInput();
bool DequeueOutput();
- // Forwards |frame| via |output_cb_| if there hasn't been a Reset() since the
- // frame was created (i.e., |reset_generation| matches |reset_generation_|).
+ // Starts |pump_codec_timer_| if it's not started and resets the idle timeout.
+ void StartTimer();
+ void StopTimerIfIdle();
+
+ // Runs |eos_decode_cb_| if it's valid and |reset_generation| matches
+ // |reset_generation_|.
+ void RunEosDecodeCb(int reset_generation);
+
+ // Forwards |frame| via |output_cb_| if |reset_generation| matches
+ // |reset_generation_|.
void ForwardVideoFrame(int reset_generation,
VideoFrameFactory::ReleaseMailboxCB release_cb,
const scoped_refptr<VideoFrame>& frame);
@@ -152,26 +164,33 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
// if possible.
void StartDrainingCodec(DrainType drain_type);
void OnCodecDrained();
+ void CancelPendingDecodes(DecodeStatus status);
- void ClearPendingDecodes(DecodeStatus status);
-
- // Sets |state_| and runs pending callbacks.
- void HandleError();
+ // Sets |state_| and does common teardown for the terminal states. |state_|
+ // must be either kSurfaceDestroyed or kError.
+ void EnterTerminalState(State state);
+ bool InTerminalState();
// Releases |codec_| if it's not null.
void ReleaseCodec();
- // Calls ReleaseCodec() and drops the ref to its surface bundle.
- void ReleaseCodecAndBundle();
-
// Creates an overlay factory cb based on the value of overlay_info_.
AndroidOverlayFactoryCB CreateOverlayFactoryCb();
- State state_;
+ // Create a callback that will handle promotion hints, and set the overlay
+ // position if required.
+ PromotionHintAggregator::NotifyPromotionHintCB CreatePromotionHintCB() const;
+
+ State state_ = State::kInitializing;
// Whether initialization still needs to be done on the first decode call.
- bool lazy_init_pending_;
- std::deque<PendingDecode> pending_decodes_;
+ bool lazy_init_pending_ = true;
+ base::circular_deque<PendingDecode> pending_decodes_;
+
+ // Whether we've seen MediaCodec return MEDIA_CODEC_NO_KEY indicating that
+ // the corresponding key was not set yet, and MediaCodec will not accept
+ // buffers until OnKeyAdded() is called.
+ bool waiting_for_key_ = false;
// The reason for the current drain operation if any.
base::Optional<DrainType> drain_type_;
@@ -180,7 +199,7 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
base::Closure reset_cb_;
// A generation counter that's incremented every time Reset() is called.
- int reset_generation_;
+ int reset_generation_ = 0;
// The EOS decode cb for an EOS currently being processed by the codec. Called
// when the EOS is output.
@@ -189,30 +208,28 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
VideoFrameFactory::OutputWithReleaseMailboxCB output_cb_;
VideoDecoderConfig decoder_config_;
- // The surface bundle that we're transitioning to, if any.
- base::Optional<scoped_refptr<AVDASurfaceBundle>> incoming_surface_;
+ // Codec specific data (SPS and PPS for H264). Some MediaCodecs initialize
+ // more reliably if we explicitly pass these (http://crbug.com/649185).
+ std::vector<uint8_t> csd0_;
+ std::vector<uint8_t> csd1_;
- // |codec_config_| must not be modified while |state_| is kWaitingForCodec.
- scoped_refptr<CodecConfig> codec_config_;
std::unique_ptr<CodecWrapper> codec_;
- base::Optional<base::ElapsedTimer> idle_timer_;
+ base::ElapsedTimer idle_timer_;
base::RepeatingTimer pump_codec_timer_;
+ AVDACodecAllocator* codec_allocator_;
- scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner_;
- base::Callback<gpu::GpuCommandBufferStub*()> get_stub_cb_;
+ // The current target surface that |codec_| should be rendering to. It
+ // reflects the latest surface choice by |surface_chooser_|. If the codec is
+ // configured with some other surface, then a transition is pending. It's
+ // non-null from the first surface choice.
+ scoped_refptr<AVDASurfaceBundle> target_surface_bundle_;
- // An adapter to let us use AVDACodecAllocator.
- CodecAllocatorAdapter codec_allocator_adapter_;
- AVDACodecAllocator* codec_allocator_;
+ // A SurfaceTexture bundle that is kept for the lifetime of MCVD so that if we
+ // have to synchronously switch surfaces we always have one available.
+ scoped_refptr<AVDASurfaceBundle> surface_texture_bundle_;
- // A SurfaceTexture that is kept for the lifetime of MCVD so that if we have
- // to synchronously switch surfaces we always have one available.
- // TODO: Remove this once onSurfaceDestroyed() callbacks are not delivered
- // via the gpu thread. We can't post a task to the gpu thread to
- // create a SurfaceTexture inside the onSurfaceDestroyed() handler without
- // deadlocking currently, because the gpu thread might be blocked waiting
- // for the SurfaceDestroyed to be handled.
- scoped_refptr<SurfaceTextureGLOwner> surface_texture_;
+ // A callback for requesting overlay info updates.
+ RequestOverlayInfoCB request_overlay_info_cb_;
// The current overlay info, which possibly specifies an overlay to render to.
OverlayInfo overlay_info_;
@@ -231,11 +248,13 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
AndroidOverlayMojoFactoryCB overlay_factory_cb_;
DeviceInfo* device_info_;
+ bool enable_threaded_texture_mailboxes_;
// If we're running in a service context this ref lets us keep the service
// thread alive until destruction.
std::unique_ptr<service_manager::ServiceContextRef> context_ref_;
base::WeakPtrFactory<MediaCodecVideoDecoder> weak_factory_;
+ base::WeakPtrFactory<MediaCodecVideoDecoder> codec_allocator_weak_factory_;
DISALLOW_COPY_AND_ASSIGN(MediaCodecVideoDecoder);
};
diff --git a/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc b/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
index 2e6f201026a..8f88da07a02 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
+++ b/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
@@ -7,17 +7,19 @@
#include "base/run_loop.h"
#include "base/test/mock_callback.h"
#include "base/test/scoped_task_environment.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
#include "media/base/android/media_codec_util.h"
#include "media/base/android/mock_android_overlay.h"
#include "media/base/decoder_buffer.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/test_helpers.h"
+#include "media/gpu/android/android_video_surface_chooser_impl.h"
+#include "media/gpu/android/fake_android_video_surface_chooser.h"
#include "media/gpu/android/fake_codec_allocator.h"
#include "media/gpu/android/mock_device_info.h"
+#include "media/gpu/android/mock_surface_texture_gl_owner.h"
#include "media/gpu/android/video_frame_factory.h"
-#include "media/gpu/android_video_surface_chooser_impl.h"
-#include "media/gpu/fake_android_video_surface_chooser.h"
-#include "media/gpu/mock_surface_texture_gl_owner.h"
#include "testing/gtest/include/gtest/gtest.h"
using testing::InvokeWithoutArgs;
@@ -30,12 +32,15 @@ using testing::_;
namespace media {
namespace {
-void OutputCb(const scoped_refptr<VideoFrame>& frame) {}
+void OutputCb(const scoped_refptr<VideoFrame>&) {}
-void OutputWithReleaseMailboxCb(VideoFrameFactory::ReleaseMailboxCB release_cb,
- const scoped_refptr<VideoFrame>& frame) {}
+void OutputWithReleaseMailboxCb(VideoFrameFactory::ReleaseMailboxCB,
+ const scoped_refptr<VideoFrame>&) {}
-gpu::GpuCommandBufferStub* GetStubCb() {
+std::unique_ptr<AndroidOverlay> CreateAndroidOverlayCb(
+ std::unique_ptr<service_manager::ServiceContextRef>,
+ const base::UnguessableToken&,
+ AndroidOverlayConfig) {
return nullptr;
}
@@ -45,32 +50,49 @@ struct DestructionObservableMCVD : public DestructionObservable,
using MediaCodecVideoDecoder::MediaCodecVideoDecoder;
};
+class MockServiceContextRef : public service_manager::ServiceContextRef {
+ public:
+ std::unique_ptr<ServiceContextRef> Clone() override {
+ return base::MakeUnique<MockServiceContextRef>();
+ }
+};
+
} // namespace
class MockVideoFrameFactory : public VideoFrameFactory {
public:
- MOCK_METHOD3(Initialize,
- void(scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
- GetStubCb get_stub_cb,
- InitCb init_cb));
- MOCK_METHOD5(MockCreateVideoFrame,
- void(CodecOutputBuffer* raw_output_buffer,
- scoped_refptr<SurfaceTextureGLOwner> surface_texture,
- base::TimeDelta timestamp,
- gfx::Size natural_size,
- OutputWithReleaseMailboxCB output_cb));
-
- void CreateVideoFrame(std::unique_ptr<CodecOutputBuffer> output_buffer,
- scoped_refptr<SurfaceTextureGLOwner> surface_texture,
- base::TimeDelta timestamp,
- gfx::Size natural_size,
- OutputWithReleaseMailboxCB output_cb) override {
+ MOCK_METHOD1(Initialize, void(InitCb init_cb));
+ MOCK_METHOD6(
+ MockCreateVideoFrame,
+ void(CodecOutputBuffer* raw_output_buffer,
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture,
+ base::TimeDelta timestamp,
+ gfx::Size natural_size,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
+ OutputWithReleaseMailboxCB output_cb));
+ MOCK_METHOD1(MockRunAfterPendingVideoFrames,
+ void(base::OnceClosure* closure));
+ MOCK_METHOD0(CancelPendingCallbacks, void());
+
+ void CreateVideoFrame(
+ std::unique_ptr<CodecOutputBuffer> output_buffer,
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture,
+ base::TimeDelta timestamp,
+ gfx::Size natural_size,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
+ OutputWithReleaseMailboxCB output_cb) override {
MockCreateVideoFrame(output_buffer.get(), surface_texture, timestamp,
- natural_size, output_cb);
+ natural_size, promotion_hint_cb, output_cb);
last_output_buffer_ = std::move(output_buffer);
}
+ void RunAfterPendingVideoFrames(base::OnceClosure closure) override {
+ last_closure_ = std::move(closure);
+ MockRunAfterPendingVideoFrames(&last_closure_);
+ }
+
std::unique_ptr<CodecOutputBuffer> last_output_buffer_;
+ base::OnceClosure last_closure_;
};
class MediaCodecVideoDecoderTest : public testing::Test {
@@ -80,27 +102,41 @@ class MediaCodecVideoDecoderTest : public testing::Test {
void SetUp() override {
uint8_t data = 0;
fake_decoder_buffer_ = DecoderBuffer::CopyFrom(&data, 1);
- codec_allocator_ = base::MakeUnique<FakeCodecAllocator>();
+ codec_allocator_ = base::MakeUnique<FakeCodecAllocator>(
+ base::ThreadTaskRunnerHandle::Get());
device_info_ = base::MakeUnique<NiceMock<MockDeviceInfo>>();
+ }
+
+ void TearDown() override {
+ // MCVD calls DeleteSoon() on itself, so we have to run a RunLoop.
+ mcvd_.reset();
+ base::RunLoop().RunUntilIdle();
+ }
+
+ void CreateMcvd() {
auto surface_chooser = base::MakeUnique<NiceMock<FakeSurfaceChooser>>();
surface_chooser_ = surface_chooser.get();
- auto surface_texture = make_scoped_refptr(
- new NiceMock<MockSurfaceTextureGLOwner>(0, nullptr, nullptr));
+ auto surface_texture =
+ base::MakeRefCounted<NiceMock<MockSurfaceTextureGLOwner>>(0, nullptr,
+ nullptr);
surface_texture_ = surface_texture.get();
auto video_frame_factory =
base::MakeUnique<NiceMock<MockVideoFrameFactory>>();
video_frame_factory_ = video_frame_factory.get();
// Set up VFF to pass |surface_texture_| via its InitCb.
- ON_CALL(*video_frame_factory_, Initialize(_, _, _))
- .WillByDefault(RunCallback<2>(surface_texture));
+ ON_CALL(*video_frame_factory_, Initialize(_))
+ .WillByDefault(RunCallback<0>(surface_texture));
auto* observable_mcvd = new DestructionObservableMCVD(
- base::ThreadTaskRunnerHandle::Get(), base::Bind(&GetStubCb),
- base::Bind(&OutputWithReleaseMailboxCb), device_info_.get(),
- codec_allocator_.get(), std::move(surface_chooser),
- std::move(video_frame_factory), nullptr);
+ gpu_preferences_, base::Bind(&OutputWithReleaseMailboxCb),
+ device_info_.get(), codec_allocator_.get(), std::move(surface_chooser),
+ base::Bind(&CreateAndroidOverlayCb),
+ base::Bind(&MediaCodecVideoDecoderTest::RequestOverlayInfoCb,
+ base::Unretained(this)),
+ std::move(video_frame_factory),
+ base::MakeUnique<MockServiceContextRef>());
mcvd_.reset(observable_mcvd);
mcvd_raw_ = observable_mcvd;
destruction_observer_ = observable_mcvd->CreateDestructionObserver();
@@ -108,16 +144,12 @@ class MediaCodecVideoDecoderTest : public testing::Test {
destruction_observer_->ExpectDestruction();
}
- void TearDown() override {
- // MCVD calls DeleteSoon() on itself, so we have to run a RunLoop.
- mcvd_.reset();
- base::RunLoop().RunUntilIdle();
- }
-
// Just call Initialize(). MCVD will be waiting for a call to Decode() before
// continuining initialization.
bool Initialize(
VideoDecoderConfig config = TestVideoConfig::Large(kCodecH264)) {
+ if (!mcvd_)
+ CreateMcvd();
bool result = false;
auto init_cb = [](bool* result_out, bool result) { *result_out = result; };
mcvd_->Initialize(config, false, nullptr, base::Bind(init_cb, &result),
@@ -132,6 +164,9 @@ class MediaCodecVideoDecoderTest : public testing::Test {
VideoDecoderConfig config = TestVideoConfig::Large(kCodecH264)) {
Initialize(config);
mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
+ OverlayInfo info;
+ info.routing_token = base::UnguessableToken::Deserialize(1, 2);
+ provide_overlay_info_cb_.Run(info);
auto overlay_ptr = base::MakeUnique<MockAndroidOverlay>();
auto* overlay = overlay_ptr.get();
surface_chooser_->ProvideOverlay(std::move(overlay_ptr));
@@ -144,6 +179,7 @@ class MediaCodecVideoDecoderTest : public testing::Test {
VideoDecoderConfig config = TestVideoConfig::Large(kCodecH264)) {
Initialize(config);
mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
+ provide_overlay_info_cb_.Run(OverlayInfo());
surface_chooser_->ProvideSurfaceTexture();
}
@@ -160,6 +196,13 @@ class MediaCodecVideoDecoderTest : public testing::Test {
// it can be called after |mcvd_| is reset.
void PumpCodec() { mcvd_raw_->PumpCodec(false); }
+ void RequestOverlayInfoCb(
+ bool restart_for_transitions,
+ const ProvideOverlayInfoCB& provide_overlay_info_cb) {
+ restart_for_transitions_ = restart_for_transitions;
+ provide_overlay_info_cb_ = provide_overlay_info_cb;
+ }
+
protected:
base::test::ScopedTaskEnvironment scoped_task_environment_;
scoped_refptr<DecoderBuffer> fake_decoder_buffer_;
@@ -170,6 +213,9 @@ class MediaCodecVideoDecoderTest : public testing::Test {
MockVideoFrameFactory* video_frame_factory_;
NiceMock<base::MockCallback<VideoDecoder::DecodeCB>> decode_cb_;
std::unique_ptr<DestructionObserver> destruction_observer_;
+ ProvideOverlayInfoCB provide_overlay_info_cb_;
+ bool restart_for_transitions_;
+ gpu::GpuPreferences gpu_preferences_;
// |mcvd_raw_| lets us call PumpCodec() even after |mcvd_| is dropped, for
// testing the teardown path.
@@ -177,10 +223,6 @@ class MediaCodecVideoDecoderTest : public testing::Test {
std::unique_ptr<MediaCodecVideoDecoder> mcvd_;
};
-TEST_F(MediaCodecVideoDecoderTest, DestructBeforeInitWorks) {
- // Do nothing.
-}
-
TEST_F(MediaCodecVideoDecoderTest, UnknownCodecIsRejected) {
ASSERT_FALSE(Initialize(TestVideoConfig::Invalid()));
}
@@ -195,37 +237,73 @@ TEST_F(MediaCodecVideoDecoderTest, SmallVp8IsRejected) {
}
TEST_F(MediaCodecVideoDecoderTest, InitializeDoesntInitSurfaceOrCodec) {
- EXPECT_CALL(*video_frame_factory_, Initialize(_, _, _)).Times(0);
- EXPECT_CALL(*surface_chooser_, MockInitialize()).Times(0);
+ CreateMcvd();
+ EXPECT_CALL(*video_frame_factory_, Initialize(_)).Times(0);
+ EXPECT_CALL(*surface_chooser_, MockUpdateState()).Times(0);
EXPECT_CALL(*codec_allocator_, MockCreateMediaCodecAsync(_, _)).Times(0);
Initialize();
}
TEST_F(MediaCodecVideoDecoderTest, FirstDecodeTriggersFrameFactoryInit) {
Initialize();
- EXPECT_CALL(*video_frame_factory_, Initialize(_, _, _));
+ EXPECT_CALL(*video_frame_factory_, Initialize(_));
mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
}
-TEST_F(MediaCodecVideoDecoderTest, FirstDecodeTriggersSurfaceChooserInit) {
+TEST_F(MediaCodecVideoDecoderTest,
+ FirstDecodeTriggersOverlayInfoRequestIfSupported) {
+ Initialize();
+ // Requesting overlay info sets this cb.
+ ASSERT_FALSE(provide_overlay_info_cb_);
+ mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
+ ASSERT_TRUE(provide_overlay_info_cb_);
+}
+
+TEST_F(MediaCodecVideoDecoderTest,
+ OverlayInfoIsNotRequestedIfOverlaysNotSupported) {
+ Initialize();
+ ON_CALL(*device_info_, SupportsOverlaySurfaces())
+ .WillByDefault(Return(false));
+ mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
+ ASSERT_FALSE(provide_overlay_info_cb_);
+}
+
+TEST_F(MediaCodecVideoDecoderTest, RestartForOverlayTransitionsFlagIsCorrect) {
+ Initialize();
+ ON_CALL(*device_info_, IsSetOutputSurfaceSupported())
+ .WillByDefault(Return(true));
+ mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
+ ASSERT_FALSE(restart_for_transitions_);
+}
+
+TEST_F(MediaCodecVideoDecoderTest,
+ OverlayInfoIsNotRequestedIfThreadedTextureMailboxesEnabled) {
+ gpu_preferences_.enable_threaded_texture_mailboxes = true;
Initialize();
- EXPECT_CALL(*surface_chooser_, MockInitialize());
mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
+ ASSERT_FALSE(provide_overlay_info_cb_);
+}
+
+TEST_F(MediaCodecVideoDecoderTest, OverlayInfoDuringInitUpdatesSurfaceChooser) {
+ InitializeWithSurfaceTexture_OneDecodePending();
+ EXPECT_CALL(*surface_chooser_, MockUpdateState());
+ provide_overlay_info_cb_.Run(OverlayInfo());
}
TEST_F(MediaCodecVideoDecoderTest, CodecIsCreatedAfterSurfaceChosen) {
Initialize();
mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
+ provide_overlay_info_cb_.Run(OverlayInfo());
EXPECT_CALL(*codec_allocator_, MockCreateMediaCodecAsync(_, NotNull()));
surface_chooser_->ProvideSurfaceTexture();
}
TEST_F(MediaCodecVideoDecoderTest, FrameFactoryInitFailureIsAnError) {
Initialize();
- ON_CALL(*video_frame_factory_, Initialize(_, _, _))
- .WillByDefault(RunCallback<2>(nullptr));
+ ON_CALL(*video_frame_factory_, Initialize(_))
+ .WillByDefault(RunCallback<0>(nullptr));
EXPECT_CALL(decode_cb_, Run(DecodeStatus::DECODE_ERROR)).Times(1);
- EXPECT_CALL(*surface_chooser_, MockInitialize()).Times(0);
+ EXPECT_CALL(*surface_chooser_, MockUpdateState()).Times(0);
mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
}
@@ -237,6 +315,14 @@ TEST_F(MediaCodecVideoDecoderTest, CodecCreationFailureIsAnError) {
codec_allocator_->ProvideNullCodecAsync();
}
+TEST_F(MediaCodecVideoDecoderTest, CodecFailuresAreAnError) {
+ auto* codec = InitializeFully_OneDecodePending();
+ EXPECT_CALL(*codec, DequeueInputBuffer(_, _))
+ .WillOnce(Return(MEDIA_CODEC_ERROR));
+ EXPECT_CALL(decode_cb_, Run(DecodeStatus::DECODE_ERROR));
+ PumpCodec();
+}
+
TEST_F(MediaCodecVideoDecoderTest, AfterInitCompletesTheCodecIsPolled) {
auto* codec = InitializeFully_OneDecodePending();
// Run a RunLoop until the first time the codec is polled for an available
@@ -255,55 +341,39 @@ TEST_F(MediaCodecVideoDecoderTest, CodecIsReleasedOnDestruction) {
EXPECT_CALL(*codec_allocator_, MockReleaseMediaCodec(codec, _, _));
}
-TEST_F(MediaCodecVideoDecoderTest,
- SurfaceChooserNotInitializedWithOverlayFactory) {
+TEST_F(MediaCodecVideoDecoderTest, SurfaceChooserIsUpdatedOnOverlayChanges) {
InitializeWithSurfaceTexture_OneDecodePending();
- // The surface chooser should not have an overlay factory because
- // SetOverlayInfo() was not called before it was initialized.
- ASSERT_FALSE(surface_chooser_->factory_);
-}
-TEST_F(MediaCodecVideoDecoderTest,
- SurfaceChooserInitializedWithOverlayFactory) {
- Initialize();
+ EXPECT_CALL(*surface_chooser_, MockReplaceOverlayFactory(_)).Times(2);
OverlayInfo info;
- info.surface_id = 123;
- mcvd_->SetOverlayInfo(info);
- mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
- // The surface chooser should have an overlay factory because SetOverlayInfo()
- // was called before it was initialized.
+ info.routing_token = base::UnguessableToken::Deserialize(1, 2);
+ provide_overlay_info_cb_.Run(info);
ASSERT_TRUE(surface_chooser_->factory_);
-}
-
-TEST_F(MediaCodecVideoDecoderTest, SetOverlayInfoIsValidBeforeInitialize) {
- OverlayInfo info;
- info.surface_id = 123;
- mcvd_->SetOverlayInfo(info);
- Initialize();
- mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
+ info.routing_token = base::UnguessableToken::Deserialize(3, 4);
+ provide_overlay_info_cb_.Run(info);
ASSERT_TRUE(surface_chooser_->factory_);
}
-TEST_F(MediaCodecVideoDecoderTest, SetOverlayInfoReplacesTheOverlayFactory) {
- InitializeWithOverlay_OneDecodePending();
+TEST_F(MediaCodecVideoDecoderTest, OverlayInfoUpdatesAreIgnoredInStateError) {
+ InitializeWithSurfaceTexture_OneDecodePending();
+ // Enter the error state.
+ codec_allocator_->ProvideNullCodecAsync();
- EXPECT_CALL(*surface_chooser_, MockReplaceOverlayFactory(_)).Times(2);
+ EXPECT_CALL(*surface_chooser_, MockUpdateState()).Times(0);
OverlayInfo info;
- info.surface_id = 123;
- mcvd_->SetOverlayInfo(info);
- info.surface_id = 456;
- mcvd_->SetOverlayInfo(info);
+ info.routing_token = base::UnguessableToken::Deserialize(1, 2);
+ provide_overlay_info_cb_.Run(info);
}
-TEST_F(MediaCodecVideoDecoderTest, DuplicateSetOverlayInfosAreIgnored) {
- InitializeWithOverlay_OneDecodePending();
+TEST_F(MediaCodecVideoDecoderTest, DuplicateOverlayInfoUpdatesAreIgnored) {
+ InitializeWithSurfaceTexture_OneDecodePending();
- // The second SetOverlayInfo() should be ignored.
+ // The second overlay info update should be ignored.
EXPECT_CALL(*surface_chooser_, MockReplaceOverlayFactory(_)).Times(1);
OverlayInfo info;
- info.surface_id = 123;
- mcvd_->SetOverlayInfo(info);
- mcvd_->SetOverlayInfo(info);
+ info.routing_token = base::UnguessableToken::Deserialize(1, 2);
+ provide_overlay_info_cb_.Run(info);
+ provide_overlay_info_cb_.Run(info);
}
TEST_F(MediaCodecVideoDecoderTest, CodecIsCreatedWithChosenOverlay) {
@@ -315,15 +385,27 @@ TEST_F(MediaCodecVideoDecoderTest, CodecIsCreatedWithChosenOverlay) {
}
TEST_F(MediaCodecVideoDecoderTest,
- SurfaceDestroyedBeforeCodecCreationDropsCodec) {
+ CodecCreationWeakPtrIsInvalidatedBySurfaceDestroyed) {
auto* overlay = InitializeWithOverlay_OneDecodePending();
+ ON_CALL(*device_info_, IsSetOutputSurfaceSupported())
+ .WillByDefault(Return(false));
overlay->OnSurfaceDestroyed();
- // The codec is dropped as soon as it's ready.
- EXPECT_CALL(*codec_allocator_, MockReleaseMediaCodec(_, _, _));
- codec_allocator_->ProvideMockCodecAsync();
- // Verify expectations before we delete the MCVD.
- testing::Mock::VerifyAndClearExpectations(codec_allocator_.get());
+ // MCVD should invalidate its CodecAllocatorClient WeakPtr so that it doesn't
+ // receive the codec after surface destroyed. FakeCodecAllocator returns
+ // nullptr if the client pointer was invalidated.
+ ASSERT_FALSE(codec_allocator_->ProvideMockCodecAsync());
+}
+
+TEST_F(MediaCodecVideoDecoderTest, SurfaceChangedWhileCodecCreationPending) {
+ auto* overlay = InitializeWithOverlay_OneDecodePending();
+ overlay->OnSurfaceDestroyed();
+ auto codec = base::MakeUnique<NiceMock<MockMediaCodecBridge>>();
+
+ // SetSurface() is called as soon as the codec is created to switch away from
+ // the destroyed surface.
+ EXPECT_CALL(*codec, SetSurface(_)).WillOnce(Return(true));
+ codec_allocator_->ProvideMockCodecAsync(std::move(codec));
}
TEST_F(MediaCodecVideoDecoderTest, SurfaceDestroyedDoesSyncSurfaceTransition) {
@@ -406,18 +488,6 @@ TEST_F(MediaCodecVideoDecoderTest, TransitionToSameSurfaceIsIgnored) {
}
TEST_F(MediaCodecVideoDecoderTest,
- SurfaceTransitionsAreIgnoredIfSetSurfaceIsNotSupported) {
- InitializeWithSurfaceTexture_OneDecodePending();
- auto* codec = codec_allocator_->ProvideMockCodecAsync();
-
- EXPECT_CALL(*device_info_, IsSetOutputSurfaceSupported())
- .WillRepeatedly(Return(false));
- EXPECT_CALL(*codec, SetSurface(_)).Times(0);
- surface_chooser_->ProvideSurfaceTexture();
- mcvd_->Decode(fake_decoder_buffer_, decode_cb_.Get());
-}
-
-TEST_F(MediaCodecVideoDecoderTest,
ResetBeforeCodecInitializedSucceedsImmediately) {
InitializeWithSurfaceTexture_OneDecodePending();
base::MockCallback<base::Closure> reset_cb;
@@ -553,12 +623,29 @@ TEST_F(MediaCodecVideoDecoderTest, EosDecodeCbIsRunAfterEosIsDequeued) {
codec->AcceptOneInput(MockMediaCodecBridge::kEos);
PumpCodec();
+ // On dequeueing EOS, MCVD will post a closure to run eos_decode_cb after
+ // pending video frames.
+ EXPECT_CALL(*video_frame_factory_, MockRunAfterPendingVideoFrames(_));
codec->ProduceOneOutput(MockMediaCodecBridge::kEos);
PumpCodec();
- // eos_codec_cb is posted to the gpu thread, but in the tests the MCVD thread
- // and gpu thread are the same so it will be posted to this thread.
- EXPECT_CALL(eos_decode_cb, Run(_));
- base::RunLoop().RunUntilIdle();
+
+ EXPECT_CALL(eos_decode_cb, Run(DecodeStatus::OK));
+ std::move(video_frame_factory_->last_closure_).Run();
+}
+
+TEST_F(MediaCodecVideoDecoderTest, TeardownBeforeInitWorks) {
+ // Since we assert that MCVD is destructed by default, this test verifies that
+ // MCVD is destructed safely before Initialize().
+}
+
+TEST_F(MediaCodecVideoDecoderTest, TeardownInvalidatesCodecCreationWeakPtr) {
+ InitializeWithSurfaceTexture_OneDecodePending();
+ destruction_observer_->DoNotAllowDestruction();
+ mcvd_.reset();
+ // DeleteSoon() is now pending. Ensure it's safe if the codec creation
+ // completes before it runs.
+ ASSERT_FALSE(codec_allocator_->ProvideMockCodecAsync());
+ destruction_observer_->ExpectDestruction();
}
TEST_F(MediaCodecVideoDecoderTest, TeardownDoesNotDrainFlushedCodecs) {
diff --git a/chromium/media/gpu/android/mock_surface_texture_gl_owner.cc b/chromium/media/gpu/android/mock_surface_texture_gl_owner.cc
new file mode 100644
index 00000000000..54113058c14
--- /dev/null
+++ b/chromium/media/gpu/android/mock_surface_texture_gl_owner.cc
@@ -0,0 +1,39 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/mock_surface_texture_gl_owner.h"
+
+namespace media {
+
+using testing::Invoke;
+using testing::Return;
+
+MockSurfaceTextureGLOwner::MockSurfaceTextureGLOwner(
+ GLuint fake_texture_id,
+ gl::GLContext* fake_context,
+ gl::GLSurface* fake_surface)
+ : fake_texture_id(fake_texture_id),
+ fake_context(fake_context),
+ fake_surface(fake_surface),
+ expecting_frame_available(false) {
+ ON_CALL(*this, GetTextureId()).WillByDefault(Return(fake_texture_id));
+ ON_CALL(*this, GetContext()).WillByDefault(Return(fake_context));
+ ON_CALL(*this, GetSurface()).WillByDefault(Return(fake_surface));
+ ON_CALL(*this, SetReleaseTimeToNow())
+ .WillByDefault(
+ Invoke(this, &MockSurfaceTextureGLOwner::FakeSetReleaseTimeToNow));
+ ON_CALL(*this, IgnorePendingRelease())
+ .WillByDefault(
+ Invoke(this, &MockSurfaceTextureGLOwner::FakeIgnorePendingRelease));
+ ON_CALL(*this, IsExpectingFrameAvailable())
+ .WillByDefault(Invoke(
+ this, &MockSurfaceTextureGLOwner::FakeIsExpectingFrameAvailable));
+ ON_CALL(*this, WaitForFrameAvailable())
+ .WillByDefault(
+ Invoke(this, &MockSurfaceTextureGLOwner::FakeWaitForFrameAvailable));
+}
+
+MockSurfaceTextureGLOwner::~MockSurfaceTextureGLOwner() = default;
+
+} // namespace media
diff --git a/chromium/media/gpu/android/mock_surface_texture_gl_owner.h b/chromium/media/gpu/android/mock_surface_texture_gl_owner.h
new file mode 100644
index 00000000000..f43efe739f6
--- /dev/null
+++ b/chromium/media/gpu/android/mock_surface_texture_gl_owner.h
@@ -0,0 +1,53 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_MOCK_SURFACE_TEXTURE_GL_OWNER_H_
+#define MEDIA_GPU_ANDROID_MOCK_SURFACE_TEXTURE_GL_OWNER_H_
+
+#include "media/gpu/android/surface_texture_gl_owner.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_surface.h"
+
+namespace media {
+
+// This is a mock with a small amount of fake functionality too.
+class MockSurfaceTextureGLOwner : public SurfaceTextureGLOwner {
+ public:
+ MockSurfaceTextureGLOwner(GLuint fake_texture_id,
+ gl::GLContext* fake_context,
+ gl::GLSurface* fake_surface);
+
+ MOCK_CONST_METHOD0(GetTextureId, GLuint());
+ MOCK_CONST_METHOD0(GetContext, gl::GLContext*());
+ MOCK_CONST_METHOD0(GetSurface, gl::GLSurface*());
+ MOCK_CONST_METHOD0(CreateJavaSurface, gl::ScopedJavaSurface());
+ MOCK_METHOD0(UpdateTexImage, void());
+ MOCK_METHOD1(GetTransformMatrix, void(float mtx[16]));
+ MOCK_METHOD0(ReleaseBackBuffers, void());
+ MOCK_METHOD0(SetReleaseTimeToNow, void());
+ MOCK_METHOD0(IgnorePendingRelease, void());
+ MOCK_METHOD0(IsExpectingFrameAvailable, bool());
+ MOCK_METHOD0(WaitForFrameAvailable, void());
+
+ // Fake implementations that the mocks will call by default.
+ void FakeSetReleaseTimeToNow() { expecting_frame_available = true; }
+ void FakeIgnorePendingRelease() { expecting_frame_available = false; }
+ bool FakeIsExpectingFrameAvailable() { return expecting_frame_available; }
+ void FakeWaitForFrameAvailable() { expecting_frame_available = false; }
+
+ GLuint fake_texture_id;
+ gl::GLContext* fake_context;
+ gl::GLSurface* fake_surface;
+ bool expecting_frame_available;
+
+ protected:
+ ~MockSurfaceTextureGLOwner();
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_MOCK_SURFACE_TEXTURE_GL_OWNER_H_
diff --git a/chromium/media/gpu/android/promotion_hint_aggregator.h b/chromium/media/gpu/android/promotion_hint_aggregator.h
index 9fd1e4399b4..c01cbb49722 100644
--- a/chromium/media/gpu/android/promotion_hint_aggregator.h
+++ b/chromium/media/gpu/android/promotion_hint_aggregator.h
@@ -9,6 +9,7 @@
#include "base/callback.h"
#include "base/macros.h"
#include "media/gpu/media_gpu_export.h"
+#include "ui/gfx/geometry/rect.h"
namespace media {
@@ -20,14 +21,19 @@ namespace media {
class MEDIA_GPU_EXPORT PromotionHintAggregator {
public:
struct Hint {
- int x = 0;
- int y = 0;
- int width = 0;
- int height = 0;
+ Hint(const gfx::Rect& _screen_rect, bool _is_promotable)
+ : screen_rect(_screen_rect), is_promotable(_is_promotable) {}
+ gfx::Rect screen_rect;
bool is_promotable = false;
+
+ bool operator==(const Hint& other) const {
+ return other.screen_rect == screen_rect &&
+ other.is_promotable == is_promotable;
+ }
};
- using NotifyPromotionHintCB = base::Callback<void(const Hint& hint)>;
+ // Pass the hint by value to permit thread-hopping callbacks.
+ using NotifyPromotionHintCB = base::Callback<void(Hint hint)>;
virtual ~PromotionHintAggregator() = default;
diff --git a/chromium/media/gpu/android/promotion_hint_aggregator_impl_unittest.cc b/chromium/media/gpu/android/promotion_hint_aggregator_impl_unittest.cc
index c8a16aeb6e8..da7e3332712 100644
--- a/chromium/media/gpu/android/promotion_hint_aggregator_impl_unittest.cc
+++ b/chromium/media/gpu/android/promotion_hint_aggregator_impl_unittest.cc
@@ -40,8 +40,7 @@ class PromotionHintAggregatorImplTest : public testing::Test {
// previous frame. Returns whether the video is promotable.
bool SendFrame(bool is_promotable, TimeDelta elapsed = FrameTime) {
tick_clock_.Advance(elapsed);
- PromotionHintAggregator::Hint hint;
- hint.is_promotable = is_promotable;
+ PromotionHintAggregator::Hint hint(gfx::Rect(), is_promotable);
impl_->NotifyPromotionHint(hint);
return impl_->IsSafeToPromote();
}
diff --git a/chromium/media/gpu/android/surface_texture_gl_owner.cc b/chromium/media/gpu/android/surface_texture_gl_owner.cc
new file mode 100644
index 00000000000..f4b4d5be5d5
--- /dev/null
+++ b/chromium/media/gpu/android/surface_texture_gl_owner.cc
@@ -0,0 +1,163 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/surface_texture_gl_owner.h"
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "ui/gl/scoped_binders.h"
+#include "ui/gl/scoped_make_current.h"
+
+namespace media {
+
+// FrameAvailableEvent is a RefCounted wrapper for a WaitableEvent
+// (it's not possible to put one in RefCountedData).
+// This lets us safely signal an event on any thread.
+struct FrameAvailableEvent
+ : public base::RefCountedThreadSafe<FrameAvailableEvent> {
+ FrameAvailableEvent()
+ : event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED) {}
+ void Signal() { event.Signal(); }
+ base::WaitableEvent event;
+
+ private:
+ friend class RefCountedThreadSafe<FrameAvailableEvent>;
+ ~FrameAvailableEvent() = default;
+};
+
+SurfaceTextureGLOwner::SurfaceTextureGLOwner()
+ : base::RefCountedDeleteOnSequence<SurfaceTextureGLOwner>(
+ base::ThreadTaskRunnerHandle::Get()),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()) {}
+
+SurfaceTextureGLOwner::~SurfaceTextureGLOwner() = default;
+
+scoped_refptr<SurfaceTextureGLOwner> SurfaceTextureGLOwnerImpl::Create() {
+ GLuint texture_id;
+ glGenTextures(1, &texture_id);
+ if (!texture_id)
+ return nullptr;
+
+ // Set the parameters on the texture.
+ gl::ScopedActiveTexture active_texture(GL_TEXTURE0);
+ gl::ScopedTextureBinder texture_binder(GL_TEXTURE_EXTERNAL_OES, texture_id);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+
+ return new SurfaceTextureGLOwnerImpl(texture_id);
+}
+
+SurfaceTextureGLOwnerImpl::SurfaceTextureGLOwnerImpl(GLuint texture_id)
+ : surface_texture_(gl::SurfaceTexture::Create(texture_id)),
+ texture_id_(texture_id),
+ context_(gl::GLContext::GetCurrent()),
+ surface_(gl::GLSurface::GetCurrent()),
+ frame_available_event_(new FrameAvailableEvent()) {
+ DCHECK(context_);
+ DCHECK(surface_);
+ surface_texture_->SetFrameAvailableCallbackOnAnyThread(
+ base::Bind(&FrameAvailableEvent::Signal, frame_available_event_));
+}
+
+SurfaceTextureGLOwnerImpl::~SurfaceTextureGLOwnerImpl() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ // Make sure that the SurfaceTexture isn't using the GL objects.
+ surface_texture_ = nullptr;
+
+ ui::ScopedMakeCurrent scoped_make_current(context_.get(), surface_.get());
+ if (scoped_make_current.Succeeded()) {
+ glDeleteTextures(1, &texture_id_);
+ DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
+ }
+}
+
+GLuint SurfaceTextureGLOwnerImpl::GetTextureId() const {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ return texture_id_;
+}
+
+gl::ScopedJavaSurface SurfaceTextureGLOwnerImpl::CreateJavaSurface() const {
+ return gl::ScopedJavaSurface(surface_texture_.get());
+}
+
+void SurfaceTextureGLOwnerImpl::UpdateTexImage() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ surface_texture_->UpdateTexImage();
+}
+
+void SurfaceTextureGLOwnerImpl::GetTransformMatrix(float mtx[]) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ surface_texture_->GetTransformMatrix(mtx);
+}
+
+void SurfaceTextureGLOwnerImpl::ReleaseBackBuffers() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ surface_texture_->ReleaseBackBuffers();
+}
+
+gl::GLContext* SurfaceTextureGLOwnerImpl::GetContext() const {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ return context_.get();
+}
+
+gl::GLSurface* SurfaceTextureGLOwnerImpl::GetSurface() const {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ return surface_.get();
+}
+
+void SurfaceTextureGLOwnerImpl::SetReleaseTimeToNow() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ release_time_ = base::TimeTicks::Now();
+}
+
+void SurfaceTextureGLOwnerImpl::IgnorePendingRelease() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ release_time_ = base::TimeTicks();
+}
+
+bool SurfaceTextureGLOwnerImpl::IsExpectingFrameAvailable() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ return !release_time_.is_null();
+}
+
+void SurfaceTextureGLOwnerImpl::WaitForFrameAvailable() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK(!release_time_.is_null());
+
+ // 5msec covers >99.9% of cases, so just wait for up to that much before
+ // giving up. If an error occurs, we might not ever get a notification.
+ const base::TimeDelta max_wait = base::TimeDelta::FromMilliseconds(5);
+ const base::TimeTicks call_time = base::TimeTicks::Now();
+ const base::TimeDelta elapsed = call_time - release_time_;
+ const base::TimeDelta remaining = max_wait - elapsed;
+ release_time_ = base::TimeTicks();
+
+ if (remaining <= base::TimeDelta()) {
+ if (!frame_available_event_->event.IsSignaled()) {
+ DVLOG(1) << "Deferred WaitForFrameAvailable() timed out, elapsed: "
+ << elapsed.InMillisecondsF() << "ms";
+ }
+ return;
+ }
+
+ DCHECK_LE(remaining, max_wait);
+ SCOPED_UMA_HISTOGRAM_TIMER("Media.AvdaCodecImage.WaitTimeForFrame");
+ if (!frame_available_event_->event.TimedWait(remaining)) {
+ DVLOG(1) << "WaitForFrameAvailable() timed out, elapsed: "
+ << elapsed.InMillisecondsF()
+ << "ms, additionally waited: " << remaining.InMillisecondsF()
+ << "ms, total: " << (elapsed + remaining).InMillisecondsF()
+ << "ms";
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/surface_texture_gl_owner.h b/chromium/media/gpu/android/surface_texture_gl_owner.h
new file mode 100644
index 00000000000..904dc34d291
--- /dev/null
+++ b/chromium/media/gpu/android/surface_texture_gl_owner.h
@@ -0,0 +1,129 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_SURFACE_TEXTURE_GL_OWNER_H_
+#define MEDIA_GPU_ANDROID_SURFACE_TEXTURE_GL_OWNER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/ref_counted_delete_on_sequence.h"
+#include "base/sequenced_task_runner_helpers.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread_checker.h"
+#include "media/gpu/media_gpu_export.h"
+#include "ui/gl/android/scoped_java_surface.h"
+#include "ui/gl/android/surface_texture.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_surface.h"
+
+namespace media {
+
+struct FrameAvailableEvent;
+
+// A SurfaceTexture wrapper that creates and maintains ownership of the
+// attached GL texture. The texture is destroyed with the object but it's
+// possible to call ReleaseSurfaceTexture() without destroying the GL texture.
+// It should only be accessed on the thread it was created on, with the
+// exception of CreateJavaSurface(), which can be called on any thread.
+// It's safe to keep and drop refptrs to it on any thread; it will be
+// automatically destructed on the thread it was constructed on.
+// Virtual for testing; see SurfaceTextureGLOwnerImpl.
+class MEDIA_GPU_EXPORT SurfaceTextureGLOwner
+ : public base::RefCountedDeleteOnSequence<SurfaceTextureGLOwner> {
+ public:
+ SurfaceTextureGLOwner();
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner() {
+ return task_runner_;
+ }
+
+ // Returns the GL texture id that the SurfaceTexture is attached to.
+ virtual GLuint GetTextureId() const = 0;
+ virtual gl::GLContext* GetContext() const = 0;
+ virtual gl::GLSurface* GetSurface() const = 0;
+
+ // Create a java surface for the SurfaceTexture.
+ virtual gl::ScopedJavaSurface CreateJavaSurface() const = 0;
+
+ // See gl::SurfaceTexture for the following.
+ virtual void UpdateTexImage() = 0;
+ virtual void GetTransformMatrix(float mtx[16]) = 0;
+ virtual void ReleaseBackBuffers() = 0;
+
+  // Sets the expectation of onFrameAvailable for a new frame because a buffer
+  // was just released to this surface.
+ virtual void SetReleaseTimeToNow() = 0;
+
+ // Ignores a pending release that was previously indicated with
+ // SetReleaseTimeToNow().
+ // TODO(watk): This doesn't seem necessary. It actually may be detrimental
+ // because the next time we release a buffer we may confuse its
+ // onFrameAvailable with the one we're ignoring.
+ virtual void IgnorePendingRelease() = 0;
+
+ // Whether we're expecting onFrameAvailable. True when SetReleaseTimeToNow()
+ // was called but neither IgnorePendingRelease() nor WaitForFrameAvailable()
+ // have been called since.
+ virtual bool IsExpectingFrameAvailable() = 0;
+
+ // Waits for onFrameAvailable until it's been 5ms since the buffer was
+ // released. This must only be called if IsExpectingFrameAvailable().
+ virtual void WaitForFrameAvailable() = 0;
+
+ protected:
+ friend class base::RefCountedDeleteOnSequence<SurfaceTextureGLOwner>;
+ friend class base::DeleteHelper<SurfaceTextureGLOwner>;
+ virtual ~SurfaceTextureGLOwner();
+
+ private:
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(SurfaceTextureGLOwner);
+};
+
+class MEDIA_GPU_EXPORT SurfaceTextureGLOwnerImpl
+ : public SurfaceTextureGLOwner {
+ public:
+ // Creates a GL texture using the current platform GL context and returns a
+ // new SurfaceTextureGLOwnerImpl attached to it. Returns null on failure.
+ static scoped_refptr<SurfaceTextureGLOwner> Create();
+
+ GLuint GetTextureId() const override;
+ gl::GLContext* GetContext() const override;
+ gl::GLSurface* GetSurface() const override;
+ gl::ScopedJavaSurface CreateJavaSurface() const override;
+ void UpdateTexImage() override;
+ void GetTransformMatrix(float mtx[16]) override;
+ void ReleaseBackBuffers() override;
+ void SetReleaseTimeToNow() override;
+ void IgnorePendingRelease() override;
+ bool IsExpectingFrameAvailable() override;
+ void WaitForFrameAvailable() override;
+
+ private:
+ SurfaceTextureGLOwnerImpl(GLuint texture_id);
+ ~SurfaceTextureGLOwnerImpl() override;
+
+ scoped_refptr<gl::SurfaceTexture> surface_texture_;
+ GLuint texture_id_;
+
+ // The context and surface that were used to create |texture_id_|.
+ scoped_refptr<gl::GLContext> context_;
+ scoped_refptr<gl::GLSurface> surface_;
+
+ // When SetReleaseTimeToNow() was last called. i.e., when the last
+ // codec buffer was released to this surface. Or null if
+ // IgnorePendingRelease() or WaitForFrameAvailable() have been called since.
+ base::TimeTicks release_time_;
+ scoped_refptr<FrameAvailableEvent> frame_available_event_;
+
+ THREAD_CHECKER(thread_checker_);
+
+ DISALLOW_COPY_AND_ASSIGN(SurfaceTextureGLOwnerImpl);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_SURFACE_TEXTURE_GL_OWNER_H_
diff --git a/chromium/media/gpu/android/surface_texture_gl_owner_unittest.cc b/chromium/media/gpu/android/surface_texture_gl_owner_unittest.cc
new file mode 100644
index 00000000000..b29941a2dee
--- /dev/null
+++ b/chromium/media/gpu/android/surface_texture_gl_owner_unittest.cc
@@ -0,0 +1,115 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/surface_texture_gl_owner.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context_egl.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/gl_share_group.h"
+#include "ui/gl/gl_surface_egl.h"
+#include "ui/gl/init/gl_factory.h"
+
+using testing::Invoke;
+using testing::NiceMock;
+using testing::_;
+
+namespace media {
+
+class SurfaceTextureGLOwnerTest : public testing::Test {
+ public:
+ SurfaceTextureGLOwnerTest() {}
+ ~SurfaceTextureGLOwnerTest() override {}
+
+ protected:
+ void SetUp() override {
+ gl::init::InitializeGLOneOffImplementation(gl::kGLImplementationEGLGLES2,
+ false, false, false, true);
+ surface_ = new gl::PbufferGLSurfaceEGL(gfx::Size(320, 240));
+ surface_->Initialize();
+
+ share_group_ = new gl::GLShareGroup();
+ context_ = new gl::GLContextEGL(share_group_.get());
+ context_->Initialize(surface_.get(), gl::GLContextAttribs());
+ ASSERT_TRUE(context_->MakeCurrent(surface_.get()));
+
+ surface_texture_ = SurfaceTextureGLOwnerImpl::Create();
+ texture_id_ = surface_texture_->GetTextureId();
+ // Bind and un-bind the texture, since that's required for glIsTexture to
+ // return true.
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, texture_id_);
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
+ ASSERT_TRUE(glIsTexture(texture_id_));
+ }
+
+ void TearDown() override {
+ surface_texture_ = nullptr;
+ context_ = nullptr;
+ share_group_ = nullptr;
+ surface_ = nullptr;
+ gl::init::ShutdownGL();
+ }
+
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture_;
+ GLuint texture_id_ = 0;
+
+ scoped_refptr<gl::GLContext> context_;
+ scoped_refptr<gl::GLShareGroup> share_group_;
+ scoped_refptr<gl::GLSurface> surface_;
+ base::MessageLoop message_loop_;
+};
+
+// Verify that SurfaceTextureGLOwner creates a bindable GL texture, and deletes
+// it during destruction.
+TEST_F(SurfaceTextureGLOwnerTest, GLTextureIsCreatedAndDestroyed) {
+  // |texture_id_| should not work anymore after we delete |surface_texture_|.
+ surface_texture_ = nullptr;
+ ASSERT_FALSE(glIsTexture(texture_id_));
+}
+
+// Calling ReleaseBackBuffers shouldn't deallocate the texture handle.
+TEST_F(SurfaceTextureGLOwnerTest, ReleaseDoesntDestroyTexture) {
+ surface_texture_->ReleaseBackBuffers();
+ ASSERT_TRUE(glIsTexture(texture_id_));
+}
+
+// Make sure that |surface_texture_| remembers the correct context and surface.
+TEST_F(SurfaceTextureGLOwnerTest, ContextAndSurfaceAreCaptured) {
+ ASSERT_EQ(context_, surface_texture_->GetContext());
+ ASSERT_EQ(surface_, surface_texture_->GetSurface());
+}
+
+// Verify that destruction works even if some other context is current.
+TEST_F(SurfaceTextureGLOwnerTest, DestructionWorksWithWrongContext) {
+ scoped_refptr<gl::GLSurface> new_surface(
+ new gl::PbufferGLSurfaceEGL(gfx::Size(320, 240)));
+ new_surface->Initialize();
+
+ scoped_refptr<gl::GLShareGroup> new_share_group(new gl::GLShareGroup());
+ scoped_refptr<gl::GLContext> new_context(
+ new gl::GLContextEGL(new_share_group.get()));
+ new_context->Initialize(new_surface.get(), gl::GLContextAttribs());
+ ASSERT_TRUE(new_context->MakeCurrent(new_surface.get()));
+
+ surface_texture_ = nullptr;
+ ASSERT_FALSE(glIsTexture(texture_id_));
+
+ // |new_context| should still be current.
+ ASSERT_TRUE(new_context->IsCurrent(new_surface.get()));
+
+ new_context = nullptr;
+ new_share_group = nullptr;
+ new_surface = nullptr;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/video_frame_factory.h b/chromium/media/gpu/android/video_frame_factory.h
index ec5b150f5c2..09075dc44d9 100644
--- a/chromium/media/gpu/android/video_frame_factory.h
+++ b/chromium/media/gpu/android/video_frame_factory.h
@@ -10,12 +10,14 @@
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
+#include "media/gpu/android/promotion_hint_aggregator.h"
#include "media/gpu/media_gpu_export.h"
#include "ui/gfx/geometry/size.h"
namespace gpu {
class GpuCommandBufferStub;
-}
+struct SyncToken;
+} // namespace gpu
namespace media {
@@ -31,7 +33,7 @@ class MEDIA_GPU_EXPORT VideoFrameFactory {
using InitCb = base::Callback<void(scoped_refptr<SurfaceTextureGLOwner>)>;
// These mirror types from MojoVideoDecoderService.
- using ReleaseMailboxCB = base::Callback<void(const gpu::SyncToken&)>;
+ using ReleaseMailboxCB = base::OnceCallback<void(const gpu::SyncToken&)>;
using OutputWithReleaseMailboxCB =
base::Callback<void(ReleaseMailboxCB, const scoped_refptr<VideoFrame>&)>;
@@ -41,20 +43,22 @@ class MEDIA_GPU_EXPORT VideoFrameFactory {
// Initializes the factory and runs |init_cb| on the current thread when it's
// complete. If initialization fails, the returned surface texture will be
// null.
- virtual void Initialize(
- scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
- GetStubCb get_stub_cb,
- InitCb init_cb) = 0;
+ virtual void Initialize(InitCb init_cb) = 0;
// Creates a new VideoFrame backed by |output_buffer| and |surface_texture|.
// |surface_texture| may be null if the buffer is backed by an overlay
- // instead. Runs |frame_created_cb| on the current thread to return the frame.
+ // instead. Runs |output_cb| on the calling sequence to return the frame.
virtual void CreateVideoFrame(
std::unique_ptr<CodecOutputBuffer> output_buffer,
scoped_refptr<SurfaceTextureGLOwner> surface_texture,
base::TimeDelta timestamp,
gfx::Size natural_size,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
OutputWithReleaseMailboxCB output_cb) = 0;
+
+ // Runs |closure| on the calling sequence after all previous
+ // CreateVideoFrame() calls have completed.
+ virtual void RunAfterPendingVideoFrames(base::OnceClosure closure) = 0;
};
} // namespace media
diff --git a/chromium/media/gpu/android/video_frame_factory_impl.cc b/chromium/media/gpu/android/video_frame_factory_impl.cc
index 3f22dd0c2b3..ec84d6b2fb2 100644
--- a/chromium/media/gpu/android/video_frame_factory_impl.cc
+++ b/chromium/media/gpu/android/video_frame_factory_impl.cc
@@ -17,6 +17,7 @@
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_command_buffer_stub.h"
#include "media/base/bind_to_current_loop.h"
+#include "media/base/scoped_callback_runner.h"
#include "media/base/video_frame.h"
#include "media/gpu//android/codec_image.h"
#include "media/gpu/android/codec_wrapper.h"
@@ -32,23 +33,27 @@ bool MakeContextCurrent(gpu::GpuCommandBufferStub* stub) {
} // namespace
-VideoFrameFactoryImpl::VideoFrameFactoryImpl() = default;
+VideoFrameFactoryImpl::VideoFrameFactoryImpl(
+ scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
+ GetStubCb get_stub_cb)
+ : gpu_task_runner_(std::move(gpu_task_runner)),
+ get_stub_cb_(std::move(get_stub_cb)) {}
VideoFrameFactoryImpl::~VideoFrameFactoryImpl() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (gpu_video_frame_factory_)
gpu_task_runner_->DeleteSoon(FROM_HERE, gpu_video_frame_factory_.release());
}
-void VideoFrameFactoryImpl::Initialize(
- scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
- GetStubCb get_stub_cb,
- InitCb init_cb) {
- gpu_task_runner_ = std::move(gpu_task_runner);
+void VideoFrameFactoryImpl::Initialize(InitCb init_cb) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(!gpu_video_frame_factory_);
gpu_video_frame_factory_ = base::MakeUnique<GpuVideoFrameFactory>();
base::PostTaskAndReplyWithResult(
gpu_task_runner_.get(), FROM_HERE,
base::Bind(&GpuVideoFrameFactory::Initialize,
- base::Unretained(gpu_video_frame_factory_.get()), get_stub_cb),
+ base::Unretained(gpu_video_frame_factory_.get()),
+ get_stub_cb_),
std::move(init_cb));
}
@@ -57,18 +62,32 @@ void VideoFrameFactoryImpl::CreateVideoFrame(
scoped_refptr<SurfaceTextureGLOwner> surface_texture,
base::TimeDelta timestamp,
gfx::Size natural_size,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
OutputWithReleaseMailboxCB output_cb) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
gpu_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuVideoFrameFactory::CreateVideoFrame,
- base::Unretained(gpu_video_frame_factory_.get()),
- base::Passed(&output_buffer), surface_texture,
- timestamp, natural_size, std::move(output_cb),
- base::ThreadTaskRunnerHandle::Get()));
+ FROM_HERE,
+ base::Bind(&GpuVideoFrameFactory::CreateVideoFrame,
+ base::Unretained(gpu_video_frame_factory_.get()),
+ base::Passed(&output_buffer), surface_texture, timestamp,
+ natural_size, std::move(promotion_hint_cb),
+ std::move(output_cb), base::ThreadTaskRunnerHandle::Get()));
}
-GpuVideoFrameFactory::GpuVideoFrameFactory() : weak_factory_(this) {}
+void VideoFrameFactoryImpl::RunAfterPendingVideoFrames(
+ base::OnceClosure closure) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ // Hop through |gpu_task_runner_| to ensure it comes after pending frames.
+ gpu_task_runner_->PostTaskAndReply(
+ FROM_HERE, base::BindOnce(&base::DoNothing), std::move(closure));
+}
+
+GpuVideoFrameFactory::GpuVideoFrameFactory() : weak_factory_(this) {
+ DETACH_FROM_THREAD(thread_checker_);
+}
GpuVideoFrameFactory::~GpuVideoFrameFactory() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (stub_)
stub_->RemoveDestructionObserver(this);
ClearTextureRefs();
@@ -76,6 +95,7 @@ GpuVideoFrameFactory::~GpuVideoFrameFactory() {
scoped_refptr<SurfaceTextureGLOwner> GpuVideoFrameFactory::Initialize(
VideoFrameFactoryImpl::GetStubCb get_stub_cb) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
stub_ = get_stub_cb.Run();
if (!MakeContextCurrent(stub_))
return nullptr;
@@ -89,15 +109,21 @@ void GpuVideoFrameFactory::CreateVideoFrame(
scoped_refptr<SurfaceTextureGLOwner> surface_texture,
base::TimeDelta timestamp,
gfx::Size natural_size,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
VideoFrameFactory::OutputWithReleaseMailboxCB output_cb,
scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
scoped_refptr<VideoFrame> frame;
scoped_refptr<gpu::gles2::TextureRef> texture_ref;
CreateVideoFrameInternal(std::move(output_buffer), std::move(surface_texture),
- timestamp, natural_size, &frame, &texture_ref);
+ timestamp, natural_size,
+ std::move(promotion_hint_cb), &frame, &texture_ref);
if (!frame || !texture_ref)
return;
+ // Try to render this frame if possible.
+ internal::MaybeRenderEarly(&images_);
+
// TODO(sandersd, watk): The VideoFrame release callback will not be called
// after MojoVideoDecoderService is destructed, so we have to release all
// our TextureRefs when |this| is destructed. This can unback outstanding
@@ -105,11 +131,17 @@ void GpuVideoFrameFactory::CreateVideoFrame(
// release callback lifetime should be separate from MCVD or
// MojoVideoDecoderService (http://crbug.com/737220).
texture_refs_[texture_ref.get()] = texture_ref;
- auto release_cb = BindToCurrentLoop(base::Bind(
- &GpuVideoFrameFactory::DropTextureRef, weak_factory_.GetWeakPtr(),
- base::Unretained(texture_ref.get())));
- task_runner->PostTask(FROM_HERE, base::Bind(output_cb, std::move(release_cb),
- std::move(frame)));
+ auto drop_texture_ref = base::Bind(&GpuVideoFrameFactory::DropTextureRef,
+ weak_factory_.GetWeakPtr(),
+ base::Unretained(texture_ref.get()));
+
+ // Guarantee that the TextureRef is released even if the VideoFrame is
+ // dropped. Otherwise we could keep TextureRefs we don't need alive.
+ auto release_cb = ScopedCallbackRunner(
+ ToOnceCallback(BindToCurrentLoop(drop_texture_ref)), gpu::SyncToken());
+ task_runner->PostTask(
+ FROM_HERE,
+ base::BindOnce(output_cb, std::move(release_cb), std::move(frame)));
}
void GpuVideoFrameFactory::CreateVideoFrameInternal(
@@ -117,8 +149,10 @@ void GpuVideoFrameFactory::CreateVideoFrameInternal(
scoped_refptr<SurfaceTextureGLOwner> surface_texture,
base::TimeDelta timestamp,
gfx::Size natural_size,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
scoped_refptr<VideoFrame>* video_frame_out,
scoped_refptr<gpu::gles2::TextureRef>* texture_ref_out) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (!MakeContextCurrent(stub_))
return;
@@ -131,26 +165,27 @@ void GpuVideoFrameFactory::CreateVideoFrameInternal(
gfx::Size size = output_buffer->size();
gfx::Rect visible_rect(size);
+ // The pixel format doesn't matter as long as it's valid for texture frames.
+ VideoPixelFormat pixel_format = PIXEL_FORMAT_ARGB;
+
// Check that we can create a VideoFrame for this config before creating the
// TextureRef so that we don't have to clean up the TextureRef if creating the
// frame fails.
- if (!VideoFrame::IsValidConfig(PIXEL_FORMAT_ARGB, VideoFrame::STORAGE_OPAQUE,
- size, visible_rect, natural_size)) {
+ if (!VideoFrame::IsValidConfig(pixel_format, VideoFrame::STORAGE_OPAQUE, size,
+ visible_rect, natural_size)) {
return;
}
- // Create a new Texture.
- auto texture_ref = decoder_helper_->CreateTexture(
- GL_TEXTURE_EXTERNAL_OES, GL_RGBA, size.width(), size.height(), GL_RGBA,
- GL_UNSIGNED_BYTE);
-
- // Create a new CodecImage to back the texture and try to render it early.
- auto image = make_scoped_refptr(
- new CodecImage(std::move(output_buffer), surface_texture,
- base::Bind(&GpuVideoFrameFactory::OnImageDestructed,
- weak_factory_.GetWeakPtr())));
+ // Create a Texture and a CodecImage to back it.
+ scoped_refptr<gpu::gles2::TextureRef> texture_ref =
+ decoder_helper_->CreateTexture(GL_TEXTURE_EXTERNAL_OES, GL_RGBA,
+ size.width(), size.height(), GL_RGBA,
+ GL_UNSIGNED_BYTE);
+ auto image = base::MakeRefCounted<CodecImage>(
+ std::move(output_buffer), surface_texture, std::move(promotion_hint_cb),
+ base::Bind(&GpuVideoFrameFactory::OnImageDestructed,
+ weak_factory_.GetWeakPtr()));
images_.push_back(image.get());
- internal::MaybeRenderEarly(&images_);
// Attach the image to the texture.
// If we're attaching a SurfaceTexture backed image, we set the state to
@@ -174,16 +209,24 @@ void GpuVideoFrameFactory::CreateVideoFrameInternal(
mailbox_holders[0] =
gpu::MailboxHolder(mailbox, gpu::SyncToken(), GL_TEXTURE_EXTERNAL_OES);
- // Note: The pixel format doesn't matter.
auto frame = VideoFrame::WrapNativeTextures(
- PIXEL_FORMAT_ARGB, mailbox_holders, VideoFrame::ReleaseMailboxCB(), size,
+ pixel_format, mailbox_holders, VideoFrame::ReleaseMailboxCB(), size,
visible_rect, natural_size, timestamp);
+ // The frames must be copied when threaded texture mailboxes are in use
+ // (http://crbug.com/582170).
+ if (stub_->GetGpuPreferences().enable_threaded_texture_mailboxes)
+ frame->metadata()->SetBoolean(VideoFrameMetadata::COPY_REQUIRED, true);
+
+ frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY,
+ !surface_texture);
+
*video_frame_out = std::move(frame);
*texture_ref_out = std::move(texture_ref);
}
void GpuVideoFrameFactory::OnWillDestroyStub() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(stub_);
ClearTextureRefs();
stub_ = nullptr;
@@ -191,6 +234,7 @@ void GpuVideoFrameFactory::OnWillDestroyStub() {
}
void GpuVideoFrameFactory::ClearTextureRefs() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(stub_ || texture_refs_.empty());
// If we fail to make the context current, we have to notify the TextureRefs
// so they don't try to delete textures without a context.
@@ -203,6 +247,7 @@ void GpuVideoFrameFactory::ClearTextureRefs() {
void GpuVideoFrameFactory::DropTextureRef(gpu::gles2::TextureRef* ref,
const gpu::SyncToken& token) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
auto it = texture_refs_.find(ref);
if (it == texture_refs_.end())
return;
@@ -214,6 +259,7 @@ void GpuVideoFrameFactory::DropTextureRef(gpu::gles2::TextureRef* ref,
}
void GpuVideoFrameFactory::OnImageDestructed(CodecImage* image) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
base::Erase(images_, image);
internal::MaybeRenderEarly(&images_);
}
diff --git a/chromium/media/gpu/android/video_frame_factory_impl.h b/chromium/media/gpu/android/video_frame_factory_impl.h
index f8faebb4d99..7042166ae76 100644
--- a/chromium/media/gpu/android/video_frame_factory_impl.h
+++ b/chromium/media/gpu/android/video_frame_factory_impl.h
@@ -12,10 +12,10 @@
#include "media/base/video_frame.h"
#include "media/gpu/android/codec_image.h"
#include "media/gpu/android/codec_wrapper.h"
+#include "media/gpu/android/surface_texture_gl_owner.h"
#include "media/gpu/android/video_frame_factory.h"
#include "media/gpu/gles2_decoder_helper.h"
#include "media/gpu/media_gpu_export.h"
-#include "media/gpu/surface_texture_gl_owner.h"
#include "ui/gl/gl_bindings.h"
namespace media {
@@ -23,26 +23,34 @@ class GpuVideoFrameFactory;
// VideoFrameFactoryImpl creates CodecOutputBuffer backed VideoFrames and tries
// to eagerly render them to their surface to release the buffers back to the
-// decoder as soon as possible. It's not thread safe, but it internally posts
-// calls to GpuVideoFrameFactory on the gpu thread.
+// decoder as soon as possible. It's not thread safe; it should be created, used
+// and destructed on a single sequence. It's implemented by proxying calls
+// to a helper class hosted on the gpu thread.
class MEDIA_GPU_EXPORT VideoFrameFactoryImpl : public VideoFrameFactory {
public:
- VideoFrameFactoryImpl();
+ // |get_stub_cb| will be run on |gpu_task_runner|.
+ VideoFrameFactoryImpl(
+ scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
+ GetStubCb get_stub_cb);
~VideoFrameFactoryImpl() override;
- void Initialize(scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
- GetStubCb get_stub_cb,
- InitCb init_cb) override;
- void CreateVideoFrame(std::unique_ptr<CodecOutputBuffer> output_buffer,
- scoped_refptr<SurfaceTextureGLOwner> surface_texture,
- base::TimeDelta timestamp,
- gfx::Size natural_size,
- OutputWithReleaseMailboxCB output_cb) override;
+ void Initialize(InitCb init_cb) override;
+ void CreateVideoFrame(
+ std::unique_ptr<CodecOutputBuffer> output_buffer,
+ scoped_refptr<SurfaceTextureGLOwner> surface_texture,
+ base::TimeDelta timestamp,
+ gfx::Size natural_size,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
+ OutputWithReleaseMailboxCB output_cb) override;
+ void RunAfterPendingVideoFrames(base::OnceClosure closure) override;
private:
- scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner_;
+ // The gpu thread side of the implementation.
std::unique_ptr<GpuVideoFrameFactory> gpu_video_frame_factory_;
+ scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner_;
+ GetStubCb get_stub_cb_;
+ SEQUENCE_CHECKER(sequence_checker_);
DISALLOW_COPY_AND_ASSIGN(VideoFrameFactoryImpl);
};
@@ -57,13 +65,13 @@ class GpuVideoFrameFactory
scoped_refptr<SurfaceTextureGLOwner> Initialize(
VideoFrameFactory::GetStubCb get_stub_cb);
- // Creates a VideoFrame and returns it via posting |output_cb| to
- // |task_runner|.
+ // Creates and returns a VideoFrame with its ReleaseMailboxCB.
void CreateVideoFrame(
std::unique_ptr<CodecOutputBuffer> output_buffer,
scoped_refptr<SurfaceTextureGLOwner> surface_texture,
base::TimeDelta timestamp,
gfx::Size natural_size,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
VideoFrameFactory::OutputWithReleaseMailboxCB output_cb,
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
@@ -74,6 +82,7 @@ class GpuVideoFrameFactory
scoped_refptr<SurfaceTextureGLOwner> surface_texture,
base::TimeDelta timestamp,
gfx::Size natural_size,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
scoped_refptr<VideoFrame>* video_frame_out,
scoped_refptr<gpu::gles2::TextureRef>* texture_ref_out);
@@ -102,8 +111,8 @@ class GpuVideoFrameFactory
// A helper for creating textures. Only valid while |stub_| is valid.
std::unique_ptr<GLES2DecoderHelper> decoder_helper_;
+ THREAD_CHECKER(thread_checker_);
base::WeakPtrFactory<GpuVideoFrameFactory> weak_factory_;
-
DISALLOW_COPY_AND_ASSIGN(GpuVideoFrameFactory);
};