summary refs log tree commit diff
path: root/chromium/media/gpu/vaapi
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/media/gpu/vaapi')
-rw-r--r--chromium/media/gpu/vaapi/BUILD.gn6
-rw-r--r--chromium/media/gpu/vaapi/accelerated_video_encoder.cc50
-rw-r--r--chromium/media/gpu/vaapi/accelerated_video_encoder.h148
-rw-r--r--chromium/media/gpu/vaapi/h264_encoder.cc482
-rw-r--r--chromium/media/gpu/vaapi/h264_encoder.h163
-rw-r--r--chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc6
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_decoder_unittest.cc4
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc20
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc32
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_encoder.h4
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc7
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc261
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc1457
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h199
-rw-r--r--chromium/media/gpu/vaapi/vaapi_vp8_accelerator.cc13
-rw-r--r--chromium/media/gpu/vaapi/vaapi_vp8_accelerator.h7
-rw-r--r--chromium/media/gpu/vaapi/vaapi_wrapper.cc14
-rw-r--r--chromium/media/gpu/vaapi/vaapi_wrapper.h2
-rw-r--r--chromium/media/gpu/vaapi/vp8_encoder.cc181
-rw-r--r--chromium/media/gpu/vaapi/vp8_encoder.h109
20 files changed, 2152 insertions, 1013 deletions
diff --git a/chromium/media/gpu/vaapi/BUILD.gn b/chromium/media/gpu/vaapi/BUILD.gn
index a3eaed3ac8f..d69ad3bd898 100644
--- a/chromium/media/gpu/vaapi/BUILD.gn
+++ b/chromium/media/gpu/vaapi/BUILD.gn
@@ -53,6 +53,10 @@ action("libva_generate_stubs") {
source_set("vaapi") {
defines = [ "MEDIA_GPU_IMPLEMENTATION" ]
sources = [
+ "accelerated_video_encoder.cc",
+ "accelerated_video_encoder.h",
+ "h264_encoder.cc",
+ "h264_encoder.h",
"va_surface.cc",
"va_surface.h",
"vaapi_common.cc",
@@ -81,6 +85,8 @@ source_set("vaapi") {
"vaapi_vp9_accelerator.h",
"vaapi_wrapper.cc",
"vaapi_wrapper.h",
+ "vp8_encoder.cc",
+ "vp8_encoder.h",
]
sources += get_target_outputs(":libva_generate_stubs")
diff --git a/chromium/media/gpu/vaapi/accelerated_video_encoder.cc b/chromium/media/gpu/vaapi/accelerated_video_encoder.cc
new file mode 100644
index 00000000000..0334356e0ff
--- /dev/null
+++ b/chromium/media/gpu/vaapi/accelerated_video_encoder.cc
@@ -0,0 +1,50 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/vaapi/accelerated_video_encoder.h"
+
+#include "media/base/video_frame.h"
+
+namespace media {
+
+AcceleratedVideoEncoder::EncodeJob::EncodeJob(
+ scoped_refptr<VideoFrame> input_frame,
+ bool keyframe,
+ base::OnceClosure execute_cb)
+ : input_frame_(input_frame),
+ timestamp_(input_frame->timestamp()),
+ keyframe_(keyframe),
+ execute_callback_(std::move(execute_cb)) {
+ DCHECK(!execute_callback_.is_null());
+}
+
+AcceleratedVideoEncoder::EncodeJob::~EncodeJob() = default;
+
+VaapiEncodeJob* AcceleratedVideoEncoder::EncodeJob::AsVaapiEncodeJob() {
+ CHECK(false);
+ return nullptr;
+}
+
+void AcceleratedVideoEncoder::EncodeJob::AddSetupCallback(
+ base::OnceClosure cb) {
+ DCHECK(!cb.is_null());
+ setup_callbacks_.push(std::move(cb));
+}
+
+void AcceleratedVideoEncoder::EncodeJob::AddReferencePicture(
+ scoped_refptr<CodecPicture> ref_pic) {
+ DCHECK(ref_pic);
+ reference_pictures_.push_back(ref_pic);
+}
+
+void AcceleratedVideoEncoder::EncodeJob::Execute() {
+ while (!setup_callbacks_.empty()) {
+ std::move(setup_callbacks_.front()).Run();
+ setup_callbacks_.pop();
+ }
+
+ std::move(execute_callback_).Run();
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/vaapi/accelerated_video_encoder.h b/chromium/media/gpu/vaapi/accelerated_video_encoder.h
new file mode 100644
index 00000000000..3564b248a32
--- /dev/null
+++ b/chromium/media/gpu/vaapi/accelerated_video_encoder.h
@@ -0,0 +1,148 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_VAAPI_ACCELERATED_VIDEO_ENCODER_H_
+#define MEDIA_GPU_VAAPI_ACCELERATED_VIDEO_ENCODER_H_
+
+#include <vector>
+
+#include "base/callback.h"
+#include "base/containers/queue.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/video_codecs.h"
+#include "media/gpu/codec_picture.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace media {
+
+class VaapiEncodeJob;
+class VideoFrame;
+
+// An AcceleratedVideoEncoder (AVE) performs high-level, platform-independent
+// encoding process tasks, such as managing codec state, reference frames, etc.,
+// but may require support from an external accelerator (typically a hardware
+// accelerator) to offload some stages of the actual encoding process, using
+// the parameters that AVE prepares beforehand.
+//
+// For each frame to be encoded, clients provide an EncodeJob object to be set
+// up by an AVE with job parameters, and execute the job afterwards. Any
+// resources required for the job are also provided by the clients, and
+// associated with the EncodeJob object.
+class AcceleratedVideoEncoder {
+ public:
+ AcceleratedVideoEncoder() = default;
+ virtual ~AcceleratedVideoEncoder() = default;
+
+ // An abstraction of an encode job for one frame. Parameters required for an
+ // EncodeJob to be executed are prepared by an AcceleratedVideoEncoder, while
+ // the accelerator-specific callbacks required to set up and execute it are
+ // provided by the accelerator itself, based on these parameters.
+ // Accelerators are also responsible for providing any resources (such as
+ // memory for output and reference pictures, etc.) as needed.
+ class EncodeJob : public base::RefCounted<EncodeJob> {
+ public:
+ // Creates an EncodeJob to encode |input_frame|, which will be executed
+ // by calling |execute_cb|. If |keyframe| is true, requests this job
+ // to produce a keyframe.
+ EncodeJob(scoped_refptr<VideoFrame> input_frame,
+ bool keyframe,
+ base::OnceClosure execute_cb);
+
+ // Schedules a callback to be run immediately before this job is executed.
+ // Can be called multiple times to schedule multiple callbacks, and all
+ // of them will be run, in order added.
+ // Callbacks can be used to e.g. set up hardware parameters before the job
+ // is executed.
+ void AddSetupCallback(base::OnceClosure cb);
+
+ // Adds |ref_pic| to the list of pictures to be used as reference pictures
+ // for this frame, to ensure they remain valid until the job is executed
+ // (or discarded).
+ void AddReferencePicture(scoped_refptr<CodecPicture> ref_pic);
+
+ // Runs all setup callbacks previously scheduled, if any, in order added,
+ // and executes the job by calling the execute callback. Note that the
+ // actual job execution may be asynchronous, and returning from this method
+ // does not have to indicate that the job has been finished. The execute
+ // callback is responsible for retaining references to any resources that
+ // may be in use after this method returns however, so it is safe to release
+ // the EncodeJob object itself immediately after this method returns.
+ void Execute();
+
+ // Requests this job to produce a keyframe; requesting a keyframe may not
+ // always result in one being produced by the encoder (e.g. if it would
+ // not fit in the bitrate budget).
+ void ProduceKeyframe() { keyframe_ = true; }
+
+ // Returns true if this job has been requested to produce a keyframe.
+ bool IsKeyframeRequested() const { return keyframe_; }
+
+ // Returns the timestamp associated with this job.
+ base::TimeDelta timestamp() const { return timestamp_; }
+
+ virtual VaapiEncodeJob* AsVaapiEncodeJob();
+
+ protected:
+ friend class base::RefCounted<EncodeJob>;
+ virtual ~EncodeJob();
+
+ private:
+ // Input VideoFrame to be encoded.
+ const scoped_refptr<VideoFrame> input_frame_;
+
+ // Source timestamp for |input_frame_|.
+ const base::TimeDelta timestamp_;
+
+ // True if this job is to produce a keyframe.
+ bool keyframe_;
+
+ // Callbacks to be run (in the same order as the order of AddSetupCallback()
+ // calls) to set up the job.
+ base::queue<base::OnceClosure> setup_callbacks_;
+
+ // Callback to be run to execute this job.
+ base::OnceClosure execute_callback_;
+
+ // Reference pictures required for this job.
+ std::vector<scoped_refptr<CodecPicture>> reference_pictures_;
+
+ DISALLOW_COPY_AND_ASSIGN(EncodeJob);
+ };
+
+ // Initializes the encoder to encode frames of |visible_size| into a stream
+ // for |profile|, at |initial_bitrate| and |initial_framerate|.
+ // Returns false if the requested set of parameters is not supported,
+ // true on success.
+ virtual bool Initialize(const gfx::Size& visible_size,
+ VideoCodecProfile profile,
+ uint32_t initial_bitrate,
+ uint32_t initial_framerate) = 0;
+
+ // Updates current framerate and/or bitrate to |framerate| in FPS
+ // and |bitrate| in bps.
+ virtual bool UpdateRates(uint32_t bitrate, uint32_t framerate) = 0;
+
+ // Returns coded size for the input buffers required to encode, in pixels;
+ // typically visible size adjusted to match codec alignment requirements.
+ virtual gfx::Size GetCodedSize() const = 0;
+
+ // Returns minimum size in bytes for bitstream buffers required to fit output
+ // stream buffers produced.
+ virtual size_t GetBitstreamBufferSize() const = 0;
+
+ // Returns maximum number of reference frames that may be used by the
+ // encoder to encode one frame. The client should be able to provide up to
+ // at least this many frames simultaneously for encode to make progress.
+ virtual size_t GetMaxNumOfRefFrames() const = 0;
+
+ // Prepares a new |encode_job| to be executed in Accelerator and returns true
+ // on success. The caller may then call Execute() on the job to run it.
+ virtual bool PrepareEncodeJob(EncodeJob* encode_job) = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_VAAPI_ACCELERATED_VIDEO_ENCODER_H_
diff --git a/chromium/media/gpu/vaapi/h264_encoder.cc b/chromium/media/gpu/vaapi/h264_encoder.cc
new file mode 100644
index 00000000000..d865ac8b31e
--- /dev/null
+++ b/chromium/media/gpu/vaapi/h264_encoder.cc
@@ -0,0 +1,482 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/vaapi/h264_encoder.h"
+
+#include "base/bits.h"
+#include "base/stl_util.h"
+
+#define DVLOGF(level) DVLOG(level) << __func__ << "(): "
+
+namespace media {
+namespace {
+// An IDR every 2048 frames, an I frame every 256 and no B frames.
+// We choose IDR period to equal MaxFrameNum so it must be a power of 2.
+constexpr int kIDRPeriod = 2048;
+constexpr int kIPeriod = 256;
+constexpr int kIPPeriod = 1;
+
+constexpr int kDefaultQP = 26;
+
+// Subjectively chosen bitrate window size for rate control, in ms.
+constexpr int kCPBWindowSizeMs = 1500;
+
+// Subjectively chosen.
+constexpr size_t kMaxNumReferenceFrames = 4;
+constexpr size_t kMaxRefIdxL0Size = kMaxNumReferenceFrames;
+constexpr size_t kMaxRefIdxL1Size = 0;
+
+// HRD parameters (ch. E.2.2 in H264 spec).
+constexpr int kBitRateScale = 0; // bit_rate_scale for SPS HRD parameters.
+constexpr int kCPBSizeScale = 0; // cpb_size_scale for SPS HRD parameters.
+
+// Default to H264 profile 4.1.
+constexpr int kDefaultLevelIDC = 41;
+
+// 4:2:0
+constexpr int kChromaFormatIDC = 1;
+} // namespace
+
+H264Encoder::EncodeParams::EncodeParams()
+ : idr_period_frames(kIDRPeriod),
+ i_period_frames(kIPeriod),
+ ip_period_frames(kIPPeriod),
+ bitrate_bps(0),
+ framerate(0),
+ cpb_window_size_ms(kCPBWindowSizeMs),
+ cpb_size_bits(0),
+ qp(kDefaultQP) {}
+
+H264Encoder::Accelerator::~Accelerator() = default;
+
+H264Encoder::H264Encoder(std::unique_ptr<Accelerator> accelerator)
+ : packed_sps_(new H264BitstreamBuffer()),
+ packed_pps_(new H264BitstreamBuffer()),
+ accelerator_(std::move(accelerator)) {}
+
+H264Encoder::~H264Encoder() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+}
+
+bool H264Encoder::Initialize(const gfx::Size& visible_size,
+ VideoCodecProfile profile,
+ uint32_t initial_bitrate,
+ uint32_t initial_framerate) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ switch (profile) {
+ case H264PROFILE_BASELINE:
+ case H264PROFILE_MAIN:
+ case H264PROFILE_HIGH:
+ break;
+
+ default:
+ NOTIMPLEMENTED() << "Unsupported profile " << GetProfileName(profile);
+ return false;
+ }
+
+ DCHECK(!visible_size.IsEmpty());
+ visible_size_ = visible_size;
+ // For 4:2:0, the pixel sizes have to be even.
+ DCHECK_EQ(visible_size_.width() % 2, 0);
+ DCHECK_EQ(visible_size_.height() % 2, 0);
+ constexpr size_t kH264MacroblockSizeInPixels = 16;
+ coded_size_ = gfx::Size(
+ base::bits::Align(visible_size_.width(), kH264MacroblockSizeInPixels),
+ base::bits::Align(visible_size_.height(), kH264MacroblockSizeInPixels));
+ mb_width_ = coded_size_.width() / kH264MacroblockSizeInPixels;
+ mb_height_ = coded_size_.height() / kH264MacroblockSizeInPixels;
+
+ profile_ = profile;
+ if (!UpdateRates(initial_bitrate, initial_framerate))
+ return false;
+
+ UpdateSPS();
+ UpdatePPS();
+
+ return true;
+}
+
+gfx::Size H264Encoder::GetCodedSize() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(!coded_size_.IsEmpty());
+
+ return coded_size_;
+}
+
+size_t H264Encoder::GetBitstreamBufferSize() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(!coded_size_.IsEmpty());
+
+ return coded_size_.GetArea();
+}
+
+size_t H264Encoder::GetMaxNumOfRefFrames() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ return kMaxNumReferenceFrames;
+}
+
+bool H264Encoder::PrepareEncodeJob(EncodeJob* encode_job) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ scoped_refptr<H264Picture> pic = accelerator_->GetPicture(encode_job);
+ DCHECK(pic);
+
+ if (encode_job->IsKeyframeRequested() || encoding_parameters_changed_)
+ frame_num_ = 0;
+
+ pic->frame_num = frame_num_++;
+ frame_num_ %= curr_params_.idr_period_frames;
+
+ if (pic->frame_num == 0) {
+ pic->idr = true;
+ // H264 spec mandates idr_pic_id to differ between two consecutive IDRs.
+ idr_pic_id_ ^= 1;
+ pic->idr_pic_id = idr_pic_id_;
+ ref_pic_list0_.clear();
+
+ encoding_parameters_changed_ = false;
+ encode_job->ProduceKeyframe();
+ }
+
+ if (pic->frame_num % curr_params_.i_period_frames == 0)
+ pic->type = H264SliceHeader::kISlice;
+ else
+ pic->type = H264SliceHeader::kPSlice;
+
+ if (curr_params_.ip_period_frames != 1) {
+ NOTIMPLEMENTED() << "B frames not implemented";
+ return false;
+ }
+
+ pic->ref = true;
+ pic->pic_order_cnt = pic->frame_num * 2;
+ pic->top_field_order_cnt = pic->pic_order_cnt;
+ pic->pic_order_cnt_lsb = pic->pic_order_cnt;
+
+ DVLOGF(4) << "Starting a new frame, type: " << pic->type
+ << (encode_job->IsKeyframeRequested() ? " (keyframe)" : "")
+ << " frame_num: " << pic->frame_num
+ << " POC: " << pic->pic_order_cnt;
+
+ if (!accelerator_->SubmitFrameParameters(
+ encode_job, curr_params_, current_sps_, current_pps_, pic,
+ ref_pic_list0_, std::list<scoped_refptr<H264Picture>>())) {
+ DVLOGF(1) << "Failed submitting frame parameters";
+ return false;
+ }
+
+ if (pic->type == H264SliceHeader::kISlice) {
+ if (!accelerator_->SubmitPackedHeaders(encode_job, packed_sps_,
+ packed_pps_)) {
+ DVLOGF(1) << "Failed submitting keyframe headers";
+ return false;
+ }
+ }
+
+ for (const auto& ref_pic : ref_pic_list0_)
+ encode_job->AddReferencePicture(ref_pic);
+
+ // Store the picture on the list of reference pictures and keep the list
+ // below maximum size, dropping oldest references.
+ if (pic->ref) {
+ ref_pic_list0_.push_front(pic);
+ const size_t max_num_ref_frames =
+ base::checked_cast<size_t>(current_sps_.max_num_ref_frames);
+ while (ref_pic_list0_.size() > max_num_ref_frames)
+ ref_pic_list0_.pop_back();
+ }
+
+ return true;
+}
+
+bool H264Encoder::UpdateRates(uint32_t bitrate, uint32_t framerate) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (bitrate == 0 || framerate == 0)
+ return false;
+
+ if (curr_params_.bitrate_bps == bitrate &&
+ curr_params_.framerate == framerate) {
+ return true;
+ }
+
+ curr_params_.bitrate_bps = bitrate;
+ curr_params_.framerate = framerate;
+ curr_params_.cpb_size_bits =
+ curr_params_.bitrate_bps * curr_params_.cpb_window_size_ms / 1000;
+
+ UpdateSPS();
+ return true;
+}
+
+void H264Encoder::UpdateSPS() {
+ memset(&current_sps_, 0, sizeof(H264SPS));
+
+ // Spec A.2 and A.3.
+ switch (profile_) {
+ case H264PROFILE_BASELINE:
+ // Due to https://crbug.com/345569, we don't distinguish between
+ // constrained and non-constrained baseline profiles. Since many codecs
+ // can't do non-constrained, and constrained is usually what we mean (and
+ // it's a subset of non-constrained), default to it.
+ current_sps_.profile_idc = H264SPS::kProfileIDCBaseline;
+ current_sps_.constraint_set0_flag = true;
+ break;
+ case H264PROFILE_MAIN:
+ current_sps_.profile_idc = H264SPS::kProfileIDCMain;
+ current_sps_.constraint_set1_flag = true;
+ break;
+ case H264PROFILE_HIGH:
+ current_sps_.profile_idc = H264SPS::kProfileIDCHigh;
+ break;
+ default:
+ NOTREACHED();
+ return;
+ }
+
+ current_sps_.level_idc = kDefaultLevelIDC;
+ current_sps_.seq_parameter_set_id = 0;
+ current_sps_.chroma_format_idc = kChromaFormatIDC;
+
+ DCHECK_GE(curr_params_.idr_period_frames, 16u)
+ << "idr_period_frames must be >= 16";
+ current_sps_.log2_max_frame_num_minus4 =
+ base::bits::Log2Ceiling(curr_params_.idr_period_frames) - 4;
+ current_sps_.pic_order_cnt_type = 0;
+ current_sps_.log2_max_pic_order_cnt_lsb_minus4 =
+ base::bits::Log2Ceiling(curr_params_.idr_period_frames * 2) - 4;
+ current_sps_.max_num_ref_frames = kMaxRefIdxL0Size;
+
+ current_sps_.frame_mbs_only_flag = true;
+
+ DCHECK_GT(mb_width_, 0u);
+ DCHECK_GT(mb_height_, 0u);
+ current_sps_.pic_width_in_mbs_minus1 = mb_width_ - 1;
+ DCHECK(current_sps_.frame_mbs_only_flag);
+ current_sps_.pic_height_in_map_units_minus1 = mb_height_ - 1;
+
+ if (visible_size_ != coded_size_) {
+ // Visible size differs from coded size, fill crop information.
+ current_sps_.frame_cropping_flag = true;
+ DCHECK(!current_sps_.separate_colour_plane_flag);
+ // Spec table 6-1. Only 4:2:0 for now.
+ DCHECK_EQ(current_sps_.chroma_format_idc, 1);
+ // Spec 7.4.2.1.1. Crop is in crop units, which is 2 pixels for 4:2:0.
+ const unsigned int crop_unit_x = 2;
+ const unsigned int crop_unit_y = 2 * (2 - current_sps_.frame_mbs_only_flag);
+ current_sps_.frame_crop_left_offset = 0;
+ current_sps_.frame_crop_right_offset =
+ (coded_size_.width() - visible_size_.width()) / crop_unit_x;
+ current_sps_.frame_crop_top_offset = 0;
+ current_sps_.frame_crop_bottom_offset =
+ (coded_size_.height() - visible_size_.height()) / crop_unit_y;
+ }
+
+ current_sps_.vui_parameters_present_flag = true;
+ current_sps_.timing_info_present_flag = true;
+ current_sps_.num_units_in_tick = 1;
+ current_sps_.time_scale =
+ curr_params_.framerate * 2; // See equation D-2 in spec.
+ current_sps_.fixed_frame_rate_flag = true;
+
+ current_sps_.nal_hrd_parameters_present_flag = true;
+ // H.264 spec ch. E.2.2.
+ current_sps_.cpb_cnt_minus1 = 0;
+ current_sps_.bit_rate_scale = kBitRateScale;
+ current_sps_.cpb_size_scale = kCPBSizeScale;
+ current_sps_.bit_rate_value_minus1[0] =
+ (curr_params_.bitrate_bps >>
+ (kBitRateScale + H264SPS::kBitRateScaleConstantTerm)) -
+ 1;
+ current_sps_.cpb_size_value_minus1[0] =
+ (curr_params_.cpb_size_bits >>
+ (kCPBSizeScale + H264SPS::kCPBSizeScaleConstantTerm)) -
+ 1;
+ current_sps_.cbr_flag[0] = true;
+ current_sps_.initial_cpb_removal_delay_length_minus_1 =
+ H264SPS::kDefaultInitialCPBRemovalDelayLength - 1;
+ current_sps_.cpb_removal_delay_length_minus1 =
+ H264SPS::kDefaultInitialCPBRemovalDelayLength - 1;
+ current_sps_.dpb_output_delay_length_minus1 =
+ H264SPS::kDefaultDPBOutputDelayLength - 1;
+ current_sps_.time_offset_length = H264SPS::kDefaultTimeOffsetLength;
+ current_sps_.low_delay_hrd_flag = false;
+
+ GeneratePackedSPS();
+ encoding_parameters_changed_ = true;
+}
+
+void H264Encoder::UpdatePPS() {
+ memset(&current_pps_, 0, sizeof(H264PPS));
+
+ current_pps_.seq_parameter_set_id = current_sps_.seq_parameter_set_id;
+ current_pps_.pic_parameter_set_id = 0;
+
+ current_pps_.entropy_coding_mode_flag =
+ current_sps_.profile_idc >= H264SPS::kProfileIDCMain;
+
+ DCHECK_GT(kMaxRefIdxL0Size, 0u);
+ current_pps_.num_ref_idx_l0_default_active_minus1 = kMaxRefIdxL0Size - 1;
+ current_pps_.num_ref_idx_l1_default_active_minus1 =
+ kMaxRefIdxL1Size > 0 ? kMaxRefIdxL1Size - 1 : kMaxRefIdxL1Size;
+ DCHECK_LE(curr_params_.qp, 51);
+ current_pps_.pic_init_qp_minus26 = curr_params_.qp - 26;
+ current_pps_.deblocking_filter_control_present_flag = true;
+ current_pps_.transform_8x8_mode_flag =
+ (current_sps_.profile_idc == H264SPS::kProfileIDCHigh);
+
+ GeneratePackedPPS();
+ encoding_parameters_changed_ = true;
+}
+
+void H264Encoder::GeneratePackedSPS() {
+ packed_sps_->Reset();
+
+ packed_sps_->BeginNALU(H264NALU::kSPS, 3);
+
+ packed_sps_->AppendBits(8, current_sps_.profile_idc);
+ packed_sps_->AppendBool(current_sps_.constraint_set0_flag);
+ packed_sps_->AppendBool(current_sps_.constraint_set1_flag);
+ packed_sps_->AppendBool(current_sps_.constraint_set2_flag);
+ packed_sps_->AppendBool(current_sps_.constraint_set3_flag);
+ packed_sps_->AppendBool(current_sps_.constraint_set4_flag);
+ packed_sps_->AppendBool(current_sps_.constraint_set5_flag);
+ packed_sps_->AppendBits(2, 0); // reserved_zero_2bits
+ packed_sps_->AppendBits(8, current_sps_.level_idc);
+ packed_sps_->AppendUE(current_sps_.seq_parameter_set_id);
+
+ if (current_sps_.profile_idc == H264SPS::kProfileIDCHigh) {
+ packed_sps_->AppendUE(current_sps_.chroma_format_idc);
+ if (current_sps_.chroma_format_idc == 3)
+ packed_sps_->AppendBool(current_sps_.separate_colour_plane_flag);
+ packed_sps_->AppendUE(current_sps_.bit_depth_luma_minus8);
+ packed_sps_->AppendUE(current_sps_.bit_depth_chroma_minus8);
+ packed_sps_->AppendBool(current_sps_.qpprime_y_zero_transform_bypass_flag);
+ packed_sps_->AppendBool(current_sps_.seq_scaling_matrix_present_flag);
+ CHECK(!current_sps_.seq_scaling_matrix_present_flag);
+ }
+
+ packed_sps_->AppendUE(current_sps_.log2_max_frame_num_minus4);
+ packed_sps_->AppendUE(current_sps_.pic_order_cnt_type);
+ if (current_sps_.pic_order_cnt_type == 0)
+ packed_sps_->AppendUE(current_sps_.log2_max_pic_order_cnt_lsb_minus4);
+ else if (current_sps_.pic_order_cnt_type == 1)
+ NOTREACHED();
+
+ packed_sps_->AppendUE(current_sps_.max_num_ref_frames);
+ packed_sps_->AppendBool(current_sps_.gaps_in_frame_num_value_allowed_flag);
+ packed_sps_->AppendUE(current_sps_.pic_width_in_mbs_minus1);
+ packed_sps_->AppendUE(current_sps_.pic_height_in_map_units_minus1);
+
+ packed_sps_->AppendBool(current_sps_.frame_mbs_only_flag);
+ if (!current_sps_.frame_mbs_only_flag)
+ packed_sps_->AppendBool(current_sps_.mb_adaptive_frame_field_flag);
+
+ packed_sps_->AppendBool(current_sps_.direct_8x8_inference_flag);
+
+ packed_sps_->AppendBool(current_sps_.frame_cropping_flag);
+ if (current_sps_.frame_cropping_flag) {
+ packed_sps_->AppendUE(current_sps_.frame_crop_left_offset);
+ packed_sps_->AppendUE(current_sps_.frame_crop_right_offset);
+ packed_sps_->AppendUE(current_sps_.frame_crop_top_offset);
+ packed_sps_->AppendUE(current_sps_.frame_crop_bottom_offset);
+ }
+
+ packed_sps_->AppendBool(current_sps_.vui_parameters_present_flag);
+ if (current_sps_.vui_parameters_present_flag) {
+ packed_sps_->AppendBool(false); // aspect_ratio_info_present_flag
+ packed_sps_->AppendBool(false); // overscan_info_present_flag
+ packed_sps_->AppendBool(false); // video_signal_type_present_flag
+ packed_sps_->AppendBool(false); // chroma_loc_info_present_flag
+
+ packed_sps_->AppendBool(current_sps_.timing_info_present_flag);
+ if (current_sps_.timing_info_present_flag) {
+ packed_sps_->AppendBits(32, current_sps_.num_units_in_tick);
+ packed_sps_->AppendBits(32, current_sps_.time_scale);
+ packed_sps_->AppendBool(current_sps_.fixed_frame_rate_flag);
+ }
+
+ packed_sps_->AppendBool(current_sps_.nal_hrd_parameters_present_flag);
+ if (current_sps_.nal_hrd_parameters_present_flag) {
+ packed_sps_->AppendUE(current_sps_.cpb_cnt_minus1);
+ packed_sps_->AppendBits(4, current_sps_.bit_rate_scale);
+ packed_sps_->AppendBits(4, current_sps_.cpb_size_scale);
+ CHECK_LT(base::checked_cast<size_t>(current_sps_.cpb_cnt_minus1),
+ base::size(current_sps_.bit_rate_value_minus1));
+ for (int i = 0; i <= current_sps_.cpb_cnt_minus1; ++i) {
+ packed_sps_->AppendUE(current_sps_.bit_rate_value_minus1[i]);
+ packed_sps_->AppendUE(current_sps_.cpb_size_value_minus1[i]);
+ packed_sps_->AppendBool(current_sps_.cbr_flag[i]);
+ }
+ packed_sps_->AppendBits(
+ 5, current_sps_.initial_cpb_removal_delay_length_minus_1);
+ packed_sps_->AppendBits(5, current_sps_.cpb_removal_delay_length_minus1);
+ packed_sps_->AppendBits(5, current_sps_.dpb_output_delay_length_minus1);
+ packed_sps_->AppendBits(5, current_sps_.time_offset_length);
+ }
+
+ packed_sps_->AppendBool(false); // vcl_hrd_parameters_flag
+ if (current_sps_.nal_hrd_parameters_present_flag)
+ packed_sps_->AppendBool(current_sps_.low_delay_hrd_flag);
+
+ packed_sps_->AppendBool(false); // pic_struct_present_flag
+ packed_sps_->AppendBool(true); // bitstream_restriction_flag
+
+ packed_sps_->AppendBool(false); // motion_vectors_over_pic_boundaries_flag
+ packed_sps_->AppendUE(2); // max_bytes_per_pic_denom
+ packed_sps_->AppendUE(1); // max_bits_per_mb_denom
+ packed_sps_->AppendUE(16); // log2_max_mv_length_horizontal
+ packed_sps_->AppendUE(16); // log2_max_mv_length_vertical
+
+ // Explicitly set max_num_reorder_frames to 0 to allow the decoder to
+ // output pictures early.
+ packed_sps_->AppendUE(0); // max_num_reorder_frames
+
+ // The value of max_dec_frame_buffering shall be greater than or equal to
+ // max_num_ref_frames.
+ const unsigned int max_dec_frame_buffering =
+ current_sps_.max_num_ref_frames;
+ packed_sps_->AppendUE(max_dec_frame_buffering);
+ }
+
+ packed_sps_->FinishNALU();
+}
+
+void H264Encoder::GeneratePackedPPS() {
+ packed_pps_->Reset();
+
+ packed_pps_->BeginNALU(H264NALU::kPPS, 3);
+
+ packed_pps_->AppendUE(current_pps_.pic_parameter_set_id);
+ packed_pps_->AppendUE(current_pps_.seq_parameter_set_id);
+ packed_pps_->AppendBool(current_pps_.entropy_coding_mode_flag);
+ packed_pps_->AppendBool(
+ current_pps_.bottom_field_pic_order_in_frame_present_flag);
+ CHECK_EQ(current_pps_.num_slice_groups_minus1, 0);
+ packed_pps_->AppendUE(current_pps_.num_slice_groups_minus1);
+
+ packed_pps_->AppendUE(current_pps_.num_ref_idx_l0_default_active_minus1);
+ packed_pps_->AppendUE(current_pps_.num_ref_idx_l1_default_active_minus1);
+
+ packed_pps_->AppendBool(current_pps_.weighted_pred_flag);
+ packed_pps_->AppendBits(2, current_pps_.weighted_bipred_idc);
+
+ packed_pps_->AppendSE(current_pps_.pic_init_qp_minus26);
+ packed_pps_->AppendSE(current_pps_.pic_init_qs_minus26);
+ packed_pps_->AppendSE(current_pps_.chroma_qp_index_offset);
+
+ packed_pps_->AppendBool(current_pps_.deblocking_filter_control_present_flag);
+ packed_pps_->AppendBool(current_pps_.constrained_intra_pred_flag);
+ packed_pps_->AppendBool(current_pps_.redundant_pic_cnt_present_flag);
+
+ packed_pps_->AppendBool(current_pps_.transform_8x8_mode_flag);
+ packed_pps_->AppendBool(current_pps_.pic_scaling_matrix_present_flag);
+ DCHECK(!current_pps_.pic_scaling_matrix_present_flag);
+ packed_pps_->AppendSE(current_pps_.second_chroma_qp_index_offset);
+
+ packed_pps_->FinishNALU();
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/vaapi/h264_encoder.h b/chromium/media/gpu/vaapi/h264_encoder.h
new file mode 100644
index 00000000000..83e9613c618
--- /dev/null
+++ b/chromium/media/gpu/vaapi/h264_encoder.h
@@ -0,0 +1,163 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_VAAPI_H264_ENCODER_H_
+#define MEDIA_GPU_VAAPI_H264_ENCODER_H_
+
+#include <stddef.h>
+#include <list>
+
+#include "base/macros.h"
+#include "base/sequence_checker.h"
+#include "media/filters/h264_bitstream_buffer.h"
+#include "media/gpu/h264_dpb.h"
+#include "media/gpu/vaapi/accelerated_video_encoder.h"
+
+namespace media {
+
+// This class provides an H264 encoder functionality, generating stream headers,
+// managing encoder state, reference frames, and other codec parameters, while
+// requiring support from an Accelerator to encode frame data based on these
+// parameters.
+//
+// This class must be created, called and destroyed on a single sequence.
+//
+// Names used in documentation of this class refer directly to naming used
+// in the H.264 specification (http://www.itu.int/rec/T-REC-H.264).
+class H264Encoder : public AcceleratedVideoEncoder {
+ public:
+ struct EncodeParams {
+ EncodeParams();
+
+ // Produce an IDR at least once per this many frames.
+ // Must be >= 16 (per spec).
+ size_t idr_period_frames;
+
+ // Produce an I frame at least once per this many frames.
+ size_t i_period_frames;
+
+ // How often do we need to have either an I or a P frame in the stream.
+ // A period of 1 implies no B frames.
+ size_t ip_period_frames;
+
+ // Bitrate in bps.
+ uint32_t bitrate_bps;
+
+ // Framerate in FPS.
+ uint32_t framerate;
+
+ // Bitrate window size in ms.
+ unsigned int cpb_window_size_ms;
+
+ // Bitrate window size in bits.
+ unsigned int cpb_size_bits;
+
+ // Quantization parameter.
+ int qp;
+ };
+
+ // An accelerator interface. The client must provide an appropriate
+ // implementation on creation.
+ class Accelerator {
+ public:
+ Accelerator() = default;
+ virtual ~Accelerator();
+
+ // Returns the H264Picture to be used as output for |job|.
+ virtual scoped_refptr<H264Picture> GetPicture(EncodeJob* job) = 0;
+
+ // Initializes |job| to insert the provided |packed_sps| and |packed_pps|
+ // before the frame produced by |job| into the output video stream.
+ virtual bool SubmitPackedHeaders(
+ EncodeJob* job,
+ scoped_refptr<H264BitstreamBuffer> packed_sps,
+ scoped_refptr<H264BitstreamBuffer> packed_pps) = 0;
+
+ // Initializes |job| to use the provided |sps|, |pps|, |encode_params|, and
+ // encoded picture parameters in |pic|, as well as |ref_pic_list0| and
+ // |ref_pic_list1| as the corresponding H264 reference frame lists
+ // (RefPicList0 and RefPicList1 per spec) for the frame to be produced.
+ virtual bool SubmitFrameParameters(
+ EncodeJob* job,
+ const H264Encoder::EncodeParams& encode_params,
+ const media::H264SPS& sps,
+ const media::H264PPS& pps,
+ scoped_refptr<H264Picture> pic,
+ const std::list<scoped_refptr<H264Picture>>& ref_pic_list0,
+ const std::list<scoped_refptr<H264Picture>>& ref_pic_list1) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Accelerator);
+ };
+
+ explicit H264Encoder(std::unique_ptr<Accelerator> accelerator);
+ ~H264Encoder() override;
+
+ // AcceleratedVideoEncoder implementation.
+ bool Initialize(const gfx::Size& visible_size,
+ VideoCodecProfile profile,
+ uint32_t initial_bitrate,
+ uint32_t initial_framerate) override;
+ bool UpdateRates(uint32_t bitrate, uint32_t framerate) override;
+ gfx::Size GetCodedSize() const override;
+ size_t GetBitstreamBufferSize() const override;
+ size_t GetMaxNumOfRefFrames() const override;
+ bool PrepareEncodeJob(EncodeJob* encode_job) override;
+
+ private:
+ // Fill current_sps_ and current_pps_ with current encoding state parameters.
+ void UpdateSPS();
+ void UpdatePPS();
+
+ // Generate packed SPS and PPS in packed_sps_ and packed_pps_, using values
+ // in current_sps_ and current_pps_.
+ void GeneratePackedSPS();
+ void GeneratePackedPPS();
+
+ // Current SPS, PPS and their packed versions. Packed versions are NALUs
+ // in AnnexB format *without* emulation prevention three-byte sequences
+ // (those are expected to be added by the client as needed).
+ media::H264SPS current_sps_;
+ scoped_refptr<media::H264BitstreamBuffer> packed_sps_;
+ media::H264PPS current_pps_;
+ scoped_refptr<media::H264BitstreamBuffer> packed_pps_;
+
+ // Current encoding parameters being used.
+ EncodeParams curr_params_;
+
+ // H264 profile currently used.
+ media::VideoCodecProfile profile_ = VIDEO_CODEC_PROFILE_UNKNOWN;
+
+ // Current visible and coded sizes in pixels.
+ gfx::Size visible_size_;
+ gfx::Size coded_size_;
+
+ // Width/height in macroblocks.
+ unsigned int mb_width_ = 0;
+ unsigned int mb_height_ = 0;
+
+ // frame_num (spec section 7.4.3) to be used for the next frame.
+ unsigned int frame_num_ = 0;
+
+ // idr_pic_id (spec section 7.4.3) to be used for the next frame.
+ unsigned int idr_pic_id_ = 0;
+
+ // True if encoding parameters have changed and we need to submit a keyframe
+ // with updated parameters.
+ bool encoding_parameters_changed_ = false;
+
+ // Currently active reference frames.
+ // RefPicList0 per spec (spec section 8.2.4.2).
+ std::list<scoped_refptr<H264Picture>> ref_pic_list0_;
+
+ // Accelerator instance used to prepare encode jobs.
+ const std::unique_ptr<Accelerator> accelerator_;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+ DISALLOW_COPY_AND_ASSIGN(H264Encoder);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_VAAPI_H264_ENCODER_H_
diff --git a/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc
index 903890ac40f..89bfbb93a23 100644
--- a/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc
@@ -24,12 +24,12 @@ namespace {
// from ITU-T REC H.264 spec
// section 8.5.6
// "Inverse scanning process for 4x4 transform coefficients and scaling lists"
-static const int kZigzagScan4x4[16] = {0, 1, 4, 8, 5, 2, 3, 6,
- 9, 12, 13, 10, 7, 11, 14, 15};
+static constexpr int kZigzagScan4x4[16] = {0, 1, 4, 8, 5, 2, 3, 6,
+ 9, 12, 13, 10, 7, 11, 14, 15};
// section 8.5.7
// "Inverse scanning process for 8x8 transform coefficients and scaling lists"
-static const uint8_t kZigzagScan8x8[64] = {
+static constexpr uint8_t kZigzagScan8x8[64] = {
0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51,
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_decoder_unittest.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_decoder_unittest.cc
index 411b9257a39..35b7389b789 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_decoder_unittest.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_decoder_unittest.cc
@@ -26,8 +26,8 @@
namespace media {
namespace {
-const char* kTestFilename = "pixel-1280x720.jpg";
-const char* kExpectedMd5Sum = "6e9e1716073c9a9a1282e3f0e0dab743";
+constexpr const char* kTestFilename = "pixel-1280x720.jpg";
+constexpr const char* kExpectedMd5Sum = "6e9e1716073c9a9a1282e3f0e0dab743";
void LogOnError() {
LOG(FATAL) << "Oh noes! Decoder failed";
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
index 430331f2dfb..caf6ff832a6 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
@@ -30,7 +30,7 @@ namespace {
// JPEG format uses 2 bytes to denote the size of a segment, and the size
// includes the 2 bytes used for specifying it. Therefore, maximum data size
// allowed is: 65535 - 2 = 65533.
-const size_t kMaxExifSizeAllowed = 65533;
+constexpr size_t kMaxExifSizeAllowed = 65533;
// UMA results that the VaapiJpegEncodeAccelerator class reports.
// These values are persisted to logs, and should therefore never be renumbered
@@ -154,9 +154,17 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeTask(
exif_buffer = static_cast<uint8_t*>(request->exif_shm->memory());
exif_buffer_size = request->exif_shm->size();
}
- if (!jpeg_encoder_->Encode(input_size, exif_buffer, exif_buffer_size,
- request->quality, va_surface_id,
- cached_output_buffer_id_)) {
+
+ // When the exif buffer contains a thumbnail, the VAAPI encoder would
+ // generate a corrupted JPEG. We can work around the problem by supplying an
+ // all-zero buffer with the same size and fill in the real exif buffer after
+ // encoding.
+ // TODO(shenghao): Remove this mechanism after b/79840013 is fixed.
+ std::vector<uint8_t> exif_buffer_dummy(exif_buffer_size, 0);
+ size_t exif_offset = 0;
+ if (!jpeg_encoder_->Encode(input_size, exif_buffer_dummy.data(),
+ exif_buffer_size, request->quality, va_surface_id,
+ cached_output_buffer_id_, &exif_offset)) {
VLOGF(1) << "Encode JPEG failed";
notify_error_cb_.Run(buffer_id, PLATFORM_FAILURE);
return;
@@ -173,6 +181,10 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeTask(
notify_error_cb_.Run(buffer_id, PLATFORM_FAILURE);
}
+ // Copy the real exif buffer into preserved space.
+ memcpy(static_cast<uint8_t*>(request->output_shm->memory()) + exif_offset,
+ exif_buffer, exif_buffer_size);
+
video_frame_ready_cb_.Run(buffer_id, encoded_size);
}
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc
index 538b873ca2d..d12a1b05729 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc
@@ -26,25 +26,25 @@ namespace media {
namespace {
// JPEG header only uses 2 bytes to represent width and height.
-const int kMaxDimension = 65535;
-const size_t kDctSize2 = 64;
-const size_t kNumDcRunSizeBits = 16;
-const size_t kNumAcRunSizeBits = 16;
-const size_t kNumDcCodeWordsHuffVal = 12;
-const size_t kNumAcCodeWordsHuffVal = 162;
-const size_t kJpegDefaultHeaderSize =
+constexpr int kMaxDimension = 65535;
+constexpr size_t kDctSize2 = 64;
+constexpr size_t kNumDcRunSizeBits = 16;
+constexpr size_t kNumAcRunSizeBits = 16;
+constexpr size_t kNumDcCodeWordsHuffVal = 12;
+constexpr size_t kNumAcCodeWordsHuffVal = 162;
+constexpr size_t kJpegDefaultHeaderSize =
67 + (kDctSize2 * 2) + (kNumDcRunSizeBits * 2) +
(kNumDcCodeWordsHuffVal * 2) + (kNumAcRunSizeBits * 2) +
(kNumAcCodeWordsHuffVal * 2);
-const size_t kJFIFApp0Size = 16;
+constexpr size_t kJFIFApp0Size = 16;
-const uint8_t kZigZag8x8[64] = {
+constexpr uint8_t kZigZag8x8[64] = {
0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63};
-const JpegQuantizationTable kDefaultQuantTable[2] = {
+constexpr JpegQuantizationTable kDefaultQuantTable[2] = {
// Table K.1 Luminance quantization table values.
{
true,
@@ -174,7 +174,8 @@ size_t FillJpegHeader(const gfx::Size& input_size,
const uint8_t* exif_buffer,
size_t exif_buffer_size,
int quality,
- uint8_t* header) {
+ uint8_t* header,
+ size_t* exif_offset) {
unsigned int width = input_size.width();
unsigned int height = input_size.height();
@@ -193,6 +194,7 @@ size_t FillJpegHeader(const gfx::Size& input_size,
static_cast<uint8_t>(exif_segment_size % 256)};
memcpy(header + idx, kAppSegment, sizeof(kAppSegment));
idx += sizeof(kAppSegment);
+ *exif_offset = idx;
memcpy(header + idx, exif_buffer, exif_buffer_size);
idx += exif_buffer_size;
} else {
@@ -369,7 +371,8 @@ bool VaapiJpegEncoder::Encode(const gfx::Size& input_size,
size_t exif_buffer_size,
int quality,
VASurfaceID surface_id,
- VABufferID output_buffer_id) {
+ VABufferID output_buffer_id,
+ size_t* exif_offset) {
DCHECK_NE(surface_id, VA_INVALID_SURFACE);
if (input_size.width() > kMaxDimension ||
@@ -421,8 +424,9 @@ bool VaapiJpegEncoder::Encode(const gfx::Size& input_size,
? kJpegDefaultHeaderSize + exif_buffer_size
: kJpegDefaultHeaderSize + kJFIFApp0Size;
jpeg_header.resize(jpeg_header_size);
- size_t length_in_bits = FillJpegHeader(
- input_size, exif_buffer, exif_buffer_size, quality, jpeg_header.data());
+ size_t length_in_bits =
+ FillJpegHeader(input_size, exif_buffer, exif_buffer_size, quality,
+ jpeg_header.data(), exif_offset);
VAEncPackedHeaderParameterBuffer header_param;
memset(&header_param, 0, sizeof(header_param));
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.h b/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.h
index f38516950b3..6aa53dc89e0 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.h
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.h
@@ -40,13 +40,15 @@ class MEDIA_GPU_EXPORT VaapiJpegEncoder {
// |output_buffer_id| is the ID of VA buffer that encoded image will be
// stored. The size of it should be at least as large as
// GetMaxCodedBufferSize().
+ // |exif_offset| is the offset where Exif data should be filled into.
// Return false on failure.
bool Encode(const gfx::Size& input_size,
const uint8_t* exif_buffer,
size_t exif_buffer_size,
int quality,
VASurfaceID surface_id,
- VABufferID output_buffer_id);
+ VABufferID output_buffer_id,
+ size_t* exif_offset);
// Gets the maximum possible encoded result size.
// |size| is the dimension of the YUV image to be encoded.
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
index a6d995a8484..cac05e1dd1c 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
@@ -448,6 +448,13 @@ void VaapiVideoDecodeAccelerator::DecodeTask() {
RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream",
PLATFORM_FAILURE, );
return;
+
+ case AcceleratedVideoDecoder::kNoKey:
+ NOTREACHED() << "Should not reach here unless this class accepts "
+ "encrypted streams.";
+ RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream",
+ PLATFORM_FAILURE, );
+ return;
}
}
}
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
index 7c2159da0ab..a2c88a3a305 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
@@ -22,7 +22,7 @@ using ::testing::Invoke;
using ::testing::Return;
using ::testing::TestWithParam;
using ::testing::ValuesIn;
-using ::testing::WithArgs;
+using ::testing::WithArg;
namespace media {
@@ -34,9 +34,15 @@ ACTION_P(RunClosure, closure) {
constexpr VideoCodecProfile kCodecProfiles[] = {H264PROFILE_MIN, VP8PROFILE_MIN,
VP9PROFILE_MIN};
-constexpr int kBitstreamId = 123;
+constexpr int32_t kBitstreamId = 123;
constexpr size_t kInputSize = 256;
+constexpr size_t kNumPictures = 2;
+const gfx::Size kPictureSize(64, 48);
+
+constexpr size_t kNewNumPictures = 3;
+const gfx::Size kNewPictureSize(64, 48);
+
} // namespace
class MockAcceleratedVideoDecoder : public AcceleratedVideoDecoder {
@@ -44,7 +50,9 @@ class MockAcceleratedVideoDecoder : public AcceleratedVideoDecoder {
MockAcceleratedVideoDecoder() = default;
~MockAcceleratedVideoDecoder() override = default;
- MOCK_METHOD3(SetStream, void(int32_t id, const uint8_t* ptr, size_t size));
+ MOCK_METHOD4(
+ SetStream,
+ void(int32_t id, const uint8_t* ptr, size_t size, const DecryptConfig*));
MOCK_METHOD0(Flush, bool());
MOCK_METHOD0(Reset, void());
MOCK_METHOD0(Decode, DecodeResult());
@@ -177,6 +185,112 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<VideoCodecProfile>,
run_loop.Run();
}
+ // Try and QueueInputBuffer()s, where we pretend that |mock_decoder_| requests
+ // to kAllocateNewSurfaces: |vda_| will ping us to ProvidePictureBuffers().
+ // If |expect_dismiss_picture_buffers| is signalled, then we expect as well
+ // that |vda_| will emit |num_picture_buffers_to_dismiss| DismissPictureBuffer
+ // calls.
+ void QueueInputBufferSequence(size_t num_pictures,
+ const gfx::Size& picture_size,
+ int32_t bitstream_id,
+ bool expect_dismiss_picture_buffers = false,
+ size_t num_picture_buffers_to_dismiss = 0) {
+ ::testing::InSequence s;
+ base::RunLoop run_loop;
+ base::Closure quit_closure = run_loop.QuitClosure();
+ EXPECT_CALL(*mock_decoder_, SetStream(_, _, kInputSize, nullptr));
+ EXPECT_CALL(*mock_decoder_, Decode())
+ .WillOnce(Return(AcceleratedVideoDecoder::kAllocateNewSurfaces));
+
+ EXPECT_CALL(*mock_decoder_, GetRequiredNumOfPictures())
+ .WillOnce(Return(num_pictures));
+ EXPECT_CALL(*mock_decoder_, GetPicSize()).WillOnce(Return(picture_size));
+ EXPECT_CALL(*mock_vaapi_wrapper_, DestroySurfaces());
+
+ if (expect_dismiss_picture_buffers) {
+ EXPECT_CALL(*this, DismissPictureBuffer(_))
+ .Times(num_picture_buffers_to_dismiss);
+ }
+
+ EXPECT_CALL(*this,
+ ProvidePictureBuffers(num_pictures, _, 1, picture_size, _))
+ .WillOnce(RunClosure(quit_closure));
+
+ base::SharedMemoryHandle handle;
+ handle = base::SharedMemory::DuplicateHandle(in_shm_->handle());
+ BitstreamBuffer bitstream_buffer(bitstream_id, handle, kInputSize);
+
+ QueueInputBuffer(bitstream_buffer);
+ run_loop.Run();
+ }
+
+ // Calls AssignPictureBuffers(), expecting the corresponding mock calls; we
+ // pretend |mock_decoder_| has kRanOutOfStreamData (i.e. it's finished
+ // decoding) and expect |vda_| to emit a NotifyEndOfBitstreamBuffer().
+ // QueueInputBufferSequence() must have been called beforehand.
+ void AssignPictureBuffersSequence(size_t num_pictures,
+ const gfx::Size& picture_size,
+ int32_t bitstream_id) {
+ ASSERT_TRUE(vda_.curr_input_buffer_)
+ << "QueueInputBuffer() should have been called";
+
+ ::testing::InSequence s;
+ base::RunLoop run_loop;
+ base::Closure quit_closure = run_loop.QuitClosure();
+
+ EXPECT_CALL(*mock_vaapi_wrapper_,
+ CreateSurfaces(_, picture_size, num_pictures, _))
+ .WillOnce(DoAll(
+ WithArg<3>(Invoke(
+ [num_pictures](std::vector<VASurfaceID>* va_surface_ids) {
+ va_surface_ids->resize(num_pictures);
+ })),
+ Return(true)));
+ EXPECT_CALL(*mock_vaapi_picture_factory_,
+ MockCreateVaapiPicture(mock_vaapi_wrapper_.get(), picture_size))
+ .Times(num_pictures);
+
+ EXPECT_CALL(*mock_decoder_, Decode())
+ .WillOnce(Return(AcceleratedVideoDecoder::kRanOutOfStreamData));
+ EXPECT_CALL(*this, NotifyEndOfBitstreamBuffer(bitstream_id))
+ .WillOnce(RunClosure(quit_closure));
+
+ const auto tex_target = mock_vaapi_picture_factory_->GetGLTextureTarget();
+ int irrelevant_id = 2;
+ std::vector<PictureBuffer> picture_buffers;
+ for (size_t picture = 0; picture < num_pictures; ++picture) {
+ // The picture buffer id, client id and service texture ids are
+ // arbitrarily chosen.
+ picture_buffers.push_back({irrelevant_id++, picture_size,
+ PictureBuffer::TextureIds{irrelevant_id++},
+ PictureBuffer::TextureIds{irrelevant_id++},
+ tex_target, PIXEL_FORMAT_XRGB});
+ }
+
+ AssignPictureBuffers(picture_buffers);
+ run_loop.Run();
+ }
+
+ // Calls QueueInputBuffer(); we instruct from |mock_decoder_| that it has
+ // kRanOutOfStreamData (i.e. it's finished decoding). This is a fast method
+ // because the Decode() is (almost) immediate.
+ void DecodeOneFrameFast(int32_t bitstream_id) {
+ base::RunLoop run_loop;
+ base::Closure quit_closure = run_loop.QuitClosure();
+ EXPECT_CALL(*mock_decoder_, SetStream(_, _, kInputSize, nullptr));
+ EXPECT_CALL(*mock_decoder_, Decode())
+ .WillOnce(Return(AcceleratedVideoDecoder::kRanOutOfStreamData));
+ EXPECT_CALL(*this, NotifyEndOfBitstreamBuffer(bitstream_id))
+ .WillOnce(RunClosure(quit_closure));
+
+ base::SharedMemoryHandle handle;
+ handle = base::SharedMemory::DuplicateHandle(in_shm_->handle());
+ BitstreamBuffer bitstream_buffer(bitstream_id, handle, kInputSize);
+
+ QueueInputBuffer(bitstream_buffer);
+ run_loop.Run();
+ }
+
// VideoDecodeAccelerator::Client methods.
MOCK_METHOD1(NotifyInitializationComplete, void(bool));
MOCK_METHOD5(
@@ -209,6 +323,22 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<VideoCodecProfile>,
DISALLOW_COPY_AND_ASSIGN(VaapiVideoDecodeAcceleratorTest);
};
+// Verify that it is possible to select DRM(egl) and TFP(glx) at runtime.
+TEST_P(VaapiVideoDecodeAcceleratorTest, SupportedPlatforms) {
+ EXPECT_EQ(VaapiPictureFactory::kVaapiImplementationNone,
+ mock_vaapi_picture_factory_->GetVaapiImplementation(
+ gl::kGLImplementationNone));
+ EXPECT_EQ(VaapiPictureFactory::kVaapiImplementationDrm,
+ mock_vaapi_picture_factory_->GetVaapiImplementation(
+ gl::kGLImplementationEGLGLES2));
+
+#if defined(USE_X11)
+ EXPECT_EQ(VaapiPictureFactory::kVaapiImplementationX11,
+ mock_vaapi_picture_factory_->GetVaapiImplementation(
+ gl::kGLImplementationDesktopGL));
+#endif
+}
+
// This test checks that QueueInputBuffer() fails when state is kUnitialized.
TEST_P(VaapiVideoDecodeAcceleratorTest, QueueInputBufferAndError) {
SetVdaStateToUnitialized();
@@ -230,7 +360,7 @@ TEST_P(VaapiVideoDecodeAcceleratorTest, QueueInputBufferAndDecodeError) {
base::RunLoop run_loop;
base::Closure quit_closure = run_loop.QuitClosure();
- EXPECT_CALL(*mock_decoder_, SetStream(_, _, kInputSize));
+ EXPECT_CALL(*mock_decoder_, SetStream(_, _, kInputSize, nullptr));
EXPECT_CALL(*mock_decoder_, Decode())
.WillOnce(Return(AcceleratedVideoDecoder::kDecodeError));
EXPECT_CALL(*this, NotifyError(VaapiVideoDecodeAccelerator::PLATFORM_FAILURE))
@@ -240,113 +370,46 @@ TEST_P(VaapiVideoDecodeAcceleratorTest, QueueInputBufferAndDecodeError) {
run_loop.Run();
}
-// Tests usual startup sequence: a BitstreamBuffer is enqueued for decode,
-// |vda_| asks for PictureBuffers, that we provide, and then the same Decode()
-// is tried again.
-TEST_P(VaapiVideoDecodeAcceleratorTest,
- QueueInputBufferAndAssignPictureBuffersAndDecode) {
- // Try and QueueInputBuffer(), |vda_| will ping us to ProvidePictureBuffers().
- const uint32_t kNumPictures = 2;
- const gfx::Size kPictureSize(64, 48);
- {
- base::SharedMemoryHandle handle;
- handle = base::SharedMemory::DuplicateHandle(in_shm_->handle());
- BitstreamBuffer bitstream_buffer(kBitstreamId, handle, kInputSize);
-
- base::RunLoop run_loop;
- base::Closure quit_closure = run_loop.QuitClosure();
- EXPECT_CALL(*mock_decoder_, SetStream(_, _, kInputSize));
- EXPECT_CALL(*mock_decoder_, Decode())
- .WillOnce(Return(AcceleratedVideoDecoder::kAllocateNewSurfaces));
-
- EXPECT_CALL(*mock_decoder_, GetRequiredNumOfPictures())
- .WillOnce(Return(kNumPictures));
- EXPECT_CALL(*mock_decoder_, GetPicSize()).WillOnce(Return(kPictureSize));
- EXPECT_CALL(*mock_vaapi_wrapper_, DestroySurfaces());
-
- EXPECT_CALL(*this,
- ProvidePictureBuffers(kNumPictures, _, 1, kPictureSize, _))
- .WillOnce(RunClosure(quit_closure));
-
- QueueInputBuffer(bitstream_buffer);
- run_loop.Run();
- }
- // AssignPictureBuffers() accordingly and expect another go at Decode().
- {
- base::RunLoop run_loop;
- base::Closure quit_closure = run_loop.QuitClosure();
-
- const uint32_t tex_target =
- mock_vaapi_picture_factory_->GetGLTextureTarget();
-
- // These client and service texture ids are arbitrarily chosen.
- const std::vector<PictureBuffer> kPictureBuffers(
- {{2, kPictureSize, PictureBuffer::TextureIds{0},
- PictureBuffer::TextureIds{1}, tex_target, PIXEL_FORMAT_XRGB},
- {3, kPictureSize, PictureBuffer::TextureIds{2},
- PictureBuffer::TextureIds{3}, tex_target, PIXEL_FORMAT_XRGB}});
- EXPECT_EQ(kPictureBuffers.size(), kNumPictures);
+// Verifies a single fast frame decoding.
+TEST_P(VaapiVideoDecodeAcceleratorTest, DecodeOneFrame) {
+ DecodeOneFrameFast(kBitstreamId);
- EXPECT_CALL(*mock_vaapi_wrapper_,
- CreateSurfaces(_, kPictureSize, kNumPictures, _))
- .WillOnce(DoAll(
- WithArgs<3>(Invoke([](std::vector<VASurfaceID>* va_surface_ids) {
- va_surface_ids->resize(kNumPictures);
- })),
- Return(true)));
- EXPECT_CALL(*mock_vaapi_picture_factory_,
- MockCreateVaapiPicture(mock_vaapi_wrapper_.get(), kPictureSize))
- .Times(2);
+ ResetSequence();
+}
- EXPECT_CALL(*mock_decoder_, Decode())
- .WillOnce(Return(AcceleratedVideoDecoder::kRanOutOfStreamData));
- EXPECT_CALL(*this, NotifyEndOfBitstreamBuffer(kBitstreamId))
- .WillOnce(RunClosure(quit_closure));
+// Tests usual startup sequence: a BitstreamBuffer is enqueued for decode;
+// |vda_| asks for PictureBuffers, that we provide via AssignPictureBuffers().
+TEST_P(VaapiVideoDecodeAcceleratorTest,
+ QueueInputBuffersAndAssignPictureBuffers) {
+ QueueInputBufferSequence(kNumPictures, kPictureSize, kBitstreamId);
- AssignPictureBuffers(kPictureBuffers);
- run_loop.Run();
- }
+ AssignPictureBuffersSequence(kNumPictures, kPictureSize, kBitstreamId);
ResetSequence();
}
-// Verifies that Decode() replying kRanOutOfStreamData (to signal it's finished)
-// rolls to a NotifyEndOfBitstreamBuffer().
-TEST_P(VaapiVideoDecodeAcceleratorTest, QueueInputBufferAndDecodeFinished) {
- base::SharedMemoryHandle handle;
- handle = base::SharedMemory::DuplicateHandle(in_shm_->handle());
- BitstreamBuffer bitstream_buffer(kBitstreamId, handle, kInputSize);
+// Tests a typical resolution change sequence: a BitstreamBuffer is enqueued;
+// |vda_| asks for PictureBuffers, which we then provide via AssignPictureBuffers().
+// We then try to enqueue a few BitstreamBuffers of a different resolution: we
+// then expect the old ones to be dismissed and new ones provided. This sequence
+// is purely ingress-wise, i.e. there's no decoded output checks.
+TEST_P(VaapiVideoDecodeAcceleratorTest,
+ QueueInputBuffersAndAssignPictureBuffersAndReallocate) {
+ QueueInputBufferSequence(kNumPictures, kPictureSize, kBitstreamId);
- {
- base::RunLoop run_loop;
- base::Closure quit_closure = run_loop.QuitClosure();
- EXPECT_CALL(*mock_decoder_, SetStream(_, _, kInputSize));
- EXPECT_CALL(*mock_decoder_, Decode())
- .WillOnce(Return(AcceleratedVideoDecoder::kRanOutOfStreamData));
- EXPECT_CALL(*this, NotifyEndOfBitstreamBuffer(kBitstreamId))
- .WillOnce(RunClosure(quit_closure));
+ AssignPictureBuffersSequence(kNumPictures, kPictureSize, kBitstreamId);
- QueueInputBuffer(bitstream_buffer);
- run_loop.Run();
- }
+ // Decode a few frames. This step is not necessary.
+ for (int i = 0; i < 5; ++i)
+ DecodeOneFrameFast(kBitstreamId + i);
- ResetSequence();
-}
+ QueueInputBufferSequence(kNewNumPictures, kNewPictureSize, kBitstreamId,
+ true /* expect_dismiss_picture_buffers */,
+ kNumPictures /* num_picture_buffers_to_dismiss */);
-// Verify that it is possible to select DRM(egl) and TFP(glx) at runtime.
-TEST_P(VaapiVideoDecodeAcceleratorTest, SupportedPlatforms) {
- EXPECT_EQ(VaapiPictureFactory::kVaapiImplementationNone,
- mock_vaapi_picture_factory_->GetVaapiImplementation(
- gl::kGLImplementationNone));
- EXPECT_EQ(VaapiPictureFactory::kVaapiImplementationDrm,
- mock_vaapi_picture_factory_->GetVaapiImplementation(
- gl::kGLImplementationEGLGLES2));
+ AssignPictureBuffersSequence(kNewNumPictures, kNewPictureSize, kBitstreamId);
-#if defined(USE_X11)
- EXPECT_EQ(VaapiPictureFactory::kVaapiImplementationX11,
- mock_vaapi_picture_factory_->GetVaapiImplementation(
- gl::kGLImplementationDesktopGL));
-#endif
+ ResetSequence();
}
INSTANTIATE_TEST_CASE_P(/* No prefix. */,
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
index 57bb2fcfdff..a1ebebe4012 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
@@ -6,22 +6,31 @@
#include <string.h>
+#include <algorithm>
#include <memory>
#include <utility>
#include <va/va.h>
+#include <va/va_enc_h264.h>
+#include <va/va_enc_vp8.h>
#include "base/bind.h"
#include "base/callback.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "media/base/bind_to_current_loop.h"
#include "media/gpu/h264_dpb.h"
#include "media/gpu/shared_memory_region.h"
+#include "media/gpu/vaapi/h264_encoder.h"
+#include "media/gpu/vaapi/vaapi_common.h"
+#include "media/gpu/vaapi/vp8_encoder.h"
+#include "media/gpu/vp8_reference_frame_vector.h"
#define VLOGF(level) VLOG(level) << __func__ << "(): "
#define DVLOGF(level) DVLOG(level) << __func__ << "(): "
@@ -37,69 +46,77 @@
namespace media {
namespace {
+// Minimum number of frames in flight for pipeline depth, adjust to this number
+// if encoder requests less.
+constexpr size_t kMinNumFramesInFlight = 4;
+
// Need 2 surfaces for each frame: one for input data and one for
// reconstructed picture, which is later used for reference.
-const size_t kMinSurfacesToEncode = 2;
+constexpr size_t kNumSurfacesPerFrame = 2;
+// TODO(owenlin): Adjust the value after b/71367113 is fixed
+constexpr size_t kExtraOutputBufferSizeInBytes = 32768;
-// Subjectively chosen.
-const size_t kNumInputBuffers = 4;
-const size_t kMaxNumReferenceFrames = 4;
+constexpr int kDefaultFramerate = 30;
-// TODO(owenlin): Adjust the value after b/71367113 is fixed
-const size_t kExtraOutputBufferSize = 32768; // bytes
-
-// We need up to kMaxNumReferenceFrames surfaces for reference, plus one
-// for input and one for encode (which will be added to the set of reference
-// frames for subsequent frames). Actual execution of HW encode is done
-// in parallel, and we want to process more frames in the meantime.
-// To have kNumInputBuffers in flight, we need a full set of reference +
-// encode surfaces (i.e. kMaxNumReferenceFrames + kMinSurfacesToEncode), and
-// (kNumInputBuffers - 1) of kMinSurfacesToEncode for the remaining frames
-// in flight.
-const size_t kNumSurfaces = kMaxNumReferenceFrames + kMinSurfacesToEncode +
- kMinSurfacesToEncode * (kNumInputBuffers - 1);
-
-// An IDR every 2048 frames, an I frame every 256 and no B frames.
-// We choose IDR period to equal MaxFrameNum so it must be a power of 2.
-const int kIDRPeriod = 2048;
-const int kIPeriod = 256;
-const int kIPPeriod = 1;
-
-const int kDefaultFramerate = 30;
-
-// HRD parameters (ch. E.2.2 in spec).
-const int kBitRateScale = 0; // bit_rate_scale for SPS HRD parameters.
-const int kCPBSizeScale = 0; // cpb_size_scale for SPS HRD parameters.
-
-const int kDefaultQP = 26;
-// All Intel codecs can do at least 4.1.
-const int kDefaultLevelIDC = 41;
-const int kChromaFormatIDC = 1; // 4:2:0
-
-// Arbitrarily chosen bitrate window size for rate control, in ms.
-const int kCPBWindowSizeMs = 1500;
+// Percentage of bitrate set to be targeted by the HW encoder.
+constexpr unsigned int kTargetBitratePercentage = 90;
// UMA errors that the VaapiVideoEncodeAccelerator class reports.
enum VAVEAEncoderFailure {
VAAPI_ERROR = 0,
VAVEA_ENCODER_FAILURES_MAX,
};
-}
-
-// Round |value| up to |alignment|, which must be a power of 2.
-static inline size_t RoundUpToPowerOf2(size_t value, size_t alignment) {
- // Check that |alignment| is a power of 2.
- DCHECK((alignment + (alignment - 1)) == (alignment | (alignment - 1)));
- return ((value + (alignment - 1)) & ~(alignment - 1));
-}
static void ReportToUMA(VAVEAEncoderFailure failure) {
UMA_HISTOGRAM_ENUMERATION("Media.VAVEA.EncoderFailure", failure,
VAVEA_ENCODER_FAILURES_MAX + 1);
}
+} // namespace
+
+// Encode job for one frame. Created when an input frame is awaiting and
+// enough resources are available to proceed. Once the job is prepared and
+// submitted to the hardware, it awaits on the |submitted_encode_jobs_| queue
+// for an output bitstream buffer to become available. Once one is ready,
+// the encoded bytes are downloaded to it, job resources are released
+// and become available for reuse.
+class VaapiEncodeJob : public AcceleratedVideoEncoder::EncodeJob {
+ public:
+ VaapiEncodeJob(scoped_refptr<VideoFrame> input_frame,
+ bool keyframe,
+ base::OnceClosure execute_cb,
+ scoped_refptr<VASurface> input_surface,
+ scoped_refptr<VASurface> reconstructed_surface,
+ VABufferID coded_buffer_id);
+
+ VaapiEncodeJob* AsVaapiEncodeJob() override { return this; }
+
+ VABufferID coded_buffer_id() const { return coded_buffer_id_; }
+ const scoped_refptr<VASurface> input_surface() const {
+ return input_surface_;
+ }
+ const scoped_refptr<VASurface> reconstructed_surface() const {
+ return reconstructed_surface_;
+ }
+
+ private:
+ ~VaapiEncodeJob() override = default;
+
+ // Input surface for video frame data.
+ const scoped_refptr<VASurface> input_surface_;
+
+ // Surface for the reconstructed picture, used for reference
+ // for subsequent frames.
+ const scoped_refptr<VASurface> reconstructed_surface_;
+
+ // Buffer that will contain the output bitstream data for this frame.
+ VABufferID coded_buffer_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(VaapiEncodeJob);
+};
+
struct VaapiVideoEncodeAccelerator::InputFrameRef {
- InputFrameRef(const scoped_refptr<VideoFrame>& frame, bool force_keyframe)
+ InputFrameRef(scoped_refptr<VideoFrame> frame, bool force_keyframe)
: frame(frame), force_keyframe(force_keyframe) {}
const scoped_refptr<VideoFrame> frame;
const bool force_keyframe;
@@ -117,40 +134,64 @@ VaapiVideoEncodeAccelerator::GetSupportedProfiles() {
return VaapiWrapper::GetSupportedEncodeProfiles();
}
-static unsigned int Log2OfPowerOf2(unsigned int x) {
- CHECK_GT(x, 0u);
- DCHECK_EQ(x & (x - 1), 0u);
+class VaapiVideoEncodeAccelerator::H264Accelerator
+ : public H264Encoder::Accelerator {
+ public:
+ explicit H264Accelerator(VaapiVideoEncodeAccelerator* vea) : vea_(vea) {}
+
+ ~H264Accelerator() override = default;
+
+ // H264Encoder::Accelerator implementation.
+ scoped_refptr<H264Picture> GetPicture(
+ AcceleratedVideoEncoder::EncodeJob* job) override;
+
+ bool SubmitPackedHeaders(
+ AcceleratedVideoEncoder::EncodeJob* job,
+ scoped_refptr<H264BitstreamBuffer> packed_sps,
+ scoped_refptr<H264BitstreamBuffer> packed_pps) override;
+
+ bool SubmitFrameParameters(
+ AcceleratedVideoEncoder::EncodeJob* job,
+ const H264Encoder::EncodeParams& encode_params,
+ const media::H264SPS& sps,
+ const media::H264PPS& pps,
+ scoped_refptr<H264Picture> pic,
+ const std::list<scoped_refptr<H264Picture>>& ref_pic_list0,
+ const std::list<scoped_refptr<H264Picture>>& ref_pic_list1) override;
+
+ private:
+ VaapiVideoEncodeAccelerator* const vea_;
+};
- int log = 0;
- while (x > 1) {
- x >>= 1;
- ++log;
- }
- return log;
-}
+class VaapiVideoEncodeAccelerator::VP8Accelerator
+ : public VP8Encoder::Accelerator {
+ public:
+ explicit VP8Accelerator(VaapiVideoEncodeAccelerator* vea) : vea_(vea) {}
+
+ ~VP8Accelerator() override = default;
+
+ // VP8Encoder::Accelerator implementation.
+ scoped_refptr<VP8Picture> GetPicture(
+ AcceleratedVideoEncoder::EncodeJob* job) override;
+
+ bool SubmitFrameParameters(
+ AcceleratedVideoEncoder::EncodeJob* job,
+ const media::VP8Encoder::EncodeParams& encode_params,
+ scoped_refptr<VP8Picture> pic,
+ const Vp8ReferenceFrameVector& ref_frames) override;
+
+ private:
+ VaapiVideoEncodeAccelerator* const vea_;
+};
VaapiVideoEncodeAccelerator::VaapiVideoEncodeAccelerator()
- : profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
- mb_width_(0),
- mb_height_(0),
+ : codec_(kUnknownVideoCodec),
output_buffer_byte_size_(0),
state_(kUninitialized),
- frame_num_(0),
- idr_pic_id_(0),
- bitrate_(0),
- framerate_(0),
- cpb_size_(0),
- encoding_parameters_changed_(false),
encoder_thread_("VAVEAEncoderThread"),
child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
weak_this_ptr_factory_(this) {
VLOGF(2);
- weak_this_ = weak_this_ptr_factory_.GetWeakPtr();
- max_ref_idx_l0_size_ = kMaxNumReferenceFrames;
- qp_ = kDefaultQP;
- idr_period_ = kIDRPeriod;
- i_period_ = kIPeriod;
- ip_period_ = kIPPeriod;
}
VaapiVideoEncodeAccelerator::~VaapiVideoEncodeAccelerator() {
@@ -178,6 +219,18 @@ bool VaapiVideoEncodeAccelerator::Initialize(
client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
client_ = client_ptr_factory_->GetWeakPtr();
+ codec_ = VideoCodecProfileToVideoCodec(output_profile);
+ if (codec_ != kCodecH264 && codec_ != kCodecVP8) {
+ DVLOGF(1) << "Unsupported profile: " << GetProfileName(output_profile);
+ return false;
+ }
+
+ if (format != PIXEL_FORMAT_I420) {
+ DVLOGF(1) << "Unsupported input format: "
+ << VideoPixelFormatToString(format);
+ return false;
+ }
+
const SupportedProfiles& profiles = GetSupportedProfiles();
auto profile = find_if(profiles.begin(), profiles.end(),
[output_profile](const SupportedProfile& profile) {
@@ -187,6 +240,7 @@ bool VaapiVideoEncodeAccelerator::Initialize(
VLOGF(1) << "Unsupported output profile " << GetProfileName(output_profile);
return false;
}
+
if (input_visible_size.width() > profile->max_resolution.width() ||
input_visible_size.height() > profile->max_resolution.height()) {
VLOGF(1) << "Input size too big: " << input_visible_size.ToString()
@@ -194,29 +248,10 @@ bool VaapiVideoEncodeAccelerator::Initialize(
return false;
}
- if (format != PIXEL_FORMAT_I420) {
- VLOGF(1) << "Unsupported input format: "
- << VideoPixelFormatToString(format);
- return false;
- }
-
- profile_ = output_profile;
- visible_size_ = input_visible_size;
- // 4:2:0 format has to be 2-aligned.
- DCHECK_EQ(visible_size_.width() % 2, 0);
- DCHECK_EQ(visible_size_.height() % 2, 0);
- coded_size_ = gfx::Size(RoundUpToPowerOf2(visible_size_.width(), 16),
- RoundUpToPowerOf2(visible_size_.height(), 16));
- mb_width_ = coded_size_.width() / 16;
- mb_height_ = coded_size_.height() / 16;
- output_buffer_byte_size_ = coded_size_.GetArea() + kExtraOutputBufferSize;
-
- UpdateRates(initial_bitrate, kDefaultFramerate);
-
vaapi_wrapper_ =
VaapiWrapper::CreateForVideoCodec(VaapiWrapper::kEncode, output_profile,
base::Bind(&ReportToUMA, VAAPI_ERROR));
- if (!vaapi_wrapper_.get()) {
+ if (!vaapi_wrapper_) {
VLOGF(1) << "Failed initializing VAAPI for profile "
<< GetProfileName(output_profile);
return false;
@@ -226,42 +261,74 @@ bool VaapiVideoEncodeAccelerator::Initialize(
VLOGF(1) << "Failed to start encoder thread";
return false;
}
+
encoder_thread_task_runner_ = encoder_thread_.task_runner();
- // Finish the remaining initialization on the encoder thread.
+ // Finish remaining initialization on the encoder thread.
encoder_thread_task_runner_->PostTask(
FROM_HERE, base::Bind(&VaapiVideoEncodeAccelerator::InitializeTask,
- base::Unretained(this)));
-
+ base::Unretained(this), input_visible_size,
+ output_profile, initial_bitrate));
return true;
}
-void VaapiVideoEncodeAccelerator::InitializeTask() {
+void VaapiVideoEncodeAccelerator::InitializeTask(const gfx::Size& visible_size,
+ VideoCodecProfile profile,
+ uint32_t bitrate) {
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kUninitialized);
VLOGF(2);
+ weak_this_ = weak_this_ptr_factory_.GetWeakPtr();
+
+ switch (codec_) {
+ case kCodecH264:
+ encoder_ = std::make_unique<H264Encoder>(
+ std::make_unique<H264Accelerator>(this));
+ break;
+
+ case kCodecVP8:
+ encoder_ =
+ std::make_unique<VP8Encoder>(std::make_unique<VP8Accelerator>(this));
+ break;
+
+ default:
+ NOTREACHED() << "Unsupported codec type " << GetCodecName(codec_);
+ return;
+ }
+
+ if (!encoder_->Initialize(visible_size, profile, bitrate,
+ kDefaultFramerate)) {
+ NOTIFY_ERROR(kInvalidArgumentError, "Failed initializing encoder");
+ return;
+ }
+
+ coded_size_ = encoder_->GetCodedSize();
+ output_buffer_byte_size_ =
+ encoder_->GetBitstreamBufferSize() + kExtraOutputBufferSizeInBytes;
+ const size_t max_ref_frames = encoder_->GetMaxNumOfRefFrames();
+ // Use at least kMinNumFramesInFlight if encoder requested less for
+ // pipeline depth.
+ const size_t num_frames_in_flight =
+ std::max(kMinNumFramesInFlight, max_ref_frames);
+ const size_t num_surfaces = (num_frames_in_flight + 1) * kNumSurfacesPerFrame;
+ DVLOGF(1) << "Frames in flight: " << num_frames_in_flight;
+
va_surface_release_cb_ = BindToCurrentLoop(
base::Bind(&VaapiVideoEncodeAccelerator::RecycleVASurfaceID,
base::Unretained(this)));
if (!vaapi_wrapper_->CreateSurfaces(VA_RT_FORMAT_YUV420, coded_size_,
- kNumSurfaces,
+ num_surfaces,
&available_va_surface_ids_)) {
NOTIFY_ERROR(kPlatformFailureError, "Failed creating VASurfaces");
return;
}
- UpdateSPS();
- GeneratePackedSPS();
-
- UpdatePPS();
- GeneratePackedPPS();
-
child_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&Client::RequireBitstreamBuffers, client_, kNumInputBuffers,
- coded_size_, output_buffer_byte_size_));
+ FROM_HERE, base::BindOnce(&Client::RequireBitstreamBuffers, client_,
+ num_frames_in_flight, coded_size_,
+ output_buffer_byte_size_));
SetState(kEncoding);
}
@@ -272,264 +339,50 @@ void VaapiVideoEncodeAccelerator::RecycleVASurfaceID(
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
available_va_surface_ids_.push_back(va_surface_id);
- EncodeFrameTask();
+ EncodePendingInputs();
}
-void VaapiVideoEncodeAccelerator::BeginFrame(bool force_keyframe) {
- current_pic_ = new H264Picture();
-
- // If the current picture is an IDR picture, frame_num shall be equal to 0.
- if (force_keyframe)
- frame_num_ = 0;
-
- current_pic_->frame_num = frame_num_++;
- frame_num_ %= idr_period_;
-
- if (current_pic_->frame_num == 0) {
- current_pic_->idr = true;
- // H264 spec mandates idr_pic_id to differ between two consecutive IDRs.
- idr_pic_id_ ^= 1;
- ref_pic_list0_.clear();
- }
-
- if (current_pic_->frame_num % i_period_ == 0)
- current_pic_->type = H264SliceHeader::kISlice;
- else
- current_pic_->type = H264SliceHeader::kPSlice;
-
- if (current_pic_->type != H264SliceHeader::kBSlice)
- current_pic_->ref = true;
-
- current_pic_->pic_order_cnt = current_pic_->frame_num * 2;
- current_pic_->top_field_order_cnt = current_pic_->pic_order_cnt;
- current_pic_->pic_order_cnt_lsb = current_pic_->pic_order_cnt;
-
- current_encode_job_->keyframe = current_pic_->idr;
-
- DVLOGF(4) << "Starting a new frame, type: " << current_pic_->type
- << (force_keyframe ? " (forced keyframe)" : "")
- << " frame_num: " << current_pic_->frame_num
- << " POC: " << current_pic_->pic_order_cnt;
+void VaapiVideoEncodeAccelerator::ExecuteEncode(VASurfaceID va_surface_id) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+ if (!vaapi_wrapper_->ExecuteAndDestroyPendingBuffers(va_surface_id))
+ NOTIFY_ERROR(kPlatformFailureError, "Failed to execute encode");
}
-void VaapiVideoEncodeAccelerator::EndFrame() {
- DCHECK(current_pic_);
- // Store the picture on the list of reference pictures and keep the list
- // below maximum size, dropping oldest references.
- if (current_pic_->ref)
- ref_pic_list0_.push_front(current_encode_job_->recon_surface);
- size_t max_num_ref_frames =
- base::checked_cast<size_t>(current_sps_.max_num_ref_frames);
- while (ref_pic_list0_.size() > max_num_ref_frames)
- ref_pic_list0_.pop_back();
-
- submitted_encode_jobs_.push(std::move(current_encode_job_));
+void VaapiVideoEncodeAccelerator::UploadFrame(scoped_refptr<VideoFrame> frame,
+ VASurfaceID va_surface_id) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+ if (!vaapi_wrapper_->UploadVideoFrameToSurface(frame, va_surface_id))
+ NOTIFY_ERROR(kPlatformFailureError, "Failed to upload frame");
}
-static void InitVAPicture(VAPictureH264* va_pic) {
- memset(va_pic, 0, sizeof(*va_pic));
- va_pic->picture_id = VA_INVALID_ID;
- va_pic->flags = VA_PICTURE_H264_INVALID;
+void VaapiVideoEncodeAccelerator::SubmitBuffer(
+ VABufferType type,
+ scoped_refptr<base::RefCountedBytes> buffer) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+ if (!vaapi_wrapper_->SubmitBuffer(type, buffer->size(), buffer->front()))
+ NOTIFY_ERROR(kPlatformFailureError, "Failed submitting a buffer");
}
-bool VaapiVideoEncodeAccelerator::SubmitFrameParameters() {
- DCHECK(current_pic_);
- VAEncSequenceParameterBufferH264 seq_param;
- memset(&seq_param, 0, sizeof(seq_param));
-
-#define SPS_TO_SP(a) seq_param.a = current_sps_.a;
- SPS_TO_SP(seq_parameter_set_id);
- SPS_TO_SP(level_idc);
-
- seq_param.intra_period = i_period_;
- seq_param.intra_idr_period = idr_period_;
- seq_param.ip_period = ip_period_;
- seq_param.bits_per_second = bitrate_;
-
- SPS_TO_SP(max_num_ref_frames);
- seq_param.picture_width_in_mbs = mb_width_;
- seq_param.picture_height_in_mbs = mb_height_;
-
-#define SPS_TO_SP_FS(a) seq_param.seq_fields.bits.a = current_sps_.a;
- SPS_TO_SP_FS(chroma_format_idc);
- SPS_TO_SP_FS(frame_mbs_only_flag);
- SPS_TO_SP_FS(log2_max_frame_num_minus4);
- SPS_TO_SP_FS(pic_order_cnt_type);
- SPS_TO_SP_FS(log2_max_pic_order_cnt_lsb_minus4);
-#undef SPS_TO_SP_FS
-
- SPS_TO_SP(bit_depth_luma_minus8);
- SPS_TO_SP(bit_depth_chroma_minus8);
-
- SPS_TO_SP(frame_cropping_flag);
- if (current_sps_.frame_cropping_flag) {
- SPS_TO_SP(frame_crop_left_offset);
- SPS_TO_SP(frame_crop_right_offset);
- SPS_TO_SP(frame_crop_top_offset);
- SPS_TO_SP(frame_crop_bottom_offset);
- }
-
- SPS_TO_SP(vui_parameters_present_flag);
-#define SPS_TO_SP_VF(a) seq_param.vui_fields.bits.a = current_sps_.a;
- SPS_TO_SP_VF(timing_info_present_flag);
-#undef SPS_TO_SP_VF
- SPS_TO_SP(num_units_in_tick);
- SPS_TO_SP(time_scale);
-#undef SPS_TO_SP
-
- if (!vaapi_wrapper_->SubmitBuffer(VAEncSequenceParameterBufferType,
- sizeof(seq_param), &seq_param))
- return false;
-
- VAEncPictureParameterBufferH264 pic_param;
- memset(&pic_param, 0, sizeof(pic_param));
-
- pic_param.CurrPic.picture_id = current_encode_job_->recon_surface->id();
- pic_param.CurrPic.TopFieldOrderCnt = current_pic_->top_field_order_cnt;
- pic_param.CurrPic.BottomFieldOrderCnt = current_pic_->bottom_field_order_cnt;
- pic_param.CurrPic.flags = 0;
-
- for (size_t i = 0; i < arraysize(pic_param.ReferenceFrames); ++i)
- InitVAPicture(&pic_param.ReferenceFrames[i]);
-
- DCHECK_LE(ref_pic_list0_.size(), arraysize(pic_param.ReferenceFrames));
- RefPicList::const_iterator iter = ref_pic_list0_.begin();
- for (size_t i = 0;
- i < arraysize(pic_param.ReferenceFrames) && iter != ref_pic_list0_.end();
- ++iter, ++i) {
- pic_param.ReferenceFrames[i].picture_id = (*iter)->id();
- pic_param.ReferenceFrames[i].flags = 0;
- }
-
- pic_param.coded_buf = current_encode_job_->coded_buffer;
- pic_param.pic_parameter_set_id = current_pps_.pic_parameter_set_id;
- pic_param.seq_parameter_set_id = current_pps_.seq_parameter_set_id;
- pic_param.frame_num = current_pic_->frame_num;
- pic_param.pic_init_qp = qp_;
- pic_param.num_ref_idx_l0_active_minus1 = max_ref_idx_l0_size_ - 1;
- pic_param.pic_fields.bits.idr_pic_flag = current_pic_->idr;
- pic_param.pic_fields.bits.reference_pic_flag = current_pic_->ref;
-#define PPS_TO_PP_PF(a) pic_param.pic_fields.bits.a = current_pps_.a;
- PPS_TO_PP_PF(entropy_coding_mode_flag);
- PPS_TO_PP_PF(transform_8x8_mode_flag);
- PPS_TO_PP_PF(deblocking_filter_control_present_flag);
-#undef PPS_TO_PP_PF
-
- if (!vaapi_wrapper_->SubmitBuffer(VAEncPictureParameterBufferType,
- sizeof(pic_param), &pic_param))
- return false;
-
- VAEncSliceParameterBufferH264 slice_param;
- memset(&slice_param, 0, sizeof(slice_param));
-
- slice_param.num_macroblocks = mb_width_ * mb_height_;
- slice_param.macroblock_info = VA_INVALID_ID;
- slice_param.slice_type = current_pic_->type;
- slice_param.pic_parameter_set_id = current_pps_.pic_parameter_set_id;
- slice_param.idr_pic_id = idr_pic_id_;
- slice_param.pic_order_cnt_lsb = current_pic_->pic_order_cnt_lsb;
- slice_param.num_ref_idx_active_override_flag = true;
-
- for (size_t i = 0; i < arraysize(slice_param.RefPicList0); ++i)
- InitVAPicture(&slice_param.RefPicList0[i]);
-
- for (size_t i = 0; i < arraysize(slice_param.RefPicList1); ++i)
- InitVAPicture(&slice_param.RefPicList1[i]);
-
- DCHECK_LE(ref_pic_list0_.size(), arraysize(slice_param.RefPicList0));
- iter = ref_pic_list0_.begin();
- for (size_t i = 0;
- i < arraysize(slice_param.RefPicList0) && iter != ref_pic_list0_.end();
- ++iter, ++i) {
- InitVAPicture(&slice_param.RefPicList0[i]);
- slice_param.RefPicList0[i].picture_id = (*iter)->id();
- slice_param.RefPicList0[i].flags = 0;
+void VaapiVideoEncodeAccelerator::SubmitVAEncMiscParamBuffer(
+ VAEncMiscParameterType type,
+ scoped_refptr<base::RefCountedBytes> buffer) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+ if (!vaapi_wrapper_->SubmitVAEncMiscParamBuffer(type, buffer->size(),
+ buffer->front())) {
+ NOTIFY_ERROR(kPlatformFailureError, "Failed submitting a parameter buffer");
}
-
- if (!vaapi_wrapper_->SubmitBuffer(VAEncSliceParameterBufferType,
- sizeof(slice_param), &slice_param))
- return false;
-
- VAEncMiscParameterRateControl rate_control_param;
- memset(&rate_control_param, 0, sizeof(rate_control_param));
- rate_control_param.bits_per_second = bitrate_;
- rate_control_param.target_percentage = 90;
- rate_control_param.window_size = kCPBWindowSizeMs;
- rate_control_param.initial_qp = qp_;
- rate_control_param.rc_flags.bits.disable_frame_skip = true;
-
- if (!vaapi_wrapper_->SubmitVAEncMiscParamBuffer(
- VAEncMiscParameterTypeRateControl, sizeof(rate_control_param),
- &rate_control_param))
- return false;
-
- VAEncMiscParameterFrameRate framerate_param;
- memset(&framerate_param, 0, sizeof(framerate_param));
- framerate_param.framerate = framerate_;
- if (!vaapi_wrapper_->SubmitVAEncMiscParamBuffer(
- VAEncMiscParameterTypeFrameRate, sizeof(framerate_param),
- &framerate_param))
- return false;
-
- VAEncMiscParameterHRD hrd_param;
- memset(&hrd_param, 0, sizeof(hrd_param));
- hrd_param.buffer_size = cpb_size_;
- hrd_param.initial_buffer_fullness = cpb_size_ / 2;
- if (!vaapi_wrapper_->SubmitVAEncMiscParamBuffer(
- VAEncMiscParameterTypeHRD, sizeof(hrd_param), &hrd_param))
- return false;
-
- return true;
}
-bool VaapiVideoEncodeAccelerator::SubmitHeadersIfNeeded() {
- DCHECK(current_pic_);
- if (current_pic_->type != H264SliceHeader::kISlice)
- return true;
-
- // Submit SPS.
- VAEncPackedHeaderParameterBuffer par_buffer;
- memset(&par_buffer, 0, sizeof(par_buffer));
- par_buffer.type = VAEncPackedHeaderSequence;
- par_buffer.bit_length = packed_sps_.BytesInBuffer() * 8;
-
- if (!vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderParameterBufferType,
- sizeof(par_buffer), &par_buffer))
- return false;
-
- if (!vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderDataBufferType,
- packed_sps_.BytesInBuffer(),
- packed_sps_.data()))
- return false;
-
- // Submit PPS.
- memset(&par_buffer, 0, sizeof(par_buffer));
- par_buffer.type = VAEncPackedHeaderPicture;
- par_buffer.bit_length = packed_pps_.BytesInBuffer() * 8;
-
- if (!vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderParameterBufferType,
- sizeof(par_buffer), &par_buffer))
- return false;
-
+void VaapiVideoEncodeAccelerator::SubmitH264BitstreamBuffer(
+ scoped_refptr<H264BitstreamBuffer> buffer) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+ // TODO(crbug.com/844303): use vaMapBuffer in VaapiWrapper::SubmitBuffer()
+ // instead to avoid this.
+ void* non_const_ptr = const_cast<uint8_t*>(buffer->data());
if (!vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderDataBufferType,
- packed_pps_.BytesInBuffer(),
- packed_pps_.data()))
- return false;
-
- return true;
-}
-
-bool VaapiVideoEncodeAccelerator::ExecuteEncode() {
- DCHECK(current_pic_);
- DVLOGF(4) << "Encoding frame_num: " << current_pic_->frame_num;
- return vaapi_wrapper_->ExecuteAndDestroyPendingBuffers(
- current_encode_job_->input_surface->id());
-}
-
-bool VaapiVideoEncodeAccelerator::UploadFrame(
- const scoped_refptr<VideoFrame>& frame) {
- return vaapi_wrapper_->UploadVideoFrameToSurface(
- frame, current_encode_job_->input_surface->id());
+ buffer->BytesInBuffer(), non_const_ptr)) {
+ NOTIFY_ERROR(kPlatformFailureError, "Failed submitting a bitstream buffer");
+ }
}
void VaapiVideoEncodeAccelerator::TryToReturnBitstreamBuffer() {
@@ -538,45 +391,51 @@ void VaapiVideoEncodeAccelerator::TryToReturnBitstreamBuffer() {
if (state_ != kEncoding)
return;
- while (!submitted_encode_jobs_.empty()) {
+ while (!submitted_encode_jobs_.empty() &&
+ submitted_encode_jobs_.front() == nullptr) {
// A null job indicates a flush command.
- if (submitted_encode_jobs_.front() == nullptr) {
- submitted_encode_jobs_.pop();
- DVLOGF(2) << "FlushDone";
- DCHECK(flush_callback_);
- child_task_runner_->PostTask(
- FROM_HERE, base::BindOnce(std::move(flush_callback_), true));
- continue;
- }
+ submitted_encode_jobs_.pop();
+ DVLOGF(2) << "FlushDone";
+ DCHECK(flush_callback_);
+ child_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(std::move(flush_callback_), true));
+ }
- if (available_bitstream_buffers_.empty())
- break;
+ if (submitted_encode_jobs_.empty() || available_bitstream_buffers_.empty())
+ return;
- const auto encode_job = std::move(submitted_encode_jobs_.front());
- submitted_encode_jobs_.pop();
- const auto buffer = std::move(available_bitstream_buffers_.front());
- available_bitstream_buffers_.pop();
+ auto buffer = std::move(available_bitstream_buffers_.front());
+ available_bitstream_buffers_.pop();
+ auto encode_job = submitted_encode_jobs_.front();
+ submitted_encode_jobs_.pop();
- uint8_t* target_data = reinterpret_cast<uint8_t*>(buffer->shm->memory());
+ ReturnBitstreamBuffer(encode_job, std::move(buffer));
+}
- size_t data_size = 0;
- if (!vaapi_wrapper_->DownloadAndDestroyCodedBuffer(
- encode_job->coded_buffer, encode_job->input_surface->id(),
- target_data, buffer->shm->size(), &data_size)) {
- NOTIFY_ERROR(kPlatformFailureError, "Failed downloading coded buffer");
- return;
- }
+void VaapiVideoEncodeAccelerator::ReturnBitstreamBuffer(
+ scoped_refptr<VaapiEncodeJob> encode_job,
+ std::unique_ptr<BitstreamBufferRef> buffer) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
- DVLOGF(4) << "Returning bitstream buffer "
- << (encode_job->keyframe ? "(keyframe)" : "")
- << " id: " << buffer->id << " size: " << data_size;
+ uint8_t* target_data = reinterpret_cast<uint8_t*>(buffer->shm->memory());
+ size_t data_size = 0;
- child_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&Client::BitstreamBufferReady, client_, buffer->id,
- data_size, encode_job->keyframe, encode_job->timestamp));
- break;
+ if (!vaapi_wrapper_->DownloadAndDestroyCodedBuffer(
+ encode_job->coded_buffer_id(), encode_job->input_surface()->id(),
+ target_data, buffer->shm->size(), &data_size)) {
+ NOTIFY_ERROR(kPlatformFailureError, "Failed downloading coded buffer");
+ return;
}
+
+ DVLOGF(4) << "Returning bitstream buffer "
+ << (encode_job->IsKeyframeRequested() ? "(keyframe)" : "")
+ << " id: " << buffer->id << " size: " << data_size;
+
+ child_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&Client::BitstreamBufferReady, client_, buffer->id,
+ data_size, encode_job->IsKeyframeRequested(),
+ encode_job->timestamp()));
}
void VaapiVideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
@@ -590,99 +449,87 @@ void VaapiVideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
base::Unretained(this), frame, force_keyframe));
}
-bool VaapiVideoEncodeAccelerator::PrepareNextJob(base::TimeDelta timestamp) {
- if (available_va_surface_ids_.size() < kMinSurfacesToEncode)
- return false;
+void VaapiVideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
+ bool force_keyframe) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+ DCHECK_NE(state_, kUninitialized);
- DCHECK(!current_encode_job_);
- current_encode_job_.reset(new EncodeJob());
+ input_queue_.push(std::make_unique<InputFrameRef>(frame, force_keyframe));
+ EncodePendingInputs();
+}
+
+scoped_refptr<VaapiEncodeJob> VaapiVideoEncodeAccelerator::CreateEncodeJob(
+ scoped_refptr<VideoFrame> frame,
+ bool force_keyframe) {
+ DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+
+ if (available_va_surface_ids_.size() < kNumSurfacesPerFrame) {
+ DVLOGF(4) << "Not enough surfaces available";
+ return nullptr;
+ }
+ VABufferID coded_buffer_id;
if (!vaapi_wrapper_->CreateCodedBuffer(output_buffer_byte_size_,
- &current_encode_job_->coded_buffer)) {
+ &coded_buffer_id)) {
NOTIFY_ERROR(kPlatformFailureError, "Failed creating coded buffer");
- return false;
+ return nullptr;
}
- current_encode_job_->timestamp = timestamp;
-
- current_encode_job_->input_surface = new VASurface(
+ static_assert(kNumSurfacesPerFrame == 2, "kNumSurfacesPerFrame must be 2");
+ scoped_refptr<VASurface> input_surface = new VASurface(
available_va_surface_ids_.back(), coded_size_,
vaapi_wrapper_->va_surface_format(), va_surface_release_cb_);
available_va_surface_ids_.pop_back();
- current_encode_job_->recon_surface = new VASurface(
+ scoped_refptr<VASurface> reconstructed_surface = new VASurface(
available_va_surface_ids_.back(), coded_size_,
vaapi_wrapper_->va_surface_format(), va_surface_release_cb_);
available_va_surface_ids_.pop_back();
- // Reference surfaces are needed until the job is done, but they get
- // removed from ref_pic_list0_ when it's full at the end of job submission.
- // Keep refs to them along with the job and only release after sync.
- current_encode_job_->reference_surfaces = ref_pic_list0_;
+ auto job = base::MakeRefCounted<VaapiEncodeJob>(
+ frame, force_keyframe,
+ base::BindOnce(&VaapiVideoEncodeAccelerator::ExecuteEncode,
+ base::Unretained(this), input_surface->id()),
+ input_surface, reconstructed_surface, coded_buffer_id);
- return true;
-}
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::UploadFrame,
+ base::Unretained(this), frame, input_surface->id()));
-void VaapiVideoEncodeAccelerator::EncodeTask(
- const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) {
- DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
- DCHECK_NE(state_, kUninitialized);
-
- encoder_input_queue_.push(
- std::make_unique<InputFrameRef>(frame, force_keyframe));
- EncodeFrameTask();
+ return job;
}
-void VaapiVideoEncodeAccelerator::EncodeFrameTask() {
+void VaapiVideoEncodeAccelerator::EncodePendingInputs() {
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
+ DVLOGF(4);
+
+ while (state_ == kEncoding && !input_queue_.empty()) {
+ const std::unique_ptr<InputFrameRef>& input_frame = input_queue_.front();
+
+ // If this is a flush (null) frame, don't create/submit a new encode job for
+ // it, but forward a null job to the submitted_encode_jobs_ queue.
+ scoped_refptr<VaapiEncodeJob> job;
+ if (input_frame) {
+ job = CreateEncodeJob(input_frame->frame, input_frame->force_keyframe);
+ if (!job)
+ return;
+ }
- if (state_ != kEncoding || encoder_input_queue_.empty())
- return;
-
- // Pass the nullptr to the next queue |submitted_encode_jobs_|.
- if (encoder_input_queue_.front() == nullptr) {
- encoder_input_queue_.pop();
- submitted_encode_jobs_.push(nullptr);
- TryToReturnBitstreamBuffer();
- return;
- }
-
- if (!PrepareNextJob(encoder_input_queue_.front()->frame->timestamp())) {
- DVLOGF(4) << "Not ready for next frame yet";
- return;
- }
-
- const auto frame_ref = std::move(encoder_input_queue_.front());
- encoder_input_queue_.pop();
-
- TRACE_EVENT0("media,gpu", "VAVEA::EncodeFrameTask");
-
- if (!UploadFrame(frame_ref->frame)) {
- NOTIFY_ERROR(kPlatformFailureError, "Failed uploading source frame to HW.");
- return;
- }
-
- BeginFrame(frame_ref->force_keyframe || encoding_parameters_changed_);
- encoding_parameters_changed_ = false;
+ input_queue_.pop();
- if (!SubmitFrameParameters()) {
- NOTIFY_ERROR(kPlatformFailureError, "Failed submitting frame parameters.");
- return;
- }
+ if (job) {
+ if (!encoder_->PrepareEncodeJob(job.get())) {
+ NOTIFY_ERROR(kPlatformFailureError, "Failed preparing an encode job.");
+ return;
+ }
- if (!SubmitHeadersIfNeeded()) {
- NOTIFY_ERROR(kPlatformFailureError, "Failed submitting frame headers.");
- return;
- }
+ TRACE_EVENT0("media,gpu", "VAVEA: Execute");
+ job->Execute();
+ }
- if (!ExecuteEncode()) {
- NOTIFY_ERROR(kPlatformFailureError, "Failed submitting encode job to HW.");
- return;
+ submitted_encode_jobs_.push(job);
+ TryToReturnBitstreamBuffer();
}
-
- EndFrame();
- TryToReturnBitstreamBuffer();
}
void VaapiVideoEncodeAccelerator::UseOutputBitstreamBuffer(
@@ -695,15 +542,8 @@ void VaapiVideoEncodeAccelerator::UseOutputBitstreamBuffer(
return;
}
- std::unique_ptr<SharedMemoryRegion> shm(
- new SharedMemoryRegion(buffer, false));
- if (!shm->Map()) {
- NOTIFY_ERROR(kPlatformFailureError, "Failed mapping shared memory.");
- return;
- }
-
- std::unique_ptr<BitstreamBufferRef> buffer_ref(
- new BitstreamBufferRef(buffer.id(), std::move(shm)));
+ auto buffer_ref = std::make_unique<BitstreamBufferRef>(
+ buffer.id(), std::make_unique<SharedMemoryRegion>(buffer, false));
encoder_thread_task_runner_->PostTask(
FROM_HERE,
@@ -716,6 +556,11 @@ void VaapiVideoEncodeAccelerator::UseOutputBitstreamBufferTask(
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
DCHECK_NE(state_, kUninitialized);
+ if (!buffer_ref->shm->Map()) {
+ NOTIFY_ERROR(kPlatformFailureError, "Failed mapping shared memory.");
+ return;
+ }
+
available_bitstream_buffers_.push(std::move(buffer_ref));
TryToReturnBitstreamBuffer();
}
@@ -733,17 +578,6 @@ void VaapiVideoEncodeAccelerator::RequestEncodingParametersChange(
base::Unretained(this), bitrate, framerate));
}
-void VaapiVideoEncodeAccelerator::UpdateRates(uint32_t bitrate,
- uint32_t framerate) {
- if (encoder_thread_.IsRunning())
- DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
- DCHECK_NE(bitrate, 0u);
- DCHECK_NE(framerate, 0u);
- bitrate_ = bitrate;
- framerate_ = framerate;
- cpb_size_ = bitrate_ * kCPBWindowSizeMs / 1000;
-}
-
void VaapiVideoEncodeAccelerator::RequestEncodingParametersChangeTask(
uint32_t bitrate,
uint32_t framerate) {
@@ -751,26 +585,8 @@ void VaapiVideoEncodeAccelerator::RequestEncodingParametersChangeTask(
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
DCHECK_NE(state_, kUninitialized);
- // This is a workaround to zero being temporarily, as part of the initial
- // setup, provided by the webrtc video encode and a zero bitrate and
- // framerate not being accepted by VAAPI
- // TODO: This code is common with v4l2_video_encode_accelerator.cc, perhaps
- // it could be pulled up to RTCVideoEncoder
- if (bitrate < 1)
- bitrate = 1;
- if (framerate < 1)
- framerate = 1;
-
- if (bitrate_ == bitrate && framerate_ == framerate)
- return;
-
- UpdateRates(bitrate, framerate);
-
- UpdateSPS();
- GeneratePackedSPS();
-
- // Submit new parameters along with next frame that will be processed.
- encoding_parameters_changed_ = true;
+ if (!encoder_->UpdateRates(bitrate, framerate))
+ VLOGF(1) << "Failed to update rates to " << bitrate << " " << framerate;
}
void VaapiVideoEncodeAccelerator::Flush(FlushCallback flush_callback) {
@@ -791,18 +607,14 @@ void VaapiVideoEncodeAccelerator::FlushTask() {
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
// Insert an null job to indicate a flush command.
- encoder_input_queue_.push(std::unique_ptr<InputFrameRef>(nullptr));
- EncodeFrameTask();
+ input_queue_.push(std::unique_ptr<InputFrameRef>(nullptr));
+ EncodePendingInputs();
}
void VaapiVideoEncodeAccelerator::Destroy() {
+ DVLOGF(2);
DCHECK(child_task_runner_->BelongsToCurrentThread());
- // Can't call client anymore after Destroy() returns.
- client_ptr_factory_.reset();
- weak_this_ptr_factory_.InvalidateWeakPtrs();
-
- // Early-exit encoder tasks if they are running and join the thread.
if (encoder_thread_.IsRunning()) {
encoder_thread_.task_runner()->PostTask(
FROM_HERE, base::Bind(&VaapiVideoEncodeAccelerator::DestroyTask,
@@ -813,301 +625,460 @@ void VaapiVideoEncodeAccelerator::Destroy() {
if (flush_callback_)
std::move(flush_callback_).Run(false);
+ weak_this_ptr_factory_.InvalidateWeakPtrs();
delete this;
}
void VaapiVideoEncodeAccelerator::DestroyTask() {
VLOGF(2);
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
- SetState(kError);
+
+ // Clean up members that are to be accessed on the encoder thread only.
+ available_va_surface_ids_.clear();
+ available_va_buffer_ids_.clear();
+
+ while (!available_bitstream_buffers_.empty())
+ available_bitstream_buffers_.pop();
+
+ while (!input_queue_.empty())
+ input_queue_.pop();
+
+ while (!submitted_encode_jobs_.empty())
+ submitted_encode_jobs_.pop();
+
+ encoder_ = nullptr;
}
-void VaapiVideoEncodeAccelerator::UpdateSPS() {
- memset(&current_sps_, 0, sizeof(H264SPS));
-
- // Spec A.2 and A.3.
- switch (profile_) {
- case H264PROFILE_BASELINE:
- // Due to https://crbug.com/345569, we don't distinguish between
- // constrained and non-constrained baseline profiles. Since many codecs
- // can't do non-constrained, and constrained is usually what we mean (and
- // it's a subset of non-constrained), default to it.
- current_sps_.profile_idc = H264SPS::kProfileIDCBaseline;
- current_sps_.constraint_set0_flag = true;
- break;
- case H264PROFILE_MAIN:
- current_sps_.profile_idc = H264SPS::kProfileIDCMain;
- current_sps_.constraint_set1_flag = true;
- break;
- case H264PROFILE_HIGH:
- current_sps_.profile_idc = H264SPS::kProfileIDCHigh;
- break;
- default:
- NOTIMPLEMENTED();
- return;
+void VaapiVideoEncodeAccelerator::SetState(State state) {
+ // Only touch state on encoder thread, unless it's not running.
+ if (encoder_thread_.IsRunning() &&
+ !encoder_thread_task_runner_->BelongsToCurrentThread()) {
+ encoder_thread_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&VaapiVideoEncodeAccelerator::SetState,
+ base::Unretained(this), state));
+ return;
+ }
+
+ VLOGF(2) << "setting state to: " << state;
+ state_ = state;
+}
+
+void VaapiVideoEncodeAccelerator::NotifyError(Error error) {
+ if (!child_task_runner_->BelongsToCurrentThread()) {
+ child_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&VaapiVideoEncodeAccelerator::NotifyError,
+ weak_this_, error));
+ return;
}
- current_sps_.level_idc = kDefaultLevelIDC;
- current_sps_.seq_parameter_set_id = 0;
- current_sps_.chroma_format_idc = kChromaFormatIDC;
-
- DCHECK_GE(idr_period_, 1u << 4);
- current_sps_.log2_max_frame_num_minus4 = Log2OfPowerOf2(idr_period_) - 4;
- current_sps_.pic_order_cnt_type = 0;
- current_sps_.log2_max_pic_order_cnt_lsb_minus4 =
- Log2OfPowerOf2(idr_period_ * 2) - 4;
- current_sps_.max_num_ref_frames = max_ref_idx_l0_size_;
-
- current_sps_.frame_mbs_only_flag = true;
-
- DCHECK_GT(mb_width_, 0u);
- DCHECK_GT(mb_height_, 0u);
- current_sps_.pic_width_in_mbs_minus1 = mb_width_ - 1;
- DCHECK(current_sps_.frame_mbs_only_flag);
- current_sps_.pic_height_in_map_units_minus1 = mb_height_ - 1;
-
- if (visible_size_ != coded_size_) {
- // Visible size differs from coded size, fill crop information.
- current_sps_.frame_cropping_flag = true;
- DCHECK(!current_sps_.separate_colour_plane_flag);
- // Spec table 6-1. Only 4:2:0 for now.
- DCHECK_EQ(current_sps_.chroma_format_idc, 1);
- // Spec 7.4.2.1.1. Crop is in crop units, which is 2 pixels for 4:2:0.
- const unsigned int crop_unit_x = 2;
- const unsigned int crop_unit_y = 2 * (2 - current_sps_.frame_mbs_only_flag);
- current_sps_.frame_crop_left_offset = 0;
- current_sps_.frame_crop_right_offset =
- (coded_size_.width() - visible_size_.width()) / crop_unit_x;
- current_sps_.frame_crop_top_offset = 0;
- current_sps_.frame_crop_bottom_offset =
- (coded_size_.height() - visible_size_.height()) / crop_unit_y;
+ if (client_) {
+ client_->NotifyError(error);
+ client_ptr_factory_.reset();
}
+}
+
+VaapiEncodeJob::VaapiEncodeJob(scoped_refptr<VideoFrame> input_frame,
+ bool keyframe,
+ base::OnceClosure execute_cb,
+ scoped_refptr<VASurface> input_surface,
+ scoped_refptr<VASurface> reconstructed_surface,
+ VABufferID coded_buffer_id)
+ : EncodeJob(input_frame, keyframe, std::move(execute_cb)),
+ input_surface_(input_surface),
+ reconstructed_surface_(reconstructed_surface),
+ coded_buffer_id_(coded_buffer_id) {
+ DCHECK(input_surface_);
+ DCHECK(reconstructed_surface_);
+ DCHECK_NE(coded_buffer_id_, VA_INVALID_ID);
+}
+
+static void InitVAPictureH264(VAPictureH264* va_pic) {
+ *va_pic = {};
+ va_pic->picture_id = VA_INVALID_ID;
+ va_pic->flags = VA_PICTURE_H264_INVALID;
+}
- current_sps_.vui_parameters_present_flag = true;
- current_sps_.timing_info_present_flag = true;
- current_sps_.num_units_in_tick = 1;
- current_sps_.time_scale = framerate_ * 2; // See equation D-2 in spec.
- current_sps_.fixed_frame_rate_flag = true;
-
- current_sps_.nal_hrd_parameters_present_flag = true;
- // H.264 spec ch. E.2.2.
- current_sps_.cpb_cnt_minus1 = 0;
- current_sps_.bit_rate_scale = kBitRateScale;
- current_sps_.cpb_size_scale = kCPBSizeScale;
- current_sps_.bit_rate_value_minus1[0] =
- (bitrate_ >> (kBitRateScale + H264SPS::kBitRateScaleConstantTerm)) - 1;
- current_sps_.cpb_size_value_minus1[0] =
- (cpb_size_ >> (kCPBSizeScale + H264SPS::kCPBSizeScaleConstantTerm)) - 1;
- current_sps_.cbr_flag[0] = true;
- current_sps_.initial_cpb_removal_delay_length_minus_1 =
- H264SPS::kDefaultInitialCPBRemovalDelayLength - 1;
- current_sps_.cpb_removal_delay_length_minus1 =
- H264SPS::kDefaultInitialCPBRemovalDelayLength - 1;
- current_sps_.dpb_output_delay_length_minus1 =
- H264SPS::kDefaultDPBOutputDelayLength - 1;
- current_sps_.time_offset_length = H264SPS::kDefaultTimeOffsetLength;
- current_sps_.low_delay_hrd_flag = false;
+static scoped_refptr<base::RefCountedBytes> MakeRefCountedBytes(void* ptr,
+ size_t size) {
+ return base::MakeRefCounted<base::RefCountedBytes>(
+ reinterpret_cast<uint8_t*>(ptr), size);
}
-void VaapiVideoEncodeAccelerator::GeneratePackedSPS() {
- packed_sps_.Reset();
-
- packed_sps_.BeginNALU(H264NALU::kSPS, 3);
-
- packed_sps_.AppendBits(8, current_sps_.profile_idc);
- packed_sps_.AppendBool(current_sps_.constraint_set0_flag);
- packed_sps_.AppendBool(current_sps_.constraint_set1_flag);
- packed_sps_.AppendBool(current_sps_.constraint_set2_flag);
- packed_sps_.AppendBool(current_sps_.constraint_set3_flag);
- packed_sps_.AppendBool(current_sps_.constraint_set4_flag);
- packed_sps_.AppendBool(current_sps_.constraint_set5_flag);
- packed_sps_.AppendBits(2, 0); // reserved_zero_2bits
- packed_sps_.AppendBits(8, current_sps_.level_idc);
- packed_sps_.AppendUE(current_sps_.seq_parameter_set_id);
-
- if (current_sps_.profile_idc == H264SPS::kProfileIDCHigh) {
- packed_sps_.AppendUE(current_sps_.chroma_format_idc);
- if (current_sps_.chroma_format_idc == 3)
- packed_sps_.AppendBool(current_sps_.separate_colour_plane_flag);
- packed_sps_.AppendUE(current_sps_.bit_depth_luma_minus8);
- packed_sps_.AppendUE(current_sps_.bit_depth_chroma_minus8);
- packed_sps_.AppendBool(current_sps_.qpprime_y_zero_transform_bypass_flag);
- packed_sps_.AppendBool(current_sps_.seq_scaling_matrix_present_flag);
- CHECK(!current_sps_.seq_scaling_matrix_present_flag);
+bool VaapiVideoEncodeAccelerator::H264Accelerator::SubmitFrameParameters(
+ AcceleratedVideoEncoder::EncodeJob* job,
+ const media::H264Encoder::EncodeParams& encode_params,
+ const media::H264SPS& sps,
+ const media::H264PPS& pps,
+ scoped_refptr<H264Picture> pic,
+ const std::list<scoped_refptr<H264Picture>>& ref_pic_list0,
+ const std::list<scoped_refptr<H264Picture>>& ref_pic_list1) {
+ VAEncSequenceParameterBufferH264 seq_param = {};
+
+#define SPS_TO_SP(a) seq_param.a = sps.a;
+ SPS_TO_SP(seq_parameter_set_id);
+ SPS_TO_SP(level_idc);
+
+ seq_param.intra_period = encode_params.i_period_frames;
+ seq_param.intra_idr_period = encode_params.idr_period_frames;
+ seq_param.ip_period = encode_params.ip_period_frames;
+ seq_param.bits_per_second = encode_params.bitrate_bps;
+
+ SPS_TO_SP(max_num_ref_frames);
+ base::Optional<gfx::Size> coded_size = sps.GetCodedSize();
+ if (!coded_size) {
+ DVLOGF(1) << "Invalid coded size";
+ return false;
}
+ constexpr int kH264MacroblockSizeInPixels = 16;
+ seq_param.picture_width_in_mbs =
+ coded_size->width() / kH264MacroblockSizeInPixels;
+ seq_param.picture_height_in_mbs =
+ coded_size->height() / kH264MacroblockSizeInPixels;
+
+#define SPS_TO_SP_FS(a) seq_param.seq_fields.bits.a = sps.a;
+ SPS_TO_SP_FS(chroma_format_idc);
+ SPS_TO_SP_FS(frame_mbs_only_flag);
+ SPS_TO_SP_FS(log2_max_frame_num_minus4);
+ SPS_TO_SP_FS(pic_order_cnt_type);
+ SPS_TO_SP_FS(log2_max_pic_order_cnt_lsb_minus4);
+#undef SPS_TO_SP_FS
+
+ SPS_TO_SP(bit_depth_luma_minus8);
+ SPS_TO_SP(bit_depth_chroma_minus8);
- packed_sps_.AppendUE(current_sps_.log2_max_frame_num_minus4);
- packed_sps_.AppendUE(current_sps_.pic_order_cnt_type);
- if (current_sps_.pic_order_cnt_type == 0)
- packed_sps_.AppendUE(current_sps_.log2_max_pic_order_cnt_lsb_minus4);
- else if (current_sps_.pic_order_cnt_type == 1) {
- CHECK(1);
+ SPS_TO_SP(frame_cropping_flag);
+ if (sps.frame_cropping_flag) {
+ SPS_TO_SP(frame_crop_left_offset);
+ SPS_TO_SP(frame_crop_right_offset);
+ SPS_TO_SP(frame_crop_top_offset);
+ SPS_TO_SP(frame_crop_bottom_offset);
}
- packed_sps_.AppendUE(current_sps_.max_num_ref_frames);
- packed_sps_.AppendBool(current_sps_.gaps_in_frame_num_value_allowed_flag);
- packed_sps_.AppendUE(current_sps_.pic_width_in_mbs_minus1);
- packed_sps_.AppendUE(current_sps_.pic_height_in_map_units_minus1);
+ SPS_TO_SP(vui_parameters_present_flag);
+#define SPS_TO_SP_VF(a) seq_param.vui_fields.bits.a = sps.a;
+ SPS_TO_SP_VF(timing_info_present_flag);
+#undef SPS_TO_SP_VF
+ SPS_TO_SP(num_units_in_tick);
+ SPS_TO_SP(time_scale);
+#undef SPS_TO_SP
- packed_sps_.AppendBool(current_sps_.frame_mbs_only_flag);
- if (!current_sps_.frame_mbs_only_flag)
- packed_sps_.AppendBool(current_sps_.mb_adaptive_frame_field_flag);
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitBuffer,
+ base::Unretained(vea_), VAEncSequenceParameterBufferType,
+ MakeRefCountedBytes(&seq_param, sizeof(seq_param))));
- packed_sps_.AppendBool(current_sps_.direct_8x8_inference_flag);
+ VAEncPictureParameterBufferH264 pic_param = {};
- packed_sps_.AppendBool(current_sps_.frame_cropping_flag);
- if (current_sps_.frame_cropping_flag) {
- packed_sps_.AppendUE(current_sps_.frame_crop_left_offset);
- packed_sps_.AppendUE(current_sps_.frame_crop_right_offset);
- packed_sps_.AppendUE(current_sps_.frame_crop_top_offset);
- packed_sps_.AppendUE(current_sps_.frame_crop_bottom_offset);
- }
+ auto va_surface_id = pic->AsVaapiH264Picture()->GetVASurfaceID();
+ pic_param.CurrPic.picture_id = va_surface_id;
+ pic_param.CurrPic.TopFieldOrderCnt = pic->top_field_order_cnt;
+ pic_param.CurrPic.BottomFieldOrderCnt = pic->bottom_field_order_cnt;
+ pic_param.CurrPic.flags = 0;
- packed_sps_.AppendBool(current_sps_.vui_parameters_present_flag);
- if (current_sps_.vui_parameters_present_flag) {
- packed_sps_.AppendBool(false); // aspect_ratio_info_present_flag
- packed_sps_.AppendBool(false); // overscan_info_present_flag
- packed_sps_.AppendBool(false); // video_signal_type_present_flag
- packed_sps_.AppendBool(false); // chroma_loc_info_present_flag
-
- packed_sps_.AppendBool(current_sps_.timing_info_present_flag);
- if (current_sps_.timing_info_present_flag) {
- packed_sps_.AppendBits(32, current_sps_.num_units_in_tick);
- packed_sps_.AppendBits(32, current_sps_.time_scale);
- packed_sps_.AppendBool(current_sps_.fixed_frame_rate_flag);
- }
+ pic_param.coded_buf = job->AsVaapiEncodeJob()->coded_buffer_id();
+ pic_param.pic_parameter_set_id = pps.pic_parameter_set_id;
+ pic_param.seq_parameter_set_id = pps.seq_parameter_set_id;
+ pic_param.frame_num = pic->frame_num;
+ pic_param.pic_init_qp = pps.pic_init_qp_minus26 + 26;
+ pic_param.num_ref_idx_l0_active_minus1 =
+ pps.num_ref_idx_l0_default_active_minus1;
+
+ pic_param.pic_fields.bits.idr_pic_flag = pic->idr;
+ pic_param.pic_fields.bits.reference_pic_flag = pic->ref;
+#define PPS_TO_PP_PF(a) pic_param.pic_fields.bits.a = pps.a;
+ PPS_TO_PP_PF(entropy_coding_mode_flag);
+ PPS_TO_PP_PF(transform_8x8_mode_flag);
+ PPS_TO_PP_PF(deblocking_filter_control_present_flag);
+#undef PPS_TO_PP_PF
- packed_sps_.AppendBool(current_sps_.nal_hrd_parameters_present_flag);
- if (current_sps_.nal_hrd_parameters_present_flag) {
- packed_sps_.AppendUE(current_sps_.cpb_cnt_minus1);
- packed_sps_.AppendBits(4, current_sps_.bit_rate_scale);
- packed_sps_.AppendBits(4, current_sps_.cpb_size_scale);
- CHECK_LT(base::checked_cast<size_t>(current_sps_.cpb_cnt_minus1),
- arraysize(current_sps_.bit_rate_value_minus1));
- for (int i = 0; i <= current_sps_.cpb_cnt_minus1; ++i) {
- packed_sps_.AppendUE(current_sps_.bit_rate_value_minus1[i]);
- packed_sps_.AppendUE(current_sps_.cpb_size_value_minus1[i]);
- packed_sps_.AppendBool(current_sps_.cbr_flag[i]);
- }
- packed_sps_.AppendBits(
- 5, current_sps_.initial_cpb_removal_delay_length_minus_1);
- packed_sps_.AppendBits(5, current_sps_.cpb_removal_delay_length_minus1);
- packed_sps_.AppendBits(5, current_sps_.dpb_output_delay_length_minus1);
- packed_sps_.AppendBits(5, current_sps_.time_offset_length);
- }
+ VAEncSliceParameterBufferH264 slice_param = {};
- packed_sps_.AppendBool(false); // vcl_hrd_parameters_flag
- if (current_sps_.nal_hrd_parameters_present_flag)
- packed_sps_.AppendBool(current_sps_.low_delay_hrd_flag);
+ slice_param.num_macroblocks =
+ seq_param.picture_width_in_mbs * seq_param.picture_height_in_mbs;
+ slice_param.macroblock_info = VA_INVALID_ID;
+ slice_param.slice_type = pic->type;
+ slice_param.pic_parameter_set_id = pps.pic_parameter_set_id;
+ slice_param.idr_pic_id = pic->idr_pic_id;
+ slice_param.pic_order_cnt_lsb = pic->pic_order_cnt_lsb;
+ slice_param.num_ref_idx_active_override_flag = true;
- packed_sps_.AppendBool(false); // pic_struct_present_flag
- packed_sps_.AppendBool(true); // bitstream_restriction_flag
+ for (size_t i = 0; i < base::size(pic_param.ReferenceFrames); ++i)
+ InitVAPictureH264(&pic_param.ReferenceFrames[i]);
+
+ for (size_t i = 0; i < base::size(slice_param.RefPicList0); ++i)
+ InitVAPictureH264(&slice_param.RefPicList0[i]);
+
+ for (size_t i = 0; i < base::size(slice_param.RefPicList1); ++i)
+ InitVAPictureH264(&slice_param.RefPicList1[i]);
+
+ VAPictureH264* ref_frames_entry = pic_param.ReferenceFrames;
+ VAPictureH264* ref_list_entry = slice_param.RefPicList0;
+ // Initialize the current entry on slice and picture reference lists to
+ // |ref_pic| and advance list pointers.
+ auto fill_ref_frame = [&ref_frames_entry,
+ &ref_list_entry](scoped_refptr<H264Picture> ref_pic) {
+ VAPictureH264 va_pic_h264;
+ InitVAPictureH264(&va_pic_h264);
+ va_pic_h264.picture_id = ref_pic->AsVaapiH264Picture()->GetVASurfaceID();
+ va_pic_h264.flags = 0;
+
+ *ref_frames_entry = va_pic_h264;
+ *ref_list_entry = va_pic_h264;
+ ++ref_frames_entry;
+ ++ref_list_entry;
+ };
+
+ // Fill slice_param.RefPicList{0,1} with pictures from ref_pic_list{0,1},
+ // respectively, and pic_param.ReferenceFrames with entries from both.
+ std::for_each(ref_pic_list0.begin(), ref_pic_list0.end(), fill_ref_frame);
+ ref_list_entry = slice_param.RefPicList1;
+ std::for_each(ref_pic_list1.begin(), ref_pic_list1.end(), fill_ref_frame);
+
+ VAEncMiscParameterRateControl rate_control_param = {};
+ rate_control_param.bits_per_second = encode_params.bitrate_bps;
+ rate_control_param.target_percentage = kTargetBitratePercentage;
+ rate_control_param.window_size = encode_params.cpb_window_size_ms;
+ rate_control_param.initial_qp = pic_param.pic_init_qp;
+ rate_control_param.rc_flags.bits.disable_frame_skip = true;
- packed_sps_.AppendBool(false); // motion_vectors_over_pic_boundaries_flag
- packed_sps_.AppendUE(2); // max_bytes_per_pic_denom
- packed_sps_.AppendUE(1); // max_bits_per_mb_denom
- packed_sps_.AppendUE(16); // log2_max_mv_length_horizontal
- packed_sps_.AppendUE(16); // log2_max_mv_length_vertical
+ VAEncMiscParameterFrameRate framerate_param = {};
+ framerate_param.framerate = encode_params.framerate;
- // Explicitly set max_num_reorder_frames to 0 to allow the decoder to
- // output pictures early.
- packed_sps_.AppendUE(0); // max_num_reorder_frames
+ VAEncMiscParameterHRD hrd_param = {};
+ hrd_param.buffer_size = encode_params.cpb_size_bits;
+ hrd_param.initial_buffer_fullness = hrd_param.buffer_size / 2;
- // The value of max_dec_frame_buffering shall be greater than or equal to
- // max_num_ref_frames.
- const unsigned int max_dec_frame_buffering =
- current_sps_.max_num_ref_frames;
- packed_sps_.AppendUE(max_dec_frame_buffering);
- }
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitBuffer,
+ base::Unretained(vea_), VAEncPictureParameterBufferType,
+ MakeRefCountedBytes(&pic_param, sizeof(pic_param))));
- packed_sps_.FinishNALU();
-}
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitBuffer,
+ base::Unretained(vea_), VAEncSliceParameterBufferType,
+ MakeRefCountedBytes(&slice_param, sizeof(slice_param))));
-void VaapiVideoEncodeAccelerator::UpdatePPS() {
- memset(&current_pps_, 0, sizeof(H264PPS));
+ job->AddSetupCallback(base::BindOnce(
+ &VaapiVideoEncodeAccelerator::SubmitVAEncMiscParamBuffer,
+ base::Unretained(vea_), VAEncMiscParameterTypeRateControl,
+ MakeRefCountedBytes(&rate_control_param, sizeof(rate_control_param))));
- current_pps_.seq_parameter_set_id = current_sps_.seq_parameter_set_id;
- current_pps_.pic_parameter_set_id = 0;
+ job->AddSetupCallback(base::BindOnce(
+ &VaapiVideoEncodeAccelerator::SubmitVAEncMiscParamBuffer,
+ base::Unretained(vea_), VAEncMiscParameterTypeFrameRate,
+ MakeRefCountedBytes(&framerate_param, sizeof(framerate_param))));
- current_pps_.entropy_coding_mode_flag =
- current_sps_.profile_idc >= H264SPS::kProfileIDCMain;
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitVAEncMiscParamBuffer,
+ base::Unretained(vea_), VAEncMiscParameterTypeHRD,
+ MakeRefCountedBytes(&hrd_param, sizeof(hrd_param))));
- CHECK_GT(max_ref_idx_l0_size_, 0u);
- current_pps_.num_ref_idx_l0_default_active_minus1 = max_ref_idx_l0_size_ - 1;
- current_pps_.num_ref_idx_l1_default_active_minus1 = 0;
- DCHECK_LE(qp_, 51u);
- current_pps_.pic_init_qp_minus26 = qp_ - 26;
- current_pps_.deblocking_filter_control_present_flag = true;
- current_pps_.transform_8x8_mode_flag =
- (current_sps_.profile_idc == H264SPS::kProfileIDCHigh);
+ return true;
}
-void VaapiVideoEncodeAccelerator::GeneratePackedPPS() {
- packed_pps_.Reset();
+scoped_refptr<H264Picture>
+VaapiVideoEncodeAccelerator::H264Accelerator::GetPicture(
+ AcceleratedVideoEncoder::EncodeJob* job) {
+ return base::MakeRefCounted<VaapiH264Picture>(
+ job->AsVaapiEncodeJob()->reconstructed_surface());
+}
- packed_pps_.BeginNALU(H264NALU::kPPS, 3);
+bool VaapiVideoEncodeAccelerator::H264Accelerator::SubmitPackedHeaders(
+ AcceleratedVideoEncoder::EncodeJob* job,
+ scoped_refptr<H264BitstreamBuffer> packed_sps,
+ scoped_refptr<H264BitstreamBuffer> packed_pps) {
+ // Submit SPS.
+ VAEncPackedHeaderParameterBuffer par_buffer = {};
+ par_buffer.type = VAEncPackedHeaderSequence;
+ par_buffer.bit_length = packed_sps->BytesInBuffer() * 8;
- packed_pps_.AppendUE(current_pps_.pic_parameter_set_id);
- packed_pps_.AppendUE(current_pps_.seq_parameter_set_id);
- packed_pps_.AppendBool(current_pps_.entropy_coding_mode_flag);
- packed_pps_.AppendBool(
- current_pps_.bottom_field_pic_order_in_frame_present_flag);
- CHECK_EQ(current_pps_.num_slice_groups_minus1, 0);
- packed_pps_.AppendUE(current_pps_.num_slice_groups_minus1);
+ job->AddSetupCallback(base::BindOnce(
+ &VaapiVideoEncodeAccelerator::SubmitBuffer, base::Unretained(vea_),
+ VAEncPackedHeaderParameterBufferType,
+ MakeRefCountedBytes(&par_buffer, sizeof(par_buffer))));
- packed_pps_.AppendUE(current_pps_.num_ref_idx_l0_default_active_minus1);
- packed_pps_.AppendUE(current_pps_.num_ref_idx_l1_default_active_minus1);
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitH264BitstreamBuffer,
+ base::Unretained(vea_), packed_sps));
- packed_pps_.AppendBool(current_pps_.weighted_pred_flag);
- packed_pps_.AppendBits(2, current_pps_.weighted_bipred_idc);
+ // Submit PPS.
+ par_buffer = {};
+ par_buffer.type = VAEncPackedHeaderPicture;
+ par_buffer.bit_length = packed_pps->BytesInBuffer() * 8;
- packed_pps_.AppendSE(current_pps_.pic_init_qp_minus26);
- packed_pps_.AppendSE(current_pps_.pic_init_qs_minus26);
- packed_pps_.AppendSE(current_pps_.chroma_qp_index_offset);
+ job->AddSetupCallback(base::BindOnce(
+ &VaapiVideoEncodeAccelerator::SubmitBuffer, base::Unretained(vea_),
+ VAEncPackedHeaderParameterBufferType,
+ MakeRefCountedBytes(&par_buffer, sizeof(par_buffer))));
- packed_pps_.AppendBool(current_pps_.deblocking_filter_control_present_flag);
- packed_pps_.AppendBool(current_pps_.constrained_intra_pred_flag);
- packed_pps_.AppendBool(current_pps_.redundant_pic_cnt_present_flag);
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitH264BitstreamBuffer,
+ base::Unretained(vea_), packed_pps));
- packed_pps_.AppendBool(current_pps_.transform_8x8_mode_flag);
- packed_pps_.AppendBool(current_pps_.pic_scaling_matrix_present_flag);
- DCHECK(!current_pps_.pic_scaling_matrix_present_flag);
- packed_pps_.AppendSE(current_pps_.second_chroma_qp_index_offset);
+ return true;
+}
- packed_pps_.FinishNALU();
+scoped_refptr<VP8Picture>
+VaapiVideoEncodeAccelerator::VP8Accelerator::GetPicture(
+ AcceleratedVideoEncoder::EncodeJob* job) {
+ return base::MakeRefCounted<VaapiVP8Picture>(
+ job->AsVaapiEncodeJob()->reconstructed_surface());
}
-void VaapiVideoEncodeAccelerator::SetState(State state) {
- // Only touch state on encoder thread, unless it's not running.
- if (encoder_thread_.IsRunning() &&
- !encoder_thread_task_runner_->BelongsToCurrentThread()) {
- encoder_thread_task_runner_->PostTask(
- FROM_HERE, base::Bind(&VaapiVideoEncodeAccelerator::SetState,
- base::Unretained(this), state));
- return;
+bool VaapiVideoEncodeAccelerator::VP8Accelerator::SubmitFrameParameters(
+ AcceleratedVideoEncoder::EncodeJob* job,
+ const media::VP8Encoder::EncodeParams& encode_params,
+ scoped_refptr<VP8Picture> pic,
+ const Vp8ReferenceFrameVector& ref_frames) {
+ VAEncSequenceParameterBufferVP8 seq_param = {};
+
+ const auto& frame_header = pic->frame_hdr;
+ seq_param.frame_width = frame_header->width;
+ seq_param.frame_height = frame_header->height;
+ seq_param.frame_width_scale = frame_header->horizontal_scale;
+ seq_param.frame_height_scale = frame_header->vertical_scale;
+ seq_param.error_resilient = 1;
+ seq_param.bits_per_second = encode_params.bitrate_bps;
+ seq_param.intra_period = encode_params.kf_period_frames;
+
+ VAEncPictureParameterBufferVP8 pic_param = {};
+
+ pic_param.reconstructed_frame = pic->AsVaapiVP8Picture()->GetVASurfaceID();
+ DCHECK_NE(pic_param.reconstructed_frame, VA_INVALID_ID);
+
+ auto last_frame = ref_frames.GetFrame(Vp8RefType::VP8_FRAME_LAST);
+ pic_param.ref_last_frame =
+ last_frame ? last_frame->AsVaapiVP8Picture()->GetVASurfaceID()
+ : VA_INVALID_ID;
+ auto golden_frame = ref_frames.GetFrame(Vp8RefType::VP8_FRAME_GOLDEN);
+ pic_param.ref_gf_frame =
+ golden_frame ? golden_frame->AsVaapiVP8Picture()->GetVASurfaceID()
+ : VA_INVALID_ID;
+ auto alt_frame = ref_frames.GetFrame(Vp8RefType::VP8_FRAME_ALTREF);
+ pic_param.ref_arf_frame =
+ alt_frame ? alt_frame->AsVaapiVP8Picture()->GetVASurfaceID()
+ : VA_INVALID_ID;
+ pic_param.coded_buf = job->AsVaapiEncodeJob()->coded_buffer_id();
+ DCHECK_NE(pic_param.coded_buf, VA_INVALID_ID);
+
+ if (frame_header->IsKeyframe())
+ pic_param.ref_flags.bits.force_kf = true;
+
+ pic_param.pic_flags.bits.frame_type = frame_header->frame_type;
+ pic_param.pic_flags.bits.version = frame_header->version;
+ pic_param.pic_flags.bits.show_frame = frame_header->show_frame;
+ pic_param.pic_flags.bits.loop_filter_type = frame_header->loopfilter_hdr.type;
+ pic_param.pic_flags.bits.num_token_partitions =
+ frame_header->num_of_dct_partitions;
+ pic_param.pic_flags.bits.segmentation_enabled =
+ frame_header->segmentation_hdr.segmentation_enabled;
+ pic_param.pic_flags.bits.update_mb_segmentation_map =
+ frame_header->segmentation_hdr.update_mb_segmentation_map;
+ pic_param.pic_flags.bits.update_segment_feature_data =
+ frame_header->segmentation_hdr.update_segment_feature_data;
+
+ pic_param.pic_flags.bits.loop_filter_adj_enable =
+ frame_header->loopfilter_hdr.loop_filter_adj_enable;
+
+ pic_param.pic_flags.bits.refresh_entropy_probs =
+ frame_header->refresh_entropy_probs;
+ pic_param.pic_flags.bits.refresh_golden_frame =
+ frame_header->refresh_golden_frame;
+ pic_param.pic_flags.bits.refresh_alternate_frame =
+ frame_header->refresh_alternate_frame;
+ pic_param.pic_flags.bits.refresh_last = frame_header->refresh_last;
+ pic_param.pic_flags.bits.copy_buffer_to_golden =
+ frame_header->copy_buffer_to_golden;
+ pic_param.pic_flags.bits.copy_buffer_to_alternate =
+ frame_header->copy_buffer_to_alternate;
+ pic_param.pic_flags.bits.sign_bias_golden = frame_header->sign_bias_golden;
+ pic_param.pic_flags.bits.sign_bias_alternate =
+ frame_header->sign_bias_alternate;
+ pic_param.pic_flags.bits.mb_no_coeff_skip = frame_header->mb_no_skip_coeff;
+ if (frame_header->IsKeyframe())
+ pic_param.pic_flags.bits.forced_lf_adjustment = true;
+
+ static_assert(
+ arraysize(pic_param.loop_filter_level) ==
+ arraysize(pic_param.ref_lf_delta) &&
+ arraysize(pic_param.ref_lf_delta) ==
+ arraysize(pic_param.mode_lf_delta) &&
+ arraysize(pic_param.ref_lf_delta) ==
+ arraysize(frame_header->loopfilter_hdr.ref_frame_delta) &&
+ arraysize(pic_param.mode_lf_delta) ==
+ arraysize(frame_header->loopfilter_hdr.mb_mode_delta),
+ "Invalid loop filter array sizes");
+
+ for (size_t i = 0; i < base::size(pic_param.loop_filter_level); ++i) {
+ pic_param.loop_filter_level[i] = frame_header->loopfilter_hdr.level;
+ pic_param.ref_lf_delta[i] = frame_header->loopfilter_hdr.ref_frame_delta[i];
+ pic_param.mode_lf_delta[i] = frame_header->loopfilter_hdr.mb_mode_delta[i];
}
- VLOGF(2) << "setting state to: " << state;
- state_ = state;
-}
+ pic_param.sharpness_level = frame_header->loopfilter_hdr.sharpness_level;
+ pic_param.clamp_qindex_high = encode_params.max_qp;
+ pic_param.clamp_qindex_low = encode_params.min_qp;
+
+ VAQMatrixBufferVP8 qmatrix_buf = {};
+ for (size_t i = 0; i < base::size(qmatrix_buf.quantization_index); ++i)
+ qmatrix_buf.quantization_index[i] = frame_header->quantization_hdr.y_ac_qi;
+
+ qmatrix_buf.quantization_index_delta[0] =
+ frame_header->quantization_hdr.y_dc_delta;
+ qmatrix_buf.quantization_index_delta[1] =
+ frame_header->quantization_hdr.y2_dc_delta;
+ qmatrix_buf.quantization_index_delta[2] =
+ frame_header->quantization_hdr.y2_ac_delta;
+ qmatrix_buf.quantization_index_delta[3] =
+ frame_header->quantization_hdr.uv_dc_delta;
+ qmatrix_buf.quantization_index_delta[4] =
+ frame_header->quantization_hdr.uv_ac_delta;
+
+ VAEncMiscParameterRateControl rate_control_param = {};
+ rate_control_param.bits_per_second = encode_params.bitrate_bps;
+ rate_control_param.target_percentage = kTargetBitratePercentage;
+ rate_control_param.window_size = encode_params.cpb_window_size_ms;
+ rate_control_param.initial_qp = encode_params.initial_qp;
+ rate_control_param.rc_flags.bits.disable_frame_skip = true;
-void VaapiVideoEncodeAccelerator::NotifyError(Error error) {
- if (!child_task_runner_->BelongsToCurrentThread()) {
- child_task_runner_->PostTask(
- FROM_HERE, base::Bind(&VaapiVideoEncodeAccelerator::NotifyError,
- weak_this_, error));
- return;
- }
+ VAEncMiscParameterFrameRate framerate_param = {};
+ framerate_param.framerate = encode_params.framerate;
- if (client_) {
- client_->NotifyError(error);
- client_ptr_factory_.reset();
- }
-}
+ VAEncMiscParameterHRD hrd_param = {};
+ hrd_param.buffer_size = encode_params.cpb_size_bits;
+ hrd_param.initial_buffer_fullness = hrd_param.buffer_size / 2;
+
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitBuffer,
+ base::Unretained(vea_), VAEncSequenceParameterBufferType,
+ MakeRefCountedBytes(&seq_param, sizeof(seq_param))));
+
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitBuffer,
+ base::Unretained(vea_), VAEncPictureParameterBufferType,
+ MakeRefCountedBytes(&pic_param, sizeof(pic_param))));
-VaapiVideoEncodeAccelerator::EncodeJob::EncodeJob()
- : coded_buffer(VA_INVALID_ID), keyframe(false) {}
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitBuffer,
+ base::Unretained(vea_), VAQMatrixBufferType,
+ MakeRefCountedBytes(&qmatrix_buf, sizeof(qmatrix_buf))));
-VaapiVideoEncodeAccelerator::EncodeJob::~EncodeJob() {}
+ job->AddSetupCallback(base::BindOnce(
+ &VaapiVideoEncodeAccelerator::SubmitVAEncMiscParamBuffer,
+ base::Unretained(vea_), VAEncMiscParameterTypeRateControl,
+ MakeRefCountedBytes(&rate_control_param, sizeof(rate_control_param))));
+
+ job->AddSetupCallback(base::BindOnce(
+ &VaapiVideoEncodeAccelerator::SubmitVAEncMiscParamBuffer,
+ base::Unretained(vea_), VAEncMiscParameterTypeFrameRate,
+ MakeRefCountedBytes(&framerate_param, sizeof(framerate_param))));
+
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitVAEncMiscParamBuffer,
+ base::Unretained(vea_), VAEncMiscParameterTypeHRD,
+ MakeRefCountedBytes(&hrd_param, sizeof(hrd_param))));
+
+ return true;
+}
} // namespace media
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
index 4d6754dcc81..bcd4fcc8d8f 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
@@ -8,24 +8,24 @@
#include <stddef.h>
#include <stdint.h>
-#include <list>
#include <memory>
#include "base/containers/queue.h"
#include "base/macros.h"
+#include "base/memory/ref_counted_memory.h"
#include "base/threading/thread.h"
#include "media/filters/h264_bitstream_buffer.h"
-#include "media/gpu/h264_dpb.h"
#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/vaapi/accelerated_video_encoder.h"
#include "media/gpu/vaapi/va_surface.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
#include "media/video/video_encode_accelerator.h"
namespace media {
+class VaapiEncodeJob;
// A VideoEncodeAccelerator implementation that uses VA-API
-// (http://www.freedesktop.org/wiki/Software/vaapi) for HW-accelerated
-// video encode.
+// (https://01.org/vaapi) for HW-accelerated video encode.
class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
: public VideoEncodeAccelerator {
public:
@@ -48,36 +48,8 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
void Flush(FlushCallback flush_callback) override;
private:
- // Reference picture list.
- typedef std::list<scoped_refptr<VASurface>> RefPicList;
-
- // Encode job for one frame. Created when an input frame is awaiting and
- // enough resources are available to proceed. Once the job is prepared and
- // submitted to the hardware, it awaits on the submitted_encode_jobs_ queue
- // for an output bitstream buffer to become available. Once one is ready,
- // the encoded bytes are downloaded to it and job resources are released
- // and become available for reuse.
- struct EncodeJob {
- // Input surface for video frame data.
- scoped_refptr<VASurface> input_surface;
- // Surface for a reconstructed picture, which is used for reference
- // for subsequent frames.
- scoped_refptr<VASurface> recon_surface;
- // Buffer that will contain output bitstream for this frame.
- VABufferID coded_buffer;
- // Reference surfaces required to encode this picture. We keep references
- // to them here, because we may discard some of them from ref_pic_list*
- // before the HW job is done.
- RefPicList reference_surfaces;
- // True if this job will produce a keyframe. Used to report
- // to BitstreamBufferReady().
- bool keyframe;
- // Source timestamp.
- base::TimeDelta timestamp;
-
- EncodeJob();
- ~EncodeJob();
- };
+ class H264Accelerator;
+ class VP8Accelerator;
// Encoder state.
enum State {
@@ -91,98 +63,87 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
// Holds output buffers coming from the client ready to be filled.
struct BitstreamBufferRef;
+ //
// Tasks for each of the VEA interface calls to be executed on the
// encoder thread.
- void InitializeTask();
- void EncodeTask(const scoped_refptr<VideoFrame>& frame, bool force_keyframe);
+ //
+ void InitializeTask(const gfx::Size& visible_size,
+ VideoCodecProfile profile,
+ uint32_t bitrate);
+
+ // Enqueues |frame| onto the queue of pending inputs and attempts to continue
+ // encoding.
+ void EncodeTask(scoped_refptr<VideoFrame> frame, bool force_keyframe);
+
+ // Maps |buffer_ref|, pushes it onto the available_bitstream_buffers_, and
+ // attempts to return any pending encoded data in it, if any.
void UseOutputBitstreamBufferTask(
std::unique_ptr<BitstreamBufferRef> buffer_ref);
+
void RequestEncodingParametersChangeTask(uint32_t bitrate,
uint32_t framerate);
void DestroyTask();
void FlushTask();
- // Prepare and schedule an encode job if we have an input to encode
- // and enough resources to proceed.
- void EncodeFrameTask();
-
- // Fill current_sps_/current_pps_ with current values.
- void UpdateSPS();
- void UpdatePPS();
- void UpdateRates(uint32_t bitrate, uint32_t framerate);
-
- // Generate packed SPS and PPS in packed_sps_/packed_pps_, using
- // values in current_sps_/current_pps_.
- void GeneratePackedSPS();
- void GeneratePackedPPS();
-
- // Check if we have sufficient resources for a new encode job, claim them and
- // fill current_encode_job_ with them.
- // Return false if we cannot start a new job yet, true otherwise.
- bool PrepareNextJob(base::TimeDelta timestamp);
-
- // Begin a new frame, making it a keyframe if |force_keyframe| is true,
- // updating current_pic_.
- void BeginFrame(bool force_keyframe);
+ // Checks if sufficient resources for a new encode job with |frame| as input
+ // are available, and if so, claims them by associating them with
+ // a VaapiEncodeJob, and returns the newly-created job, nullptr otherwise.
+ scoped_refptr<VaapiEncodeJob> CreateEncodeJob(scoped_refptr<VideoFrame> frame,
+ bool force_keyframe);
- // End current frame, updating reference picture lists and storing current
- // job in the jobs awaiting completion on submitted_encode_jobs_.
- void EndFrame();
+ // Continues encoding frames as long as input_queue_ is not empty, and we are
+ // able to create new EncodeJobs.
+ void EncodePendingInputs();
- // Submit parameters for the current frame to the hardware.
- bool SubmitFrameParameters();
- // Submit keyframe headers to the hardware if the current frame is a keyframe.
- bool SubmitHeadersIfNeeded();
+ // Uploads image data from |frame| to |va_surface_id|.
+ void UploadFrame(scoped_refptr<VideoFrame> frame, VASurfaceID va_surface_id);
- // Upload image data from |frame| to the input surface for current job.
- bool UploadFrame(const scoped_refptr<VideoFrame>& frame);
-
- // Execute encode in hardware. This does not block and will return before
+ // Executes encode in hardware. This does not block and may return before
// the job is finished.
- bool ExecuteEncode();
+ void ExecuteEncode(VASurfaceID va_surface_id);
// Callback that returns a no longer used VASurfaceID to
// available_va_surface_ids_ for reuse.
void RecycleVASurfaceID(VASurfaceID va_surface_id);
- // Tries to return a bitstream buffer if both a submitted job awaits to
- // be completed and we have bitstream buffers from the client available
- // to download the encoded data to.
+ // Returns a bitstream buffer to the client if both a previously executed job
+ // awaits completion and we have bitstream buffers available to download
+ // the encoded data into.
void TryToReturnBitstreamBuffer();
- // Puts the encoder into en error state and notifies client about the error.
+ // Downloads encoded data produced as a result of running |encode_job| into
+ // |buffer|, and returns it to the client.
+ void ReturnBitstreamBuffer(scoped_refptr<VaapiEncodeJob> encode_job,
+ std::unique_ptr<BitstreamBufferRef> buffer);
+
+ // Puts the encoder into an error state and notifies the client
+ // about the error.
void NotifyError(Error error);
- // Sets the encoder state on the correct thread.
+ // Sets the encoder state to |state| on the correct thread.
void SetState(State state);
+ // Submits |buffer| of |type| to the driver.
+ void SubmitBuffer(VABufferType type,
+ scoped_refptr<base::RefCountedBytes> buffer);
+
+ // Submits a VAEncMiscParameterBuffer |buffer| of type |type| to the driver.
+ void SubmitVAEncMiscParamBuffer(VAEncMiscParameterType type,
+ scoped_refptr<base::RefCountedBytes> buffer);
+
+ // Submits a H264BitstreamBuffer |buffer| to the driver.
+ void SubmitH264BitstreamBuffer(scoped_refptr<H264BitstreamBuffer> buffer);
+
// VaapiWrapper is the owner of all HW resources (surfaces and buffers)
// and will free them on destruction.
scoped_refptr<VaapiWrapper> vaapi_wrapper_;
// Input profile and sizes.
- VideoCodecProfile profile_;
+ VideoCodec codec_;
gfx::Size visible_size_;
- gfx::Size coded_size_; // Macroblock-aligned.
- // Width/height in macroblocks.
- unsigned int mb_width_;
- unsigned int mb_height_;
-
- // Maximum size of the reference list 0.
- unsigned int max_ref_idx_l0_size_;
-
- // Initial QP.
- unsigned int qp_;
-
- // IDR frame period.
- unsigned int idr_period_;
- // I frame period.
- unsigned int i_period_;
- // IP period, i.e. how often do we need to have either an I or a P frame in
- // the stream. Period of 1 means we can have no B frames.
- unsigned int ip_period_;
-
- // Size in bytes required for input bitstream buffers.
+ gfx::Size coded_size_;
+
+ // Size in bytes required for output bitstream buffers.
size_t output_buffer_byte_size_;
// All of the members below must be accessed on the encoder_thread_,
@@ -191,34 +152,8 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
// Encoder state. Encode tasks will only run in kEncoding state.
State state_;
- // frame_num to be used for the next frame.
- unsigned int frame_num_;
- // idr_pic_id to be used for the next frame.
- unsigned int idr_pic_id_;
-
- // Current bitrate in bps.
- unsigned int bitrate_;
- // Current fps.
- unsigned int framerate_;
- // CPB size in bits, i.e. bitrate in kbps * window size in ms/1000.
- unsigned int cpb_size_;
- // True if the parameters have changed and we need to submit a keyframe
- // with updated parameters.
- bool encoding_parameters_changed_;
-
- // Job currently being prepared for encode.
- std::unique_ptr<EncodeJob> current_encode_job_;
-
- // Current SPS, PPS and their packed versions. Packed versions are their NALUs
- // in AnnexB format *without* emulation prevention three-byte sequences
- // (those will be added by the driver).
- H264SPS current_sps_;
- H264BitstreamBuffer packed_sps_;
- H264PPS current_pps_;
- H264BitstreamBuffer packed_pps_;
-
- // Picture currently being prepared for encode.
- scoped_refptr<H264Picture> current_pic_;
+ // Encoder instance managing video codec state and preparing encode jobs.
+ std::unique_ptr<AcceleratedVideoEncoder> encoder_;
// VA surfaces available for reuse.
std::vector<VASurfaceID> available_va_surface_ids_;
@@ -226,22 +161,18 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
// VA buffers for coded frames.
std::vector<VABufferID> available_va_buffer_ids_;
- // Currently active reference surfaces.
- RefPicList ref_pic_list0_;
-
// Callback via which finished VA surfaces are returned to us.
VASurface::ReleaseCB va_surface_release_cb_;
- // VideoFrames passed from the client, waiting to be encoded.
- base::queue<std::unique_ptr<InputFrameRef>> encoder_input_queue_;
+ // Queue of input frames to be encoded.
+ base::queue<std::unique_ptr<InputFrameRef>> input_queue_;
- // BitstreamBuffers mapped, ready to be filled.
+ // BitstreamBuffers mapped, ready to be filled with encoded stream data.
base::queue<std::unique_ptr<BitstreamBufferRef>> available_bitstream_buffers_;
- // Jobs submitted for encode, awaiting bitstream buffers to become available.
- // A pending flush command, indicated by a null job, will be also put in the
- // queue.
- base::queue<std::unique_ptr<EncodeJob>> submitted_encode_jobs_;
+ // Jobs submitted to driver for encode, awaiting bitstream buffers to become
+ // available.
+ base::queue<scoped_refptr<VaapiEncodeJob>> submitted_encode_jobs_;
// Encoder thread. All tasks are executed on it.
base::Thread encoder_thread_;
diff --git a/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.cc
index c66a9304603..2e763ebdd19 100644
--- a/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.cc
@@ -42,15 +42,13 @@ scoped_refptr<VP8Picture> VaapiVP8Accelerator::CreateVP8Picture() {
}
bool VaapiVP8Accelerator::SubmitDecode(
- const scoped_refptr<VP8Picture>& pic,
- const Vp8FrameHeader* frame_hdr,
- const scoped_refptr<VP8Picture>& last_frame,
- const scoped_refptr<VP8Picture>& golden_frame,
- const scoped_refptr<VP8Picture>& alt_frame) {
+ scoped_refptr<VP8Picture> pic,
+ const Vp8ReferenceFrameVector& reference_frames) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
VAIQMatrixBufferVP8 iq_matrix_buf;
memset(&iq_matrix_buf, 0, sizeof(VAIQMatrixBufferVP8));
+ const auto& frame_hdr = pic->frame_hdr;
const Vp8SegmentationHeader& sgmnt_hdr = frame_hdr->segmentation_hdr;
const Vp8QuantizationHeader& quant_hdr = frame_hdr->quantization_hdr;
static_assert(arraysize(iq_matrix_buf.quantization_index) == kMaxMBSegments,
@@ -98,6 +96,7 @@ bool VaapiVP8Accelerator::SubmitDecode(
pic_param.frame_width = frame_hdr->width;
pic_param.frame_height = frame_hdr->height;
+ const auto last_frame = reference_frames.GetFrame(Vp8RefType::VP8_FRAME_LAST);
if (last_frame) {
pic_param.last_ref_frame =
last_frame->AsVaapiVP8Picture()->GetVASurfaceID();
@@ -105,6 +104,8 @@ bool VaapiVP8Accelerator::SubmitDecode(
pic_param.last_ref_frame = VA_INVALID_SURFACE;
}
+ const auto golden_frame =
+ reference_frames.GetFrame(Vp8RefType::VP8_FRAME_GOLDEN);
if (golden_frame) {
pic_param.golden_ref_frame =
golden_frame->AsVaapiVP8Picture()->GetVASurfaceID();
@@ -112,6 +113,8 @@ bool VaapiVP8Accelerator::SubmitDecode(
pic_param.golden_ref_frame = VA_INVALID_SURFACE;
}
+ const auto alt_frame =
+ reference_frames.GetFrame(Vp8RefType::VP8_FRAME_ALTREF);
if (alt_frame) {
pic_param.alt_ref_frame = alt_frame->AsVaapiVP8Picture()->GetVASurfaceID();
} else {
diff --git a/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.h b/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.h
index 2d251a5b9c4..0889ab7d5fb 100644
--- a/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.h
@@ -23,11 +23,8 @@ class VaapiVP8Accelerator : public VP8Decoder::VP8Accelerator {
// VP8Decoder::VP8Accelerator implementation.
scoped_refptr<VP8Picture> CreateVP8Picture() override;
- bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
- const Vp8FrameHeader* frame_hdr,
- const scoped_refptr<VP8Picture>& last_frame,
- const scoped_refptr<VP8Picture>& golden_frame,
- const scoped_refptr<VP8Picture>& alt_frame) override;
+ bool SubmitDecode(scoped_refptr<VP8Picture> picture,
+ const Vp8ReferenceFrameVector& reference_frames) override;
bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override;
private:
diff --git a/chromium/media/gpu/vaapi/vaapi_wrapper.cc b/chromium/media/gpu/vaapi/vaapi_wrapper.cc
index c8ee25eeff7..4068c9f69f8 100644
--- a/chromium/media/gpu/vaapi/vaapi_wrapper.cc
+++ b/chromium/media/gpu/vaapi/vaapi_wrapper.cc
@@ -24,6 +24,8 @@
#include "base/sys_info.h"
#include "build/build_config.h"
+#include "media/base/media_switches.h"
+
// Auto-generated for dlopen libva libraries
#include "media/gpu/vaapi/va_stubs.h"
@@ -124,7 +126,7 @@ namespace {
// Maximum framerate of encoded profile. This value is an arbitary limit
// and not taken from HW documentation.
-const int kMaxEncoderFramerate = 30;
+constexpr int kMaxEncoderFramerate = 30;
// A map between VideoCodecProfile and VAProfile.
static const struct {
@@ -176,6 +178,14 @@ bool IsBlackListedDriver(const std::string& va_vendor_string,
return true;
}
}
+
+ // TODO(posciak): Remove once VP8 encoding is to be enabled by default.
+ if (mode == VaapiWrapper::CodecMode::kEncode &&
+ va_profile == VAProfileVP8Version0_3 &&
+ !base::FeatureList::IsEnabled(kVaapiVP8Encoder)) {
+ return true;
+ }
+
return false;
}
@@ -925,7 +935,7 @@ bool VaapiWrapper::SubmitBuffer(VABufferType va_buffer_type,
bool VaapiWrapper::SubmitVAEncMiscParamBuffer(
VAEncMiscParameterType misc_param_type,
size_t size,
- void* buffer) {
+ const void* buffer) {
base::AutoLock auto_lock(*va_lock_);
VABufferID buffer_id;
diff --git a/chromium/media/gpu/vaapi/vaapi_wrapper.h b/chromium/media/gpu/vaapi/vaapi_wrapper.h
index f77ced9968f..1dd6a3cf29e 100644
--- a/chromium/media/gpu/vaapi/vaapi_wrapper.h
+++ b/chromium/media/gpu/vaapi/vaapi_wrapper.h
@@ -132,7 +132,7 @@ class MEDIA_GPU_EXPORT VaapiWrapper
// DestroyPendingBuffers() is used to cancel a pending job.
bool SubmitVAEncMiscParamBuffer(VAEncMiscParameterType misc_param_type,
size_t size,
- void* buffer);
+ const void* buffer);
// Cancel and destroy all buffers queued to the HW codec via SubmitBuffer().
// Useful when a pending job is to be cancelled (on reset or error).
diff --git a/chromium/media/gpu/vaapi/vp8_encoder.cc b/chromium/media/gpu/vaapi/vp8_encoder.cc
new file mode 100644
index 00000000000..a7b8cd413f5
--- /dev/null
+++ b/chromium/media/gpu/vaapi/vp8_encoder.cc
@@ -0,0 +1,181 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/vaapi/vp8_encoder.h"
+
+#include "base/bits.h"
+
+#define DVLOGF(level) DVLOG(level) << __func__ << "(): "
+
+namespace media {
+
+namespace {
+// Keyframe period.
+const size_t kKFPeriod = 3000;
+
+// Arbitrarily chosen bitrate window size for rate control, in ms.
+const int kCPBWindowSizeMs = 1500;
+
+// Based on WebRTC's defaults.
+const int kMinQP = 4;
+const int kMaxQP = 112;
+const int kDefaultQP = (3 * kMinQP + kMaxQP) / 4;
+} // namespace
+
+VP8Encoder::EncodeParams::EncodeParams()
+ : kf_period_frames(kKFPeriod),
+ bitrate_bps(0),
+ framerate(0),
+ cpb_window_size_ms(kCPBWindowSizeMs),
+ cpb_size_bits(0),
+ initial_qp(kDefaultQP),
+ min_qp(kMinQP),
+ max_qp(kMaxQP),
+ error_resilient_mode(false) {}
+
+void VP8Encoder::Reset() {
+ current_params_ = EncodeParams();
+ reference_frames_.Clear();
+ frame_num_ = 0;
+
+ InitializeFrameHeader();
+}
+
+VP8Encoder::VP8Encoder(std::unique_ptr<Accelerator> accelerator)
+ : accelerator_(std::move(accelerator)) {}
+
+VP8Encoder::~VP8Encoder() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+}
+
+bool VP8Encoder::Initialize(const gfx::Size& visible_size,
+ VideoCodecProfile profile,
+ uint32_t initial_bitrate,
+ uint32_t initial_framerate) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(profile >= VP8PROFILE_MIN && profile <= VP8PROFILE_MAX);
+
+ DCHECK(!visible_size.IsEmpty());
+ // 4:2:0 format has to be 2-aligned.
+ DCHECK_EQ(visible_size.width() % 2, 0);
+ DCHECK_EQ(visible_size.height() % 2, 0);
+
+ visible_size_ = visible_size;
+ coded_size_ = gfx::Size(base::bits::Align(visible_size_.width(), 16),
+ base::bits::Align(visible_size_.height(), 16));
+
+ Reset();
+
+ return UpdateRates(initial_bitrate, initial_framerate);
+}
+
+gfx::Size VP8Encoder::GetCodedSize() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(!coded_size_.IsEmpty());
+
+ return coded_size_;
+}
+
+size_t VP8Encoder::GetBitstreamBufferSize() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(!coded_size_.IsEmpty());
+
+ return coded_size_.GetArea();
+}
+
+size_t VP8Encoder::GetMaxNumOfRefFrames() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ return kNumVp8ReferenceBuffers;
+}
+
+bool VP8Encoder::PrepareEncodeJob(EncodeJob* encode_job) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (encode_job->IsKeyframeRequested())
+ frame_num_ = 0;
+
+ if (frame_num_ == 0)
+ encode_job->ProduceKeyframe();
+
+ frame_num_++;
+ frame_num_ %= current_params_.kf_period_frames;
+
+ scoped_refptr<VP8Picture> picture = accelerator_->GetPicture(encode_job);
+ DCHECK(picture);
+
+ UpdateFrameHeader(encode_job->IsKeyframeRequested());
+ *picture->frame_hdr = current_frame_hdr_;
+
+ if (!accelerator_->SubmitFrameParameters(encode_job, current_params_, picture,
+ reference_frames_)) {
+ LOG(ERROR) << "Failed submitting frame parameters";
+ return false;
+ }
+
+ UpdateReferenceFrames(picture);
+ return true;
+}
+
+bool VP8Encoder::UpdateRates(uint32_t bitrate, uint32_t framerate) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (bitrate == 0 || framerate == 0)
+ return false;
+
+ if (current_params_.bitrate_bps == bitrate &&
+ current_params_.framerate == framerate) {
+ return true;
+ }
+
+ current_params_.bitrate_bps = bitrate;
+ current_params_.framerate = framerate;
+
+ current_params_.cpb_size_bits =
+ current_params_.bitrate_bps * current_params_.cpb_window_size_ms / 1000;
+
+ return true;
+}
+
+void VP8Encoder::InitializeFrameHeader() {
+ current_frame_hdr_ = {};
+ DCHECK(!visible_size_.IsEmpty());
+ current_frame_hdr_.width = visible_size_.width();
+ current_frame_hdr_.height = visible_size_.height();
+ current_frame_hdr_.quantization_hdr.y_ac_qi = current_params_.initial_qp;
+ current_frame_hdr_.show_frame = true;
+ // TODO(sprang): Make this dynamic. Value based on reference implementation
+ // in libyami (https://github.com/intel/libyami).
+ current_frame_hdr_.loopfilter_hdr.level = 19;
+}
+
+void VP8Encoder::UpdateFrameHeader(bool keyframe) {
+ current_frame_hdr_.frame_type =
+ keyframe ? Vp8FrameHeader::KEYFRAME : Vp8FrameHeader::INTERFRAME;
+}
+
+void VP8Encoder::UpdateReferenceFrames(scoped_refptr<VP8Picture> picture) {
+ if (current_frame_hdr_.IsKeyframe()) {
+ current_frame_hdr_.refresh_last = true;
+ current_frame_hdr_.refresh_golden_frame = true;
+ current_frame_hdr_.refresh_alternate_frame = true;
+ current_frame_hdr_.copy_buffer_to_golden =
+ Vp8FrameHeader::NO_GOLDEN_REFRESH;
+ current_frame_hdr_.copy_buffer_to_alternate =
+ Vp8FrameHeader::NO_ALT_REFRESH;
+ } else {
+ // TODO(sprang): Add temporal layer support.
+ current_frame_hdr_.refresh_last = true;
+ current_frame_hdr_.refresh_golden_frame = false;
+ current_frame_hdr_.refresh_alternate_frame = false;
+ current_frame_hdr_.copy_buffer_to_golden =
+ Vp8FrameHeader::COPY_LAST_TO_GOLDEN;
+ current_frame_hdr_.copy_buffer_to_alternate =
+ Vp8FrameHeader::COPY_GOLDEN_TO_ALT;
+ }
+
+ reference_frames_.Refresh(picture);
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/vaapi/vp8_encoder.h b/chromium/media/gpu/vaapi/vp8_encoder.h
new file mode 100644
index 00000000000..66abde9ce8d
--- /dev/null
+++ b/chromium/media/gpu/vaapi/vp8_encoder.h
@@ -0,0 +1,109 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_VAAPI_VP8_ENCODER_H_
+#define MEDIA_GPU_VAAPI_VP8_ENCODER_H_
+
+#include <list>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/sequence_checker.h"
+#include "media/filters/vp8_parser.h"
+#include "media/gpu/vaapi/accelerated_video_encoder.h"
+#include "media/gpu/vp8_picture.h"
+#include "media/gpu/vp8_reference_frame_vector.h"
+
+namespace media {
+
+class VP8Encoder : public AcceleratedVideoEncoder {
+ public:
+ struct EncodeParams {
+ EncodeParams();
+
+ // Produce a keyframe at least once per this many frames.
+ size_t kf_period_frames;
+
+ // Bitrate in bps.
+ uint32_t bitrate_bps;
+
+ // Framerate in FPS.
+ uint32_t framerate;
+
+ // Bitrate window size in ms.
+ unsigned int cpb_window_size_ms;
+
+ // Coded picture buffer size in bits.
+ unsigned int cpb_size_bits;
+
+ int initial_qp;
+ int min_qp;
+ int max_qp;
+
+ bool error_resilient_mode;
+ };
+
+ // An accelerator interface. The client must provide an appropriate
+ // implementation on creation.
+ class Accelerator {
+ public:
+ Accelerator() = default;
+ virtual ~Accelerator() = default;
+
+ // Returns the VP8Picture to be used as output for |job|.
+ virtual scoped_refptr<VP8Picture> GetPicture(EncodeJob* job) = 0;
+
+ // Initializes |job| to use the provided |encode_params| as its parameters,
+ // and |pic| as the target, as well as |ref_frames| as reference frames for
+ // it. Returns true on success.
+ virtual bool SubmitFrameParameters(
+ EncodeJob* job,
+ const media::VP8Encoder::EncodeParams& encode_params,
+ scoped_refptr<VP8Picture> pic,
+ const Vp8ReferenceFrameVector& ref_frames) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Accelerator);
+ };
+
+ explicit VP8Encoder(std::unique_ptr<Accelerator> accelerator);
+ ~VP8Encoder() override;
+
+ // AcceleratedVideoEncoder implementation.
+ bool Initialize(const gfx::Size& visible_size,
+ VideoCodecProfile profile,
+ uint32_t initial_bitrate,
+ uint32_t initial_framerate) override;
+ bool UpdateRates(uint32_t bitrate, uint32_t framerate) override;
+ gfx::Size GetCodedSize() const override;
+ size_t GetBitstreamBufferSize() const override;
+ size_t GetMaxNumOfRefFrames() const override;
+ bool PrepareEncodeJob(EncodeJob* encode_job) override;
+
+ private:
+ void InitializeFrameHeader();
+ void UpdateFrameHeader(bool keyframe);
+ void UpdateReferenceFrames(scoped_refptr<VP8Picture> picture);
+ void Reset();
+
+ gfx::Size visible_size_;
+ gfx::Size coded_size_; // Macroblock-aligned.
+
+ // Frame count since last keyframe, reset to 0 every keyframe period.
+ size_t frame_num_ = 0;
+
+ EncodeParams current_params_;
+
+ Vp8FrameHeader current_frame_hdr_;
+ Vp8ReferenceFrameVector reference_frames_;
+
+ const std::unique_ptr<Accelerator> accelerator_;
+
+ SEQUENCE_CHECKER(sequence_checker_);
+ DISALLOW_COPY_AND_ASSIGN(VP8Encoder);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_VAAPI_VP8_ENCODER_H_