summaryrefslogtreecommitdiff
path: root/chromium/third_party/webrtc/video
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/third_party/webrtc/video')
-rw-r--r--chromium/third_party/webrtc/video/BUILD.gn27
-rw-r--r--chromium/third_party/webrtc/video/adaptation/BUILD.gn12
-rw-r--r--chromium/third_party/webrtc/video/adaptation/encode_usage_resource.cc16
-rw-r--r--chromium/third_party/webrtc/video/adaptation/encode_usage_resource.h11
-rw-r--r--chromium/third_party/webrtc/video/adaptation/overuse_frame_detector_unittest.cc4
-rw-r--r--chromium/third_party/webrtc/video/adaptation/quality_rampup_experiment_helper.cc80
-rw-r--r--chromium/third_party/webrtc/video/adaptation/quality_rampup_experiment_helper.h68
-rw-r--r--chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.cc44
-rw-r--r--chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.h25
-rw-r--r--chromium/third_party/webrtc/video/adaptation/quality_scaler_resource_unittest.cc7
-rw-r--r--chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource.cc85
-rw-r--r--chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource.h80
-rw-r--r--chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager.cc485
-rw-r--r--chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager.h149
-rw-r--r--chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager_unittest.cc98
-rw-r--r--chromium/third_party/webrtc/video/call_stats2_unittest.cc2
-rw-r--r--chromium/third_party/webrtc/video/call_stats_unittest.cc2
-rw-r--r--chromium/third_party/webrtc/video/end_to_end_tests/bandwidth_tests.cc13
-rw-r--r--chromium/third_party/webrtc/video/end_to_end_tests/codec_tests.cc30
-rw-r--r--chromium/third_party/webrtc/video/full_stack_tests.cc109
-rw-r--r--chromium/third_party/webrtc/video/pc_full_stack_tests.cc102
-rw-r--r--chromium/third_party/webrtc/video/receive_statistics_proxy2.cc11
-rw-r--r--chromium/third_party/webrtc/video/rtp_video_stream_receiver.cc62
-rw-r--r--chromium/third_party/webrtc/video/rtp_video_stream_receiver.h60
-rw-r--r--chromium/third_party/webrtc/video/rtp_video_stream_receiver2.cc1154
-rw-r--r--chromium/third_party/webrtc/video/rtp_video_stream_receiver2.h367
-rw-r--r--chromium/third_party/webrtc/video/rtp_video_stream_receiver2_unittest.cc1221
-rw-r--r--chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc10
-rw-r--r--chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h17
-rw-r--r--chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc130
-rw-r--r--chromium/third_party/webrtc/video/rtp_video_stream_receiver_unittest.cc53
-rw-r--r--chromium/third_party/webrtc/video/send_statistics_proxy.cc44
-rw-r--r--chromium/third_party/webrtc/video/send_statistics_proxy.h5
-rw-r--r--chromium/third_party/webrtc/video/test/mock_video_stream_encoder.h54
-rw-r--r--chromium/third_party/webrtc/video/video_quality_test.cc5
-rw-r--r--chromium/third_party/webrtc/video/video_receive_stream.h1
-rw-r--r--chromium/third_party/webrtc/video/video_receive_stream2.cc188
-rw-r--r--chromium/third_party/webrtc/video/video_receive_stream2.h53
-rw-r--r--chromium/third_party/webrtc/video/video_receive_stream2_unittest.cc571
-rw-r--r--chromium/third_party/webrtc/video/video_receive_stream_unittest.cc34
-rw-r--r--chromium/third_party/webrtc/video/video_send_stream.cc12
-rw-r--r--chromium/third_party/webrtc/video/video_send_stream.h3
-rw-r--r--chromium/third_party/webrtc/video/video_send_stream_impl_unittest.cc66
-rw-r--r--chromium/third_party/webrtc/video/video_send_stream_tests.cc46
-rw-r--r--chromium/third_party/webrtc/video/video_source_sink_controller.cc22
-rw-r--r--chromium/third_party/webrtc/video/video_source_sink_controller.h2
-rw-r--r--chromium/third_party/webrtc/video/video_source_sink_controller_unittest.cc17
-rw-r--r--chromium/third_party/webrtc/video/video_stream_decoder_impl_unittest.cc36
-rw-r--r--chromium/third_party/webrtc/video/video_stream_encoder.cc199
-rw-r--r--chromium/third_party/webrtc/video/video_stream_encoder.h37
-rw-r--r--chromium/third_party/webrtc/video/video_stream_encoder_unittest.cc680
51 files changed, 5142 insertions, 1467 deletions
diff --git a/chromium/third_party/webrtc/video/BUILD.gn b/chromium/third_party/webrtc/video/BUILD.gn
index bfc3b0695cf..84dce1fdd08 100644
--- a/chromium/third_party/webrtc/video/BUILD.gn
+++ b/chromium/third_party/webrtc/video/BUILD.gn
@@ -34,6 +34,8 @@ rtc_library("video") {
"rtp_streams_synchronizer2.h",
"rtp_video_stream_receiver.cc",
"rtp_video_stream_receiver.h",
+ "rtp_video_stream_receiver2.cc",
+ "rtp_video_stream_receiver2.h",
"rtp_video_stream_receiver_frame_transformer_delegate.cc",
"rtp_video_stream_receiver_frame_transformer_delegate.h",
"send_delay_stats.cc",
@@ -77,7 +79,6 @@ rtc_library("video") {
"../api/crypto:options",
"../api/rtc_event_log",
"../api/task_queue",
- "../api/transport/media:media_transport_interface",
"../api/units:timestamp",
"../api/video:encoded_image",
"../api/video:recordable_encoded_frame",
@@ -111,6 +112,7 @@ rtc_library("video") {
"../modules/video_coding:nack_module",
"../modules/video_coding:video_codec_interface",
"../modules/video_coding:video_coding_utility",
+ "../modules/video_coding/deprecated:nack_module",
"../modules/video_processing",
"../rtc_base:checks",
"../rtc_base:rate_limiter",
@@ -135,6 +137,8 @@ rtc_library("video") {
"../system_wrappers",
"../system_wrappers:field_trial",
"../system_wrappers:metrics",
+ ]
+ absl_deps = [
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/memory",
@@ -166,8 +170,8 @@ rtc_library("video_stream_decoder_impl") {
"../rtc_base:rtc_base_approved",
"../rtc_base:rtc_task_queue",
"../system_wrappers",
- "//third_party/abseil-cpp/absl/types:optional",
]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("frame_dumping_decoder") {
@@ -209,6 +213,7 @@ rtc_library("video_stream_encoder_impl") {
deps = [
"../api:rtp_parameters",
+ "../api/adaptation:resource_adaptation_api",
"../api/task_queue:task_queue",
"../api/units:data_rate",
"../api/video:encoded_image",
@@ -250,6 +255,8 @@ rtc_library("video_stream_encoder_impl") {
"../system_wrappers",
"../system_wrappers:field_trial",
"adaptation:video_adaptation",
+ ]
+ absl_deps = [
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/types:optional",
@@ -291,7 +298,6 @@ if (rtc_include_tests) {
"../api/rtc_event_log:rtc_event_log_factory",
"../api/task_queue",
"../api/task_queue:default_task_queue_factory",
- "../api/transport/media:media_transport_interface",
"../api/video:builtin_video_bitrate_allocator_factory",
"../api/video:video_bitrate_allocator_factory",
"../api/video:video_frame",
@@ -333,6 +339,8 @@ if (rtc_include_tests) {
"../test:test_support_test_artifacts",
"../test:video_test_common",
"../test:video_test_support",
+ ]
+ absl_deps = [
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/flags:flag",
"//third_party/abseil-cpp/absl/flags:parse",
@@ -363,8 +371,11 @@ if (rtc_include_tests) {
"../test:test_common",
"../test:test_support",
"//testing/gtest",
+ ]
+ absl_deps = [
"//third_party/abseil-cpp/absl/flags:flag",
"//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/types:optional",
]
}
@@ -415,8 +426,11 @@ if (rtc_include_tests) {
"../test:test_renderer",
"../test:test_support",
"//testing/gtest",
+ ]
+ absl_deps = [
"//third_party/abseil-cpp/absl/flags:flag",
"//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/types:optional",
]
}
@@ -485,6 +499,7 @@ if (rtc_include_tests) {
"//testing/gtest",
"//third_party/abseil-cpp/absl/flags:flag",
"//third_party/abseil-cpp/absl/flags:parse",
+ "//third_party/abseil-cpp/absl/types:optional",
]
}
@@ -528,12 +543,14 @@ if (rtc_include_tests) {
"receive_statistics_proxy2_unittest.cc",
"receive_statistics_proxy_unittest.cc",
"report_block_stats_unittest.cc",
+ "rtp_video_stream_receiver2_unittest.cc",
"rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc",
"rtp_video_stream_receiver_unittest.cc",
"send_delay_stats_unittest.cc",
"send_statistics_proxy_unittest.cc",
"stats_counter_unittest.cc",
"stream_synchronization_unittest.cc",
+ "video_receive_stream2_unittest.cc",
"video_receive_stream_unittest.cc",
"video_send_stream_impl_unittest.cc",
"video_send_stream_tests.cc",
@@ -559,6 +576,7 @@ if (rtc_include_tests) {
"../api:scoped_refptr",
"../api:simulated_network_api",
"../api:transport_api",
+ "../api/adaptation:resource_adaptation_api",
"../api/crypto:options",
"../api/rtc_event_log",
"../api/task_queue",
@@ -614,6 +632,7 @@ if (rtc_include_tests) {
"../modules/video_coding:webrtc_vp9",
"../rtc_base",
"../rtc_base:checks",
+ "../rtc_base:gunit_helpers",
"../rtc_base:rate_limiter",
"../rtc_base:rtc_base_approved",
"../rtc_base:rtc_base_tests_utils",
@@ -643,6 +662,8 @@ if (rtc_include_tests) {
"../test/time_controller",
"adaptation:video_adaptation",
"//testing/gtest",
+ ]
+ absl_deps = [
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/types:optional",
diff --git a/chromium/third_party/webrtc/video/adaptation/BUILD.gn b/chromium/third_party/webrtc/video/adaptation/BUILD.gn
index 51e6a2d84ec..b88fe92b74c 100644
--- a/chromium/third_party/webrtc/video/adaptation/BUILD.gn
+++ b/chromium/third_party/webrtc/video/adaptation/BUILD.gn
@@ -14,8 +14,12 @@ rtc_library("video_adaptation") {
"encode_usage_resource.h",
"overuse_frame_detector.cc",
"overuse_frame_detector.h",
+ "quality_rampup_experiment_helper.cc",
+ "quality_rampup_experiment_helper.h",
"quality_scaler_resource.cc",
"quality_scaler_resource.h",
+ "video_stream_encoder_resource.cc",
+ "video_stream_encoder_resource.h",
"video_stream_encoder_resource_manager.cc",
"video_stream_encoder_resource_manager.h",
]
@@ -23,7 +27,9 @@ rtc_library("video_adaptation") {
deps = [
"../../api:rtp_parameters",
"../../api:scoped_refptr",
+ "../../api/adaptation:resource_adaptation_api",
"../../api/task_queue:task_queue",
+ "../../api/units:data_rate",
"../../api/video:video_adaptation",
"../../api/video:video_frame",
"../../api/video:video_stream_encoder",
@@ -44,8 +50,11 @@ rtc_library("video_adaptation") {
"../../rtc_base/experiments:quality_scaler_settings",
"../../rtc_base/synchronization:sequence_checker",
"../../rtc_base/task_utils:repeating_task",
+ "../../rtc_base/task_utils:to_queued_task",
"../../system_wrappers:field_trial",
"../../system_wrappers:system_wrappers",
+ ]
+ absl_deps = [
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/types:optional",
@@ -60,7 +69,6 @@ if (rtc_include_tests) {
sources = [
"overuse_frame_detector_unittest.cc",
"quality_scaler_resource_unittest.cc",
- "video_stream_encoder_resource_manager_unittest.cc",
]
deps = [
":video_adaptation",
@@ -85,7 +93,7 @@ if (rtc_include_tests) {
"//test:rtc_expect_death",
"//test:test_support",
"//testing/gtest",
- "//third_party/abseil-cpp/absl/types:optional",
]
+ absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
}
diff --git a/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.cc b/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.cc
index 49531a3aa42..d6f2334fa1a 100644
--- a/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.cc
+++ b/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.cc
@@ -15,18 +15,28 @@
#include "api/video/video_adaptation_reason.h"
#include "rtc_base/checks.h"
+#include "rtc_base/ref_counted_object.h"
namespace webrtc {
+// static
+rtc::scoped_refptr<EncodeUsageResource> EncodeUsageResource::Create(
+ std::unique_ptr<OveruseFrameDetector> overuse_detector) {
+ return new rtc::RefCountedObject<EncodeUsageResource>(
+ std::move(overuse_detector));
+}
+
EncodeUsageResource::EncodeUsageResource(
std::unique_ptr<OveruseFrameDetector> overuse_detector)
- : rtc::RefCountedObject<Resource>(),
+ : VideoStreamEncoderResource("EncoderUsageResource"),
overuse_detector_(std::move(overuse_detector)),
is_started_(false),
target_frame_rate_(absl::nullopt) {
RTC_DCHECK(overuse_detector_);
}
+EncodeUsageResource::~EncodeUsageResource() {}
+
bool EncodeUsageResource::is_started() const {
RTC_DCHECK_RUN_ON(encoder_queue());
return is_started_;
@@ -81,7 +91,7 @@ void EncodeUsageResource::AdaptUp() {
RTC_DCHECK_RUN_ON(encoder_queue());
// Reference counting guarantees that this object is still alive by the time
// the task is executed.
- resource_adaptation_queue()->PostTask(
+ MaybePostTaskToResourceAdaptationQueue(
[this_ref = rtc::scoped_refptr<EncodeUsageResource>(this)] {
RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue());
this_ref->OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
@@ -92,7 +102,7 @@ void EncodeUsageResource::AdaptDown() {
RTC_DCHECK_RUN_ON(encoder_queue());
// Reference counting guarantees that this object is still alive by the time
// the task is executed.
- resource_adaptation_queue()->PostTask(
+ MaybePostTaskToResourceAdaptationQueue(
[this_ref = rtc::scoped_refptr<EncodeUsageResource>(this)] {
RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue());
this_ref->OnResourceUsageStateMeasured(ResourceUsageState::kOveruse);
diff --git a/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.h b/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.h
index 3c6f02b2437..257988fa127 100644
--- a/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.h
+++ b/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.h
@@ -15,11 +15,12 @@
#include <string>
#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
#include "api/video/video_adaptation_reason.h"
-#include "call/adaptation/resource.h"
#include "rtc_base/ref_counted_object.h"
#include "rtc_base/task_queue.h"
#include "video/adaptation/overuse_frame_detector.h"
+#include "video/adaptation/video_stream_encoder_resource.h"
namespace webrtc {
@@ -28,11 +29,15 @@ namespace webrtc {
// indirectly by usage in the ResourceAdaptationProcessor (which is only tested
// because of its usage in VideoStreamEncoder); all tests are currently in
// video_stream_encoder_unittest.cc.
-class EncodeUsageResource : public rtc::RefCountedObject<Resource>,
+class EncodeUsageResource : public VideoStreamEncoderResource,
public OveruseFrameDetectorObserverInterface {
public:
+ static rtc::scoped_refptr<EncodeUsageResource> Create(
+ std::unique_ptr<OveruseFrameDetector> overuse_detector);
+
explicit EncodeUsageResource(
std::unique_ptr<OveruseFrameDetector> overuse_detector);
+ ~EncodeUsageResource() override;
bool is_started() const;
@@ -51,8 +56,6 @@ class EncodeUsageResource : public rtc::RefCountedObject<Resource>,
void AdaptUp() override;
void AdaptDown() override;
- std::string name() const override { return "EncoderUsageResource"; }
-
private:
int TargetFrameRateAsInt();
diff --git a/chromium/third_party/webrtc/video/adaptation/overuse_frame_detector_unittest.cc b/chromium/third_party/webrtc/video/adaptation/overuse_frame_detector_unittest.cc
index bb34224b025..d4bf910faa6 100644
--- a/chromium/third_party/webrtc/video/adaptation/overuse_frame_detector_unittest.cc
+++ b/chromium/third_party/webrtc/video/adaptation/overuse_frame_detector_unittest.cc
@@ -41,8 +41,8 @@ class MockCpuOveruseObserver : public OveruseFrameDetectorObserverInterface {
MockCpuOveruseObserver() {}
virtual ~MockCpuOveruseObserver() {}
- MOCK_METHOD0(AdaptUp, void());
- MOCK_METHOD0(AdaptDown, void());
+ MOCK_METHOD(void, AdaptUp, (), (override));
+ MOCK_METHOD(void, AdaptDown, (), (override));
};
class CpuOveruseObserverImpl : public OveruseFrameDetectorObserverInterface {
diff --git a/chromium/third_party/webrtc/video/adaptation/quality_rampup_experiment_helper.cc b/chromium/third_party/webrtc/video/adaptation/quality_rampup_experiment_helper.cc
new file mode 100644
index 00000000000..6d82503fc6f
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/quality_rampup_experiment_helper.cc
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/quality_rampup_experiment_helper.h"
+
+#include <memory>
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+QualityRampUpExperimentHelper::QualityRampUpExperimentHelper(
+ QualityRampUpExperimentListener* experiment_listener,
+ Clock* clock,
+ QualityRampupExperiment experiment)
+ : experiment_listener_(experiment_listener),
+ clock_(clock),
+ quality_rampup_experiment_(std::move(experiment)),
+ cpu_adapted_(false),
+ qp_resolution_adaptations_(0) {
+ RTC_DCHECK(experiment_listener_);
+ RTC_DCHECK(clock_);
+}
+
+std::unique_ptr<QualityRampUpExperimentHelper>
+QualityRampUpExperimentHelper::CreateIfEnabled(
+ QualityRampUpExperimentListener* experiment_listener,
+ Clock* clock) {
+ QualityRampupExperiment experiment = QualityRampupExperiment::ParseSettings();
+ if (experiment.Enabled()) {
+ return std::unique_ptr<QualityRampUpExperimentHelper>(
+ new QualityRampUpExperimentHelper(experiment_listener, clock,
+ experiment));
+ }
+ return nullptr;
+}
+
+void QualityRampUpExperimentHelper::PerformQualityRampupExperiment(
+ rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource,
+ DataRate bandwidth,
+ DataRate encoder_target_bitrate,
+ DataRate max_bitrate,
+ int pixels) {
+ if (!quality_scaler_resource->is_started())
+ return;
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ quality_rampup_experiment_.SetMaxBitrate(pixels, max_bitrate.kbps());
+
+ bool try_quality_rampup = false;
+ if (quality_rampup_experiment_.BwHigh(now_ms, bandwidth.kbps())) {
+ // Verify that encoder is at max bitrate and the QP is low.
+ if (encoder_target_bitrate == max_bitrate &&
+ quality_scaler_resource->QpFastFilterLow()) {
+ try_quality_rampup = true;
+ }
+ }
+ if (try_quality_rampup && qp_resolution_adaptations_ > 0 && !cpu_adapted_) {
+ experiment_listener_->OnQualityRampUp();
+ }
+}
+
+void QualityRampUpExperimentHelper::cpu_adapted(bool cpu_adapted) {
+ cpu_adapted_ = cpu_adapted;
+}
+
+void QualityRampUpExperimentHelper::qp_resolution_adaptations(
+ int qp_resolution_adaptations) {
+ qp_resolution_adaptations_ = qp_resolution_adaptations;
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/adaptation/quality_rampup_experiment_helper.h b/chromium/third_party/webrtc/video/adaptation/quality_rampup_experiment_helper.h
new file mode 100644
index 00000000000..81be982e7c7
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/quality_rampup_experiment_helper.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_
+#define VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_
+
+#include <memory>
+
+#include "api/scoped_refptr.h"
+#include "api/units/data_rate.h"
+#include "rtc_base/experiments/quality_rampup_experiment.h"
+#include "system_wrappers/include/clock.h"
+#include "video/adaptation/quality_scaler_resource.h"
+
+namespace webrtc {
+
+class QualityRampUpExperimentListener {
+ public:
+ virtual ~QualityRampUpExperimentListener() = default;
+ virtual void OnQualityRampUp() = 0;
+};
+
+// Helper class for orchestrating the WebRTC-Video-QualityRampupSettings
+// experiment.
+class QualityRampUpExperimentHelper {
+ public:
+ // Returns a QualityRampUpExperimentHelper if the experiment is enabled,
+ // an nullptr otherwise.
+ static std::unique_ptr<QualityRampUpExperimentHelper> CreateIfEnabled(
+ QualityRampUpExperimentListener* experiment_listener,
+ Clock* clock);
+
+ QualityRampUpExperimentHelper(const QualityRampUpExperimentHelper&) = delete;
+ QualityRampUpExperimentHelper& operator=(
+ const QualityRampUpExperimentHelper&) = delete;
+
+ void cpu_adapted(bool cpu_adapted);
+ void qp_resolution_adaptations(int qp_adaptations);
+
+ void PerformQualityRampupExperiment(
+ rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource,
+ DataRate bandwidth,
+ DataRate encoder_target_bitrate,
+ DataRate max_bitrate,
+ int pixels);
+
+ private:
+ QualityRampUpExperimentHelper(
+ QualityRampUpExperimentListener* experiment_listener,
+ Clock* clock,
+ QualityRampupExperiment experiment);
+ QualityRampUpExperimentListener* const experiment_listener_;
+ Clock* clock_;
+ QualityRampupExperiment quality_rampup_experiment_;
+ bool cpu_adapted_;
+ int qp_resolution_adaptations_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_
diff --git a/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.cc b/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.cc
index 403f6080cac..514a2d765f7 100644
--- a/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.cc
+++ b/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.cc
@@ -13,12 +13,27 @@
#include <utility>
#include "rtc_base/experiments/balanced_degradation_settings.h"
+#include "rtc_base/ref_counted_object.h"
+#include "rtc_base/task_utils/to_queued_task.h"
+#include "rtc_base/time_utils.h"
namespace webrtc {
+namespace {
+
+const int64_t kUnderuseDueToDisabledCooldownMs = 1000;
+
+} // namespace
+
+// static
+rtc::scoped_refptr<QualityScalerResource> QualityScalerResource::Create() {
+ return new rtc::RefCountedObject<QualityScalerResource>();
+}
+
QualityScalerResource::QualityScalerResource()
- : rtc::RefCountedObject<Resource>(),
+ : VideoStreamEncoderResource("QualityScalerResource"),
quality_scaler_(nullptr),
+ last_underuse_due_to_disabled_timestamp_ms_(absl::nullopt),
num_handled_callbacks_(0),
pending_callbacks_(),
adaptation_processor_(nullptr),
@@ -82,11 +97,18 @@ void QualityScalerResource::OnEncodeCompleted(const EncodedImage& encoded_image,
// mid call.
// Instead it should be done at a higher layer in the same way for all
// resources.
- resource_adaptation_queue()->PostTask(
- [this_ref = rtc::scoped_refptr<QualityScalerResource>(this)] {
- RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue());
- this_ref->OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
- });
+ int64_t timestamp_ms = rtc::TimeMillis();
+ if (!last_underuse_due_to_disabled_timestamp_ms_.has_value() ||
+ timestamp_ms - last_underuse_due_to_disabled_timestamp_ms_.value() >=
+ kUnderuseDueToDisabledCooldownMs) {
+ last_underuse_due_to_disabled_timestamp_ms_ = timestamp_ms;
+ MaybePostTaskToResourceAdaptationQueue(
+ [this_ref = rtc::scoped_refptr<QualityScalerResource>(this)] {
+ RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue());
+ this_ref->OnResourceUsageStateMeasured(
+ ResourceUsageState::kUnderuse);
+ });
+ }
}
}
@@ -111,7 +133,7 @@ void QualityScalerResource::OnReportQpUsageHigh(
size_t callback_id = QueuePendingCallback(callback);
// Reference counting guarantees that this object is still alive by the time
// the task is executed.
- resource_adaptation_queue()->PostTask(
+ MaybePostTaskToResourceAdaptationQueue(
[this_ref = rtc::scoped_refptr<QualityScalerResource>(this),
callback_id] {
RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue());
@@ -131,7 +153,7 @@ void QualityScalerResource::OnReportQpUsageLow(
size_t callback_id = QueuePendingCallback(callback);
// Reference counting guarantees that this object is still alive by the time
// the task is executed.
- resource_adaptation_queue()->PostTask(
+ MaybePostTaskToResourceAdaptationQueue(
[this_ref = rtc::scoped_refptr<QualityScalerResource>(this),
callback_id] {
RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue());
@@ -191,8 +213,8 @@ void QualityScalerResource::HandlePendingCallback(size_t callback_id,
// Reference counting guarantees that this object is still alive by the time
// the task is executed.
encoder_queue()->PostTask(
- [this_ref = rtc::scoped_refptr<QualityScalerResource>(this), callback_id,
- clear_qp_samples] {
+ ToQueuedTask([this_ref = rtc::scoped_refptr<QualityScalerResource>(this),
+ callback_id, clear_qp_samples] {
RTC_DCHECK_RUN_ON(this_ref->encoder_queue());
if (this_ref->num_handled_callbacks_ >= callback_id) {
// The callback with this ID has already been handled.
@@ -205,7 +227,7 @@ void QualityScalerResource::HandlePendingCallback(size_t callback_id,
clear_qp_samples);
++this_ref->num_handled_callbacks_;
this_ref->pending_callbacks_.pop();
- });
+ }));
}
void QualityScalerResource::AbortPendingCallbacks() {
diff --git a/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.h b/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.h
index 78685823c39..372d0c91b8a 100644
--- a/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.h
+++ b/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.h
@@ -15,21 +15,27 @@
#include <queue>
#include <string>
+#include "absl/types/optional.h"
+#include "api/scoped_refptr.h"
#include "api/video/video_adaptation_reason.h"
#include "api/video_codecs/video_encoder.h"
-#include "call/adaptation/resource.h"
+#include "call/adaptation/adaptation_listener.h"
#include "call/adaptation/resource_adaptation_processor_interface.h"
#include "modules/video_coding/utility/quality_scaler.h"
#include "rtc_base/critical_section.h"
#include "rtc_base/ref_counted_object.h"
#include "rtc_base/task_queue.h"
+#include "video/adaptation/video_stream_encoder_resource.h"
namespace webrtc {
// Handles interaction with the QualityScaler.
-class QualityScalerResource : public rtc::RefCountedObject<Resource>,
+class QualityScalerResource : public VideoStreamEncoderResource,
+ public AdaptationListener,
public QualityScalerQpUsageHandlerInterface {
public:
+ static rtc::scoped_refptr<QualityScalerResource> Create();
+
QualityScalerResource();
~QualityScalerResource() override;
@@ -55,9 +61,7 @@ class QualityScalerResource : public rtc::RefCountedObject<Resource>,
rtc::scoped_refptr<QualityScalerQpUsageHandlerCallbackInterface> callback)
override;
- std::string name() const override { return "QualityScalerResource"; }
-
- // Resource implementation.
+ // AdaptationListener implementation.
void OnAdaptationApplied(
const VideoStreamInputState& input_state,
const VideoSourceRestrictions& restrictions_before,
@@ -74,10 +78,15 @@ class QualityScalerResource : public rtc::RefCountedObject<Resource>,
// Members accessed on the encoder queue.
std::unique_ptr<QualityScaler> quality_scaler_
RTC_GUARDED_BY(encoder_queue());
+ // The timestamp of the last time we reported underuse because this resource
+ // was disabled in order to prevent getting stuck with QP adaptations. Used to
+ // make sure underuse reporting is not too spammy.
+ absl::optional<int64_t> last_underuse_due_to_disabled_timestamp_ms_
+ RTC_GUARDED_BY(encoder_queue());
// Every OnReportQpUsageHigh/Low() operation has a callback that MUST be
- // invoked on the |encoder_queue_|. Because usage measurements are reported on
- // the |encoder_queue_| but handled by the processor on the the
- // |resource_adaptation_queue_|, handling a measurement entails a task queue
+ // invoked on the encoder_queue(). Because usage measurements are reported on
+ // the encoder_queue() but handled by the processor on the the
+ // resource_adaptation_queue_(), handling a measurement entails a task queue
// "ping" round-trip. Multiple callbacks in-flight is thus possible.
size_t num_handled_callbacks_ RTC_GUARDED_BY(encoder_queue());
std::queue<rtc::scoped_refptr<QualityScalerQpUsageHandlerCallbackInterface>>
diff --git a/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource_unittest.cc b/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource_unittest.cc
index 66f4e138700..e2098d71b7a 100644
--- a/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource_unittest.cc
+++ b/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource_unittest.cc
@@ -74,9 +74,10 @@ class QualityScalerResourceTest : public ::testing::Test {
encoder_queue_(task_queue_factory_->CreateTaskQueue(
"EncoderQueue",
TaskQueueFactory::Priority::NORMAL)),
- quality_scaler_resource_(new QualityScalerResource()) {
- quality_scaler_resource_->Initialize(&encoder_queue_,
- &resource_adaptation_queue_);
+ quality_scaler_resource_(QualityScalerResource::Create()) {
+ quality_scaler_resource_->RegisterEncoderTaskQueue(encoder_queue_.Get());
+ quality_scaler_resource_->RegisterAdaptationTaskQueue(
+ resource_adaptation_queue_.Get());
rtc::Event event;
encoder_queue_.PostTask([this, &event] {
quality_scaler_resource_->StartCheckForOveruse(
diff --git a/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource.cc b/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource.cc
new file mode 100644
index 00000000000..4e99a1dbb32
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/video_stream_encoder_resource.h"
+
+#include <algorithm>
+#include <utility>
+
+namespace webrtc {
+
+VideoStreamEncoderResource::VideoStreamEncoderResource(std::string name)
+ : lock_(),
+ name_(std::move(name)),
+ encoder_queue_(nullptr),
+ resource_adaptation_queue_(nullptr),
+ listener_(nullptr) {}
+
+VideoStreamEncoderResource::~VideoStreamEncoderResource() {
+ RTC_DCHECK(!listener_)
+ << "There is a listener depending on a VideoStreamEncoderResource being "
+ << "destroyed.";
+}
+
+void VideoStreamEncoderResource::RegisterEncoderTaskQueue(
+ TaskQueueBase* encoder_queue) {
+ RTC_DCHECK(!encoder_queue_);
+ RTC_DCHECK(encoder_queue);
+ encoder_queue_ = encoder_queue;
+}
+
+void VideoStreamEncoderResource::RegisterAdaptationTaskQueue(
+ TaskQueueBase* resource_adaptation_queue) {
+ rtc::CritScope crit(&lock_);
+ RTC_DCHECK(!resource_adaptation_queue_);
+ RTC_DCHECK(resource_adaptation_queue);
+ resource_adaptation_queue_ = resource_adaptation_queue;
+}
+
+void VideoStreamEncoderResource::UnregisterAdaptationTaskQueue() {
+ rtc::CritScope crit(&lock_);
+ RTC_DCHECK(resource_adaptation_queue_);
+ RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
+ resource_adaptation_queue_ = nullptr;
+}
+
+void VideoStreamEncoderResource::SetResourceListener(
+ ResourceListener* listener) {
+ RTC_DCHECK_RUN_ON(resource_adaptation_queue());
+ // If you want to change listener you need to unregister the old listener by
+ // setting it to null first.
+ RTC_DCHECK(!listener_ || !listener) << "A listener is already set";
+ listener_ = listener;
+}
+
+std::string VideoStreamEncoderResource::Name() const {
+ return name_;
+}
+
+void VideoStreamEncoderResource::OnResourceUsageStateMeasured(
+ ResourceUsageState usage_state) {
+ RTC_DCHECK_RUN_ON(resource_adaptation_queue());
+ if (listener_) {
+ listener_->OnResourceUsageStateMeasured(this, usage_state);
+ }
+}
+
+TaskQueueBase* VideoStreamEncoderResource::encoder_queue() const {
+ return encoder_queue_;
+}
+
+TaskQueueBase* VideoStreamEncoderResource::resource_adaptation_queue() const {
+ rtc::CritScope crit(&lock_);
+ RTC_DCHECK(resource_adaptation_queue_);
+ RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
+ return resource_adaptation_queue_;
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource.h b/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource.h
new file mode 100644
index 00000000000..739702c3635
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_
+#define VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_
+
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/adaptation/resource.h"
+#include "api/task_queue/task_queue_base.h"
+#include "call/adaptation/adaptation_constraint.h"
+#include "call/adaptation/adaptation_listener.h"
+#include "rtc_base/critical_section.h"
+#include "rtc_base/synchronization/sequence_checker.h"
+
+namespace webrtc {
+
+class VideoStreamEncoderResource : public Resource {
+ public:
+ ~VideoStreamEncoderResource() override;
+
+ // Registering task queues must be performed as part of initialization.
+ void RegisterEncoderTaskQueue(TaskQueueBase* encoder_queue);
+
+ // Resource implementation.
+ std::string Name() const override;
+ void SetResourceListener(ResourceListener* listener) override;
+
+ // Provides a pointer to the adaptation task queue. After this call, all
+ // methods defined in this interface, including
+ // UnregisterAdaptationTaskQueue() MUST be invoked on the adaptation task
+ // queue. Registering the adaptation task queue may, however, happen off the
+ // adaptation task queue.
+ void RegisterAdaptationTaskQueue(TaskQueueBase* resource_adaptation_queue);
+ // Signals that the adaptation task queue is no longer safe to use. No
+ // assumptions must be made as to whether or not tasks in-flight will run.
+ void UnregisterAdaptationTaskQueue();
+
+ protected:
+ explicit VideoStreamEncoderResource(std::string name);
+
+ void OnResourceUsageStateMeasured(ResourceUsageState usage_state);
+
+ // The caller is responsible for ensuring the task queue is still valid.
+ TaskQueueBase* encoder_queue() const;
+ // Validity of returned pointer is ensured by only allowing this method to be
+ // called on the adaptation task queue. Designed for use with RTC_GUARDED_BY.
+ // For posting from a different queue, use
+ // MaybePostTaskToResourceAdaptationQueue() instead, which only posts if the
+ // task queue is currently registered.
+ TaskQueueBase* resource_adaptation_queue() const;
+ template <typename Closure>
+ void MaybePostTaskToResourceAdaptationQueue(Closure&& closure) {
+ rtc::CritScope crit(&lock_);
+ if (!resource_adaptation_queue_)
+ return;
+ resource_adaptation_queue_->PostTask(ToQueuedTask(closure));
+ }
+
+ private:
+ rtc::CriticalSection lock_;
+ const std::string name_;
+ // Treated as const after initialization.
+ TaskQueueBase* encoder_queue_;
+ TaskQueueBase* resource_adaptation_queue_ RTC_GUARDED_BY(lock_);
+ ResourceListener* listener_ RTC_GUARDED_BY(resource_adaptation_queue());
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_
diff --git a/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager.cc b/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager.cc
index b309dd34559..450b10f8bcd 100644
--- a/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager.cc
+++ b/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager.cc
@@ -10,22 +10,21 @@
#include "video/adaptation/video_stream_encoder_resource_manager.h"
-#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
-#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/base/macros.h"
+#include "api/adaptation/resource.h"
#include "api/task_queue/task_queue_base.h"
#include "api/video/video_adaptation_reason.h"
#include "api/video/video_source_interface.h"
-#include "call/adaptation/resource.h"
#include "call/adaptation/video_source_restrictions.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/ref_counted_object.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/time_utils.h"
@@ -55,15 +54,6 @@ std::string ToString(VideoAdaptationReason reason) {
}
}
-VideoAdaptationReason OtherReason(VideoAdaptationReason reason) {
- switch (reason) {
- case VideoAdaptationReason::kQuality:
- return VideoAdaptationReason::kCpu;
- case VideoAdaptationReason::kCpu:
- return VideoAdaptationReason::kQuality;
- }
-}
-
} // namespace
class VideoStreamEncoderResourceManager::InitialFrameDropper {
@@ -138,96 +128,47 @@ class VideoStreamEncoderResourceManager::InitialFrameDropper {
int initial_framedrop_;
};
-VideoStreamEncoderResourceManager::PreventAdaptUpDueToActiveCounts::
- PreventAdaptUpDueToActiveCounts(VideoStreamEncoderResourceManager* manager)
- : rtc::RefCountedObject<Resource>(),
- manager_(manager),
- adaptation_processor_(nullptr) {}
-
-void VideoStreamEncoderResourceManager::PreventAdaptUpDueToActiveCounts::
- SetAdaptationProcessor(
- ResourceAdaptationProcessorInterface* adaptation_processor) {
- RTC_DCHECK_RUN_ON(resource_adaptation_queue());
- adaptation_processor_ = adaptation_processor;
-}
-
-bool VideoStreamEncoderResourceManager::PreventAdaptUpDueToActiveCounts::
- IsAdaptationUpAllowed(const VideoStreamInputState& input_state,
- const VideoSourceRestrictions& restrictions_before,
- const VideoSourceRestrictions& restrictions_after,
- rtc::scoped_refptr<Resource> reason_resource) const {
- RTC_DCHECK_RUN_ON(resource_adaptation_queue());
- RTC_DCHECK(adaptation_processor_);
- VideoAdaptationReason reason =
- manager_->GetReasonFromResource(reason_resource);
- {
- // This is the same as |resource_adaptation_queue_|, but need to
- // RTC_DCHECK_RUN_ON() both to avoid compiler error when accessing
- // |manager_->active_counts_|.
- RTC_DCHECK_RUN_ON(manager_->resource_adaptation_queue_);
- // We can't adapt up if we're already at the highest setting.
- // Note that this only includes counts relevant to the current degradation
- // preference. e.g. we previously adapted resolution, now prefer adpating
- // fps, only count the fps adaptations and not the previous resolution
- // adaptations.
- // TODO(hbos): Why would the reason matter? If a particular resource doesn't
- // want us to go up it should prevent us from doing so itself rather than to
- // have this catch-all reason- and stats-based approach.
- int num_downgrades =
- FilterVideoAdaptationCountersByDegradationPreference(
- manager_->active_counts_[reason],
- adaptation_processor_->effective_degradation_preference())
- .Total();
- RTC_DCHECK_GE(num_downgrades, 0);
- return num_downgrades > 0;
- }
-}
-
-VideoStreamEncoderResourceManager::
- PreventIncreaseResolutionDueToBitrateResource::
- PreventIncreaseResolutionDueToBitrateResource(
- VideoStreamEncoderResourceManager* manager)
- : rtc::RefCountedObject<Resource>(),
- manager_(manager),
+VideoStreamEncoderResourceManager::BitrateConstraint::BitrateConstraint(
+ VideoStreamEncoderResourceManager* manager)
+ : manager_(manager),
+ resource_adaptation_queue_(nullptr),
encoder_settings_(absl::nullopt),
encoder_target_bitrate_bps_(absl::nullopt) {}
-void VideoStreamEncoderResourceManager::
- PreventIncreaseResolutionDueToBitrateResource::OnEncoderSettingsUpdated(
- absl::optional<EncoderSettings> encoder_settings) {
- RTC_DCHECK_RUN_ON(encoder_queue());
- resource_adaptation_queue()->PostTask(
- [this_ref =
- rtc::scoped_refptr<PreventIncreaseResolutionDueToBitrateResource>(
- this),
- encoder_settings] {
- RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue());
+void VideoStreamEncoderResourceManager::BitrateConstraint::SetAdaptationQueue(
+ TaskQueueBase* resource_adaptation_queue) {
+ resource_adaptation_queue_ = resource_adaptation_queue;
+}
+
+void VideoStreamEncoderResourceManager::BitrateConstraint::
+ OnEncoderSettingsUpdated(absl::optional<EncoderSettings> encoder_settings) {
+ RTC_DCHECK_RUN_ON(manager_->encoder_queue_);
+ resource_adaptation_queue_->PostTask(
+ ToQueuedTask([this_ref = rtc::scoped_refptr<BitrateConstraint>(this),
+ encoder_settings] {
+ RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue_);
this_ref->encoder_settings_ = std::move(encoder_settings);
- });
+ }));
}
-void VideoStreamEncoderResourceManager::
- PreventIncreaseResolutionDueToBitrateResource::
- OnEncoderTargetBitrateUpdated(
- absl::optional<uint32_t> encoder_target_bitrate_bps) {
- RTC_DCHECK_RUN_ON(encoder_queue());
- resource_adaptation_queue()->PostTask(
- [this_ref =
- rtc::scoped_refptr<PreventIncreaseResolutionDueToBitrateResource>(
- this),
- encoder_target_bitrate_bps] {
- RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue());
+void VideoStreamEncoderResourceManager::BitrateConstraint::
+ OnEncoderTargetBitrateUpdated(
+ absl::optional<uint32_t> encoder_target_bitrate_bps) {
+ RTC_DCHECK_RUN_ON(manager_->encoder_queue_);
+ resource_adaptation_queue_->PostTask(
+ ToQueuedTask([this_ref = rtc::scoped_refptr<BitrateConstraint>(this),
+ encoder_target_bitrate_bps] {
+ RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue_);
this_ref->encoder_target_bitrate_bps_ = encoder_target_bitrate_bps;
- });
+ }));
}
-bool VideoStreamEncoderResourceManager::
- PreventIncreaseResolutionDueToBitrateResource::IsAdaptationUpAllowed(
- const VideoStreamInputState& input_state,
- const VideoSourceRestrictions& restrictions_before,
- const VideoSourceRestrictions& restrictions_after,
- rtc::scoped_refptr<Resource> reason_resource) const {
- RTC_DCHECK_RUN_ON(resource_adaptation_queue());
+bool VideoStreamEncoderResourceManager::BitrateConstraint::
+ IsAdaptationUpAllowed(const VideoStreamInputState& input_state,
+ const VideoSourceRestrictions& restrictions_before,
+ const VideoSourceRestrictions& restrictions_after,
+ rtc::scoped_refptr<Resource> reason_resource) const {
+ RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
VideoAdaptationReason reason =
manager_->GetReasonFromResource(reason_resource);
// If increasing resolution due to kQuality, make sure bitrate limits are not
@@ -256,38 +197,43 @@ bool VideoStreamEncoderResourceManager::
return true;
}
-VideoStreamEncoderResourceManager::PreventAdaptUpInBalancedResource::
- PreventAdaptUpInBalancedResource(VideoStreamEncoderResourceManager* manager)
- : rtc::RefCountedObject<Resource>(),
- manager_(manager),
+VideoStreamEncoderResourceManager::BalancedConstraint::BalancedConstraint(
+ VideoStreamEncoderResourceManager* manager)
+ : manager_(manager),
+ resource_adaptation_queue_(nullptr),
adaptation_processor_(nullptr),
encoder_target_bitrate_bps_(absl::nullopt) {}
-void VideoStreamEncoderResourceManager::PreventAdaptUpInBalancedResource::
+void VideoStreamEncoderResourceManager::BalancedConstraint::SetAdaptationQueue(
+ TaskQueueBase* resource_adaptation_queue) {
+ resource_adaptation_queue_ = resource_adaptation_queue;
+}
+
+void VideoStreamEncoderResourceManager::BalancedConstraint::
SetAdaptationProcessor(
ResourceAdaptationProcessorInterface* adaptation_processor) {
- RTC_DCHECK_RUN_ON(resource_adaptation_queue());
+ RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
adaptation_processor_ = adaptation_processor;
}
-void VideoStreamEncoderResourceManager::PreventAdaptUpInBalancedResource::
+void VideoStreamEncoderResourceManager::BalancedConstraint::
OnEncoderTargetBitrateUpdated(
absl::optional<uint32_t> encoder_target_bitrate_bps) {
- RTC_DCHECK_RUN_ON(encoder_queue());
- resource_adaptation_queue()->PostTask(
- [this_ref = rtc::scoped_refptr<PreventAdaptUpInBalancedResource>(this),
- encoder_target_bitrate_bps] {
- RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue());
+ RTC_DCHECK_RUN_ON(manager_->encoder_queue_);
+ resource_adaptation_queue_->PostTask(
+ ToQueuedTask([this_ref = rtc::scoped_refptr<BalancedConstraint>(this),
+ encoder_target_bitrate_bps] {
+ RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue_);
this_ref->encoder_target_bitrate_bps_ = encoder_target_bitrate_bps;
- });
+ }));
}
-bool VideoStreamEncoderResourceManager::PreventAdaptUpInBalancedResource::
+bool VideoStreamEncoderResourceManager::BalancedConstraint::
IsAdaptationUpAllowed(const VideoStreamInputState& input_state,
const VideoSourceRestrictions& restrictions_before,
const VideoSourceRestrictions& restrictions_after,
rtc::scoped_refptr<Resource> reason_resource) const {
- RTC_DCHECK_RUN_ON(resource_adaptation_queue());
+ RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
RTC_DCHECK(adaptation_processor_);
VideoAdaptationReason reason =
manager_->GetReasonFromResource(reason_resource);
@@ -321,15 +267,11 @@ VideoStreamEncoderResourceManager::VideoStreamEncoderResourceManager(
Clock* clock,
bool experiment_cpu_load_estimator,
std::unique_ptr<OveruseFrameDetector> overuse_detector)
- : prevent_adapt_up_due_to_active_counts_(
- new PreventAdaptUpDueToActiveCounts(this)),
- prevent_increase_resolution_due_to_bitrate_resource_(
- new PreventIncreaseResolutionDueToBitrateResource(this)),
- prevent_adapt_up_in_balanced_resource_(
- new PreventAdaptUpInBalancedResource(this)),
+ : bitrate_constraint_(new rtc::RefCountedObject<BitrateConstraint>(this)),
+ balanced_constraint_(new rtc::RefCountedObject<BalancedConstraint>(this)),
encode_usage_resource_(
- new EncodeUsageResource(std::move(overuse_detector))),
- quality_scaler_resource_(new QualityScalerResource()),
+ EncodeUsageResource::Create(std::move(overuse_detector))),
+ quality_scaler_resource_(QualityScalerResource::Create()),
encoder_queue_(nullptr),
resource_adaptation_queue_(nullptr),
input_state_provider_(input_state_provider),
@@ -343,17 +285,10 @@ VideoStreamEncoderResourceManager::VideoStreamEncoderResourceManager(
std::make_unique<InitialFrameDropper>(quality_scaler_resource_)),
quality_scaling_experiment_enabled_(QualityScalingExperiment::Enabled()),
encoder_target_bitrate_bps_(absl::nullopt),
- quality_rampup_done_(false),
- quality_rampup_experiment_(QualityRampupExperiment::ParseSettings()),
- encoder_settings_(absl::nullopt),
- active_counts_() {
+ quality_rampup_experiment_(
+ QualityRampUpExperimentHelper::CreateIfEnabled(this, clock_)),
+ encoder_settings_(absl::nullopt) {
RTC_DCHECK(encoder_stats_observer_);
- MapResourceToReason(prevent_adapt_up_due_to_active_counts_,
- VideoAdaptationReason::kQuality);
- MapResourceToReason(prevent_increase_resolution_due_to_bitrate_resource_,
- VideoAdaptationReason::kQuality);
- MapResourceToReason(prevent_adapt_up_in_balanced_resource_,
- VideoAdaptationReason::kQuality);
MapResourceToReason(encode_usage_resource_, VideoAdaptationReason::kCpu);
MapResourceToReason(quality_scaler_resource_,
VideoAdaptationReason::kQuality);
@@ -370,26 +305,21 @@ void VideoStreamEncoderResourceManager::Initialize(
RTC_DCHECK(resource_adaptation_queue);
encoder_queue_ = encoder_queue;
resource_adaptation_queue_ = resource_adaptation_queue;
- prevent_adapt_up_due_to_active_counts_->Initialize(
- encoder_queue_, resource_adaptation_queue_);
- prevent_increase_resolution_due_to_bitrate_resource_->Initialize(
- encoder_queue_, resource_adaptation_queue_);
- prevent_adapt_up_in_balanced_resource_->Initialize(
- encoder_queue_, resource_adaptation_queue_);
- encode_usage_resource_->Initialize(encoder_queue_,
- resource_adaptation_queue_);
- quality_scaler_resource_->Initialize(encoder_queue_,
- resource_adaptation_queue_);
+ bitrate_constraint_->SetAdaptationQueue(resource_adaptation_queue_->Get());
+ balanced_constraint_->SetAdaptationQueue(resource_adaptation_queue_->Get());
+ encode_usage_resource_->RegisterEncoderTaskQueue(encoder_queue_->Get());
+ encode_usage_resource_->RegisterAdaptationTaskQueue(
+ resource_adaptation_queue_->Get());
+ quality_scaler_resource_->RegisterEncoderTaskQueue(encoder_queue_->Get());
+ quality_scaler_resource_->RegisterAdaptationTaskQueue(
+ resource_adaptation_queue_->Get());
}
void VideoStreamEncoderResourceManager::SetAdaptationProcessor(
ResourceAdaptationProcessorInterface* adaptation_processor) {
RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
adaptation_processor_ = adaptation_processor;
- prevent_adapt_up_due_to_active_counts_->SetAdaptationProcessor(
- adaptation_processor);
- prevent_adapt_up_in_balanced_resource_->SetAdaptationProcessor(
- adaptation_processor);
+ balanced_constraint_->SetAdaptationProcessor(adaptation_processor);
quality_scaler_resource_->SetAdaptationProcessor(adaptation_processor);
}
@@ -428,7 +358,7 @@ void VideoStreamEncoderResourceManager::MapResourceToReason(
[resource](const ResourceAndReason& r) {
return r.resource == resource;
}) == resources_.end())
- << "Resource " << resource->name() << " already was inserted";
+ << "Resource " << resource->Name() << " already was inserted";
resources_.emplace_back(resource, reason);
}
@@ -442,6 +372,16 @@ VideoStreamEncoderResourceManager::MappedResources() const {
return resources;
}
+std::vector<AdaptationConstraint*>
+VideoStreamEncoderResourceManager::AdaptationConstraints() const {
+ return {bitrate_constraint_, balanced_constraint_};
+}
+
+std::vector<AdaptationListener*>
+VideoStreamEncoderResourceManager::AdaptationListeners() const {
+ return {quality_scaler_resource_};
+}
+
rtc::scoped_refptr<QualityScalerResource>
VideoStreamEncoderResourceManager::quality_scaler_resource_for_testing() {
rtc::CritScope crit(&resource_lock_);
@@ -452,12 +392,7 @@ void VideoStreamEncoderResourceManager::SetEncoderSettings(
EncoderSettings encoder_settings) {
RTC_DCHECK_RUN_ON(encoder_queue_);
encoder_settings_ = std::move(encoder_settings);
- prevent_increase_resolution_due_to_bitrate_resource_
- ->OnEncoderSettingsUpdated(encoder_settings_);
-
- quality_rampup_experiment_.SetMaxBitrate(
- LastInputFrameSizeOrDefault(),
- encoder_settings_->video_codec().maxBitrate);
+ bitrate_constraint_->OnEncoderSettingsUpdated(encoder_settings_);
MaybeUpdateTargetFrameRate();
}
@@ -466,9 +401,9 @@ void VideoStreamEncoderResourceManager::SetStartBitrate(
RTC_DCHECK_RUN_ON(encoder_queue_);
if (!start_bitrate.IsZero()) {
encoder_target_bitrate_bps_ = start_bitrate.bps();
- prevent_increase_resolution_due_to_bitrate_resource_
- ->OnEncoderTargetBitrateUpdated(encoder_target_bitrate_bps_);
- prevent_adapt_up_in_balanced_resource_->OnEncoderTargetBitrateUpdated(
+ bitrate_constraint_->OnEncoderTargetBitrateUpdated(
+ encoder_target_bitrate_bps_);
+ balanced_constraint_->OnEncoderTargetBitrateUpdated(
encoder_target_bitrate_bps_);
}
initial_frame_dropper_->SetStartBitrate(start_bitrate,
@@ -480,9 +415,9 @@ void VideoStreamEncoderResourceManager::SetTargetBitrate(
RTC_DCHECK_RUN_ON(encoder_queue_);
if (!target_bitrate.IsZero()) {
encoder_target_bitrate_bps_ = target_bitrate.bps();
- prevent_increase_resolution_due_to_bitrate_resource_
- ->OnEncoderTargetBitrateUpdated(encoder_target_bitrate_bps_);
- prevent_adapt_up_in_balanced_resource_->OnEncoderTargetBitrateUpdated(
+ bitrate_constraint_->OnEncoderTargetBitrateUpdated(
+ encoder_target_bitrate_bps_);
+ balanced_constraint_->OnEncoderTargetBitrateUpdated(
encoder_target_bitrate_bps_);
}
initial_frame_dropper_->SetTargetBitrate(target_bitrate,
@@ -551,7 +486,16 @@ bool VideoStreamEncoderResourceManager::DropInitialFrames() const {
void VideoStreamEncoderResourceManager::OnMaybeEncodeFrame() {
RTC_DCHECK_RUN_ON(encoder_queue_);
initial_frame_dropper_->OnMaybeEncodeFrame();
- MaybePerformQualityRampupExperiment();
+ if (quality_rampup_experiment_) {
+ DataRate bandwidth = encoder_rates_.has_value()
+ ? encoder_rates_->bandwidth_allocation
+ : DataRate::Zero();
+ quality_rampup_experiment_->PerformQualityRampupExperiment(
+ quality_scaler_resource_, bandwidth,
+ DataRate::BitsPerSec(encoder_target_bitrate_bps_.value_or(0)),
+ DataRate::KilobitsPerSec(encoder_settings_->video_codec().maxBitrate),
+ LastInputFrameSizeOrDefault());
+ }
}
void VideoStreamEncoderResourceManager::UpdateQualityScalerSettings(
@@ -616,7 +560,7 @@ VideoAdaptationReason VideoStreamEncoderResourceManager::GetReasonFromResource(
return r.resource == resource;
});
RTC_DCHECK(registered_resource != resources_.end())
- << resource->name() << " not found.";
+ << resource->Name() << " not found.";
return registered_resource->reason;
}
@@ -654,29 +598,11 @@ void VideoStreamEncoderResourceManager::OnVideoSourceRestrictionsUpdated(
const VideoAdaptationCounters& adaptation_counters,
rtc::scoped_refptr<Resource> reason) {
RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
- VideoAdaptationCounters previous_adaptation_counters =
- active_counts_[VideoAdaptationReason::kQuality] +
- active_counts_[VideoAdaptationReason::kCpu];
- int adaptation_counters_total_abs_diff = std::abs(
- adaptation_counters.Total() - previous_adaptation_counters.Total());
- if (reason) {
- // A resource signal triggered this adaptation. The adaptation counters have
- // to be updated every time the adaptation counter is incremented or
- // decremented due to a resource.
- RTC_DCHECK_EQ(adaptation_counters_total_abs_diff, 1);
- VideoAdaptationReason reason_type = GetReasonFromResource(reason);
- UpdateAdaptationStats(adaptation_counters, reason_type);
- } else if (adaptation_counters.Total() == 0) {
+ // TODO(bugs.webrtc.org/11553) Remove reason parameter and add reset callback.
+ if (!reason && adaptation_counters.Total() == 0) {
// Adaptation was manually reset - clear the per-reason counters too.
- ResetActiveCounts();
encoder_stats_observer_->ClearAdaptationStats();
- } else {
- // If a reason did not increase or decrease the Total() by 1 and the
- // restrictions were not just reset, the adaptation counters MUST not have
- // been modified and there is nothing to do stats-wise.
- RTC_DCHECK_EQ(adaptation_counters_total_abs_diff, 0);
}
- RTC_LOG(LS_INFO) << ActiveCountsToString();
// The VideoStreamEncoder makes the manager outlive the encoder queue. This
// means that if the task gets executed, |this| has not been freed yet.
@@ -687,6 +613,49 @@ void VideoStreamEncoderResourceManager::OnVideoSourceRestrictionsUpdated(
});
}
+void VideoStreamEncoderResourceManager::OnResourceLimitationChanged(
+ rtc::scoped_refptr<Resource> resource,
+ const std::map<rtc::scoped_refptr<Resource>, VideoAdaptationCounters>&
+ resource_limitations) {
+ RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
+ if (!resource) {
+ encoder_stats_observer_->ClearAdaptationStats();
+ return;
+ }
+
+ std::map<VideoAdaptationReason, VideoAdaptationCounters> limitations;
+ for (auto& resource_counter : resource_limitations) {
+ std::map<VideoAdaptationReason, VideoAdaptationCounters>::iterator it;
+ bool inserted;
+ std::tie(it, inserted) = limitations.emplace(
+ GetReasonFromResource(resource_counter.first), resource_counter.second);
+ if (!inserted && it->second.Total() < resource_counter.second.Total()) {
+ it->second = resource_counter.second;
+ }
+ }
+
+ VideoAdaptationReason adaptation_reason = GetReasonFromResource(resource);
+ encoder_stats_observer_->OnAdaptationChanged(
+ adaptation_reason, limitations[VideoAdaptationReason::kCpu],
+ limitations[VideoAdaptationReason::kQuality]);
+
+ encoder_queue_->PostTask(ToQueuedTask(
+ [cpu_limited = limitations.at(VideoAdaptationReason::kCpu).Total() > 0,
+ qp_resolution_adaptations =
+ limitations.at(VideoAdaptationReason::kQuality)
+ .resolution_adaptations,
+ this]() {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ if (quality_rampup_experiment_) {
+ quality_rampup_experiment_->cpu_adapted(cpu_limited);
+ quality_rampup_experiment_->qp_resolution_adaptations(
+ qp_resolution_adaptations);
+ }
+ }));
+
+ RTC_LOG(LS_INFO) << ActiveCountsToString(limitations);
+}
+
void VideoStreamEncoderResourceManager::MaybeUpdateTargetFrameRate() {
RTC_DCHECK_RUN_ON(encoder_queue_);
absl::optional<double> codec_max_frame_rate =
@@ -708,84 +677,6 @@ void VideoStreamEncoderResourceManager::MaybeUpdateTargetFrameRate() {
encode_usage_resource_->SetTargetFrameRate(target_frame_rate);
}
-void VideoStreamEncoderResourceManager::OnAdaptationCountChanged(
- const VideoAdaptationCounters& adaptation_count,
- VideoAdaptationCounters* active_count,
- VideoAdaptationCounters* other_active) {
- RTC_DCHECK(active_count);
- RTC_DCHECK(other_active);
- const int active_total = active_count->Total();
- const int other_total = other_active->Total();
- const VideoAdaptationCounters prev_total = *active_count + *other_active;
- const int delta_resolution_adaptations =
- adaptation_count.resolution_adaptations -
- prev_total.resolution_adaptations;
- const int delta_fps_adaptations =
- adaptation_count.fps_adaptations - prev_total.fps_adaptations;
-
- RTC_DCHECK_EQ(
- std::abs(delta_resolution_adaptations) + std::abs(delta_fps_adaptations),
- 1)
- << "Adaptation took more than one step!";
-
- if (delta_resolution_adaptations > 0) {
- ++active_count->resolution_adaptations;
- } else if (delta_resolution_adaptations < 0) {
- if (active_count->resolution_adaptations == 0) {
- RTC_DCHECK_GT(active_count->fps_adaptations, 0) << "No downgrades left";
- RTC_DCHECK_GT(other_active->resolution_adaptations, 0)
- << "No resolution adaptation to borrow from";
- // Lend an fps adaptation to other and take one resolution adaptation.
- --active_count->fps_adaptations;
- ++other_active->fps_adaptations;
- --other_active->resolution_adaptations;
- } else {
- --active_count->resolution_adaptations;
- }
- }
- if (delta_fps_adaptations > 0) {
- ++active_count->fps_adaptations;
- } else if (delta_fps_adaptations < 0) {
- if (active_count->fps_adaptations == 0) {
- RTC_DCHECK_GT(active_count->resolution_adaptations, 0)
- << "No downgrades left";
- RTC_DCHECK_GT(other_active->fps_adaptations, 0)
- << "No fps adaptation to borrow from";
- // Lend a resolution adaptation to other and take one fps adaptation.
- --active_count->resolution_adaptations;
- ++other_active->resolution_adaptations;
- --other_active->fps_adaptations;
- } else {
- --active_count->fps_adaptations;
- }
- }
-
- RTC_DCHECK(*active_count + *other_active == adaptation_count);
- RTC_DCHECK_EQ(other_active->Total(), other_total);
- RTC_DCHECK_EQ(
- active_count->Total(),
- active_total + delta_resolution_adaptations + delta_fps_adaptations);
- RTC_DCHECK_GE(active_count->resolution_adaptations, 0);
- RTC_DCHECK_GE(active_count->fps_adaptations, 0);
- RTC_DCHECK_GE(other_active->resolution_adaptations, 0);
- RTC_DCHECK_GE(other_active->fps_adaptations, 0);
-}
-
-void VideoStreamEncoderResourceManager::UpdateAdaptationStats(
- const VideoAdaptationCounters& total_counts,
- VideoAdaptationReason reason) {
- RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
- // Update active counts
- VideoAdaptationCounters& active_count = active_counts_[reason];
- VideoAdaptationCounters& other_active = active_counts_[OtherReason(reason)];
-
- OnAdaptationCountChanged(total_counts, &active_count, &other_active);
-
- encoder_stats_observer_->OnAdaptationChanged(
- reason, active_counts_[VideoAdaptationReason::kCpu],
- active_counts_[VideoAdaptationReason::kQuality]);
-}
-
void VideoStreamEncoderResourceManager::UpdateStatsAdaptationSettings() const {
RTC_DCHECK_RUN_ON(encoder_queue_);
VideoStreamEncoderObserver::AdaptationSettings cpu_settings(
@@ -800,76 +691,19 @@ void VideoStreamEncoderResourceManager::UpdateStatsAdaptationSettings() const {
quality_settings);
}
-void VideoStreamEncoderResourceManager::MaybePerformQualityRampupExperiment() {
- RTC_DCHECK_RUN_ON(encoder_queue_);
- if (!quality_scaler_resource_->is_started())
- return;
-
- if (quality_rampup_done_)
- return;
-
- int64_t now_ms = clock_->TimeInMilliseconds();
- uint32_t bw_kbps = encoder_rates_.has_value()
- ? encoder_rates_.value().bandwidth_allocation.kbps()
- : 0;
-
- bool try_quality_rampup = false;
- if (quality_rampup_experiment_.BwHigh(now_ms, bw_kbps)) {
- // Verify that encoder is at max bitrate and the QP is low.
- if (encoder_settings_ &&
- encoder_target_bitrate_bps_.value_or(0) ==
- encoder_settings_->video_codec().maxBitrate * 1000 &&
- quality_scaler_resource_->QpFastFilterLow()) {
- try_quality_rampup = true;
- }
- }
- if (try_quality_rampup) {
- // The VideoStreamEncoder makes the manager outlive the adaptation queue.
- // This means that if the task gets executed, |this| has not been freed yet.
- // TODO(https://crbug.com/webrtc/11565): When the manager no longer outlives
- // the adaptation queue, add logic to prevent use-after-free on |this|.
- resource_adaptation_queue_->PostTask([this] {
- RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
- if (!adaptation_processor_) {
- // The processor nulled before this task had a chance to execute. This
- // happens if the processor is destroyed. No action needed.
- return;
- }
- // TODO(https://crbug.com/webrtc/11392): See if we can rely on the total
- // counts or the stats, and not the active counts.
- const VideoAdaptationCounters& qp_counts =
- active_counts_[VideoAdaptationReason::kQuality];
- const VideoAdaptationCounters& cpu_counts =
- active_counts_[VideoAdaptationReason::kCpu];
- if (!quality_rampup_done_ && qp_counts.resolution_adaptations > 0 &&
- cpu_counts.Total() == 0) {
- RTC_LOG(LS_INFO) << "Reset quality limitations.";
- adaptation_processor_->ResetVideoSourceRestrictions();
- quality_rampup_done_ = true;
- }
- });
- }
-}
-
-void VideoStreamEncoderResourceManager::ResetActiveCounts() {
- RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
- active_counts_.clear();
- active_counts_[VideoAdaptationReason::kCpu] = VideoAdaptationCounters();
- active_counts_[VideoAdaptationReason::kQuality] = VideoAdaptationCounters();
-}
-
-std::string VideoStreamEncoderResourceManager::ActiveCountsToString() const {
- RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
- RTC_DCHECK_EQ(2, active_counts_.size());
+// static
+std::string VideoStreamEncoderResourceManager::ActiveCountsToString(
+ const std::map<VideoAdaptationReason, VideoAdaptationCounters>&
+ active_counts) {
rtc::StringBuilder ss;
ss << "Downgrade counts: fps: {";
- for (auto& reason_count : active_counts_) {
+ for (auto& reason_count : active_counts) {
ss << ToString(reason_count.first) << ":";
ss << reason_count.second.fps_adaptations;
}
ss << "}, resolution {";
- for (auto& reason_count : active_counts_) {
+ for (auto& reason_count : active_counts) {
ss << ToString(reason_count.first) << ":";
ss << reason_count.second.resolution_adaptations;
}
@@ -877,4 +711,23 @@ std::string VideoStreamEncoderResourceManager::ActiveCountsToString() const {
return ss.Release();
}
+
+void VideoStreamEncoderResourceManager::OnQualityRampUp() {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ // The VideoStreamEncoder makes the manager outlive the adaptation queue.
+ // This means that if the task gets executed, |this| has not been freed yet.
+ // TODO(https://crbug.com/webrtc/11565): When the manager no longer outlives
+ // the adaptation queue, add logic to prevent use-after-free on |this|.
+ resource_adaptation_queue_->PostTask([this] {
+ RTC_DCHECK_RUN_ON(resource_adaptation_queue_);
+ if (!adaptation_processor_) {
+ // The processor nulled before this task had a chance to execute. This
+ // happens if the processor is destroyed. No action needed.
+ return;
+ }
+ RTC_LOG(LS_INFO) << "Reset quality limitations.";
+ adaptation_processor_->ResetVideoSourceRestrictions();
+ });
+ quality_rampup_experiment_.reset();
+}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager.h b/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager.h
index d028e5049a0..61ae29b6bf9 100644
--- a/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager.h
+++ b/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager.h
@@ -20,8 +20,10 @@
#include <vector>
#include "absl/types/optional.h"
+#include "api/adaptation/resource.h"
#include "api/rtp_parameters.h"
#include "api/scoped_refptr.h"
+#include "api/task_queue/task_queue_base.h"
#include "api/video/video_adaptation_counters.h"
#include "api/video/video_adaptation_reason.h"
#include "api/video/video_frame.h"
@@ -30,19 +32,20 @@
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_config.h"
-#include "call/adaptation/resource.h"
#include "call/adaptation/resource_adaptation_processor_interface.h"
#include "call/adaptation/video_stream_adapter.h"
#include "call/adaptation/video_stream_input_state_provider.h"
#include "rtc_base/critical_section.h"
-#include "rtc_base/experiments/quality_rampup_experiment.h"
#include "rtc_base/experiments/quality_scaler_settings.h"
+#include "rtc_base/ref_count.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/task_queue.h"
#include "system_wrappers/include/clock.h"
#include "video/adaptation/encode_usage_resource.h"
#include "video/adaptation/overuse_frame_detector.h"
+#include "video/adaptation/quality_rampup_experiment_helper.h"
#include "video/adaptation/quality_scaler_resource.h"
+#include "video/adaptation/video_stream_encoder_resource.h"
namespace webrtc {
@@ -61,7 +64,8 @@ extern const int kDefaultInputPixelsHeight;
// The manager is also involved with various mitigations not part of the
// ResourceAdaptationProcessor code such as the inital frame dropping.
class VideoStreamEncoderResourceManager
- : public ResourceAdaptationProcessorListener {
+ : public VideoSourceRestrictionsListener,
+ public QualityRampUpExperimentListener {
public:
VideoStreamEncoderResourceManager(
VideoStreamInputStateProvider* input_state_provider,
@@ -109,37 +113,31 @@ class VideoStreamEncoderResourceManager
void OnFrameDropped(EncodedImageCallback::DropReason reason);
// Resources need to be mapped to an AdaptReason (kCpu or kQuality) in order
- // to be able to update |active_counts_|, which is used...
- // - Legacy getStats() purposes.
- // - Preventing adapting up in some circumstances (which may be questionable).
- // TODO(hbos): Can we get rid of this?
+ // to update legacy getStats().
void MapResourceToReason(rtc::scoped_refptr<Resource> resource,
VideoAdaptationReason reason);
std::vector<rtc::scoped_refptr<Resource>> MappedResources() const;
+ std::vector<AdaptationConstraint*> AdaptationConstraints() const;
+ std::vector<AdaptationListener*> AdaptationListeners() const;
rtc::scoped_refptr<QualityScalerResource>
quality_scaler_resource_for_testing();
// If true, the VideoStreamEncoder should eexecute its logic to maybe drop
// frames baseed on size and bitrate.
bool DropInitialFrames() const;
- // ResourceAdaptationProcessorListener implementation.
- // Updates |video_source_restrictions_| and |active_counts_|.
+ // VideoSourceRestrictionsListener implementation.
+ // Updates |video_source_restrictions_|.
void OnVideoSourceRestrictionsUpdated(
VideoSourceRestrictions restrictions,
const VideoAdaptationCounters& adaptation_counters,
rtc::scoped_refptr<Resource> reason) override;
+ void OnResourceLimitationChanged(
+ rtc::scoped_refptr<Resource> resource,
+ const std::map<rtc::scoped_refptr<Resource>, VideoAdaptationCounters>&
+ resource_limitations) override;
- // For reasons of adaptation and statistics, we not only count the total
- // number of adaptations, but we also count the number of adaptations per
- // reason.
- // This method takes the new total number of adaptations and allocates that to
- // the "active" count - number of adaptations for the current reason.
- // The "other" count is the number of adaptations for the other reason.
- // This must be called for each adaptation step made.
- static void OnAdaptationCountChanged(
- const VideoAdaptationCounters& adaptation_count,
- VideoAdaptationCounters* active_count,
- VideoAdaptationCounters* other_active);
+ // QualityRampUpExperimentListener implementation.
+ void OnQualityRampUp() override;
private:
class InitialFrameDropper;
@@ -158,70 +156,28 @@ class VideoStreamEncoderResourceManager
void UpdateQualityScalerSettings(
absl::optional<VideoEncoder::QpThresholds> qp_thresholds);
- void UpdateAdaptationStats(const VideoAdaptationCounters& total_counts,
- VideoAdaptationReason reason);
void UpdateStatsAdaptationSettings() const;
- // Checks to see if we should execute the quality rampup experiment. The
- // experiment resets all video restrictions at the start of the call in the
- // case the bandwidth estimate is high enough.
- // TODO(https://crbug.com/webrtc/11222) Move experiment details into an inner
- // class.
- void MaybePerformQualityRampupExperiment();
-
- void ResetActiveCounts();
- std::string ActiveCountsToString() const;
-
- // TODO(hbos): Consider moving all of the manager's resources into separate
- // files for testability.
-
- // Does not trigger adaptations, only prevents adapting up based on
- // |active_counts_|.
- class PreventAdaptUpDueToActiveCounts final
- : public rtc::RefCountedObject<Resource> {
- public:
- explicit PreventAdaptUpDueToActiveCounts(
- VideoStreamEncoderResourceManager* manager);
- ~PreventAdaptUpDueToActiveCounts() override = default;
-
- void SetAdaptationProcessor(
- ResourceAdaptationProcessorInterface* adaptation_processor);
-
- // Resource overrides.
- std::string name() const override {
- return "PreventAdaptUpDueToActiveCounts";
- }
- bool IsAdaptationUpAllowed(
- const VideoStreamInputState& input_state,
- const VideoSourceRestrictions& restrictions_before,
- const VideoSourceRestrictions& restrictions_after,
- rtc::scoped_refptr<Resource> reason_resource) const override;
-
- private:
- // The |manager_| must be alive as long as this resource is added to the
- // ResourceAdaptationProcessor, i.e. when IsAdaptationUpAllowed() is called.
- VideoStreamEncoderResourceManager* const manager_;
- ResourceAdaptationProcessorInterface* adaptation_processor_
- RTC_GUARDED_BY(resource_adaptation_queue());
- };
+ static std::string ActiveCountsToString(
+ const std::map<VideoAdaptationReason, VideoAdaptationCounters>&
+ active_counts);
+ // TODO(hbos): Add tests for manager's constraints.
// Does not trigger adaptations, only prevents adapting up resolution.
- class PreventIncreaseResolutionDueToBitrateResource final
- : public rtc::RefCountedObject<Resource> {
+ class BitrateConstraint : public rtc::RefCountInterface,
+ public AdaptationConstraint {
public:
- explicit PreventIncreaseResolutionDueToBitrateResource(
- VideoStreamEncoderResourceManager* manager);
- ~PreventIncreaseResolutionDueToBitrateResource() override = default;
+ explicit BitrateConstraint(VideoStreamEncoderResourceManager* manager);
+ ~BitrateConstraint() override = default;
+ void SetAdaptationQueue(TaskQueueBase* resource_adaptation_queue);
void OnEncoderSettingsUpdated(
absl::optional<EncoderSettings> encoder_settings);
void OnEncoderTargetBitrateUpdated(
absl::optional<uint32_t> encoder_target_bitrate_bps);
- // Resource overrides.
- std::string name() const override {
- return "PreventIncreaseResolutionDueToBitrateResource";
- }
+ // AdaptationConstraint implementation.
+ std::string Name() const override { return "BitrateConstraint"; }
bool IsAdaptationUpAllowed(
const VideoStreamInputState& input_state,
const VideoSourceRestrictions& restrictions_before,
@@ -232,29 +188,28 @@ class VideoStreamEncoderResourceManager
// The |manager_| must be alive as long as this resource is added to the
// ResourceAdaptationProcessor, i.e. when IsAdaptationUpAllowed() is called.
VideoStreamEncoderResourceManager* const manager_;
+ TaskQueueBase* resource_adaptation_queue_;
absl::optional<EncoderSettings> encoder_settings_
- RTC_GUARDED_BY(resource_adaptation_queue());
+ RTC_GUARDED_BY(resource_adaptation_queue_);
absl::optional<uint32_t> encoder_target_bitrate_bps_
- RTC_GUARDED_BY(resource_adaptation_queue());
+ RTC_GUARDED_BY(resource_adaptation_queue_);
};
// Does not trigger adaptations, only prevents adapting up in BALANCED.
- class PreventAdaptUpInBalancedResource final
- : public rtc::RefCountedObject<Resource> {
+ class BalancedConstraint : public rtc::RefCountInterface,
+ public AdaptationConstraint {
public:
- explicit PreventAdaptUpInBalancedResource(
- VideoStreamEncoderResourceManager* manager);
- ~PreventAdaptUpInBalancedResource() override = default;
+ explicit BalancedConstraint(VideoStreamEncoderResourceManager* manager);
+ ~BalancedConstraint() override = default;
+ void SetAdaptationQueue(TaskQueueBase* resource_adaptation_queue);
void SetAdaptationProcessor(
ResourceAdaptationProcessorInterface* adaptation_processor);
void OnEncoderTargetBitrateUpdated(
absl::optional<uint32_t> encoder_target_bitrate_bps);
- // Resource overrides.
- std::string name() const override {
- return "PreventAdaptUpInBalancedResource";
- }
+ // AdaptationConstraint implementation.
+ std::string Name() const override { return "BalancedConstraint"; }
bool IsAdaptationUpAllowed(
const VideoStreamInputState& input_state,
const VideoSourceRestrictions& restrictions_before,
@@ -265,18 +220,15 @@ class VideoStreamEncoderResourceManager
// The |manager_| must be alive as long as this resource is added to the
// ResourceAdaptationProcessor, i.e. when IsAdaptationUpAllowed() is called.
VideoStreamEncoderResourceManager* const manager_;
+ TaskQueueBase* resource_adaptation_queue_;
ResourceAdaptationProcessorInterface* adaptation_processor_
- RTC_GUARDED_BY(resource_adaptation_queue());
+ RTC_GUARDED_BY(resource_adaptation_queue_);
absl::optional<uint32_t> encoder_target_bitrate_bps_
- RTC_GUARDED_BY(resource_adaptation_queue());
+ RTC_GUARDED_BY(resource_adaptation_queue_);
};
- const rtc::scoped_refptr<PreventAdaptUpDueToActiveCounts>
- prevent_adapt_up_due_to_active_counts_;
- const rtc::scoped_refptr<PreventIncreaseResolutionDueToBitrateResource>
- prevent_increase_resolution_due_to_bitrate_resource_;
- const rtc::scoped_refptr<PreventAdaptUpInBalancedResource>
- prevent_adapt_up_in_balanced_resource_;
+ const rtc::scoped_refptr<BitrateConstraint> bitrate_constraint_;
+ const rtc::scoped_refptr<BalancedConstraint> balanced_constraint_;
const rtc::scoped_refptr<EncodeUsageResource> encode_usage_resource_;
const rtc::scoped_refptr<QualityScalerResource> quality_scaler_resource_;
@@ -303,9 +255,7 @@ class VideoStreamEncoderResourceManager
RTC_GUARDED_BY(encoder_queue_);
absl::optional<VideoEncoder::RateControlParameters> encoder_rates_
RTC_GUARDED_BY(encoder_queue_);
- // Used on both the encoder queue and resource adaptation queue.
- std::atomic<bool> quality_rampup_done_;
- QualityRampupExperiment quality_rampup_experiment_
+ std::unique_ptr<QualityRampUpExperimentHelper> quality_rampup_experiment_
RTC_GUARDED_BY(encoder_queue_);
absl::optional<EncoderSettings> encoder_settings_
RTC_GUARDED_BY(encoder_queue_);
@@ -323,15 +273,6 @@ class VideoStreamEncoderResourceManager
};
rtc::CriticalSection resource_lock_;
std::vector<ResourceAndReason> resources_ RTC_GUARDED_BY(&resource_lock_);
- // One AdaptationCounter for each reason, tracking the number of times we have
- // adapted for each reason. The sum of active_counts_ MUST always equal the
- // total adaptation provided by the VideoSourceRestrictions.
- // TODO(https://crbug.com/webrtc/11542): When we have an adaptation queue,
- // guard the activec counts by it instead. The |encoder_stats_observer_| is
- // thread-safe anyway, and active counts are used by
- // PreventAdaptUpDueToActiveCounts to make decisions.
- std::unordered_map<VideoAdaptationReason, VideoAdaptationCounters>
- active_counts_ RTC_GUARDED_BY(resource_adaptation_queue_);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager_unittest.cc b/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager_unittest.cc
deleted file mode 100644
index 38ebba63346..00000000000
--- a/chromium/third_party/webrtc/video/adaptation/video_stream_encoder_resource_manager_unittest.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "video/adaptation/video_stream_encoder_resource_manager.h"
-
-#include "api/video/video_adaptation_counters.h"
-#include "test/gmock.h"
-#include "test/gtest.h"
-
-namespace webrtc {
-
-TEST(VideoStreamEncoderResourceManagerTest, FirstAdaptationDown_Fps) {
- VideoAdaptationCounters cpu;
- VideoAdaptationCounters qp;
- VideoAdaptationCounters total(0, 1);
-
- VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp);
- VideoAdaptationCounters expected_cpu(0, 1);
- VideoAdaptationCounters expected_qp;
- EXPECT_EQ(expected_cpu, cpu);
- EXPECT_EQ(expected_qp, qp);
-}
-
-TEST(VideoStreamEncoderResourceManagerTest, FirstAdaptationDown_Resolution) {
- VideoAdaptationCounters cpu;
- VideoAdaptationCounters qp;
- VideoAdaptationCounters total(1, 0);
-
- VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp);
- VideoAdaptationCounters expected_cpu(1, 0);
- VideoAdaptationCounters expected_qp;
- EXPECT_EQ(expected_cpu, cpu);
- EXPECT_EQ(expected_qp, qp);
-}
-
-TEST(VideoStreamEncoderResourceManagerTest, LastAdaptUp_Fps) {
- VideoAdaptationCounters cpu(0, 1);
- VideoAdaptationCounters qp;
- VideoAdaptationCounters total;
-
- VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp);
- VideoAdaptationCounters expected_cpu;
- VideoAdaptationCounters expected_qp;
- EXPECT_EQ(expected_cpu, cpu);
- EXPECT_EQ(expected_qp, qp);
-}
-
-TEST(VideoStreamEncoderResourceManagerTest, LastAdaptUp_Resolution) {
- VideoAdaptationCounters cpu(1, 0);
- VideoAdaptationCounters qp;
- VideoAdaptationCounters total;
-
- VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp);
- VideoAdaptationCounters expected_cpu;
- VideoAdaptationCounters expected_qp;
- EXPECT_EQ(expected_cpu, cpu);
- EXPECT_EQ(expected_qp, qp);
-}
-
-TEST(VideoStreamEncoderResourceManagerTest, AdaptUpWithBorrow_Resolution) {
- VideoAdaptationCounters cpu(0, 1);
- VideoAdaptationCounters qp(1, 0);
- VideoAdaptationCounters total(0, 1);
-
- // CPU adaptation for resolution, but no resolution adaptation left from CPU.
- // We then borrow the resolution adaptation from qp, and give qp the fps
- // adaptation from CPU.
- VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp);
-
- VideoAdaptationCounters expected_cpu(0, 0);
- VideoAdaptationCounters expected_qp(0, 1);
- EXPECT_EQ(expected_cpu, cpu);
- EXPECT_EQ(expected_qp, qp);
-}
-
-TEST(VideoStreamEncoderResourceManagerTest, AdaptUpWithBorrow_Fps) {
- VideoAdaptationCounters cpu(1, 0);
- VideoAdaptationCounters qp(0, 1);
- VideoAdaptationCounters total(1, 0);
-
- // CPU adaptation for fps, but no fps adaptation left from CPU. We then borrow
- // the fps adaptation from qp, and give qp the resolution adaptation from CPU.
- VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp);
-
- VideoAdaptationCounters expected_cpu(0, 0);
- VideoAdaptationCounters expected_qp(1, 0);
- EXPECT_EQ(expected_cpu, cpu);
- EXPECT_EQ(expected_qp, qp);
-}
-
-} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/call_stats2_unittest.cc b/chromium/third_party/webrtc/video/call_stats2_unittest.cc
index 73fe4b45ca6..b3d43cb92ab 100644
--- a/chromium/third_party/webrtc/video/call_stats2_unittest.cc
+++ b/chromium/third_party/webrtc/video/call_stats2_unittest.cc
@@ -33,7 +33,7 @@ class MockStatsObserver : public CallStatsObserver {
MockStatsObserver() {}
virtual ~MockStatsObserver() {}
- MOCK_METHOD2(OnRttUpdate, void(int64_t, int64_t));
+ MOCK_METHOD(void, OnRttUpdate, (int64_t, int64_t), (override));
};
class CallStats2Test : public ::testing::Test {
diff --git a/chromium/third_party/webrtc/video/call_stats_unittest.cc b/chromium/third_party/webrtc/video/call_stats_unittest.cc
index c560ccbee6b..e85c4f8c542 100644
--- a/chromium/third_party/webrtc/video/call_stats_unittest.cc
+++ b/chromium/third_party/webrtc/video/call_stats_unittest.cc
@@ -32,7 +32,7 @@ class MockStatsObserver : public CallStatsObserver {
MockStatsObserver() {}
virtual ~MockStatsObserver() {}
- MOCK_METHOD2(OnRttUpdate, void(int64_t, int64_t));
+ MOCK_METHOD(void, OnRttUpdate, (int64_t, int64_t), (override));
};
class CallStatsTest : public ::testing::Test {
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/bandwidth_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/bandwidth_tests.cc
index 16b35d68f8a..19384944066 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/bandwidth_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/bandwidth_tests.cc
@@ -16,7 +16,7 @@
#include "api/video/video_bitrate_allocation.h"
#include "call/fake_network_pipe.h"
#include "call/simulated_network.h"
-#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
#include "rtc_base/rate_limiter.h"
#include "rtc_base/task_queue_for_test.h"
#include "rtc_base/task_utils/to_queued_task.h"
@@ -205,8 +205,9 @@ TEST_F(BandwidthEndToEndTest, RembWithSendSideBwe) {
~BweObserver() override {
// Block until all already posted tasks run to avoid races when such task
- // accesses |this|.
- SendTask(RTC_FROM_HERE, task_queue_, [] {});
+ // accesses |this|. Also make sure we free |rtp_rtcp_| on the correct
+ // thread/task queue.
+ SendTask(RTC_FROM_HERE, task_queue_, [this]() { rtp_rtcp_ = nullptr; });
}
std::unique_ptr<test::PacketTransport> CreateReceiveTransport(
@@ -237,13 +238,13 @@ TEST_F(BandwidthEndToEndTest, RembWithSendSideBwe) {
encoder_config->max_bitrate_bps = 2000000;
ASSERT_EQ(1u, receive_configs->size());
- RtpRtcp::Configuration config;
+ RtpRtcpInterface::Configuration config;
config.receiver_only = true;
config.clock = clock_;
config.outgoing_transport = receive_transport_;
config.retransmission_rate_limiter = &retransmission_rate_limiter_;
config.local_media_ssrc = (*receive_configs)[0].rtp.local_ssrc;
- rtp_rtcp_ = RtpRtcp::Create(config);
+ rtp_rtcp_ = ModuleRtpRtcpImpl2::Create(config);
rtp_rtcp_->SetRemoteSSRC((*receive_configs)[0].rtp.remote_ssrc);
rtp_rtcp_->SetRTCPStatus(RtcpMode::kReducedSize);
}
@@ -302,7 +303,7 @@ TEST_F(BandwidthEndToEndTest, RembWithSendSideBwe) {
Clock* const clock_;
uint32_t sender_ssrc_;
int remb_bitrate_bps_;
- std::unique_ptr<RtpRtcp> rtp_rtcp_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;
test::PacketTransport* receive_transport_;
TestState state_;
RateLimiter retransmission_rate_limiter_;
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/codec_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/codec_tests.cc
index b73b289ec83..d10e08daf10 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/codec_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/codec_tests.cc
@@ -34,18 +34,14 @@ enum : int { // The first valid value is 1.
};
} // namespace
-class CodecEndToEndTest : public test::CallTest,
- public ::testing::WithParamInterface<std::string> {
+class CodecEndToEndTest : public test::CallTest {
public:
- CodecEndToEndTest() : field_trial_(GetParam()) {
+ CodecEndToEndTest() {
RegisterRtpExtension(
RtpExtension(RtpExtension::kColorSpaceUri, kColorSpaceExtensionId));
RegisterRtpExtension(RtpExtension(RtpExtension::kVideoRotationUri,
kVideoRotationExtensionId));
}
-
- private:
- test::ScopedFieldTrials field_trial_;
};
class CodecObserver : public test::EndToEndTest,
@@ -121,13 +117,7 @@ class CodecObserver : public test::EndToEndTest,
int frame_counter_;
};
-INSTANTIATE_TEST_SUITE_P(
- GenericDescriptor,
- CodecEndToEndTest,
- ::testing::Values("WebRTC-GenericDescriptor/Disabled/",
- "WebRTC-GenericDescriptor/Enabled/"));
-
-TEST_P(CodecEndToEndTest, SendsAndReceivesVP8) {
+TEST_F(CodecEndToEndTest, SendsAndReceivesVP8) {
test::FunctionVideoEncoderFactory encoder_factory(
[]() { return VP8Encoder::Create(); });
test::FunctionVideoDecoderFactory decoder_factory(
@@ -137,7 +127,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesVP8) {
RunBaseTest(&test);
}
-TEST_P(CodecEndToEndTest, SendsAndReceivesVP8Rotation90) {
+TEST_F(CodecEndToEndTest, SendsAndReceivesVP8Rotation90) {
test::FunctionVideoEncoderFactory encoder_factory(
[]() { return VP8Encoder::Create(); });
test::FunctionVideoDecoderFactory decoder_factory(
@@ -148,7 +138,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesVP8Rotation90) {
}
#if defined(RTC_ENABLE_VP9)
-TEST_P(CodecEndToEndTest, SendsAndReceivesVP9) {
+TEST_F(CodecEndToEndTest, SendsAndReceivesVP9) {
test::FunctionVideoEncoderFactory encoder_factory(
[]() { return VP9Encoder::Create(); });
test::FunctionVideoDecoderFactory decoder_factory(
@@ -158,7 +148,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesVP9) {
RunBaseTest(&test);
}
-TEST_P(CodecEndToEndTest, SendsAndReceivesVP9VideoRotation90) {
+TEST_F(CodecEndToEndTest, SendsAndReceivesVP9VideoRotation90) {
test::FunctionVideoEncoderFactory encoder_factory(
[]() { return VP9Encoder::Create(); });
test::FunctionVideoDecoderFactory decoder_factory(
@@ -168,7 +158,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesVP9VideoRotation90) {
RunBaseTest(&test);
}
-TEST_P(CodecEndToEndTest, SendsAndReceivesVP9ExplicitColorSpace) {
+TEST_F(CodecEndToEndTest, SendsAndReceivesVP9ExplicitColorSpace) {
test::FunctionVideoEncoderFactory encoder_factory(
[]() { return VP9Encoder::Create(); });
test::FunctionVideoDecoderFactory decoder_factory(
@@ -179,7 +169,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesVP9ExplicitColorSpace) {
RunBaseTest(&test);
}
-TEST_P(CodecEndToEndTest,
+TEST_F(CodecEndToEndTest,
SendsAndReceivesVP9ExplicitColorSpaceWithHdrMetadata) {
test::FunctionVideoEncoderFactory encoder_factory(
[]() { return VP9Encoder::Create(); });
@@ -192,7 +182,7 @@ TEST_P(CodecEndToEndTest,
}
// Mutiplex tests are using VP9 as the underlying implementation.
-TEST_P(CodecEndToEndTest, SendsAndReceivesMultiplex) {
+TEST_F(CodecEndToEndTest, SendsAndReceivesMultiplex) {
InternalEncoderFactory internal_encoder_factory;
InternalDecoderFactory internal_decoder_factory;
test::FunctionVideoEncoderFactory encoder_factory(
@@ -211,7 +201,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesMultiplex) {
RunBaseTest(&test);
}
-TEST_P(CodecEndToEndTest, SendsAndReceivesMultiplexVideoRotation90) {
+TEST_F(CodecEndToEndTest, SendsAndReceivesMultiplexVideoRotation90) {
InternalEncoderFactory internal_encoder_factory;
InternalDecoderFactory internal_decoder_factory;
test::FunctionVideoEncoderFactory encoder_factory(
diff --git a/chromium/third_party/webrtc/video/full_stack_tests.cc b/chromium/third_party/webrtc/video/full_stack_tests.cc
index 7307b462b73..a12b3342301 100644
--- a/chromium/third_party/webrtc/video/full_stack_tests.cc
+++ b/chromium/third_party/webrtc/video/full_stack_tests.cc
@@ -90,26 +90,6 @@ std::string ClipNameToClipPath(const char* clip_name) {
// logs // bool
// };
-class GenericDescriptorTest : public ::testing::TestWithParam<std::string> {
- public:
- GenericDescriptorTest()
- : field_trial_(AppendFieldTrials(GetParam())),
- generic_descriptor_enabled_(
- field_trial::IsEnabled("WebRTC-GenericDescriptor")) {}
-
- std::string GetTestName(std::string base) {
- if (generic_descriptor_enabled_)
- base += "_generic_descriptor";
- return base;
- }
-
- bool GenericDescriptorEnabled() const { return generic_descriptor_enabled_; }
-
- private:
- test::ScopedFieldTrials field_trial_;
- bool generic_descriptor_enabled_;
-};
-
#if defined(RTC_ENABLE_VP9)
TEST(FullStackTest, ForemanCifWithoutPacketLossVp9) {
auto fixture = CreateVideoQualityTestFixture();
@@ -125,7 +105,7 @@ TEST(FullStackTest, ForemanCifWithoutPacketLossVp9) {
fixture->RunWithAnalyzer(foreman_cif);
}
-TEST_P(GenericDescriptorTest, ForemanCifPlr5Vp9) {
+TEST(GenericDescriptorTest, ForemanCifPlr5Vp9) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging foreman_cif;
foreman_cif.call.send_side_bwe = true;
@@ -134,11 +114,11 @@ TEST_P(GenericDescriptorTest, ForemanCifPlr5Vp9) {
30000, 500000, 2000000, false,
"VP9", 1, 0, 0,
false, false, true, ClipNameToClipPath("foreman_cif")};
- foreman_cif.analyzer = {GetTestName("foreman_cif_delay_50_0_plr_5_VP9"), 0.0,
- 0.0, kFullStackTestDurationSecs};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_VP9_generic_descriptor",
+ 0.0, 0.0, kFullStackTestDurationSecs};
foreman_cif.config->loss_percent = 5;
foreman_cif.config->queue_delay_ms = 50;
- foreman_cif.call.generic_descriptor = GenericDescriptorEnabled();
+ foreman_cif.call.generic_descriptor = true;
fixture->RunWithAnalyzer(foreman_cif);
}
@@ -217,7 +197,7 @@ TEST(FullStackTest, MAYBE_ParisQcifWithoutPacketLoss) {
fixture->RunWithAnalyzer(paris_qcif);
}
-TEST_P(GenericDescriptorTest, ForemanCifWithoutPacketLoss) {
+TEST(GenericDescriptorTest, ForemanCifWithoutPacketLoss) {
auto fixture = CreateVideoQualityTestFixture();
// TODO(pbos): Decide on psnr/ssim thresholds for foreman_cif.
ParamsWithLogging foreman_cif;
@@ -227,13 +207,13 @@ TEST_P(GenericDescriptorTest, ForemanCifWithoutPacketLoss) {
700000, 700000, 700000, false,
"VP8", 1, 0, 0,
false, false, true, ClipNameToClipPath("foreman_cif")};
- foreman_cif.analyzer = {GetTestName("foreman_cif_net_delay_0_0_plr_0"), 0.0,
- 0.0, kFullStackTestDurationSecs};
- foreman_cif.call.generic_descriptor = GenericDescriptorEnabled();
+ foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0_generic_descriptor",
+ 0.0, 0.0, kFullStackTestDurationSecs};
+ foreman_cif.call.generic_descriptor = true;
fixture->RunWithAnalyzer(foreman_cif);
}
-TEST_P(GenericDescriptorTest, ForemanCif30kbpsWithoutPacketLoss) {
+TEST(GenericDescriptorTest, ForemanCif30kbpsWithoutPacketLoss) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging foreman_cif;
foreman_cif.call.send_side_bwe = true;
@@ -242,15 +222,16 @@ TEST_P(GenericDescriptorTest, ForemanCif30kbpsWithoutPacketLoss) {
30000, 30000, 30000, false,
"VP8", 1, 0, 0,
false, false, true, ClipNameToClipPath("foreman_cif")};
- foreman_cif.analyzer = {GetTestName("foreman_cif_30kbps_net_delay_0_0_plr_0"),
- 0.0, 0.0, kFullStackTestDurationSecs};
- foreman_cif.call.generic_descriptor = GenericDescriptorEnabled();
+ foreman_cif.analyzer = {
+ "foreman_cif_30kbps_net_delay_0_0_plr_0_generic_descriptor", 0.0, 0.0,
+ kFullStackTestDurationSecs};
+ foreman_cif.call.generic_descriptor = true;
fixture->RunWithAnalyzer(foreman_cif);
}
// TODO(webrtc:9722): Remove when experiment is cleaned up.
-TEST_P(GenericDescriptorTest,
- ForemanCif30kbpsWithoutPacketLossTrustedRateControl) {
+TEST(GenericDescriptorTest,
+ ForemanCif30kbpsWithoutPacketLossTrustedRateControl) {
test::ScopedFieldTrials override_field_trials(
AppendFieldTrials(kVp8TrustedRateControllerFieldTrial));
auto fixture = CreateVideoQualityTestFixture();
@@ -263,9 +244,10 @@ TEST_P(GenericDescriptorTest,
"VP8", 1, 0, 0,
false, false, true, ClipNameToClipPath("foreman_cif")};
foreman_cif.analyzer = {
- GetTestName("foreman_cif_30kbps_net_delay_0_0_plr_0_trusted_rate_ctrl"),
+ "foreman_cif_30kbps_net_delay_0_0_plr_0_trusted_rate_ctrl_generic_"
+ "descriptor",
0.0, 0.0, kFullStackTestDurationSecs};
- foreman_cif.call.generic_descriptor = GenericDescriptorEnabled();
+ foreman_cif.call.generic_descriptor = true;
fixture->RunWithAnalyzer(foreman_cif);
}
@@ -328,7 +310,7 @@ TEST(FullStackTest, ForemanCifMediaCapacitySmallLossAndQueue) {
fixture->RunWithAnalyzer(foreman_cif);
}
-TEST_P(GenericDescriptorTest, ForemanCifPlr5) {
+TEST(GenericDescriptorTest, ForemanCifPlr5) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging foreman_cif;
foreman_cif.call.send_side_bwe = true;
@@ -337,15 +319,15 @@ TEST_P(GenericDescriptorTest, ForemanCifPlr5) {
30000, 500000, 2000000, false,
"VP8", 1, 0, 0,
false, false, true, ClipNameToClipPath("foreman_cif")};
- foreman_cif.analyzer = {GetTestName("foreman_cif_delay_50_0_plr_5"), 0.0, 0.0,
- kFullStackTestDurationSecs};
+ foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_generic_descriptor",
+ 0.0, 0.0, kFullStackTestDurationSecs};
foreman_cif.config->loss_percent = 5;
foreman_cif.config->queue_delay_ms = 50;
- foreman_cif.call.generic_descriptor = GenericDescriptorEnabled();
+ foreman_cif.call.generic_descriptor = true;
fixture->RunWithAnalyzer(foreman_cif);
}
-TEST_P(GenericDescriptorTest, ForemanCifPlr5Ulpfec) {
+TEST(GenericDescriptorTest, ForemanCifPlr5Ulpfec) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging foreman_cif;
foreman_cif.call.send_side_bwe = true;
@@ -354,11 +336,12 @@ TEST_P(GenericDescriptorTest, ForemanCifPlr5Ulpfec) {
30000, 500000, 2000000, false,
"VP8", 1, 0, 0,
true, false, true, ClipNameToClipPath("foreman_cif")};
- foreman_cif.analyzer = {GetTestName("foreman_cif_delay_50_0_plr_5_ulpfec"),
- 0.0, 0.0, kFullStackTestDurationSecs};
+ foreman_cif.analyzer = {
+ "foreman_cif_delay_50_0_plr_5_ulpfec_generic_descriptor", 0.0, 0.0,
+ kFullStackTestDurationSecs};
foreman_cif.config->loss_percent = 5;
foreman_cif.config->queue_delay_ms = 50;
- foreman_cif.call.generic_descriptor = GenericDescriptorEnabled();
+ foreman_cif.call.generic_descriptor = true;
fixture->RunWithAnalyzer(foreman_cif);
}
@@ -442,7 +425,7 @@ TEST(FullStackTest, ForemanCif30kbpsWithoutPacketlossH264) {
fixture->RunWithAnalyzer(foreman_cif);
}
-TEST_P(GenericDescriptorTest, ForemanCifPlr5H264) {
+TEST(GenericDescriptorTest, ForemanCifPlr5H264) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging foreman_cif;
foreman_cif.call.send_side_bwe = true;
@@ -451,11 +434,12 @@ TEST_P(GenericDescriptorTest, ForemanCifPlr5H264) {
30000, 500000, 2000000, false,
"H264", 1, 0, 0,
false, false, true, ClipNameToClipPath("foreman_cif")};
- foreman_cif.analyzer = {GetTestName("foreman_cif_delay_50_0_plr_5_H264"), 0.0,
- 0.0, kFullStackTestDurationSecs};
+ foreman_cif.analyzer = {
+ "foreman_cif_delay_50_0_plr_5_H264_generic_descriptor", 0.0, 0.0,
+ kFullStackTestDurationSecs};
foreman_cif.config->loss_percent = 5;
foreman_cif.config->queue_delay_ms = 50;
- foreman_cif.call.generic_descriptor = GenericDescriptorEnabled();
+ foreman_cif.call.generic_descriptor = true;
fixture->RunWithAnalyzer(foreman_cif);
}
@@ -565,7 +549,7 @@ TEST(FullStackTest, ForemanCif500kbps100ms) {
fixture->RunWithAnalyzer(foreman_cif);
}
-TEST_P(GenericDescriptorTest, ForemanCif500kbps100msLimitedQueue) {
+TEST(GenericDescriptorTest, ForemanCif500kbps100msLimitedQueue) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging foreman_cif;
foreman_cif.call.send_side_bwe = true;
@@ -574,12 +558,13 @@ TEST_P(GenericDescriptorTest, ForemanCif500kbps100msLimitedQueue) {
30000, 500000, 2000000, false,
"VP8", 1, 0, 0,
false, false, true, ClipNameToClipPath("foreman_cif")};
- foreman_cif.analyzer = {GetTestName("foreman_cif_500kbps_100ms_32pkts_queue"),
- 0.0, 0.0, kFullStackTestDurationSecs};
+ foreman_cif.analyzer = {
+ "foreman_cif_500kbps_100ms_32pkts_queue_generic_descriptor", 0.0, 0.0,
+ kFullStackTestDurationSecs};
foreman_cif.config->queue_length_packets = 32;
foreman_cif.config->queue_delay_ms = 100;
foreman_cif.config->link_capacity_kbps = 500;
- foreman_cif.call.generic_descriptor = GenericDescriptorEnabled();
+ foreman_cif.call.generic_descriptor = true;
fixture->RunWithAnalyzer(foreman_cif);
}
@@ -666,7 +651,7 @@ TEST(FullStackTest, ConferenceMotionHd1TLModerateLimitsWhitelistVp8) {
fixture->RunWithAnalyzer(conf_motion_hd);
}
-TEST_P(GenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) {
+TEST(GenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging conf_motion_hd;
conf_motion_hd.call.send_side_bwe = true;
@@ -680,13 +665,13 @@ TEST_P(GenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) {
false, false,
false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
conf_motion_hd.analyzer = {
- GetTestName("conference_motion_hd_2tl_moderate_limits"), 0.0, 0.0,
+ "conference_motion_hd_2tl_moderate_limits_generic_descriptor", 0.0, 0.0,
kFullStackTestDurationSecs};
conf_motion_hd.config->queue_length_packets = 50;
conf_motion_hd.config->loss_percent = 3;
conf_motion_hd.config->queue_delay_ms = 100;
conf_motion_hd.config->link_capacity_kbps = 2000;
- conf_motion_hd.call.generic_descriptor = GenericDescriptorEnabled();
+ conf_motion_hd.call.generic_descriptor = true;
fixture->RunWithAnalyzer(conf_motion_hd);
}
@@ -867,7 +852,7 @@ TEST(FullStackTest, ScreenshareSlidesVP8_2TL_Scroll) {
fixture->RunWithAnalyzer(config);
}
-TEST_P(GenericDescriptorTest, ScreenshareSlidesVP8_2TL_LossyNet) {
+TEST(GenericDescriptorTest, ScreenshareSlidesVP8_2TL_LossyNet) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging screenshare;
screenshare.call.send_side_bwe = true;
@@ -875,12 +860,12 @@ TEST_P(GenericDescriptorTest, ScreenshareSlidesVP8_2TL_LossyNet) {
1000000, false, "VP8", 2, 1, 400000,
false, false, false, ""};
screenshare.screenshare[0] = {true, false, 10};
- screenshare.analyzer = {GetTestName("screenshare_slides_lossy_net"), 0.0, 0.0,
- kFullStackTestDurationSecs};
+ screenshare.analyzer = {"screenshare_slides_lossy_net_generic_descriptor",
+ 0.0, 0.0, kFullStackTestDurationSecs};
screenshare.config->loss_percent = 5;
screenshare.config->queue_delay_ms = 200;
screenshare.config->link_capacity_kbps = 500;
- screenshare.call.generic_descriptor = GenericDescriptorEnabled();
+ screenshare.call.generic_descriptor = true;
fixture->RunWithAnalyzer(screenshare);
}
@@ -1246,10 +1231,4 @@ TEST(FullStackTest, MAYBE_LargeRoomVP8_50thumb) {
fixture->RunWithAnalyzer(large_room);
}
-INSTANTIATE_TEST_SUITE_P(
- FullStackTest,
- GenericDescriptorTest,
- ::testing::Values("WebRTC-GenericDescriptor/Disabled/",
- "WebRTC-GenericDescriptor/Enabled/"));
-
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/pc_full_stack_tests.cc b/chromium/third_party/webrtc/video/pc_full_stack_tests.cc
index 4ec382ef54b..8a6029cee41 100644
--- a/chromium/third_party/webrtc/video/pc_full_stack_tests.cc
+++ b/chromium/third_party/webrtc/video/pc_full_stack_tests.cc
@@ -106,24 +106,6 @@ std::string ClipNameToClipPath(const char* clip_name) {
} // namespace
-class PCGenericDescriptorTest : public ::testing::TestWithParam<std::string> {
- public:
- PCGenericDescriptorTest()
- : field_trial_(AppendFieldTrials(GetParam())),
- generic_descriptor_enabled_(
- field_trial::IsEnabled("WebRTC-GenericDescriptor")) {}
-
- std::string GetTestName(std::string base) {
- if (generic_descriptor_enabled_)
- base += "_generic_descriptor";
- return base;
- }
-
- private:
- test::ScopedFieldTrials field_trial_;
- bool generic_descriptor_enabled_;
-};
-
#if defined(RTC_ENABLE_VP9)
TEST(PCFullStackTest, ForemanCifWithoutPacketLossVp9) {
std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
@@ -149,14 +131,14 @@ TEST(PCFullStackTest, ForemanCifWithoutPacketLossVp9) {
fixture->Run(std::move(run_params));
}
-TEST_P(PCGenericDescriptorTest, ForemanCifPlr5Vp9) {
+TEST(PCGenericDescriptorTest, ForemanCifPlr5Vp9) {
std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
CreateNetworkEmulationManager();
BuiltInNetworkBehaviorConfig config;
config.loss_percent = 5;
config.queue_delay_ms = 50;
auto fixture = CreateTestFixture(
- GetTestName("pc_foreman_cif_delay_50_0_plr_5_VP9"),
+ "pc_foreman_cif_delay_50_0_plr_5_VP9_generic_descriptor",
CreateTwoNetworkLinks(network_emulation_manager.get(), config),
[](PeerConfigurer* alice) {
VideoConfig video(352, 288, 30);
@@ -263,11 +245,11 @@ TEST(PCFullStackTest, ParisQcifWithoutPacketLoss) {
fixture->Run(std::move(run_params));
}
-TEST_P(PCGenericDescriptorTest, ForemanCifWithoutPacketLoss) {
+TEST(PCGenericDescriptorTest, ForemanCifWithoutPacketLoss) {
std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
CreateNetworkEmulationManager();
auto fixture = CreateTestFixture(
- GetTestName("pc_foreman_cif_net_delay_0_0_plr_0"),
+ "pc_foreman_cif_net_delay_0_0_plr_0_generic_descriptor",
CreateTwoNetworkLinks(network_emulation_manager.get(),
BuiltInNetworkBehaviorConfig()),
[](PeerConfigurer* alice) {
@@ -285,12 +267,12 @@ TEST_P(PCGenericDescriptorTest, ForemanCifWithoutPacketLoss) {
fixture->Run(std::move(run_params));
}
-TEST_P(PCGenericDescriptorTest, ForemanCif30kbpsWithoutPacketLoss) {
+TEST(PCGenericDescriptorTest, ForemanCif30kbpsWithoutPacketLoss) {
std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
CreateNetworkEmulationManager();
BuiltInNetworkBehaviorConfig config;
auto fixture = CreateTestFixture(
- GetTestName("pc_foreman_cif_30kbps_net_delay_0_0_plr_0"),
+ "pc_foreman_cif_30kbps_net_delay_0_0_plr_0_generic_descriptor",
CreateTwoNetworkLinks(network_emulation_manager.get(), config),
[](PeerConfigurer* alice) {
VideoConfig video(352, 288, 10);
@@ -299,11 +281,11 @@ TEST_P(PCGenericDescriptorTest, ForemanCif30kbpsWithoutPacketLoss) {
video, ClipNameToClipPath("foreman_cif"));
alice->AddVideoConfig(std::move(video), std::move(frame_generator));
- PeerConnectionInterface::BitrateParameters bitrate_params;
- bitrate_params.min_bitrate_bps = 30000;
- bitrate_params.current_bitrate_bps = 30000;
- bitrate_params.max_bitrate_bps = 30000;
- alice->SetBitrateParameters(bitrate_params);
+ BitrateSettings bitrate_settings;
+ bitrate_settings.min_bitrate_bps = 30000;
+ bitrate_settings.start_bitrate_bps = 30000;
+ bitrate_settings.max_bitrate_bps = 30000;
+ alice->SetBitrateSettings(bitrate_settings);
},
[](PeerConfigurer* bob) {});
RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
@@ -314,16 +296,16 @@ TEST_P(PCGenericDescriptorTest, ForemanCif30kbpsWithoutPacketLoss) {
}
// TODO(webrtc:9722): Remove when experiment is cleaned up.
-TEST_P(PCGenericDescriptorTest,
- ForemanCif30kbpsWithoutPacketLossTrustedRateControl) {
+TEST(PCGenericDescriptorTest,
+ ForemanCif30kbpsWithoutPacketLossTrustedRateControl) {
test::ScopedFieldTrials override_field_trials(
AppendFieldTrials(kVp8TrustedRateControllerFieldTrial));
std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
CreateNetworkEmulationManager();
BuiltInNetworkBehaviorConfig config;
auto fixture = CreateTestFixture(
- GetTestName(
- "pc_foreman_cif_30kbps_net_delay_0_0_plr_0_trusted_rate_ctrl"),
+ "pc_foreman_cif_30kbps_net_delay_0_0_plr_0_trusted_rate_ctrl_generic_"
+ "descriptor",
CreateTwoNetworkLinks(network_emulation_manager.get(), config),
[](PeerConfigurer* alice) {
VideoConfig video(352, 288, 10);
@@ -332,11 +314,11 @@ TEST_P(PCGenericDescriptorTest,
video, ClipNameToClipPath("foreman_cif"));
alice->AddVideoConfig(std::move(video), std::move(frame_generator));
- PeerConnectionInterface::BitrateParameters bitrate_params;
- bitrate_params.min_bitrate_bps = 30000;
- bitrate_params.current_bitrate_bps = 30000;
- bitrate_params.max_bitrate_bps = 30000;
- alice->SetBitrateParameters(bitrate_params);
+ BitrateSettings bitrate_settings;
+ bitrate_settings.min_bitrate_bps = 30000;
+ bitrate_settings.start_bitrate_bps = 30000;
+ bitrate_settings.max_bitrate_bps = 30000;
+ alice->SetBitrateSettings(bitrate_settings);
},
[](PeerConfigurer* bob) {});
RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
@@ -478,14 +460,14 @@ TEST(PCFullStackTest, ForemanCifMediaCapacitySmallLossAndQueue) {
fixture->Run(std::move(run_params));
}
-TEST_P(PCGenericDescriptorTest, ForemanCifPlr5) {
+TEST(PCGenericDescriptorTest, ForemanCifPlr5) {
std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
CreateNetworkEmulationManager();
BuiltInNetworkBehaviorConfig config;
config.loss_percent = 5;
config.queue_delay_ms = 50;
auto fixture = CreateTestFixture(
- GetTestName("pc_foreman_cif_delay_50_0_plr_5"),
+ "pc_foreman_cif_delay_50_0_plr_5_generic_descriptor",
CreateTwoNetworkLinks(network_emulation_manager.get(), config),
[](PeerConfigurer* alice) {
VideoConfig video(352, 288, 30);
@@ -502,14 +484,14 @@ TEST_P(PCGenericDescriptorTest, ForemanCifPlr5) {
fixture->Run(std::move(run_params));
}
-TEST_P(PCGenericDescriptorTest, ForemanCifPlr5Ulpfec) {
+TEST(PCGenericDescriptorTest, ForemanCifPlr5Ulpfec) {
std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
CreateNetworkEmulationManager();
BuiltInNetworkBehaviorConfig config;
config.loss_percent = 5;
config.queue_delay_ms = 50;
auto fixture = CreateTestFixture(
- GetTestName("pc_foreman_cif_delay_50_0_plr_5_ulpfec"),
+ "pc_foreman_cif_delay_50_0_plr_5_ulpfec_generic_descriptor",
CreateTwoNetworkLinks(network_emulation_manager.get(), config),
[](PeerConfigurer* alice) {
VideoConfig video(352, 288, 30);
@@ -637,11 +619,11 @@ TEST(PCFullStackTest, ForemanCif30kbpsWithoutPacketlossH264) {
video, ClipNameToClipPath("foreman_cif"));
alice->AddVideoConfig(std::move(video), std::move(frame_generator));
- PeerConnectionInterface::BitrateParameters bitrate_params;
- bitrate_params.min_bitrate_bps = 30000;
- bitrate_params.current_bitrate_bps = 30000;
- bitrate_params.max_bitrate_bps = 30000;
- alice->SetBitrateParameters(bitrate_params);
+ BitrateSettings bitrate_settings;
+ bitrate_settings.min_bitrate_bps = 30000;
+ bitrate_settings.start_bitrate_bps = 30000;
+ bitrate_settings.max_bitrate_bps = 30000;
+ alice->SetBitrateSettings(bitrate_settings);
},
[](PeerConfigurer* bob) {});
RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
@@ -651,14 +633,14 @@ TEST(PCFullStackTest, ForemanCif30kbpsWithoutPacketlossH264) {
fixture->Run(std::move(run_params));
}
-TEST_P(PCGenericDescriptorTest, ForemanCifPlr5H264) {
+TEST(PCGenericDescriptorTest, ForemanCifPlr5H264) {
std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
CreateNetworkEmulationManager();
BuiltInNetworkBehaviorConfig config;
config.loss_percent = 5;
config.queue_delay_ms = 50;
auto fixture = CreateTestFixture(
- GetTestName("pc_foreman_cif_delay_50_0_plr_5_H264"),
+ "pc_foreman_cif_delay_50_0_plr_5_H264_generic_descriptor",
CreateTwoNetworkLinks(network_emulation_manager.get(), config),
[](PeerConfigurer* alice) {
VideoConfig video(352, 288, 30);
@@ -828,7 +810,7 @@ TEST(PCFullStackTest, ForemanCif500kbps100ms) {
fixture->Run(std::move(run_params));
}
-TEST_P(PCGenericDescriptorTest, ForemanCif500kbps100msLimitedQueue) {
+TEST(PCGenericDescriptorTest, ForemanCif500kbps100msLimitedQueue) {
std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
CreateNetworkEmulationManager();
BuiltInNetworkBehaviorConfig config;
@@ -836,7 +818,7 @@ TEST_P(PCGenericDescriptorTest, ForemanCif500kbps100msLimitedQueue) {
config.queue_delay_ms = 100;
config.link_capacity_kbps = 500;
auto fixture = CreateTestFixture(
- GetTestName("pc_foreman_cif_500kbps_100ms_32pkts_queue"),
+ "pc_foreman_cif_500kbps_100ms_32pkts_queue_generic_descriptor",
CreateTwoNetworkLinks(network_emulation_manager.get(), config),
[](PeerConfigurer* alice) {
VideoConfig video(352, 288, 30);
@@ -956,7 +938,7 @@ TEST(PCFullStackTest, ConferenceMotionHd1TLModerateLimitsWhitelistVp8) {
/*
// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
-TEST_P(PCGenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) {
+TEST(PCGenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging conf_motion_hd;
conf_motion_hd.call.send_side_bwe = true;
@@ -970,7 +952,7 @@ TEST_P(PCGenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) {
false, false,
false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
conf_motion_hd.analyzer = {
- GetTestName("conference_motion_hd_2tl_moderate_limits"), 0.0, 0.0,
+ "conference_motion_hd_2tl_moderate_limits_generic_descriptor", 0.0, 0.0,
kTestDurationSec};
conf_motion_hd.config->queue_length_packets = 50;
conf_motion_hd.config->loss_percent = 3;
@@ -1295,7 +1277,7 @@ TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Scroll) {
}
// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework
-TEST_P(PCGenericDescriptorTest, ScreenshareSlidesVP8_2TL_LossyNet) {
+TEST(PCGenericDescriptorTest, ScreenshareSlidesVP8_2TL_LossyNet) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging screenshare;
screenshare.call.send_side_bwe = true;
@@ -1303,12 +1285,12 @@ TEST_P(PCGenericDescriptorTest, ScreenshareSlidesVP8_2TL_LossyNet) {
1000000, false, "VP8", 2, 1, 400000,
false, false, false, ""};
screenshare.screenshare[0] = {true, false, 10};
- screenshare.analyzer = {GetTestName("screenshare_slides_lossy_net"), 0.0, 0.0,
- kTestDurationSec};
+ screenshare.analyzer = {"screenshare_slides_lossy_net_generic_descriptor",
+ 0.0, 0.0, kTestDurationSec};
screenshare.config->loss_percent = 5;
screenshare.config->queue_delay_ms = 200;
screenshare.config->link_capacity_kbps = 500;
- screenshare.call.generic_descriptor = GenericDescriptorEnabled();
+ screenshare.call.generic_descriptor = true;
fixture->RunWithAnalyzer(screenshare);
}
@@ -1813,12 +1795,6 @@ TEST(PCFullStackTest, MAYBE_LargeRoomVP8_50thumb) {
}
*/
-INSTANTIATE_TEST_SUITE_P(
- PCFullStackTest,
- PCGenericDescriptorTest,
- ::testing::Values("WebRTC-GenericDescriptor/Disabled/",
- "WebRTC-GenericDescriptor/Enabled/"));
-
class PCDualStreamsTest : public ::testing::TestWithParam<int> {};
/*
diff --git a/chromium/third_party/webrtc/video/receive_statistics_proxy2.cc b/chromium/third_party/webrtc/video/receive_statistics_proxy2.cc
index 0ba4d5d44be..3cce3c8ea4e 100644
--- a/chromium/third_party/webrtc/video/receive_statistics_proxy2.cc
+++ b/chromium/third_party/webrtc/video/receive_statistics_proxy2.cc
@@ -782,10 +782,10 @@ void ReceiveStatisticsProxy::RtcpPacketTypesCounterUpdated(
return;
if (!IsCurrentTaskQueueOrThread(worker_thread_)) {
- // RtpRtcp::Configuration has a single RtcpPacketTypeCounterObserver and
- // that same configuration may be used for both receiver and sender
- // (see ModuleRtpRtcpImpl::ModuleRtpRtcpImpl).
- // The RTCPSender implementation currently makes calls to this function on a
+ // RtpRtcpInterface::Configuration has a single
+ // RtcpPacketTypeCounterObserver and that same configuration may be used for
+ // both receiver and sender (see ModuleRtpRtcpImpl::ModuleRtpRtcpImpl). The
+ // RTCPSender implementation currently makes calls to this function on a
// process thread whereas the RTCPReceiver implementation calls back on the
// [main] worker thread.
// So until the sender implementation has been updated, we work around this
@@ -1002,7 +1002,8 @@ void ReceiveStatisticsProxy::OnCompleteFrame(bool is_keyframe,
}
void ReceiveStatisticsProxy::OnDroppedFrames(uint32_t frames_dropped) {
- RTC_DCHECK_RUN_ON(&decode_queue_);
+ // Can be called on either the decode queue or the worker thread
+ // See FrameBuffer2 for more details.
worker_thread_->PostTask(ToQueuedTask(task_safety_, [frames_dropped, this]() {
RTC_DCHECK_RUN_ON(&main_thread_);
stats_.frames_dropped += frames_dropped;
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver.cc b/chromium/third_party/webrtc/video/rtp_video_stream_receiver.cc
index e1dd736be61..8bbb5866a07 100644
--- a/chromium/third_party/webrtc/video/rtp_video_stream_receiver.cc
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver.cc
@@ -25,7 +25,6 @@
#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
#include "modules/rtp_rtcp/include/receive_statistics.h"
#include "modules/rtp_rtcp/include/rtp_cvo.h"
-#include "modules/rtp_rtcp/include/rtp_rtcp.h"
#include "modules/rtp_rtcp/include/ulpfec_receiver.h"
#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
@@ -35,13 +34,14 @@
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
#include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h"
#include "modules/utility/include/process_thread.h"
+#include "modules/video_coding/deprecated/nack_module.h"
#include "modules/video_coding/frame_object.h"
#include "modules/video_coding/h264_sprop_parameter_sets.h"
#include "modules/video_coding/h264_sps_pps_tracker.h"
-#include "modules/video_coding/nack_module.h"
#include "modules/video_coding/packet_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/location.h"
@@ -77,8 +77,6 @@ int PacketBufferMaxSize() {
return packet_buffer_max_size;
}
-} // namespace
-
std::unique_ptr<RtpRtcp> CreateRtpRtcpModule(
Clock* clock,
ReceiveStatistics* receive_statistics,
@@ -87,7 +85,7 @@ std::unique_ptr<RtpRtcp> CreateRtpRtcpModule(
RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
RtcpCnameCallback* rtcp_cname_callback,
uint32_t local_ssrc) {
- RtpRtcp::Configuration configuration;
+ RtpRtcpInterface::Configuration configuration;
configuration.clock = clock;
configuration.audio = false;
configuration.receiver_only = true;
@@ -99,7 +97,7 @@ std::unique_ptr<RtpRtcp> CreateRtpRtcpModule(
configuration.rtcp_cname_callback = rtcp_cname_callback;
configuration.local_media_ssrc = local_ssrc;
- std::unique_ptr<RtpRtcp> rtp_rtcp = RtpRtcp::Create(configuration);
+ std::unique_ptr<RtpRtcp> rtp_rtcp = RtpRtcp::DEPRECATED_Create(configuration);
rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);
return rtp_rtcp;
@@ -107,6 +105,8 @@ std::unique_ptr<RtpRtcp> CreateRtpRtcpModule(
static const int kPacketLogIntervalMs = 10000;
+} // namespace
+
RtpVideoStreamReceiver::RtcpFeedbackBuffer::RtcpFeedbackBuffer(
KeyFrameRequestSender* key_frame_request_sender,
NackSender* nack_sender,
@@ -136,7 +136,7 @@ void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendNack(
if (!buffering_allowed) {
// Note that while *buffering* is not allowed, *batching* is, meaning that
// previously buffered messages may be sent along with the current message.
- SendBufferedRtcpFeedback();
+ SendRtcpFeedback(ConsumeRtcpFeedbackLocked());
}
}
@@ -155,34 +155,44 @@ void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendLossNotification(
}
void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendBufferedRtcpFeedback() {
- bool request_key_frame = false;
- std::vector<uint16_t> nack_sequence_numbers;
- absl::optional<LossNotificationState> lntf_state;
+ SendRtcpFeedback(ConsumeRtcpFeedback());
+}
- {
- rtc::CritScope lock(&cs_);
- std::swap(request_key_frame, request_key_frame_);
- std::swap(nack_sequence_numbers, nack_sequence_numbers_);
- std::swap(lntf_state, lntf_state_);
- }
+RtpVideoStreamReceiver::RtcpFeedbackBuffer::ConsumedRtcpFeedback
+RtpVideoStreamReceiver::RtcpFeedbackBuffer::ConsumeRtcpFeedback() {
+ rtc::CritScope lock(&cs_);
+ return ConsumeRtcpFeedbackLocked();
+}
+
+RtpVideoStreamReceiver::RtcpFeedbackBuffer::ConsumedRtcpFeedback
+RtpVideoStreamReceiver::RtcpFeedbackBuffer::ConsumeRtcpFeedbackLocked() {
+ ConsumedRtcpFeedback feedback;
+ std::swap(feedback.request_key_frame, request_key_frame_);
+ std::swap(feedback.nack_sequence_numbers, nack_sequence_numbers_);
+ std::swap(feedback.lntf_state, lntf_state_);
+ return feedback;
+}
- if (lntf_state) {
+void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendRtcpFeedback(
+ ConsumedRtcpFeedback feedback) {
+ if (feedback.lntf_state) {
// If either a NACK or a key frame request is sent, we should buffer
// the LNTF and wait for them (NACK or key frame request) to trigger
// the compound feedback message.
// Otherwise, the LNTF should be sent out immediately.
const bool buffering_allowed =
- request_key_frame || !nack_sequence_numbers.empty();
+ feedback.request_key_frame || !feedback.nack_sequence_numbers.empty();
loss_notification_sender_->SendLossNotification(
- lntf_state->last_decoded_seq_num, lntf_state->last_received_seq_num,
- lntf_state->decodability_flag, buffering_allowed);
+ feedback.lntf_state->last_decoded_seq_num,
+ feedback.lntf_state->last_received_seq_num,
+ feedback.lntf_state->decodability_flag, buffering_allowed);
}
- if (request_key_frame) {
+ if (feedback.request_key_frame) {
key_frame_request_sender_->RequestKeyFrame();
- } else if (!nack_sequence_numbers.empty()) {
- nack_sender_->SendNack(nack_sequence_numbers, true);
+ } else if (!feedback.nack_sequence_numbers.empty()) {
+ nack_sender_->SendNack(feedback.nack_sequence_numbers, true);
}
}
@@ -305,8 +315,8 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver(
}
if (config_.rtp.nack.rtp_history_ms != 0) {
- nack_module_ = std::make_unique<NackModule>(clock_, &rtcp_feedback_buffer_,
- &rtcp_feedback_buffer_);
+ nack_module_ = std::make_unique<DEPRECATED_NackModule>(
+ clock_, &rtcp_feedback_buffer_, &rtcp_feedback_buffer_);
process_thread_->RegisterModule(nack_module_.get(), RTC_FROM_HERE);
}
@@ -507,7 +517,6 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
video_header.content_type = VideoContentType::UNSPECIFIED;
video_header.video_timing.flags = VideoSendTiming::kInvalid;
video_header.is_last_packet_in_frame |= rtp_packet.Marker();
- video_header.frame_marking.temporal_id = kNoTemporalIdx;
if (const auto* vp9_header =
absl::get_if<RTPVideoHeaderVP9>(&video_header.video_type_header)) {
@@ -525,7 +534,6 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
} else {
rtp_packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
}
- rtp_packet.GetExtension<FrameMarkingExtension>(&video_header.frame_marking);
ParseGenericDependenciesResult generic_descriptor_state =
ParseGenericDependenciesExtension(rtp_packet, &video_header);
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver.h b/chromium/third_party/webrtc/video/rtp_video_stream_receiver.h
index 0289f23a078..68e23eee53c 100644
--- a/chromium/third_party/webrtc/video/rtp_video_stream_receiver.h
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver.h
@@ -53,7 +53,7 @@
namespace webrtc {
-class NackModule;
+class DEPRECATED_NackModule;
class PacketRouter;
class ProcessThread;
class ReceiveStatistics;
@@ -69,7 +69,8 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
public KeyFrameRequestSender,
public video_coding::OnCompleteFrameCallback,
public OnDecryptedFrameCallback,
- public OnDecryptionStatusChangeCallback {
+ public OnDecryptionStatusChangeCallback,
+ public RtpVideoFrameReceiver {
public:
// DEPRECATED due to dependency on ReceiveStatisticsProxy.
RtpVideoStreamReceiver(
@@ -205,9 +206,11 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
void AddSecondarySink(RtpPacketSinkInterface* sink);
void RemoveSecondarySink(const RtpPacketSinkInterface* sink);
- virtual void ManageFrame(std::unique_ptr<video_coding::RtpFrameObject> frame);
-
private:
+ // Implements RtpVideoFrameReceiver.
+ void ManageFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame) override;
+
// Used for buffering RTCP feedback messages and sending them all together.
// Note:
// 1. Key frame requests and NACKs are mutually exclusive, with the
@@ -225,35 +228,23 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
~RtcpFeedbackBuffer() override = default;
// KeyFrameRequestSender implementation.
- void RequestKeyFrame() override;
+ void RequestKeyFrame() RTC_LOCKS_EXCLUDED(cs_) override;
// NackSender implementation.
void SendNack(const std::vector<uint16_t>& sequence_numbers,
- bool buffering_allowed) override;
+ bool buffering_allowed) RTC_LOCKS_EXCLUDED(cs_) override;
// LossNotificationSender implementation.
void SendLossNotification(uint16_t last_decoded_seq_num,
uint16_t last_received_seq_num,
bool decodability_flag,
- bool buffering_allowed) override;
+ bool buffering_allowed)
+ RTC_LOCKS_EXCLUDED(cs_) override;
// Send all RTCP feedback messages buffered thus far.
- void SendBufferedRtcpFeedback();
+ void SendBufferedRtcpFeedback() RTC_LOCKS_EXCLUDED(cs_);
private:
- KeyFrameRequestSender* const key_frame_request_sender_;
- NackSender* const nack_sender_;
- LossNotificationSender* const loss_notification_sender_;
-
- // NACKs are accessible from two threads due to nack_module_ being a module.
- rtc::CriticalSection cs_;
-
- // Key-frame-request-related state.
- bool request_key_frame_ RTC_GUARDED_BY(cs_);
-
- // NACK-related state.
- std::vector<uint16_t> nack_sequence_numbers_ RTC_GUARDED_BY(cs_);
-
// LNTF-related state.
struct LossNotificationState {
LossNotificationState(uint16_t last_decoded_seq_num,
@@ -267,6 +258,31 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
uint16_t last_received_seq_num;
bool decodability_flag;
};
+ struct ConsumedRtcpFeedback {
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_sequence_numbers;
+ absl::optional<LossNotificationState> lntf_state;
+ };
+
+ ConsumedRtcpFeedback ConsumeRtcpFeedback() RTC_LOCKS_EXCLUDED(cs_);
+ ConsumedRtcpFeedback ConsumeRtcpFeedbackLocked()
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(cs_);
+ // This method is called both with and without cs_ held.
+ void SendRtcpFeedback(ConsumedRtcpFeedback feedback);
+
+ KeyFrameRequestSender* const key_frame_request_sender_;
+ NackSender* const nack_sender_;
+ LossNotificationSender* const loss_notification_sender_;
+
+ // NACKs are accessible from two threads due to nack_module_ being a module.
+ rtc::CriticalSection cs_;
+
+ // Key-frame-request-related state.
+ bool request_key_frame_ RTC_GUARDED_BY(cs_);
+
+ // NACK-related state.
+ std::vector<uint16_t> nack_sequence_numbers_ RTC_GUARDED_BY(cs_);
+
absl::optional<LossNotificationState> lntf_state_ RTC_GUARDED_BY(cs_);
};
enum ParseGenericDependenciesResult {
@@ -317,7 +333,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
KeyFrameRequestSender* const keyframe_request_sender_;
RtcpFeedbackBuffer rtcp_feedback_buffer_;
- std::unique_ptr<NackModule> nack_module_;
+ std::unique_ptr<DEPRECATED_NackModule> nack_module_;
std::unique_ptr<LossNotificationController> loss_notification_controller_;
video_coding::PacketBuffer packet_buffer_;
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver2.cc b/chromium/third_party/webrtc/video/rtp_video_stream_receiver2.cc
new file mode 100644
index 00000000000..3f11bb77c48
--- /dev/null
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver2.cc
@@ -0,0 +1,1154 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/rtp_video_stream_receiver2.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
+#include "absl/memory/memory.h"
+#include "absl/types/optional.h"
+#include "media/base/media_constants.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_cvo.h"
+#include "modules/rtp_rtcp/include/ulpfec_receiver.h"
+#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h"
+#include "modules/utility/include/process_thread.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/h264_sprop_parameter_sets.h"
+#include "modules/video_coding/h264_sps_pps_tracker.h"
+#include "modules/video_coding/nack_module2.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/location.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/ntp_time.h"
+#include "video/receive_statistics_proxy2.h"
+
+namespace webrtc {
+
+namespace {
+// TODO(philipel): Change kPacketBufferStartSize back to 32 in M63 see:
+// crbug.com/752886
+constexpr int kPacketBufferStartSize = 512;
+constexpr int kPacketBufferMaxSize = 2048;
+
+int PacketBufferMaxSize() {
+ // The group here must be a positive power of 2, in which case that is used as
+ // size. All other values shall result in the default value being used.
+ const std::string group_name =
+ webrtc::field_trial::FindFullName("WebRTC-PacketBufferMaxSize");
+ int packet_buffer_max_size = kPacketBufferMaxSize;
+ if (!group_name.empty() &&
+ (sscanf(group_name.c_str(), "%d", &packet_buffer_max_size) != 1 ||
+ packet_buffer_max_size <= 0 ||
+ // Verify that the number is a positive power of 2.
+ (packet_buffer_max_size & (packet_buffer_max_size - 1)) != 0)) {
+ RTC_LOG(LS_WARNING) << "Invalid packet buffer max size: " << group_name;
+ packet_buffer_max_size = kPacketBufferMaxSize;
+ }
+ return packet_buffer_max_size;
+}
+
+std::unique_ptr<ModuleRtpRtcpImpl2> CreateRtpRtcpModule(
+ Clock* clock,
+ ReceiveStatistics* receive_statistics,
+ Transport* outgoing_transport,
+ RtcpRttStats* rtt_stats,
+ RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
+ RtcpCnameCallback* rtcp_cname_callback,
+ uint32_t local_ssrc) {
+ RtpRtcpInterface::Configuration configuration;
+ configuration.clock = clock;
+ configuration.audio = false;
+ configuration.receiver_only = true;
+ configuration.receive_statistics = receive_statistics;
+ configuration.outgoing_transport = outgoing_transport;
+ configuration.rtt_stats = rtt_stats;
+ configuration.rtcp_packet_type_counter_observer =
+ rtcp_packet_type_counter_observer;
+ configuration.rtcp_cname_callback = rtcp_cname_callback;
+ configuration.local_media_ssrc = local_ssrc;
+
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp =
+ ModuleRtpRtcpImpl2::Create(configuration);
+ rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);
+
+ return rtp_rtcp;
+}
+
+std::unique_ptr<NackModule2> MaybeConstructNackModule(
+ TaskQueueBase* current_queue,
+ const VideoReceiveStream::Config& config,
+ Clock* clock,
+ NackSender* nack_sender,
+ KeyFrameRequestSender* keyframe_request_sender) {
+ if (config.rtp.nack.rtp_history_ms == 0)
+ return nullptr;
+
+ return std::make_unique<NackModule2>(current_queue, clock, nack_sender,
+ keyframe_request_sender);
+}
+
+static const int kPacketLogIntervalMs = 10000;
+
+} // namespace
+
+RtpVideoStreamReceiver2::RtcpFeedbackBuffer::RtcpFeedbackBuffer(
+ KeyFrameRequestSender* key_frame_request_sender,
+ NackSender* nack_sender,
+ LossNotificationSender* loss_notification_sender)
+ : key_frame_request_sender_(key_frame_request_sender),
+ nack_sender_(nack_sender),
+ loss_notification_sender_(loss_notification_sender),
+ request_key_frame_(false) {
+ RTC_DCHECK(key_frame_request_sender_);
+ RTC_DCHECK(nack_sender_);
+ RTC_DCHECK(loss_notification_sender_);
+}
+
+void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::RequestKeyFrame() {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ request_key_frame_ = true;
+}
+
+void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendNack(
+ const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ RTC_DCHECK(!sequence_numbers.empty());
+ nack_sequence_numbers_.insert(nack_sequence_numbers_.end(),
+ sequence_numbers.cbegin(),
+ sequence_numbers.cend());
+ if (!buffering_allowed) {
+ // Note that while *buffering* is not allowed, *batching* is, meaning that
+ // previously buffered messages may be sent along with the current message.
+ SendBufferedRtcpFeedback();
+ }
+}
+
+void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendLossNotification(
+ uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ RTC_DCHECK(buffering_allowed);
+ RTC_DCHECK(!lntf_state_)
+ << "SendLossNotification() called twice in a row with no call to "
+ "SendBufferedRtcpFeedback() in between.";
+ lntf_state_ = absl::make_optional<LossNotificationState>(
+ last_decoded_seq_num, last_received_seq_num, decodability_flag);
+}
+
+void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendBufferedRtcpFeedback() {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_sequence_numbers;
+ absl::optional<LossNotificationState> lntf_state;
+
+ std::swap(request_key_frame, request_key_frame_);
+ std::swap(nack_sequence_numbers, nack_sequence_numbers_);
+ std::swap(lntf_state, lntf_state_);
+
+ if (lntf_state) {
+ // If either a NACK or a key frame request is sent, we should buffer
+ // the LNTF and wait for them (NACK or key frame request) to trigger
+ // the compound feedback message.
+ // Otherwise, the LNTF should be sent out immediately.
+ const bool buffering_allowed =
+ request_key_frame || !nack_sequence_numbers.empty();
+
+ loss_notification_sender_->SendLossNotification(
+ lntf_state->last_decoded_seq_num, lntf_state->last_received_seq_num,
+ lntf_state->decodability_flag, buffering_allowed);
+ }
+
+ if (request_key_frame) {
+ key_frame_request_sender_->RequestKeyFrame();
+ } else if (!nack_sequence_numbers.empty()) {
+ nack_sender_->SendNack(nack_sequence_numbers, true);
+ }
+}
+
+RtpVideoStreamReceiver2::RtpVideoStreamReceiver2(
+ TaskQueueBase* current_queue,
+ Clock* clock,
+ Transport* transport,
+ RtcpRttStats* rtt_stats,
+ PacketRouter* packet_router,
+ const VideoReceiveStream::Config* config,
+ ReceiveStatistics* rtp_receive_statistics,
+ RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
+ RtcpCnameCallback* rtcp_cname_callback,
+ ProcessThread* process_thread,
+ NackSender* nack_sender,
+ KeyFrameRequestSender* keyframe_request_sender,
+ video_coding::OnCompleteFrameCallback* complete_frame_callback,
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer)
+ : clock_(clock),
+ config_(*config),
+ packet_router_(packet_router),
+ process_thread_(process_thread),
+ ntp_estimator_(clock),
+ rtp_header_extensions_(config_.rtp.extensions),
+ forced_playout_delay_max_ms_("max_ms", absl::nullopt),
+ forced_playout_delay_min_ms_("min_ms", absl::nullopt),
+ rtp_receive_statistics_(rtp_receive_statistics),
+ ulpfec_receiver_(UlpfecReceiver::Create(config->rtp.remote_ssrc,
+ this,
+ config->rtp.extensions)),
+ receiving_(false),
+ last_packet_log_ms_(-1),
+ rtp_rtcp_(CreateRtpRtcpModule(clock,
+ rtp_receive_statistics_,
+ transport,
+ rtt_stats,
+ rtcp_packet_type_counter_observer,
+ rtcp_cname_callback,
+ config_.rtp.local_ssrc)),
+ complete_frame_callback_(complete_frame_callback),
+ keyframe_request_sender_(keyframe_request_sender),
+ // TODO(bugs.webrtc.org/10336): Let |rtcp_feedback_buffer_| communicate
+ // directly with |rtp_rtcp_|.
+ rtcp_feedback_buffer_(this, nack_sender, this),
+ nack_module_(MaybeConstructNackModule(current_queue,
+ config_,
+ clock_,
+ &rtcp_feedback_buffer_,
+ &rtcp_feedback_buffer_)),
+ packet_buffer_(clock_, kPacketBufferStartSize, PacketBufferMaxSize()),
+ has_received_frame_(false),
+ frames_decryptable_(false),
+ absolute_capture_time_receiver_(clock) {
+ constexpr bool remb_candidate = true;
+ if (packet_router_)
+ packet_router_->AddReceiveRtpModule(rtp_rtcp_.get(), remb_candidate);
+
+ RTC_DCHECK(config_.rtp.rtcp_mode != RtcpMode::kOff)
+ << "A stream should not be configured with RTCP disabled. This value is "
+ "reserved for internal usage.";
+ // TODO(pbos): What's an appropriate local_ssrc for receive-only streams?
+ RTC_DCHECK(config_.rtp.local_ssrc != 0);
+ RTC_DCHECK(config_.rtp.remote_ssrc != config_.rtp.local_ssrc);
+
+ rtp_rtcp_->SetRTCPStatus(config_.rtp.rtcp_mode);
+ rtp_rtcp_->SetRemoteSSRC(config_.rtp.remote_ssrc);
+
+ static const int kMaxPacketAgeToNack = 450;
+ const int max_reordering_threshold = (config_.rtp.nack.rtp_history_ms > 0)
+ ? kMaxPacketAgeToNack
+ : kDefaultMaxReorderingThreshold;
+ rtp_receive_statistics_->SetMaxReorderingThreshold(config_.rtp.remote_ssrc,
+ max_reordering_threshold);
+ // TODO(nisse): For historic reasons, we applied the above
+ // max_reordering_threshold also for RTX stats, which makes little sense since
+ // we don't NACK rtx packets. Consider deleting the below block, and rely on
+ // the default threshold.
+ if (config_.rtp.rtx_ssrc) {
+ rtp_receive_statistics_->SetMaxReorderingThreshold(
+ config_.rtp.rtx_ssrc, max_reordering_threshold);
+ }
+ if (config_.rtp.rtcp_xr.receiver_reference_time_report)
+ rtp_rtcp_->SetRtcpXrRrtrStatus(true);
+
+ ParseFieldTrial(
+ {&forced_playout_delay_max_ms_, &forced_playout_delay_min_ms_},
+ field_trial::FindFullName("WebRTC-ForcePlayoutDelay"));
+
+ process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE);
+
+ if (config_.rtp.lntf.enabled) {
+ loss_notification_controller_ =
+ std::make_unique<LossNotificationController>(&rtcp_feedback_buffer_,
+ &rtcp_feedback_buffer_);
+ }
+
+ reference_finder_ =
+ std::make_unique<video_coding::RtpFrameReferenceFinder>(this);
+
+ // Only construct the encrypted receiver if frame encryption is enabled.
+ if (config_.crypto_options.sframe.require_frame_encryption) {
+ buffered_frame_decryptor_ =
+ std::make_unique<BufferedFrameDecryptor>(this, this);
+ if (frame_decryptor != nullptr) {
+ buffered_frame_decryptor_->SetFrameDecryptor(std::move(frame_decryptor));
+ }
+ }
+
+ if (frame_transformer) {
+ frame_transformer_delegate_ = new rtc::RefCountedObject<
+ RtpVideoStreamReceiverFrameTransformerDelegate>(
+ this, std::move(frame_transformer), rtc::Thread::Current(),
+ config_.rtp.remote_ssrc);
+ frame_transformer_delegate_->Init();
+ }
+}
+
+RtpVideoStreamReceiver2::~RtpVideoStreamReceiver2() {
+ RTC_DCHECK(secondary_sinks_.empty());
+
+ process_thread_->DeRegisterModule(rtp_rtcp_.get());
+
+ if (packet_router_)
+ packet_router_->RemoveReceiveRtpModule(rtp_rtcp_.get());
+ UpdateHistograms();
+ if (frame_transformer_delegate_)
+ frame_transformer_delegate_->Reset();
+}
+
+void RtpVideoStreamReceiver2::AddReceiveCodec(
+ const VideoCodec& video_codec,
+ const std::map<std::string, std::string>& codec_params,
+ bool raw_payload) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ payload_type_map_.emplace(
+ video_codec.plType,
+ raw_payload ? std::make_unique<VideoRtpDepacketizerRaw>()
+ : CreateVideoRtpDepacketizer(video_codec.codecType));
+ pt_codec_params_.emplace(video_codec.plType, codec_params);
+}
+
+absl::optional<Syncable::Info> RtpVideoStreamReceiver2::GetSyncInfo() const {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ Syncable::Info info;
+ if (rtp_rtcp_->RemoteNTP(&info.capture_time_ntp_secs,
+ &info.capture_time_ntp_frac, nullptr, nullptr,
+ &info.capture_time_source_clock) != 0) {
+ return absl::nullopt;
+ }
+
+ if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_ms_) {
+ return absl::nullopt;
+ }
+ info.latest_received_capture_timestamp = *last_received_rtp_timestamp_;
+ info.latest_receive_time_ms = *last_received_rtp_system_time_ms_;
+
+ // Leaves info.current_delay_ms uninitialized.
+ return info;
+}
+
+RtpVideoStreamReceiver2::ParseGenericDependenciesResult
+RtpVideoStreamReceiver2::ParseGenericDependenciesExtension(
+ const RtpPacketReceived& rtp_packet,
+ RTPVideoHeader* video_header) {
+ if (rtp_packet.HasExtension<RtpDependencyDescriptorExtension>()) {
+ webrtc::DependencyDescriptor dependency_descriptor;
+ if (!rtp_packet.GetExtension<RtpDependencyDescriptorExtension>(
+ video_structure_.get(), &dependency_descriptor)) {
+ // Descriptor is there, but failed to parse. Either it is invalid,
+ // or too old packet (after relevant video_structure_ changed),
+ // or too new packet (before relevant video_structure_ arrived).
+ // Drop such packet to be on the safe side.
+ // TODO(bugs.webrtc.org/10342): Stash too new packet.
+ RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
+ << " Failed to parse dependency descriptor.";
+ return kDropPacket;
+ }
+ if (dependency_descriptor.attached_structure != nullptr &&
+ !dependency_descriptor.first_packet_in_frame) {
+ RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
+ << "Invalid dependency descriptor: structure "
+ "attached to non first packet of a frame.";
+ return kDropPacket;
+ }
+ video_header->is_first_packet_in_frame =
+ dependency_descriptor.first_packet_in_frame;
+ video_header->is_last_packet_in_frame =
+ dependency_descriptor.last_packet_in_frame;
+
+ int64_t frame_id =
+ frame_id_unwrapper_.Unwrap(dependency_descriptor.frame_number);
+ auto& generic_descriptor_info = video_header->generic.emplace();
+ generic_descriptor_info.frame_id = frame_id;
+ generic_descriptor_info.spatial_index =
+ dependency_descriptor.frame_dependencies.spatial_id;
+ generic_descriptor_info.temporal_index =
+ dependency_descriptor.frame_dependencies.temporal_id;
+ for (int fdiff : dependency_descriptor.frame_dependencies.frame_diffs) {
+ generic_descriptor_info.dependencies.push_back(frame_id - fdiff);
+ }
+ generic_descriptor_info.decode_target_indications =
+ dependency_descriptor.frame_dependencies.decode_target_indications;
+ if (dependency_descriptor.resolution) {
+ video_header->width = dependency_descriptor.resolution->Width();
+ video_header->height = dependency_descriptor.resolution->Height();
+ }
+
+ // FrameDependencyStructure is sent in dependency descriptor of the first
+ // packet of a key frame and required for parsed dependency descriptor in
+ // all the following packets until next key frame.
+ // Save it if there is a (potentially) new structure.
+ if (dependency_descriptor.attached_structure) {
+ RTC_DCHECK(dependency_descriptor.first_packet_in_frame);
+ if (video_structure_frame_id_ > frame_id) {
+ RTC_LOG(LS_WARNING)
+ << "Arrived key frame with id " << frame_id << " and structure id "
+ << dependency_descriptor.attached_structure->structure_id
+ << " is older than the latest received key frame with id "
+ << *video_structure_frame_id_ << " and structure id "
+ << video_structure_->structure_id;
+ return kDropPacket;
+ }
+ video_structure_ = std::move(dependency_descriptor.attached_structure);
+ video_structure_frame_id_ = frame_id;
+ video_header->frame_type = VideoFrameType::kVideoFrameKey;
+ } else {
+ video_header->frame_type = VideoFrameType::kVideoFrameDelta;
+ }
+ return kHasGenericDescriptor;
+ }
+
+ RtpGenericFrameDescriptor generic_frame_descriptor;
+ if (!rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension00>(
+ &generic_frame_descriptor)) {
+ return kNoGenericDescriptor;
+ }
+
+ video_header->is_first_packet_in_frame =
+ generic_frame_descriptor.FirstPacketInSubFrame();
+ video_header->is_last_packet_in_frame =
+ generic_frame_descriptor.LastPacketInSubFrame();
+
+ if (generic_frame_descriptor.FirstPacketInSubFrame()) {
+ video_header->frame_type =
+ generic_frame_descriptor.FrameDependenciesDiffs().empty()
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+
+ auto& generic_descriptor_info = video_header->generic.emplace();
+ int64_t frame_id =
+ frame_id_unwrapper_.Unwrap(generic_frame_descriptor.FrameId());
+ generic_descriptor_info.frame_id = frame_id;
+ generic_descriptor_info.spatial_index =
+ generic_frame_descriptor.SpatialLayer();
+ generic_descriptor_info.temporal_index =
+ generic_frame_descriptor.TemporalLayer();
+ for (uint16_t fdiff : generic_frame_descriptor.FrameDependenciesDiffs()) {
+ generic_descriptor_info.dependencies.push_back(frame_id - fdiff);
+ }
+ }
+ video_header->width = generic_frame_descriptor.Width();
+ video_header->height = generic_frame_descriptor.Height();
+ return kHasGenericDescriptor;
+}
+
+void RtpVideoStreamReceiver2::OnReceivedPayloadData(
+ rtc::CopyOnWriteBuffer codec_payload,
+ const RtpPacketReceived& rtp_packet,
+ const RTPVideoHeader& video) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ auto packet = std::make_unique<video_coding::PacketBuffer::Packet>(
+ rtp_packet, video, ntp_estimator_.Estimate(rtp_packet.Timestamp()),
+ clock_->TimeInMilliseconds());
+
+ // Try to extrapolate absolute capture time if it is missing.
+ packet->packet_info.set_absolute_capture_time(
+ absolute_capture_time_receiver_.OnReceivePacket(
+ AbsoluteCaptureTimeReceiver::GetSource(packet->packet_info.ssrc(),
+ packet->packet_info.csrcs()),
+ packet->packet_info.rtp_timestamp(),
+ // Assume frequency is the same one for all video frames.
+ kVideoPayloadTypeFrequency,
+ packet->packet_info.absolute_capture_time()));
+
+ RTPVideoHeader& video_header = packet->video_header;
+ video_header.rotation = kVideoRotation_0;
+ video_header.content_type = VideoContentType::UNSPECIFIED;
+ video_header.video_timing.flags = VideoSendTiming::kInvalid;
+ video_header.is_last_packet_in_frame |= rtp_packet.Marker();
+
+ if (const auto* vp9_header =
+ absl::get_if<RTPVideoHeaderVP9>(&video_header.video_type_header)) {
+ video_header.is_last_packet_in_frame |= vp9_header->end_of_frame;
+ video_header.is_first_packet_in_frame |= vp9_header->beginning_of_frame;
+ }
+
+ rtp_packet.GetExtension<VideoOrientation>(&video_header.rotation);
+ rtp_packet.GetExtension<VideoContentTypeExtension>(
+ &video_header.content_type);
+ rtp_packet.GetExtension<VideoTimingExtension>(&video_header.video_timing);
+ if (forced_playout_delay_max_ms_ && forced_playout_delay_min_ms_) {
+ video_header.playout_delay.max_ms = *forced_playout_delay_max_ms_;
+ video_header.playout_delay.min_ms = *forced_playout_delay_min_ms_;
+ } else {
+ rtp_packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
+ }
+
+ ParseGenericDependenciesResult generic_descriptor_state =
+ ParseGenericDependenciesExtension(rtp_packet, &video_header);
+ if (generic_descriptor_state == kDropPacket)
+ return;
+
+ // Color space should only be transmitted in the last packet of a frame,
+ // therefore, neglect it otherwise so that last_color_space_ is not reset by
+ // mistake.
+ if (video_header.is_last_packet_in_frame) {
+ video_header.color_space = rtp_packet.GetExtension<ColorSpaceExtension>();
+ if (video_header.color_space ||
+ video_header.frame_type == VideoFrameType::kVideoFrameKey) {
+ // Store color space since it's only transmitted when changed or for key
+ // frames. Color space will be cleared if a key frame is transmitted
+ // without color space information.
+ last_color_space_ = video_header.color_space;
+ } else if (last_color_space_) {
+ video_header.color_space = last_color_space_;
+ }
+ }
+
+ if (loss_notification_controller_) {
+ if (rtp_packet.recovered()) {
+ // TODO(bugs.webrtc.org/10336): Implement support for reordering.
+ RTC_LOG(LS_INFO)
+ << "LossNotificationController does not support reordering.";
+ } else if (generic_descriptor_state == kNoGenericDescriptor) {
+ RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
+ "frame descriptor, but it is missing.";
+ } else {
+ if (video_header.is_first_packet_in_frame) {
+ RTC_DCHECK(video_header.generic);
+ LossNotificationController::FrameDetails frame;
+ frame.is_keyframe =
+ video_header.frame_type == VideoFrameType::kVideoFrameKey;
+ frame.frame_id = video_header.generic->frame_id;
+ frame.frame_dependencies = video_header.generic->dependencies;
+ loss_notification_controller_->OnReceivedPacket(
+ rtp_packet.SequenceNumber(), &frame);
+ } else {
+ loss_notification_controller_->OnReceivedPacket(
+ rtp_packet.SequenceNumber(), nullptr);
+ }
+ }
+ }
+
+ if (nack_module_) {
+ const bool is_keyframe =
+ video_header.is_first_packet_in_frame &&
+ video_header.frame_type == VideoFrameType::kVideoFrameKey;
+
+ packet->times_nacked = nack_module_->OnReceivedPacket(
+ rtp_packet.SequenceNumber(), is_keyframe, rtp_packet.recovered());
+ } else {
+ packet->times_nacked = -1;
+ }
+
+ if (codec_payload.size() == 0) {
+ NotifyReceiverOfEmptyPacket(packet->seq_num);
+ rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
+ return;
+ }
+
+ if (packet->codec() == kVideoCodecH264) {
+ // Only when we start to receive packets will we know what payload type
+ // that will be used. When we know the payload type insert the correct
+ // sps/pps into the tracker.
+ if (packet->payload_type != last_payload_type_) {
+ last_payload_type_ = packet->payload_type;
+ InsertSpsPpsIntoTracker(packet->payload_type);
+ }
+
+ video_coding::H264SpsPpsTracker::FixedBitstream fixed =
+ tracker_.CopyAndFixBitstream(
+ rtc::MakeArrayView(codec_payload.cdata(), codec_payload.size()),
+ &packet->video_header);
+
+ switch (fixed.action) {
+ case video_coding::H264SpsPpsTracker::kRequestKeyframe:
+ rtcp_feedback_buffer_.RequestKeyFrame();
+ rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
+ ABSL_FALLTHROUGH_INTENDED;
+ case video_coding::H264SpsPpsTracker::kDrop:
+ return;
+ case video_coding::H264SpsPpsTracker::kInsert:
+ packet->video_payload = std::move(fixed.bitstream);
+ break;
+ }
+
+ } else {
+ packet->video_payload = std::move(codec_payload);
+ }
+
+ rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
+ frame_counter_.Add(packet->timestamp);
+ OnInsertedPacket(packet_buffer_.InsertPacket(std::move(packet)));
+}
+
+void RtpVideoStreamReceiver2::OnRecoveredPacket(const uint8_t* rtp_packet,
+ size_t rtp_packet_length) {
+ RtpPacketReceived packet;
+ if (!packet.Parse(rtp_packet, rtp_packet_length))
+ return;
+ if (packet.PayloadType() == config_.rtp.red_payload_type) {
+ RTC_LOG(LS_WARNING) << "Discarding recovered packet with RED encapsulation";
+ return;
+ }
+
+ packet.IdentifyExtensions(rtp_header_extensions_);
+ packet.set_payload_type_frequency(kVideoPayloadTypeFrequency);
+ // TODO(nisse): UlpfecReceiverImpl::ProcessReceivedFec passes both
+ // original (decapsulated) media packets and recovered packets to
+ // this callback. We need a way to distinguish, for setting
+ // packet.recovered() correctly. Ideally, move RED decapsulation out
+ // of the Ulpfec implementation.
+
+ ReceivePacket(packet);
+}
+
+// This method handles both regular RTP packets and packets recovered
+// via FlexFEC.
+void RtpVideoStreamReceiver2::OnRtpPacket(const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+
+ if (!receiving_) {
+ return;
+ }
+
+ if (!packet.recovered()) {
+ // TODO(nisse): Exclude out-of-order packets?
+ int64_t now_ms = clock_->TimeInMilliseconds();
+
+ last_received_rtp_timestamp_ = packet.Timestamp();
+ last_received_rtp_system_time_ms_ = now_ms;
+
+ // Periodically log the RTP header of incoming packets.
+ if (now_ms - last_packet_log_ms_ > kPacketLogIntervalMs) {
+ rtc::StringBuilder ss;
+ ss << "Packet received on SSRC: " << packet.Ssrc()
+ << " with payload type: " << static_cast<int>(packet.PayloadType())
+ << ", timestamp: " << packet.Timestamp()
+ << ", sequence number: " << packet.SequenceNumber()
+ << ", arrival time: " << packet.arrival_time_ms();
+ int32_t time_offset;
+ if (packet.GetExtension<TransmissionOffset>(&time_offset)) {
+ ss << ", toffset: " << time_offset;
+ }
+ uint32_t send_time;
+ if (packet.GetExtension<AbsoluteSendTime>(&send_time)) {
+ ss << ", abs send time: " << send_time;
+ }
+ RTC_LOG(LS_INFO) << ss.str();
+ last_packet_log_ms_ = now_ms;
+ }
+ }
+
+ ReceivePacket(packet);
+
+ // Update receive statistics after ReceivePacket.
+ // Receive statistics will be reset if the payload type changes (make sure
+ // that the first packet is included in the stats).
+ if (!packet.recovered()) {
+ rtp_receive_statistics_->OnRtpPacket(packet);
+ }
+
+ for (RtpPacketSinkInterface* secondary_sink : secondary_sinks_) {
+ secondary_sink->OnRtpPacket(packet);
+ }
+}
+
+void RtpVideoStreamReceiver2::RequestKeyFrame() {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ // TODO(bugs.webrtc.org/10336): Allow the sender to ignore key frame requests
+ // issued by anything other than the LossNotificationController if it (the
+ // sender) is relying on LNTF alone.
+ if (keyframe_request_sender_) {
+ keyframe_request_sender_->RequestKeyFrame();
+ } else {
+ rtp_rtcp_->SendPictureLossIndication();
+ }
+}
+
+void RtpVideoStreamReceiver2::SendLossNotification(
+ uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) {
+ RTC_DCHECK(config_.rtp.lntf.enabled);
+ rtp_rtcp_->SendLossNotification(last_decoded_seq_num, last_received_seq_num,
+ decodability_flag, buffering_allowed);
+}
+
+bool RtpVideoStreamReceiver2::IsUlpfecEnabled() const {
+ return config_.rtp.ulpfec_payload_type != -1;
+}
+
+bool RtpVideoStreamReceiver2::IsRetransmissionsEnabled() const {
+ return config_.rtp.nack.rtp_history_ms > 0;
+}
+
+void RtpVideoStreamReceiver2::RequestPacketRetransmit(
+ const std::vector<uint16_t>& sequence_numbers) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ rtp_rtcp_->SendNack(sequence_numbers);
+}
+
+bool RtpVideoStreamReceiver2::IsDecryptable() const {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ return frames_decryptable_;
+}
+
+void RtpVideoStreamReceiver2::OnInsertedPacket(
+ video_coding::PacketBuffer::InsertResult result) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ video_coding::PacketBuffer::Packet* first_packet = nullptr;
+ int max_nack_count;
+ int64_t min_recv_time;
+ int64_t max_recv_time;
+ std::vector<rtc::ArrayView<const uint8_t>> payloads;
+ RtpPacketInfos::vector_type packet_infos;
+
+ bool frame_boundary = true;
+ for (auto& packet : result.packets) {
+    // PacketBuffer promises frame boundaries are correctly set on each
+ // packet. Document that assumption with the DCHECKs.
+ RTC_DCHECK_EQ(frame_boundary, packet->is_first_packet_in_frame());
+ if (packet->is_first_packet_in_frame()) {
+ first_packet = packet.get();
+ max_nack_count = packet->times_nacked;
+ min_recv_time = packet->packet_info.receive_time_ms();
+ max_recv_time = packet->packet_info.receive_time_ms();
+ payloads.clear();
+ packet_infos.clear();
+ } else {
+ max_nack_count = std::max(max_nack_count, packet->times_nacked);
+ min_recv_time =
+ std::min(min_recv_time, packet->packet_info.receive_time_ms());
+ max_recv_time =
+ std::max(max_recv_time, packet->packet_info.receive_time_ms());
+ }
+ payloads.emplace_back(packet->video_payload);
+ packet_infos.push_back(packet->packet_info);
+
+ frame_boundary = packet->is_last_packet_in_frame();
+ if (packet->is_last_packet_in_frame()) {
+ auto depacketizer_it = payload_type_map_.find(first_packet->payload_type);
+ RTC_CHECK(depacketizer_it != payload_type_map_.end());
+
+ rtc::scoped_refptr<EncodedImageBuffer> bitstream =
+ depacketizer_it->second->AssembleFrame(payloads);
+ if (!bitstream) {
+ // Failed to assemble a frame. Discard and continue.
+ continue;
+ }
+
+ const video_coding::PacketBuffer::Packet& last_packet = *packet;
+ OnAssembledFrame(std::make_unique<video_coding::RtpFrameObject>(
+ first_packet->seq_num, //
+ last_packet.seq_num, //
+ last_packet.marker_bit, //
+ max_nack_count, //
+ min_recv_time, //
+ max_recv_time, //
+ first_packet->timestamp, //
+ first_packet->ntp_time_ms, //
+ last_packet.video_header.video_timing, //
+ first_packet->payload_type, //
+ first_packet->codec(), //
+ last_packet.video_header.rotation, //
+ last_packet.video_header.content_type, //
+ first_packet->video_header, //
+ last_packet.video_header.color_space, //
+ RtpPacketInfos(std::move(packet_infos)), //
+ std::move(bitstream)));
+ }
+ }
+ RTC_DCHECK(frame_boundary);
+ if (result.buffer_cleared) {
+ RequestKeyFrame();
+ }
+}
+
+void RtpVideoStreamReceiver2::OnAssembledFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ RTC_DCHECK(frame);
+
+ const absl::optional<RTPVideoHeader::GenericDescriptorInfo>& descriptor =
+ frame->GetRtpVideoHeader().generic;
+
+ if (loss_notification_controller_ && descriptor) {
+ loss_notification_controller_->OnAssembledFrame(
+ frame->first_seq_num(), descriptor->frame_id,
+ absl::c_linear_search(descriptor->decode_target_indications,
+ DecodeTargetIndication::kDiscardable),
+ descriptor->dependencies);
+ }
+
+ // If frames arrive before a key frame, they would not be decodable.
+ // In that case, request a key frame ASAP.
+ if (!has_received_frame_) {
+ if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
+ // |loss_notification_controller_|, if present, would have already
+ // requested a key frame when the first packet for the non-key frame
+ // had arrived, so no need to replicate the request.
+ if (!loss_notification_controller_) {
+ RequestKeyFrame();
+ }
+ }
+ has_received_frame_ = true;
+ }
+
+  // Reset |reference_finder_| if |frame| is new and the codec has changed.
+ if (current_codec_) {
+ bool frame_is_newer =
+ AheadOf(frame->Timestamp(), last_assembled_frame_rtp_timestamp_);
+
+ if (frame->codec_type() != current_codec_) {
+ if (frame_is_newer) {
+ // When we reset the |reference_finder_| we don't want new picture ids
+ // to overlap with old picture ids. To ensure that doesn't happen we
+ // start from the |last_completed_picture_id_| and add an offset in case
+ // of reordering.
+ reference_finder_ =
+ std::make_unique<video_coding::RtpFrameReferenceFinder>(
+ this, last_completed_picture_id_ +
+ std::numeric_limits<uint16_t>::max());
+ current_codec_ = frame->codec_type();
+ } else {
+ // Old frame from before the codec switch, discard it.
+ return;
+ }
+ }
+
+ if (frame_is_newer) {
+ last_assembled_frame_rtp_timestamp_ = frame->Timestamp();
+ }
+ } else {
+ current_codec_ = frame->codec_type();
+ last_assembled_frame_rtp_timestamp_ = frame->Timestamp();
+ }
+
+ if (buffered_frame_decryptor_ != nullptr) {
+ buffered_frame_decryptor_->ManageEncryptedFrame(std::move(frame));
+ } else if (frame_transformer_delegate_) {
+ frame_transformer_delegate_->TransformFrame(std::move(frame));
+ } else {
+ reference_finder_->ManageFrame(std::move(frame));
+ }
+}
+
+void RtpVideoStreamReceiver2::OnCompleteFrame(
+ std::unique_ptr<video_coding::EncodedFrame> frame) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ video_coding::RtpFrameObject* rtp_frame =
+ static_cast<video_coding::RtpFrameObject*>(frame.get());
+ last_seq_num_for_pic_id_[rtp_frame->id.picture_id] =
+ rtp_frame->last_seq_num();
+
+ last_completed_picture_id_ =
+ std::max(last_completed_picture_id_, frame->id.picture_id);
+ complete_frame_callback_->OnCompleteFrame(std::move(frame));
+}
+
+void RtpVideoStreamReceiver2::OnDecryptedFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ reference_finder_->ManageFrame(std::move(frame));
+}
+
+void RtpVideoStreamReceiver2::OnDecryptionStatusChange(
+ FrameDecryptorInterface::Status status) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ // Called from BufferedFrameDecryptor::DecryptFrame.
+ frames_decryptable_ =
+ (status == FrameDecryptorInterface::Status::kOk) ||
+ (status == FrameDecryptorInterface::Status::kRecoverable);
+}
+
+void RtpVideoStreamReceiver2::SetFrameDecryptor(
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ if (buffered_frame_decryptor_ == nullptr) {
+ buffered_frame_decryptor_ =
+ std::make_unique<BufferedFrameDecryptor>(this, this);
+ }
+ buffered_frame_decryptor_->SetFrameDecryptor(std::move(frame_decryptor));
+}
+
+void RtpVideoStreamReceiver2::SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ frame_transformer_delegate_ =
+ new rtc::RefCountedObject<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ this, std::move(frame_transformer), rtc::Thread::Current(),
+ config_.rtp.remote_ssrc);
+ frame_transformer_delegate_->Init();
+}
+
+void RtpVideoStreamReceiver2::UpdateRtt(int64_t max_rtt_ms) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ if (nack_module_)
+ nack_module_->UpdateRtt(max_rtt_ms);
+}
+
+absl::optional<int64_t> RtpVideoStreamReceiver2::LastReceivedPacketMs() const {
+ return packet_buffer_.LastReceivedPacketMs();
+}
+
+absl::optional<int64_t> RtpVideoStreamReceiver2::LastReceivedKeyframePacketMs()
+ const {
+ return packet_buffer_.LastReceivedKeyframePacketMs();
+}
+
+void RtpVideoStreamReceiver2::AddSecondarySink(RtpPacketSinkInterface* sink) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ RTC_DCHECK(!absl::c_linear_search(secondary_sinks_, sink));
+ secondary_sinks_.push_back(sink);
+}
+
+void RtpVideoStreamReceiver2::RemoveSecondarySink(
+ const RtpPacketSinkInterface* sink) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ auto it = absl::c_find(secondary_sinks_, sink);
+ if (it == secondary_sinks_.end()) {
+ // We might be rolling-back a call whose setup failed mid-way. In such a
+ // case, it's simpler to remove "everything" rather than remember what
+ // has already been added.
+ RTC_LOG(LS_WARNING) << "Removal of unknown sink.";
+ return;
+ }
+ secondary_sinks_.erase(it);
+}
+
+void RtpVideoStreamReceiver2::ManageFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ reference_finder_->ManageFrame(std::move(frame));
+}
+
+void RtpVideoStreamReceiver2::ReceivePacket(const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ if (packet.payload_size() == 0) {
+ // Padding or keep-alive packet.
+ // TODO(nisse): Could drop empty packets earlier, but need to figure out how
+ // they should be counted in stats.
+ NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
+ return;
+ }
+ if (packet.PayloadType() == config_.rtp.red_payload_type) {
+ ParseAndHandleEncapsulatingHeader(packet);
+ return;
+ }
+
+ const auto type_it = payload_type_map_.find(packet.PayloadType());
+ if (type_it == payload_type_map_.end()) {
+ return;
+ }
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload =
+ type_it->second->Parse(packet.PayloadBuffer());
+ if (parsed_payload == absl::nullopt) {
+ RTC_LOG(LS_WARNING) << "Failed parsing payload.";
+ return;
+ }
+
+ OnReceivedPayloadData(std::move(parsed_payload->video_payload), packet,
+ parsed_payload->video_header);
+}
+
+void RtpVideoStreamReceiver2::ParseAndHandleEncapsulatingHeader(
+ const RtpPacketReceived& packet) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ if (packet.PayloadType() == config_.rtp.red_payload_type &&
+ packet.payload_size() > 0) {
+ if (packet.payload()[0] == config_.rtp.ulpfec_payload_type) {
+ // Notify video_receiver about received FEC packets to avoid NACKing these
+ // packets.
+ NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
+ }
+ if (!ulpfec_receiver_->AddReceivedRedPacket(
+ packet, config_.rtp.ulpfec_payload_type)) {
+ return;
+ }
+ ulpfec_receiver_->ProcessReceivedFec();
+ }
+}
+
+// In the case of a video stream without picture ids and no rtx the
+// RtpFrameReferenceFinder will need to know about padding to
+// correctly calculate frame references.
+void RtpVideoStreamReceiver2::NotifyReceiverOfEmptyPacket(uint16_t seq_num) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+
+ reference_finder_->PaddingReceived(seq_num);
+
+ OnInsertedPacket(packet_buffer_.InsertPadding(seq_num));
+ if (nack_module_) {
+ nack_module_->OnReceivedPacket(seq_num, /* is_keyframe = */ false,
+                                   /* is_recovered = */ false);
+ }
+ if (loss_notification_controller_) {
+ // TODO(bugs.webrtc.org/10336): Handle empty packets.
+ RTC_LOG(LS_WARNING)
+ << "LossNotificationController does not expect empty packets.";
+ }
+}
+
+bool RtpVideoStreamReceiver2::DeliverRtcp(const uint8_t* rtcp_packet,
+ size_t rtcp_packet_length) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+
+ if (!receiving_) {
+ return false;
+ }
+
+ rtp_rtcp_->IncomingRtcpPacket(rtcp_packet, rtcp_packet_length);
+
+ int64_t rtt = 0;
+ rtp_rtcp_->RTT(config_.rtp.remote_ssrc, &rtt, nullptr, nullptr, nullptr);
+ if (rtt == 0) {
+ // Waiting for valid rtt.
+ return true;
+ }
+ uint32_t ntp_secs = 0;
+ uint32_t ntp_frac = 0;
+ uint32_t rtp_timestamp = 0;
+  uint32_t received_ntp_secs = 0;
+  uint32_t received_ntp_frac = 0;
+  if (rtp_rtcp_->RemoteNTP(&ntp_secs, &ntp_frac, &received_ntp_secs,
+                           &received_ntp_frac, &rtp_timestamp) != 0) {
+    // Waiting for RTCP.
+    return true;
+  }
+  NtpTime received_ntp(received_ntp_secs, received_ntp_frac);
+  int64_t time_since_received =
+      clock_->CurrentNtpInMilliseconds() - received_ntp.ToMs();
+  // Don't use old SRs to estimate time.
+  if (time_since_received <= 1) {
+ ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp);
+ absl::optional<int64_t> remote_to_local_clock_offset_ms =
+ ntp_estimator_.EstimateRemoteToLocalClockOffsetMs();
+ if (remote_to_local_clock_offset_ms.has_value()) {
+ absolute_capture_time_receiver_.SetRemoteToLocalClockOffset(
+ Int64MsToQ32x32(*remote_to_local_clock_offset_ms));
+ }
+ }
+
+ return true;
+}
+
+void RtpVideoStreamReceiver2::FrameContinuous(int64_t picture_id) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ if (!nack_module_)
+ return;
+
+ int seq_num = -1;
+ auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
+ if (seq_num_it != last_seq_num_for_pic_id_.end())
+ seq_num = seq_num_it->second;
+ if (seq_num != -1)
+ nack_module_->ClearUpTo(seq_num);
+}
+
+void RtpVideoStreamReceiver2::FrameDecoded(int64_t picture_id) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ // Running on the decoder thread.
+ int seq_num = -1;
+ auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
+ if (seq_num_it != last_seq_num_for_pic_id_.end()) {
+ seq_num = seq_num_it->second;
+ last_seq_num_for_pic_id_.erase(last_seq_num_for_pic_id_.begin(),
+ ++seq_num_it);
+ }
+
+ if (seq_num != -1) {
+ packet_buffer_.ClearTo(seq_num);
+ reference_finder_->ClearTo(seq_num);
+ }
+}
+
+void RtpVideoStreamReceiver2::SignalNetworkState(NetworkState state) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ rtp_rtcp_->SetRTCPStatus(state == kNetworkUp ? config_.rtp.rtcp_mode
+ : RtcpMode::kOff);
+}
+
+void RtpVideoStreamReceiver2::StartReceive() {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ receiving_ = true;
+}
+
+void RtpVideoStreamReceiver2::StopReceive() {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ receiving_ = false;
+}
+
+void RtpVideoStreamReceiver2::UpdateHistograms() {
+ FecPacketCounter counter = ulpfec_receiver_->GetPacketCounter();
+ if (counter.first_packet_time_ms == -1)
+ return;
+
+ int64_t elapsed_sec =
+ (clock_->TimeInMilliseconds() - counter.first_packet_time_ms) / 1000;
+ if (elapsed_sec < metrics::kMinRunTimeInSeconds)
+ return;
+
+ if (counter.num_packets > 0) {
+ RTC_HISTOGRAM_PERCENTAGE(
+ "WebRTC.Video.ReceivedFecPacketsInPercent",
+ static_cast<int>(counter.num_fec_packets * 100 / counter.num_packets));
+ }
+ if (counter.num_fec_packets > 0) {
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.RecoveredMediaPacketsInPercentOfFec",
+ static_cast<int>(counter.num_recovered_packets *
+ 100 / counter.num_fec_packets));
+ }
+ if (config_.rtp.ulpfec_payload_type != -1) {
+ RTC_HISTOGRAM_COUNTS_10000(
+ "WebRTC.Video.FecBitrateReceivedInKbps",
+ static_cast<int>(counter.num_bytes * 8 / elapsed_sec / 1000));
+ }
+}
+
+void RtpVideoStreamReceiver2::InsertSpsPpsIntoTracker(uint8_t payload_type) {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+
+ auto codec_params_it = pt_codec_params_.find(payload_type);
+ if (codec_params_it == pt_codec_params_.end())
+ return;
+
+ RTC_LOG(LS_INFO) << "Found out of band supplied codec parameters for"
+ " payload type: "
+ << static_cast<int>(payload_type);
+
+ H264SpropParameterSets sprop_decoder;
+ auto sprop_base64_it =
+ codec_params_it->second.find(cricket::kH264FmtpSpropParameterSets);
+
+ if (sprop_base64_it == codec_params_it->second.end())
+ return;
+
+ if (!sprop_decoder.DecodeSprop(sprop_base64_it->second.c_str()))
+ return;
+
+ tracker_.InsertSpsPpsNalus(sprop_decoder.sps_nalu(),
+ sprop_decoder.pps_nalu());
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver2.h b/chromium/third_party/webrtc/video/rtp_video_stream_receiver2.h
new file mode 100644
index 00000000000..d82a7abbfe0
--- /dev/null
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver2.h
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_RTP_VIDEO_STREAM_RECEIVER2_H_
+#define VIDEO_RTP_VIDEO_STREAM_RECEIVER2_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/crypto/frame_decryptor_interface.h"
+#include "api/video/color_space.h"
+#include "api/video_codecs/video_codec.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "call/syncable.h"
+#include "call/video_receive_stream.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/absolute_capture_time_receiver.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "modules/video_coding/h264_sps_pps_tracker.h"
+#include "modules/video_coding/loss_notification_controller.h"
+#include "modules/video_coding/packet_buffer.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "modules/video_coding/unique_timestamp_counter.h"
+#include "rtc_base/constructor_magic.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "rtc_base/synchronization/sequence_checker.h"
+#include "rtc_base/thread_annotations.h"
+#include "video/buffered_frame_decryptor.h"
+#include "video/rtp_video_stream_receiver_frame_transformer_delegate.h"
+
+namespace webrtc {
+
+class NackModule2;
+class PacketRouter;
+class ProcessThread;
+class ReceiveStatistics;
+class RtcpRttStats;
+class RtpPacketReceived;
+class Transport;
+class UlpfecReceiver;
+
+class RtpVideoStreamReceiver2 : public LossNotificationSender,
+ public RecoveredPacketReceiver,
+ public RtpPacketSinkInterface,
+ public KeyFrameRequestSender,
+ public video_coding::OnCompleteFrameCallback,
+ public OnDecryptedFrameCallback,
+ public OnDecryptionStatusChangeCallback,
+ public RtpVideoFrameReceiver {
+ public:
+ RtpVideoStreamReceiver2(
+ TaskQueueBase* current_queue,
+ Clock* clock,
+ Transport* transport,
+ RtcpRttStats* rtt_stats,
+ // The packet router is optional; if provided, the RtpRtcp module for this
+ // stream is registered as a candidate for sending REMB and transport
+ // feedback.
+ PacketRouter* packet_router,
+ const VideoReceiveStream::Config* config,
+ ReceiveStatistics* rtp_receive_statistics,
+ RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
+ RtcpCnameCallback* rtcp_cname_callback,
+ ProcessThread* process_thread,
+ NackSender* nack_sender,
+ // The KeyFrameRequestSender is optional; if not provided, key frame
+ // requests are sent via the internal RtpRtcp module.
+ KeyFrameRequestSender* keyframe_request_sender,
+ video_coding::OnCompleteFrameCallback* complete_frame_callback,
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer);
+ ~RtpVideoStreamReceiver2() override;
+
+ void AddReceiveCodec(const VideoCodec& video_codec,
+ const std::map<std::string, std::string>& codec_params,
+ bool raw_payload);
+
+ void StartReceive();
+ void StopReceive();
+
+ // Produces the transport-related timestamps; current_delay_ms is left unset.
+ absl::optional<Syncable::Info> GetSyncInfo() const;
+
+ bool DeliverRtcp(const uint8_t* rtcp_packet, size_t rtcp_packet_length);
+
+ void FrameContinuous(int64_t seq_num);
+
+ void FrameDecoded(int64_t seq_num);
+
+ void SignalNetworkState(NetworkState state);
+
+ // Returns number of different frames seen.
+ int GetUniqueFramesSeen() const {
+ RTC_DCHECK_RUN_ON(&worker_task_checker_);
+ return frame_counter_.GetUniqueSeen();
+ }
+
+ // Implements RtpPacketSinkInterface.
+ void OnRtpPacket(const RtpPacketReceived& packet) override;
+
+ // TODO(philipel): Stop using VCMPacket in the new jitter buffer and then
+ // remove this function. Public only for tests.
+ void OnReceivedPayloadData(rtc::CopyOnWriteBuffer codec_payload,
+ const RtpPacketReceived& rtp_packet,
+ const RTPVideoHeader& video);
+
+ // Implements RecoveredPacketReceiver.
+ void OnRecoveredPacket(const uint8_t* packet, size_t packet_length) override;
+
+ // Send an RTCP keyframe request.
+ void RequestKeyFrame() override;
+
+ // Implements LossNotificationSender.
+ void SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) override;
+
+ bool IsUlpfecEnabled() const;
+ bool IsRetransmissionsEnabled() const;
+
+ // Returns true if a decryptor is attached and frames can be decrypted.
+ // Updated by OnDecryptionStatusChangeCallback. Note this refers to Frame
+ // Decryption not SRTP.
+ bool IsDecryptable() const;
+
+ // Don't use, still experimental.
+ void RequestPacketRetransmit(const std::vector<uint16_t>& sequence_numbers);
+
+ // Implements OnCompleteFrameCallback.
+ void OnCompleteFrame(
+ std::unique_ptr<video_coding::EncodedFrame> frame) override;
+
+ // Implements OnDecryptedFrameCallback.
+ void OnDecryptedFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame) override;
+
+ // Implements OnDecryptionStatusChangeCallback.
+ void OnDecryptionStatusChange(
+ FrameDecryptorInterface::Status status) override;
+
+ // Optionally set a frame decryptor after a stream has started. This will not
+ // reset the decoder state.
+ void SetFrameDecryptor(
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor);
+
+ // Sets a frame transformer after a stream has started, if no transformer
+ // has previously been set. Does not reset the decoder state.
+ void SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer);
+
+ // Called by VideoReceiveStream when stats are updated.
+ void UpdateRtt(int64_t max_rtt_ms);
+
+ absl::optional<int64_t> LastReceivedPacketMs() const;
+ absl::optional<int64_t> LastReceivedKeyframePacketMs() const;
+
+ // RtpDemuxer only forwards a given RTP packet to one sink. However, some
+ // sinks, such as FlexFEC, might wish to be informed of all of the packets
+ // a given sink receives (or any set of sinks). They may do so by registering
+ // themselves as secondary sinks.
+ void AddSecondarySink(RtpPacketSinkInterface* sink);
+ void RemoveSecondarySink(const RtpPacketSinkInterface* sink);
+
+ private:
+ // Implements RtpVideoFrameReceiver.
+ void ManageFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame) override;
+
+ // Used for buffering RTCP feedback messages and sending them all together.
+ // Note:
+ // 1. Key frame requests and NACKs are mutually exclusive, with the
+ // former taking precedence over the latter.
+ // 2. Loss notifications are orthogonal to either. (That is, may be sent
+ // alongside either.)
+ class RtcpFeedbackBuffer : public KeyFrameRequestSender,
+ public NackSender,
+ public LossNotificationSender {
+ public:
+ RtcpFeedbackBuffer(KeyFrameRequestSender* key_frame_request_sender,
+ NackSender* nack_sender,
+ LossNotificationSender* loss_notification_sender);
+
+ ~RtcpFeedbackBuffer() override = default;
+
+ // KeyFrameRequestSender implementation.
+ void RequestKeyFrame() override;
+
+ // NackSender implementation.
+ void SendNack(const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed) override;
+
+ // LossNotificationSender implementation.
+ void SendLossNotification(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag,
+ bool buffering_allowed) override;
+
+ // Send all RTCP feedback messages buffered thus far.
+ void SendBufferedRtcpFeedback();
+
+ private:
+ // LNTF-related state.
+ struct LossNotificationState {
+ LossNotificationState(uint16_t last_decoded_seq_num,
+ uint16_t last_received_seq_num,
+ bool decodability_flag)
+ : last_decoded_seq_num(last_decoded_seq_num),
+ last_received_seq_num(last_received_seq_num),
+ decodability_flag(decodability_flag) {}
+
+ uint16_t last_decoded_seq_num;
+ uint16_t last_received_seq_num;
+ bool decodability_flag;
+ };
+
+ SequenceChecker worker_task_checker_;
+ KeyFrameRequestSender* const key_frame_request_sender_;
+ NackSender* const nack_sender_;
+ LossNotificationSender* const loss_notification_sender_;
+
+ // Key-frame-request-related state.
+ bool request_key_frame_ RTC_GUARDED_BY(worker_task_checker_);
+
+ // NACK-related state.
+ std::vector<uint16_t> nack_sequence_numbers_
+ RTC_GUARDED_BY(worker_task_checker_);
+
+ absl::optional<LossNotificationState> lntf_state_
+ RTC_GUARDED_BY(worker_task_checker_);
+ };
+ enum ParseGenericDependenciesResult {
+ kDropPacket,
+ kHasGenericDescriptor,
+ kNoGenericDescriptor
+ };
+
+ // Entry point doing non-stats work for a received packet. Called
+ // for the same packet both before and after RED decapsulation.
+ void ReceivePacket(const RtpPacketReceived& packet);
+ // Parses and handles RED headers.
+ // This function assumes that it's being called from only one thread.
+ void ParseAndHandleEncapsulatingHeader(const RtpPacketReceived& packet);
+ void NotifyReceiverOfEmptyPacket(uint16_t seq_num);
+ void UpdateHistograms();
+ bool IsRedEnabled() const;
+ void InsertSpsPpsIntoTracker(uint8_t payload_type);
+ void OnInsertedPacket(video_coding::PacketBuffer::InsertResult result);
+ ParseGenericDependenciesResult ParseGenericDependenciesExtension(
+ const RtpPacketReceived& rtp_packet,
+ RTPVideoHeader* video_header) RTC_RUN_ON(worker_task_checker_);
+ void OnAssembledFrame(std::unique_ptr<video_coding::RtpFrameObject> frame);
+
+ Clock* const clock_;
+ // Ownership of this object lies with VideoReceiveStream, which owns |this|.
+ const VideoReceiveStream::Config& config_;
+ PacketRouter* const packet_router_;
+ ProcessThread* const process_thread_;
+
+ RemoteNtpTimeEstimator ntp_estimator_;
+
+ RtpHeaderExtensionMap rtp_header_extensions_;
+ // Set by the field trial WebRTC-ForcePlayoutDelay to override any playout
+ // delay that is specified in the received packets.
+ FieldTrialOptional<int> forced_playout_delay_max_ms_;
+ FieldTrialOptional<int> forced_playout_delay_min_ms_;
+ ReceiveStatistics* const rtp_receive_statistics_;
+ std::unique_ptr<UlpfecReceiver> ulpfec_receiver_;
+
+ SequenceChecker worker_task_checker_;
+ bool receiving_ RTC_GUARDED_BY(worker_task_checker_);
+ int64_t last_packet_log_ms_ RTC_GUARDED_BY(worker_task_checker_);
+
+ const std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;
+
+ video_coding::OnCompleteFrameCallback* complete_frame_callback_;
+ KeyFrameRequestSender* const keyframe_request_sender_;
+
+ RtcpFeedbackBuffer rtcp_feedback_buffer_;
+ const std::unique_ptr<NackModule2> nack_module_;
+ std::unique_ptr<LossNotificationController> loss_notification_controller_;
+
+ video_coding::PacketBuffer packet_buffer_;
+ UniqueTimestampCounter frame_counter_ RTC_GUARDED_BY(worker_task_checker_);
+ SeqNumUnwrapper<uint16_t> frame_id_unwrapper_
+ RTC_GUARDED_BY(worker_task_checker_);
+
+ // Video structure provided in the dependency descriptor in a first packet
+ // of a key frame. It is required to parse dependency descriptor in the
+ // following delta packets.
+ std::unique_ptr<FrameDependencyStructure> video_structure_
+ RTC_GUARDED_BY(worker_task_checker_);
+ // Frame id of the last frame with the attached video structure.
+  // absl::nullopt when `video_structure_ == nullptr`.
+ absl::optional<int64_t> video_structure_frame_id_
+ RTC_GUARDED_BY(worker_task_checker_);
+
+ std::unique_ptr<video_coding::RtpFrameReferenceFinder> reference_finder_
+ RTC_GUARDED_BY(worker_task_checker_);
+ absl::optional<VideoCodecType> current_codec_
+ RTC_GUARDED_BY(worker_task_checker_);
+ uint32_t last_assembled_frame_rtp_timestamp_
+ RTC_GUARDED_BY(worker_task_checker_);
+
+ std::map<int64_t, uint16_t> last_seq_num_for_pic_id_
+ RTC_GUARDED_BY(worker_task_checker_);
+ video_coding::H264SpsPpsTracker tracker_ RTC_GUARDED_BY(worker_task_checker_);
+
+ // Maps payload id to the depacketizer.
+ std::map<uint8_t, std::unique_ptr<VideoRtpDepacketizer>> payload_type_map_
+ RTC_GUARDED_BY(worker_task_checker_);
+
+ // TODO(johan): Remove pt_codec_params_ once
+ // https://bugs.chromium.org/p/webrtc/issues/detail?id=6883 is resolved.
+ // Maps a payload type to a map of out-of-band supplied codec parameters.
+ std::map<uint8_t, std::map<std::string, std::string>> pt_codec_params_
+ RTC_GUARDED_BY(worker_task_checker_);
+ int16_t last_payload_type_ RTC_GUARDED_BY(worker_task_checker_) = -1;
+
+ bool has_received_frame_ RTC_GUARDED_BY(worker_task_checker_);
+
+ std::vector<RtpPacketSinkInterface*> secondary_sinks_
+ RTC_GUARDED_BY(worker_task_checker_);
+
+ absl::optional<uint32_t> last_received_rtp_timestamp_
+ RTC_GUARDED_BY(worker_task_checker_);
+ absl::optional<int64_t> last_received_rtp_system_time_ms_
+ RTC_GUARDED_BY(worker_task_checker_);
+
+ // Handles incoming encrypted frames and forwards them to the
+ // rtp_reference_finder if they are decryptable.
+ std::unique_ptr<BufferedFrameDecryptor> buffered_frame_decryptor_
+ RTC_PT_GUARDED_BY(worker_task_checker_);
+ bool frames_decryptable_ RTC_GUARDED_BY(worker_task_checker_);
+ absl::optional<ColorSpace> last_color_space_;
+
+ AbsoluteCaptureTimeReceiver absolute_capture_time_receiver_
+ RTC_GUARDED_BY(worker_task_checker_);
+
+ int64_t last_completed_picture_id_ = 0;
+
+ rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate>
+ frame_transformer_delegate_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_RTP_VIDEO_STREAM_RECEIVER2_H_
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver2_unittest.cc b/chromium/third_party/webrtc/video/rtp_video_stream_receiver2_unittest.cc
new file mode 100644
index 00000000000..cda0fe5cfaf
--- /dev/null
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver2_unittest.cc
@@ -0,0 +1,1221 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/rtp_video_stream_receiver2.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/video/video_codec_type.h"
+#include "api/video/video_frame_type.h"
+#include "common_video/h264/h264_common.h"
+#include "media/base/media_constants.h"
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_format_vp9.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/utility/include/process_thread.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/byte_buffer.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/field_trial.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_frame_transformer.h"
+#include "test/time_controller/simulated_task_queue.h"
+
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::Invoke;
+using ::testing::SizeIs;
+using ::testing::Values;
+
+namespace webrtc {
+
+namespace {
+
+const uint8_t kH264StartCode[] = {0x00, 0x00, 0x00, 0x01};
+
+std::vector<uint64_t> GetAbsoluteCaptureTimestamps(
+ const video_coding::EncodedFrame* frame) {
+ std::vector<uint64_t> result;
+ for (const auto& packet_info : frame->PacketInfos()) {
+ if (packet_info.absolute_capture_time()) {
+ result.push_back(
+ packet_info.absolute_capture_time()->absolute_capture_timestamp);
+ }
+ }
+ return result;
+}
+
+RTPVideoHeader GetGenericVideoHeader(VideoFrameType frame_type) {
+ RTPVideoHeader video_header;
+ video_header.is_first_packet_in_frame = true;
+ video_header.is_last_packet_in_frame = true;
+ video_header.codec = kVideoCodecGeneric;
+ video_header.frame_type = frame_type;
+ return video_header;
+}
+
+class MockTransport : public Transport {
+ public:
+ MOCK_METHOD(bool,
+ SendRtp,
+ (const uint8_t*, size_t length, const PacketOptions& options),
+ (override));
+ MOCK_METHOD(bool, SendRtcp, (const uint8_t*, size_t length), (override));
+};
+
+class MockNackSender : public NackSender {
+ public:
+ MOCK_METHOD(void,
+ SendNack,
+ (const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed),
+ (override));
+};
+
+class MockKeyFrameRequestSender : public KeyFrameRequestSender {
+ public:
+ MOCK_METHOD(void, RequestKeyFrame, (), (override));
+};
+
+class MockOnCompleteFrameCallback
+ : public video_coding::OnCompleteFrameCallback {
+ public:
+ MOCK_METHOD(void, DoOnCompleteFrame, (video_coding::EncodedFrame*), ());
+ MOCK_METHOD(void,
+ DoOnCompleteFrameFailNullptr,
+ (video_coding::EncodedFrame*),
+ ());
+ MOCK_METHOD(void,
+ DoOnCompleteFrameFailLength,
+ (video_coding::EncodedFrame*),
+ ());
+ MOCK_METHOD(void,
+ DoOnCompleteFrameFailBitstream,
+ (video_coding::EncodedFrame*),
+ ());
+ void OnCompleteFrame(
+ std::unique_ptr<video_coding::EncodedFrame> frame) override {
+ if (!frame) {
+ DoOnCompleteFrameFailNullptr(nullptr);
+ return;
+ }
+ EXPECT_EQ(buffer_.Length(), frame->size());
+ if (buffer_.Length() != frame->size()) {
+ DoOnCompleteFrameFailLength(frame.get());
+ return;
+ }
+ if (frame->size() != buffer_.Length() ||
+ memcmp(buffer_.Data(), frame->data(), buffer_.Length()) != 0) {
+ DoOnCompleteFrameFailBitstream(frame.get());
+ return;
+ }
+ DoOnCompleteFrame(frame.get());
+ }
+
+ void ClearExpectedBitstream() { buffer_.Clear(); }
+
+ void AppendExpectedBitstream(const uint8_t data[], size_t size_in_bytes) {
+ // TODO(Johan): Let rtc::ByteBuffer handle uint8_t* instead of char*.
+ buffer_.WriteBytes(reinterpret_cast<const char*>(data), size_in_bytes);
+ }
+ rtc::ByteBufferWriter buffer_;
+};
+
+class MockRtpPacketSink : public RtpPacketSinkInterface {
+ public:
+ MOCK_METHOD(void, OnRtpPacket, (const RtpPacketReceived&), (override));
+};
+
+constexpr uint32_t kSsrc = 111;
+constexpr uint16_t kSequenceNumber = 222;
+constexpr int kPayloadType = 100;
+constexpr int kRedPayloadType = 125;
+
+std::unique_ptr<RtpPacketReceived> CreateRtpPacketReceived() {
+ auto packet = std::make_unique<RtpPacketReceived>();
+ packet->SetSsrc(kSsrc);
+ packet->SetSequenceNumber(kSequenceNumber);
+ packet->SetPayloadType(kPayloadType);
+ return packet;
+}
+
+MATCHER_P(SamePacketAs, other, "") {
+ return arg.Ssrc() == other.Ssrc() &&
+ arg.SequenceNumber() == other.SequenceNumber();
+}
+
+} // namespace
+
+class RtpVideoStreamReceiver2Test : public ::testing::Test {
+ public:
+ RtpVideoStreamReceiver2Test() : RtpVideoStreamReceiver2Test("") {}
+ explicit RtpVideoStreamReceiver2Test(std::string field_trials)
+ : override_field_trials_(field_trials),
+ config_(CreateConfig()),
+ process_thread_(ProcessThread::Create("TestThread")) {
+ rtp_receive_statistics_ =
+ ReceiveStatistics::Create(Clock::GetRealTimeClock());
+ rtp_video_stream_receiver_ = std::make_unique<RtpVideoStreamReceiver2>(
+ TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_,
+ nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr,
+ nullptr, process_thread_.get(), &mock_nack_sender_,
+ &mock_key_frame_request_sender_, &mock_on_complete_frame_callback_,
+ nullptr, nullptr);
+ VideoCodec codec;
+ codec.plType = kPayloadType;
+ codec.codecType = kVideoCodecGeneric;
+ rtp_video_stream_receiver_->AddReceiveCodec(codec, {},
+ /*raw_payload=*/false);
+ }
+
+ RTPVideoHeader GetDefaultH264VideoHeader() {
+ RTPVideoHeader video_header;
+ video_header.codec = kVideoCodecH264;
+ video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+ return video_header;
+ }
+
+ // TODO(Johan): refactor h264_sps_pps_tracker_unittests.cc to avoid duplicate
+ // code.
+ void AddSps(RTPVideoHeader* video_header,
+ uint8_t sps_id,
+ rtc::CopyOnWriteBuffer* data) {
+ NaluInfo info;
+ info.type = H264::NaluType::kSps;
+ info.sps_id = sps_id;
+ info.pps_id = -1;
+ data->AppendData({H264::NaluType::kSps, sps_id});
+ auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
+ h264.nalus[h264.nalus_length++] = info;
+ }
+
+ void AddPps(RTPVideoHeader* video_header,
+ uint8_t sps_id,
+ uint8_t pps_id,
+ rtc::CopyOnWriteBuffer* data) {
+ NaluInfo info;
+ info.type = H264::NaluType::kPps;
+ info.sps_id = sps_id;
+ info.pps_id = pps_id;
+ data->AppendData({H264::NaluType::kPps, pps_id});
+ auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
+ h264.nalus[h264.nalus_length++] = info;
+ }
+
+ // Records an IDR NaluInfo referencing |pps_id| in |video_header|'s H264
+ // header. Unlike AddSps/AddPps this does not append payload bytes; callers
+ // supply the IDR payload themselves.
+ void AddIdr(RTPVideoHeader* video_header, int pps_id) {
+ NaluInfo info;
+ info.type = H264::NaluType::kIdr;
+ info.sps_id = -1;
+ info.pps_id = pps_id;
+ auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
+ h264.nalus[h264.nalus_length++] = info;
+ }
+
+ protected:
+ // Builds the VideoReceiveStream config used by the fixture: fixed
+ // remote/local SSRCs and the RED payload type exercised by the RED tests.
+ static VideoReceiveStream::Config CreateConfig() {
+ VideoReceiveStream::Config config(nullptr);
+ config.rtp.remote_ssrc = 1111;
+ config.rtp.local_ssrc = 2222;
+ config.rtp.red_payload_type = kRedPayloadType;
+ return config;
+ }
+
+ // Task queue installed as the "current" queue so the receiver's
+ // thread-affinity checks are satisfied in tests.
+ TokenTaskQueue task_queue_;
+ TokenTaskQueue::CurrentTaskQueueSetter task_queue_setter_{&task_queue_};
+
+ const webrtc::test::ScopedFieldTrials override_field_trials_;
+ VideoReceiveStream::Config config_;
+ MockNackSender mock_nack_sender_;
+ MockKeyFrameRequestSender mock_key_frame_request_sender_;
+ MockTransport mock_transport_;
+ MockOnCompleteFrameCallback mock_on_complete_frame_callback_;
+ std::unique_ptr<ProcessThread> process_thread_;
+ std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+ std::unique_ptr<RtpVideoStreamReceiver2> rtp_video_stream_receiver_;
+};
+
+TEST_F(RtpVideoStreamReceiver2Test, CacheColorSpaceFromLastPacketOfKeyframe) {
+ // Test that color space is cached from the last packet of a key frame and
+ // that it's not reset by padding packets without color space.
+ constexpr int kVp9PayloadType = 99;
+ const ColorSpace kColorSpace(
+ ColorSpace::PrimaryID::kFILM, ColorSpace::TransferID::kBT2020_12,
+ ColorSpace::MatrixID::kBT2020_NCL, ColorSpace::RangeID::kFull);
+ const std::vector<uint8_t> kKeyFramePayload = {0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10};
+ const std::vector<uint8_t> kDeltaFramePayload = {0, 1, 2, 3, 4};
+
+ // Anonymous helper class that generates received packets.
+ class {
+ public:
+ // Packetizes |payload| as VP9 with a small max payload size so that the
+ // key frame payload above is split into exactly two packets.
+ void SetPayload(const std::vector<uint8_t>& payload,
+ VideoFrameType video_frame_type) {
+ video_frame_type_ = video_frame_type;
+ RtpPacketizer::PayloadSizeLimits pay_load_size_limits;
+ // Reduce max payload length to make sure the key frame generates two
+ // packets.
+ pay_load_size_limits.max_payload_len = 8;
+ RTPVideoHeaderVP9 rtp_video_header_vp9;
+ rtp_video_header_vp9.InitRTPVideoHeaderVP9();
+ rtp_video_header_vp9.inter_pic_predicted =
+ (video_frame_type == VideoFrameType::kVideoFrameDelta);
+ rtp_packetizer_ = std::make_unique<RtpPacketizerVp9>(
+ payload, pay_load_size_limits, rtp_video_header_vp9);
+ }
+
+ size_t NumPackets() { return rtp_packetizer_->NumPackets(); }
+ void SetColorSpace(const ColorSpace& color_space) {
+ color_space_ = color_space;
+ }
+
+ // Produces the next packet of the current frame; the color space
+ // extension is attached only to the last remaining packet of a key frame.
+ RtpPacketReceived NextPacket() {
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<ColorSpaceExtension>(1);
+ RtpPacketToSend packet_to_send(&extension_map);
+ packet_to_send.SetSequenceNumber(sequence_number_++);
+ packet_to_send.SetSsrc(kSsrc);
+ packet_to_send.SetPayloadType(kVp9PayloadType);
+ bool include_color_space =
+ (rtp_packetizer_->NumPackets() == 1u &&
+ video_frame_type_ == VideoFrameType::kVideoFrameKey);
+ if (include_color_space) {
+ EXPECT_TRUE(
+ packet_to_send.SetExtension<ColorSpaceExtension>(color_space_));
+ }
+ rtp_packetizer_->NextPacket(&packet_to_send);
+
+ // Round-trip through serialization so the result looks like a packet
+ // received off the wire.
+ RtpPacketReceived received_packet(&extension_map);
+ received_packet.Parse(packet_to_send.data(), packet_to_send.size());
+ return received_packet;
+ }
+
+ private:
+ uint16_t sequence_number_ = 0;
+ VideoFrameType video_frame_type_;
+ ColorSpace color_space_;
+ std::unique_ptr<RtpPacketizer> rtp_packetizer_;
+ } received_packet_generator;
+ received_packet_generator.SetColorSpace(kColorSpace);
+
+ // Prepare the receiver for VP9.
+ VideoCodec codec;
+ codec.plType = kVp9PayloadType;
+ codec.codecType = kVideoCodecVP9;
+ std::map<std::string, std::string> codec_params;
+ rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params,
+ /*raw_payload=*/false);
+
+ // Generate key frame packets.
+ received_packet_generator.SetPayload(kKeyFramePayload,
+ VideoFrameType::kVideoFrameKey);
+ EXPECT_EQ(received_packet_generator.NumPackets(), 2u);
+ RtpPacketReceived key_frame_packet1 = received_packet_generator.NextPacket();
+ RtpPacketReceived key_frame_packet2 = received_packet_generator.NextPacket();
+
+ // Generate delta frame packet.
+ received_packet_generator.SetPayload(kDeltaFramePayload,
+ VideoFrameType::kVideoFrameDelta);
+ EXPECT_EQ(received_packet_generator.NumPackets(), 1u);
+ RtpPacketReceived delta_frame_packet = received_packet_generator.NextPacket();
+
+ rtp_video_stream_receiver_->StartReceive();
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kKeyFramePayload.data(), kKeyFramePayload.size());
+
+ // Send the key frame and expect a callback with color space information.
+ EXPECT_FALSE(key_frame_packet1.GetExtension<ColorSpaceExtension>());
+ EXPECT_TRUE(key_frame_packet2.GetExtension<ColorSpaceExtension>());
+ rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
+ .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) {
+ ASSERT_TRUE(frame->EncodedImage().ColorSpace());
+ EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
+ }));
+ rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet2);
+ // Resend the first key frame packet to simulate padding for example.
+ rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1);
+
+ mock_on_complete_frame_callback_.ClearExpectedBitstream();
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kDeltaFramePayload.data(), kDeltaFramePayload.size());
+
+ // Expect delta frame to have color space set even though color space not
+ // included in the RTP packet.
+ EXPECT_FALSE(delta_frame_packet.GetExtension<ColorSpaceExtension>());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
+ .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) {
+ ASSERT_TRUE(frame->EncodedImage().ColorSpace());
+ EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace);
+ }));
+ rtp_video_stream_receiver_->OnRtpPacket(delta_frame_packet);
+}
+
+// A complete single-packet generic key frame should yield exactly one
+// complete-frame callback carrying the original payload bytes.
+TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrame) {
+ RtpPacketReceived rtp_packet;
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(1);
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+// The absolute capture time RTP header extension on a packet should be
+// visible in the PacketInfos of the resulting complete frame.
+TEST_F(RtpVideoStreamReceiver2Test, PacketInfoIsPropagatedIntoVideoFrames) {
+ constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
+ constexpr int kId0 = 1;
+
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
+ RtpPacketReceived rtp_packet(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
+ rtp_packet.SetSequenceNumber(1);
+ rtp_packet.SetTimestamp(1);
+ rtp_packet.SetSsrc(kSsrc);
+ rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
+ AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
+ /*estimated_capture_clock_offset=*/absl::nullopt});
+
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
+ .WillOnce(Invoke(
+ [kAbsoluteCaptureTimestamp](video_coding::EncodedFrame* frame) {
+ EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame),
+ ElementsAre(kAbsoluteCaptureTimestamp));
+ }));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+// When a later packet lacks the absolute capture time extension, the receiver
+// should still produce a frame with one extrapolated capture timestamp based
+// on the earlier packet that did carry the extension.
+TEST_F(RtpVideoStreamReceiver2Test,
+ MissingAbsoluteCaptureTimeIsFilledWithExtrapolatedValue) {
+ constexpr uint64_t kAbsoluteCaptureTimestamp = 12;
+ constexpr int kId0 = 1;
+
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
+ RtpPacketReceived rtp_packet(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
+
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
+ uint16_t sequence_number = 1;
+ uint32_t rtp_timestamp = 1;
+ rtp_packet.SetSequenceNumber(sequence_number);
+ rtp_packet.SetTimestamp(rtp_timestamp);
+ rtp_packet.SetSsrc(kSsrc);
+ rtp_packet.SetExtension<AbsoluteCaptureTimeExtension>(
+ AbsoluteCaptureTime{kAbsoluteCaptureTimestamp,
+ /*estimated_capture_clock_offset=*/absl::nullopt});
+
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+
+ // Rtp packet without absolute capture time.
+ rtp_packet = RtpPacketReceived(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(++sequence_number);
+ rtp_packet.SetTimestamp(++rtp_timestamp);
+ rtp_packet.SetSsrc(kSsrc);
+
+ // There is no absolute capture time in the second packet.
+ // Expect rtp video stream receiver to extrapolate it for the resulting video
+ // frame using absolute capture time from the previous packet.
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
+ .WillOnce(Invoke([](video_coding::EncodedFrame* frame) {
+ EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), SizeIs(1));
+ }));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+// Regression test: a RED packet whose payload is itself marked RED must not
+// cause unbounded recursive de-encapsulation. The test passes by simply not
+// hanging or crashing.
+TEST_F(RtpVideoStreamReceiver2Test,
+ NoInfiniteRecursionOnEncapsulatedRedPacket) {
+ const std::vector<uint8_t> data({
+ 0x80, // RTP version.
+ kRedPayloadType, // Payload type.
+ 0, 0, 0, 0, 0, 0, // Don't care.
+ 0, 0, 0x4, 0x57, // SSRC
+ kRedPayloadType, // RED header.
+ 0, 0, 0, 0, 0 // Don't care.
+ });
+ RtpPacketReceived packet;
+ EXPECT_TRUE(packet.Parse(data.data(), data.size()));
+ rtp_video_stream_receiver_->StartReceive();
+ rtp_video_stream_receiver_->OnRtpPacket(packet);
+}
+
+// A RED packet with a zero-length payload must be dropped without reading
+// past the buffer; relies on ASan to detect any overflow.
+TEST_F(RtpVideoStreamReceiver2Test,
+ DropsPacketWithRedPayloadTypeAndEmptyPayload) {
+ // Shadows the fixture-level constant so the re-created receiver uses this
+ // payload type.
+ const uint8_t kRedPayloadType = 125;
+ config_.rtp.red_payload_type = kRedPayloadType;
+ SetUp(); // re-create rtp_video_stream_receiver with red payload type.
+ // clang-format off
+ const uint8_t data[] = {
+ 0x80, // RTP version.
+ kRedPayloadType, // Payload type.
+ 0, 0, 0, 0, 0, 0, // Don't care.
+ 0, 0, 0x4, 0x57, // SSRC
+ // Empty rtp payload.
+ };
+ // clang-format on
+ RtpPacketReceived packet;
+ // Manually convert to CopyOnWriteBuffer to be sure capacity == size
+ // and asan bot can catch read buffer overflow.
+ EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(data)));
+ rtp_video_stream_receiver_->StartReceive();
+ rtp_video_stream_receiver_->OnRtpPacket(packet);
+ // Expect asan doesn't find anything.
+}
+
+// When the delivered bitstream differs from the expected bytes, the
+// bitstream-verification failure path of the callback should trigger.
+TEST_F(RtpVideoStreamReceiver2Test, GenericKeyFrameBitstreamError) {
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
+ rtp_packet.SetSequenceNumber(1);
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ // Deliberately differs from |data| in the last byte.
+ constexpr uint8_t expected_bitsteam[] = {1, 2, 3, 0xff};
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ expected_bitsteam, sizeof(expected_bitsteam));
+ EXPECT_CALL(mock_on_complete_frame_callback_,
+ DoOnCompleteFrameFailBitstream(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+// H264-specific fixture parameterized on a field-trial string so each test
+// runs both with and without WebRTC-SpsPpsIdrIsH264Keyframe.
+class RtpVideoStreamReceiver2TestH264
+ : public RtpVideoStreamReceiver2Test,
+ public ::testing::WithParamInterface<std::string> {
+ protected:
+ RtpVideoStreamReceiver2TestH264() : RtpVideoStreamReceiver2Test(GetParam()) {}
+};
+
+// Run the H264 suite with the default trials and with the
+// SpsPpsIdrIsH264Keyframe field trial enabled.
+INSTANTIATE_TEST_SUITE_P(SpsPpsIdrIsKeyframe,
+ RtpVideoStreamReceiver2TestH264,
+ Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
+
+// Fails on MSAN: https://bugs.chromium.org/p/webrtc/issues/detail?id=11376.
+#if defined(MEMORY_SANITIZER)
+#define MAYBE_InBandSpsPps DISABLED_InBandSpsPps
+#else
+#define MAYBE_InBandSpsPps InBandSpsPps
+#endif
+// Feeds SPS, PPS and then an IDR in-band; expects the assembled key frame
+// bitstream to contain start-code-prefixed SPS, PPS and IDR NALUs.
+TEST_P(RtpVideoStreamReceiver2TestH264, MAYBE_InBandSpsPps) {
+ rtc::CopyOnWriteBuffer sps_data;
+ RtpPacketReceived rtp_packet;
+ RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader();
+ AddSps(&sps_video_header, 0, &sps_data);
+ rtp_packet.SetSequenceNumber(0);
+ rtp_packet.SetPayloadType(kPayloadType);
+ sps_video_header.is_first_packet_in_frame = true;
+ sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(),
+ sps_data.size());
+ rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
+ sps_video_header);
+
+ rtc::CopyOnWriteBuffer pps_data;
+ RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
+ AddPps(&pps_video_header, 0, 1, &pps_data);
+ rtp_packet.SetSequenceNumber(1);
+ pps_video_header.is_first_packet_in_frame = true;
+ pps_video_header.frame_type = VideoFrameType::kEmptyFrame;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(),
+ pps_data.size());
+ rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
+ pps_video_header);
+
+ rtc::CopyOnWriteBuffer idr_data;
+ RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
+ AddIdr(&idr_video_header, 1);
+ rtp_packet.SetSequenceNumber(2);
+ idr_video_header.is_first_packet_in_frame = true;
+ idr_video_header.is_last_packet_in_frame = true;
+ idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ const uint8_t idr[] = {0x65, 1, 2, 3};
+ idr_data.AppendData(idr);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
+ idr_data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet,
+ idr_video_header);
+}
+
+// SPS/PPS signaled out-of-band via sprop-parameter-sets should be prepended
+// (with start codes) to the first IDR's bitstream.
+TEST_P(RtpVideoStreamReceiver2TestH264, OutOfBandFmtpSpsPps) {
+ // Shadows the fixture-level constant for this locally registered codec.
+ constexpr int kPayloadType = 99;
+ VideoCodec codec;
+ codec.plType = kPayloadType;
+ std::map<std::string, std::string> codec_params;
+ // Example parameter sets from https://tools.ietf.org/html/rfc3984#section-8.2
+ // .
+ codec_params.insert(
+ {cricket::kH264FmtpSpropParameterSets, "Z0IACpZTBYmI,aMljiA=="});
+ rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params,
+ /*raw_payload=*/false);
+ // Base64-decoded forms of the sprop parameter sets above.
+ const uint8_t binary_sps[] = {0x67, 0x42, 0x00, 0x0a, 0x96,
+ 0x53, 0x05, 0x89, 0x88};
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_sps,
+ sizeof(binary_sps));
+ const uint8_t binary_pps[] = {0x68, 0xc9, 0x63, 0x88};
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_pps,
+ sizeof(binary_pps));
+
+ RtpPacketReceived rtp_packet;
+ RTPVideoHeader video_header = GetDefaultH264VideoHeader();
+ AddIdr(&video_header, 0);
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(2);
+ video_header.is_first_packet_in_frame = true;
+ video_header.is_last_packet_in_frame = true;
+ video_header.codec = kVideoCodecH264;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ rtc::CopyOnWriteBuffer data({1, 2, 3});
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(
+ kH264StartCode, sizeof(kH264StartCode));
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+// Empty (padding) payloads interleaved in the media stream must not block
+// frame assembly: later frames complete once the sequence-number gap is
+// filled by a padding packet.
+TEST_F(RtpVideoStreamReceiver2Test, PaddingInMediaStream) {
+ RtpPacketReceived rtp_packet;
+ RTPVideoHeader video_header = GetDefaultH264VideoHeader();
+ rtc::CopyOnWriteBuffer data({1, 2, 3});
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(2);
+ video_header.is_first_packet_in_frame = true;
+ video_header.is_last_packet_in_frame = true;
+ video_header.codec = kVideoCodecGeneric;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+
+ // Padding packet (empty payload) directly after the key frame.
+ rtp_packet.SetSequenceNumber(3);
+ rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
+ video_header);
+
+ rtp_packet.SetSequenceNumber(4);
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ video_header.frame_type = VideoFrameType::kVideoFrameDelta;
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+
+ // Deliver sequence number 6 before 5; the frame cannot complete yet.
+ rtp_packet.SetSequenceNumber(6);
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+
+ // The padding packet with sequence number 5 fills the gap and releases the
+ // frame carried by packet 6.
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_));
+ rtp_packet.SetSequenceNumber(5);
+ rtp_video_stream_receiver_->OnReceivedPayloadData({}, rtp_packet,
+ video_header);
+}
+
+// If the very first frame received is a delta frame, the receiver should ask
+// the sender for a key frame.
+TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeIfFirstFrameIsDelta) {
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
+ rtp_packet.SetSequenceNumber(1);
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
+ EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+// Filling the packet buffer with incomplete frames should trigger a key
+// frame request so the decoder can resynchronize.
+TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeWhenPacketBufferGetsFull) {
+ constexpr int kPacketBufferMaxSize = 2048;
+
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameDelta);
+ // Incomplete frames so that the packet buffer is filling up.
+ video_header.is_last_packet_in_frame = false;
+ uint16_t start_sequence_number = 1234;
+ rtp_packet.SetSequenceNumber(start_sequence_number);
+ // Step sequence numbers by 2 so no frame ever completes.
+ while (rtp_packet.SequenceNumber() - start_sequence_number <
+ kPacketBufferMaxSize) {
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+ rtp_packet.SetSequenceNumber(rtp_packet.SequenceNumber() + 2);
+ }
+
+ EXPECT_CALL(mock_key_frame_request_sender_, RequestKeyFrame());
+ rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet,
+ video_header);
+}
+
+// All registered secondary sinks receive each incoming RTP packet while the
+// stream is started.
+TEST_F(RtpVideoStreamReceiver2Test, SecondarySinksGetRtpNotifications) {
+ rtp_video_stream_receiver_->StartReceive();
+
+ MockRtpPacketSink secondary_sink_1;
+ MockRtpPacketSink secondary_sink_2;
+
+ rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink_1);
+ rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink_2);
+
+ auto rtp_packet = CreateRtpPacketReceived();
+ EXPECT_CALL(secondary_sink_1, OnRtpPacket(SamePacketAs(*rtp_packet)));
+ EXPECT_CALL(secondary_sink_2, OnRtpPacket(SamePacketAs(*rtp_packet)));
+
+ rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);
+
+ // Test tear-down.
+ rtp_video_stream_receiver_->StopReceive();
+ rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink_1);
+ rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink_2);
+}
+
+// A secondary sink that was added and then removed must not be notified of
+// subsequent packets.
+TEST_F(RtpVideoStreamReceiver2Test,
+ RemovedSecondarySinksGetNoRtpNotifications) {
+ rtp_video_stream_receiver_->StartReceive();
+
+ MockRtpPacketSink secondary_sink;
+
+ rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink);
+ rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink);
+
+ auto rtp_packet = CreateRtpPacketReceived();
+
+ EXPECT_CALL(secondary_sink, OnRtpPacket(_)).Times(0);
+
+ rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);
+
+ // Test tear-down.
+ rtp_video_stream_receiver_->StopReceive();
+}
+
+// Removing one secondary sink must not affect delivery to the sinks that
+// remain registered.
+TEST_F(RtpVideoStreamReceiver2Test,
+ OnlyRemovedSecondarySinksExcludedFromNotifications) {
+ rtp_video_stream_receiver_->StartReceive();
+
+ MockRtpPacketSink kept_secondary_sink;
+ MockRtpPacketSink removed_secondary_sink;
+
+ rtp_video_stream_receiver_->AddSecondarySink(&kept_secondary_sink);
+ rtp_video_stream_receiver_->AddSecondarySink(&removed_secondary_sink);
+ rtp_video_stream_receiver_->RemoveSecondarySink(&removed_secondary_sink);
+
+ auto rtp_packet = CreateRtpPacketReceived();
+ EXPECT_CALL(kept_secondary_sink, OnRtpPacket(SamePacketAs(*rtp_packet)));
+
+ rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);
+
+ // Test tear-down.
+ rtp_video_stream_receiver_->StopReceive();
+ rtp_video_stream_receiver_->RemoveSecondarySink(&kept_secondary_sink);
+}
+
+// Secondary sinks are only notified while the stream is started; a stopped
+// stream delivers nothing.
+TEST_F(RtpVideoStreamReceiver2Test,
+ SecondariesOfNonStartedStreamGetNoNotifications) {
+ // Explicitly showing that the stream is not in the |started| state,
+ // regardless of whether streams start out |started| or |stopped|.
+ rtp_video_stream_receiver_->StopReceive();
+
+ MockRtpPacketSink secondary_sink;
+ rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink);
+
+ auto rtp_packet = CreateRtpPacketReceived();
+ EXPECT_CALL(secondary_sink, OnRtpPacket(_)).Times(0);
+
+ rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet);
+
+ // Test tear-down.
+ rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink);
+}
+
+// A single-packet frame carrying the generic frame descriptor extension
+// should surface frame id, dependencies and spatial layer in the resulting
+// EncodedFrame.
+TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorOnePacket) {
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ const int kSpatialIndex = 1;
+
+ rtp_video_stream_receiver_->StartReceive();
+
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
+ RtpPacketReceived rtp_packet(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
+
+ RtpGenericFrameDescriptor generic_descriptor;
+ generic_descriptor.SetFirstPacketInSubFrame(true);
+ generic_descriptor.SetLastPacketInSubFrame(true);
+ generic_descriptor.SetFrameId(100);
+ generic_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
+ generic_descriptor.AddFrameDependencyDiff(90);
+ generic_descriptor.AddFrameDependencyDiff(80);
+ ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
+ generic_descriptor));
+
+ uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
+ memcpy(payload, data.data(), data.size());
+ // The first byte is the header, so we ignore the first byte of |data|.
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
+ data.size() - 1);
+
+ rtp_packet.SetMarker(true);
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(1);
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) {
+ EXPECT_EQ(frame->num_references, 2U);
+ EXPECT_EQ(frame->references[0], frame->id.picture_id - 90);
+ EXPECT_EQ(frame->references[1], frame->id.picture_id - 80);
+ EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex);
+ EXPECT_THAT(frame->PacketInfos(), SizeIs(1));
+ }));
+
+ rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
+}
+
+// A frame split over two packets: descriptor data (resolution, spatial
+// layer) from the first packet must carry over to the assembled frame, and
+// PacketInfos should report both packets.
+TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorTwoPackets) {
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ const int kSpatialIndex = 1;
+
+ rtp_video_stream_receiver_->StartReceive();
+
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
+ RtpPacketReceived first_packet(&extension_map);
+
+ RtpGenericFrameDescriptor first_packet_descriptor;
+ first_packet_descriptor.SetFirstPacketInSubFrame(true);
+ first_packet_descriptor.SetLastPacketInSubFrame(false);
+ first_packet_descriptor.SetFrameId(100);
+ first_packet_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
+ first_packet_descriptor.SetResolution(480, 360);
+ ASSERT_TRUE(first_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
+ first_packet_descriptor));
+
+ uint8_t* first_packet_payload = first_packet.SetPayloadSize(data.size());
+ memcpy(first_packet_payload, data.data(), data.size());
+ // The first byte is the header, so we ignore the first byte of |data|.
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
+ data.size() - 1);
+
+ first_packet.SetPayloadType(kPayloadType);
+ first_packet.SetSequenceNumber(1);
+ rtp_video_stream_receiver_->OnRtpPacket(first_packet);
+
+ RtpPacketReceived second_packet(&extension_map);
+ RtpGenericFrameDescriptor second_packet_descriptor;
+ second_packet_descriptor.SetFirstPacketInSubFrame(false);
+ second_packet_descriptor.SetLastPacketInSubFrame(true);
+ ASSERT_TRUE(second_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
+ second_packet_descriptor));
+
+ second_packet.SetMarker(true);
+ second_packet.SetPayloadType(kPayloadType);
+ second_packet.SetSequenceNumber(2);
+
+ uint8_t* second_packet_payload = second_packet.SetPayloadSize(data.size());
+ memcpy(second_packet_payload, data.data(), data.size());
+ // The first byte is the header, so we ignore the first byte of |data|.
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data() + 1,
+ data.size() - 1);
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) {
+ EXPECT_EQ(frame->num_references, 0U);
+ EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex);
+ EXPECT_EQ(frame->EncodedImage()._encodedWidth, 480u);
+ EXPECT_EQ(frame->EncodedImage()._encodedHeight, 360u);
+ EXPECT_THAT(frame->PacketInfos(), SizeIs(2));
+ }));
+
+ rtp_video_stream_receiver_->OnRtpPacket(second_packet);
+}
+
+// With a codec registered as raw payload, the whole payload (no depacketizer
+// header stripping) should be delivered in the complete frame.
+TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorRawPayload) {
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ const int kRawPayloadType = 123;
+
+ VideoCodec codec;
+ codec.plType = kRawPayloadType;
+ rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true);
+ rtp_video_stream_receiver_->StartReceive();
+
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
+ RtpPacketReceived rtp_packet(&extension_map);
+
+ RtpGenericFrameDescriptor generic_descriptor;
+ generic_descriptor.SetFirstPacketInSubFrame(true);
+ generic_descriptor.SetLastPacketInSubFrame(true);
+ ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
+ generic_descriptor));
+
+ uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
+ memcpy(payload, data.data(), data.size());
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+
+ rtp_packet.SetMarker(true);
+ rtp_packet.SetPayloadType(kRawPayloadType);
+ rtp_packet.SetSequenceNumber(1);
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
+ rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
+}
+
+// The 16-bit frame id in the generic descriptor wraps around; the receiver
+// must unwrap it into a monotonically increasing picture id
+// (0xffff -> 0x0002 is a forward step of 3).
+TEST_F(RtpVideoStreamReceiver2Test, UnwrapsFrameId) {
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ // Shadows the fixture-level constant for this locally registered codec.
+ const int kPayloadType = 123;
+
+ VideoCodec codec;
+ codec.plType = kPayloadType;
+ rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true);
+ rtp_video_stream_receiver_->StartReceive();
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<RtpGenericFrameDescriptorExtension00>(5);
+
+ uint16_t rtp_sequence_number = 1;
+ // Injects a single-packet frame whose descriptor carries
+ // |wrapped_frame_id|.
+ auto inject_packet = [&](uint16_t wrapped_frame_id) {
+ RtpPacketReceived rtp_packet(&extension_map);
+
+ RtpGenericFrameDescriptor generic_descriptor;
+ generic_descriptor.SetFirstPacketInSubFrame(true);
+ generic_descriptor.SetLastPacketInSubFrame(true);
+ generic_descriptor.SetFrameId(wrapped_frame_id);
+ ASSERT_TRUE(rtp_packet.SetExtension<RtpGenericFrameDescriptorExtension00>(
+ generic_descriptor));
+
+ uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
+ ASSERT_TRUE(payload);
+ memcpy(payload, data.data(), data.size());
+ mock_on_complete_frame_callback_.ClearExpectedBitstream();
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ rtp_packet.SetMarker(true);
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(++rtp_sequence_number);
+ rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
+ };
+
+ int64_t first_picture_id;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ first_picture_id = frame->id.picture_id;
+ });
+ inject_packet(/*wrapped_frame_id=*/0xffff);
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ EXPECT_EQ(frame->id.picture_id - first_picture_id, 3);
+ });
+ inject_packet(/*wrapped_frame_id=*/0x0002);
+}
+
+// Fixture for tests of the Dependency Descriptor header extension: registers
+// a raw-payload codec, the extension, and starts receiving.
+class RtpVideoStreamReceiver2DependencyDescriptorTest
+ : public RtpVideoStreamReceiver2Test {
+ public:
+ RtpVideoStreamReceiver2DependencyDescriptorTest() {
+ VideoCodec codec;
+ codec.plType = payload_type_;
+ rtp_video_stream_receiver_->AddReceiveCodec(codec, {},
+ /*raw_payload=*/true);
+ extension_map_.Register<RtpDependencyDescriptorExtension>(7);
+ rtp_video_stream_receiver_->StartReceive();
+ }
+
+ // Returns some valid structure for the DependencyDescriptors.
+ // First template of that structure always fit for a key frame.
+ static FrameDependencyStructure CreateStreamStructure() {
+ FrameDependencyStructure stream_structure;
+ stream_structure.num_decode_targets = 1;
+ stream_structure.templates = {
+ FrameDependencyTemplate().Dtis("S"),
+ FrameDependencyTemplate().Dtis("S").FrameDiffs({1}),
+ };
+ return stream_structure;
+ }
+
+ // Builds and delivers a single-packet frame carrying
+ // |dependency_descriptor| serialized against |stream_structure|, with a
+ // fixed 5-byte payload registered as the expected bitstream.
+ void InjectPacketWith(const FrameDependencyStructure& stream_structure,
+ const DependencyDescriptor& dependency_descriptor) {
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ RtpPacketReceived rtp_packet(&extension_map_);
+ ASSERT_TRUE(rtp_packet.SetExtension<RtpDependencyDescriptorExtension>(
+ stream_structure, dependency_descriptor));
+ uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
+ ASSERT_TRUE(payload);
+ memcpy(payload, data.data(), data.size());
+ mock_on_complete_frame_callback_.ClearExpectedBitstream();
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ rtp_packet.SetMarker(true);
+ rtp_packet.SetPayloadType(payload_type_);
+ rtp_packet.SetSequenceNumber(++rtp_sequence_number_);
+ rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
+ }
+
+ private:
+ const int payload_type_ = 123;
+ RtpHeaderExtensionMap extension_map_;
+ uint16_t rtp_sequence_number_ = 321;
+};
+
+// The 16-bit frame_number in the DependencyDescriptor wraps; the receiver
+// must unwrap it into increasing picture ids even when delta frames arrive
+// reordered across the wrap boundary.
+TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest, UnwrapsFrameId) {
+ FrameDependencyStructure stream_structure = CreateStreamStructure();
+
+ DependencyDescriptor keyframe_descriptor;
+ keyframe_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure);
+ keyframe_descriptor.frame_dependencies = stream_structure.templates[0];
+ keyframe_descriptor.frame_number = 0xfff0;
+ // DependencyDescriptor doesn't support reordering delta frame before
+ // keyframe. Thus feed a key frame first, then test reordered delta frames.
+ int64_t first_picture_id;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ first_picture_id = frame->id.picture_id;
+ });
+ InjectPacketWith(stream_structure, keyframe_descriptor);
+
+ DependencyDescriptor deltaframe1_descriptor;
+ deltaframe1_descriptor.frame_dependencies = stream_structure.templates[1];
+ deltaframe1_descriptor.frame_number = 0xfffe;
+
+ DependencyDescriptor deltaframe2_descriptor;
+ // Fix copy-paste bug: this line previously re-assigned
+ // deltaframe1_descriptor.frame_dependencies, leaving
+ // deltaframe2_descriptor with default (empty) frame dependencies.
+ deltaframe2_descriptor.frame_dependencies = stream_structure.templates[1];
+ deltaframe2_descriptor.frame_number = 0x0002;
+
+ // Parser should unwrap frame ids correctly even if packets were reordered by
+ // the network.
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ // 0x0002 - 0xfff0
+ EXPECT_EQ(frame->id.picture_id - first_picture_id, 18);
+ })
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ // 0xfffe - 0xfff0
+ EXPECT_EQ(frame->id.picture_id - first_picture_id, 14);
+ });
+ InjectPacketWith(stream_structure, deltaframe2_descriptor);
+ InjectPacketWith(stream_structure, deltaframe1_descriptor);
+}
+
+TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
+ DropsLateDeltaFramePacketWithDependencyDescriptorExtension) {
+ FrameDependencyStructure stream_structure1 = CreateStreamStructure();
+ FrameDependencyStructure stream_structure2 = CreateStreamStructure();
+ // Make sure template ids for these two structures do not collide:
+ // adjust structure_id (that is also used as template id offset).
+ stream_structure1.structure_id = 13;
+ stream_structure2.structure_id =
+ stream_structure1.structure_id + stream_structure1.templates.size();
+
+ DependencyDescriptor keyframe1_descriptor;
+ keyframe1_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure1);
+ keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
+ keyframe1_descriptor.frame_number = 1;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
+ InjectPacketWith(stream_structure1, keyframe1_descriptor);
+
+ // Pass in 2nd key frame with different structure.
+ DependencyDescriptor keyframe2_descriptor;
+ keyframe2_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure2);
+ keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
+ keyframe2_descriptor.frame_number = 3;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
+ InjectPacketWith(stream_structure2, keyframe2_descriptor);
+
+ // Pass in late delta frame that uses structure of the 1st key frame.
+ DependencyDescriptor deltaframe_descriptor;
+ deltaframe_descriptor.frame_dependencies = stream_structure1.templates[0];
+ deltaframe_descriptor.frame_number = 2;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame).Times(0);
+ InjectPacketWith(stream_structure1, deltaframe_descriptor);
+}
+
+TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest,
+ DropsLateKeyFramePacketWithDependencyDescriptorExtension) {
+ FrameDependencyStructure stream_structure1 = CreateStreamStructure();
+ FrameDependencyStructure stream_structure2 = CreateStreamStructure();
+ // Make sure template ids for these two structures do not collide:
+ // adjust structure_id (that is also used as template id offset).
+ stream_structure1.structure_id = 13;
+ stream_structure2.structure_id =
+ stream_structure1.structure_id + stream_structure1.templates.size();
+
+ DependencyDescriptor keyframe1_descriptor;
+ keyframe1_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure1);
+ keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
+ keyframe1_descriptor.frame_number = 1;
+
+ DependencyDescriptor keyframe2_descriptor;
+ keyframe2_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure2);
+ keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
+ keyframe2_descriptor.frame_number = 3;
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ EXPECT_EQ(frame->id.picture_id & 0xFFFF, 3);
+ });
+ InjectPacketWith(stream_structure2, keyframe2_descriptor);
+ InjectPacketWith(stream_structure1, keyframe1_descriptor);
+
+ // Pass in delta frame that uses structure of the 2nd key frame. Late key
+ // frame shouldn't block it.
+ DependencyDescriptor deltaframe_descriptor;
+ deltaframe_descriptor.frame_dependencies = stream_structure2.templates[0];
+ deltaframe_descriptor.frame_number = 4;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ EXPECT_EQ(frame->id.picture_id & 0xFFFF, 4);
+ });
+ InjectPacketWith(stream_structure2, deltaframe_descriptor);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+using RtpVideoStreamReceiver2DeathTest = RtpVideoStreamReceiver2Test;
+TEST_F(RtpVideoStreamReceiver2DeathTest, RepeatedSecondarySinkDisallowed) {
+ MockRtpPacketSink secondary_sink;
+
+ rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink);
+ EXPECT_DEATH(rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink),
+ "");
+
+ // Test tear-down.
+ rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink);
+}
+#endif
+
+TEST_F(RtpVideoStreamReceiver2Test, TransformFrame) {
+ rtc::scoped_refptr<MockFrameTransformer> mock_frame_transformer =
+ new rtc::RefCountedObject<testing::NiceMock<MockFrameTransformer>>();
+ EXPECT_CALL(*mock_frame_transformer,
+ RegisterTransformedFrameSinkCallback(_, config_.rtp.remote_ssrc));
+ auto receiver = std::make_unique<RtpVideoStreamReceiver2>(
+ TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_,
+ nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr,
+ nullptr, process_thread_.get(), &mock_nack_sender_, nullptr,
+ &mock_on_complete_frame_callback_, nullptr, mock_frame_transformer);
+ VideoCodec video_codec;
+ video_codec.plType = kPayloadType;
+ video_codec.codecType = kVideoCodecGeneric;
+ receiver->AddReceiveCodec(video_codec, {}, /*raw_payload=*/false);
+
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
+ rtp_packet.SetSequenceNumber(1);
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(*mock_frame_transformer, Transform(_));
+ receiver->OnReceivedPayloadData(data, rtp_packet, video_header);
+
+ EXPECT_CALL(*mock_frame_transformer,
+ UnregisterTransformedFrameSinkCallback(config_.rtp.remote_ssrc));
+ receiver = nullptr;
+}
+
+// Test default behavior and when playout delay is overridden by field trial.
+const PlayoutDelay kTransmittedPlayoutDelay = {100, 200};
+const PlayoutDelay kForcedPlayoutDelay = {70, 90};
+struct PlayoutDelayOptions {
+ std::string field_trial;
+ PlayoutDelay expected_delay;
+};
+const PlayoutDelayOptions kDefaultBehavior = {
+ /*field_trial=*/"", /*expected_delay=*/kTransmittedPlayoutDelay};
+const PlayoutDelayOptions kOverridePlayoutDelay = {
+ /*field_trial=*/"WebRTC-ForcePlayoutDelay/min_ms:70,max_ms:90/",
+ /*expected_delay=*/kForcedPlayoutDelay};
+
+class RtpVideoStreamReceiver2TestPlayoutDelay
+ : public RtpVideoStreamReceiver2Test,
+ public ::testing::WithParamInterface<PlayoutDelayOptions> {
+ protected:
+ RtpVideoStreamReceiver2TestPlayoutDelay()
+ : RtpVideoStreamReceiver2Test(GetParam().field_trial) {}
+};
+
+INSTANTIATE_TEST_SUITE_P(PlayoutDelay,
+ RtpVideoStreamReceiver2TestPlayoutDelay,
+ Values(kDefaultBehavior, kOverridePlayoutDelay));
+
+TEST_P(RtpVideoStreamReceiver2TestPlayoutDelay, PlayoutDelay) {
+ rtc::CopyOnWriteBuffer payload_data({1, 2, 3, 4});
+ RtpHeaderExtensionMap extension_map;
+ extension_map.Register<PlayoutDelayLimits>(1);
+ RtpPacketToSend packet_to_send(&extension_map);
+ packet_to_send.SetPayloadType(kPayloadType);
+ packet_to_send.SetSequenceNumber(1);
+
+ // Set playout delay on outgoing packet.
+ EXPECT_TRUE(packet_to_send.SetExtension<PlayoutDelayLimits>(
+ kTransmittedPlayoutDelay));
+ uint8_t* payload = packet_to_send.AllocatePayload(payload_data.size());
+ memcpy(payload, payload_data.data(), payload_data.size());
+
+ RtpPacketReceived received_packet(&extension_map);
+ received_packet.Parse(packet_to_send.data(), packet_to_send.size());
+
+ RTPVideoHeader video_header =
+ GetGenericVideoHeader(VideoFrameType::kVideoFrameKey);
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(payload_data.data(),
+ payload_data.size());
+ // Expect the playout delay of encoded frame to be the same as the transmitted
+ // playout delay unless it was overridden by a field trial.
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_))
+ .WillOnce(Invoke([expected_playout_delay = GetParam().expected_delay](
+ video_coding::EncodedFrame* frame) {
+ EXPECT_EQ(frame->EncodedImage().playout_delay_, expected_playout_delay);
+ }));
+ rtp_video_stream_receiver_->OnReceivedPayloadData(
+ received_packet.PayloadBuffer(), received_packet, video_header);
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc
index 8324b191367..31eb344d5b6 100644
--- a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc
@@ -17,7 +17,6 @@
#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
#include "rtc_base/task_utils/to_queued_task.h"
#include "rtc_base/thread.h"
-#include "video/rtp_video_stream_receiver.h"
namespace webrtc {
@@ -28,7 +27,9 @@ class TransformableVideoReceiverFrame
TransformableVideoReceiverFrame(
std::unique_ptr<video_coding::RtpFrameObject> frame,
uint32_t ssrc)
- : frame_(std::move(frame)), ssrc_(ssrc) {}
+ : frame_(std::move(frame)),
+ metadata_(frame_->GetRtpVideoHeader()),
+ ssrc_(ssrc) {}
~TransformableVideoReceiverFrame() override = default;
// Implements TransformableVideoFrameInterface.
@@ -52,19 +53,22 @@ class TransformableVideoReceiverFrame
return RtpDescriptorAuthentication(frame_->GetRtpVideoHeader());
}
+ const VideoFrameMetadata& GetMetadata() const override { return metadata_; }
+
std::unique_ptr<video_coding::RtpFrameObject> ExtractFrame() && {
return std::move(frame_);
}
private:
std::unique_ptr<video_coding::RtpFrameObject> frame_;
+ const VideoFrameMetadata metadata_;
const uint32_t ssrc_;
};
} // namespace
RtpVideoStreamReceiverFrameTransformerDelegate::
RtpVideoStreamReceiverFrameTransformerDelegate(
- RtpVideoStreamReceiver* receiver,
+ RtpVideoFrameReceiver* receiver,
rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
rtc::Thread* network_thread,
uint32_t ssrc)
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h
index 0a106c956ac..e687e7f47b7 100644
--- a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h
@@ -20,7 +20,16 @@
namespace webrtc {
-class RtpVideoStreamReceiver;
+// Called back by RtpVideoStreamReceiverFrameTransformerDelegate on the network
+// thread after transformation.
+class RtpVideoFrameReceiver {
+ public:
+ virtual void ManageFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame) = 0;
+
+ protected:
+ virtual ~RtpVideoFrameReceiver() = default;
+};
// Delegates calls to FrameTransformerInterface to transform frames, and to
// RtpVideoStreamReceiver to manage transformed frames on the |network_thread_|.
@@ -28,7 +37,7 @@ class RtpVideoStreamReceiverFrameTransformerDelegate
: public TransformedFrameCallback {
public:
RtpVideoStreamReceiverFrameTransformerDelegate(
- RtpVideoStreamReceiver* receiver,
+ RtpVideoFrameReceiver* receiver,
rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
rtc::Thread* network_thread,
uint32_t ssrc);
@@ -44,7 +53,7 @@ class RtpVideoStreamReceiverFrameTransformerDelegate
void OnTransformedFrame(
std::unique_ptr<TransformableFrameInterface> frame) override;
- // Delegates the call to RtpVideoReceiver::ManageFrame on the
+ // Delegates the call to RtpVideoFrameReceiver::ManageFrame on the
// |network_thread_|.
void ManageFrame(std::unique_ptr<TransformableFrameInterface> frame);
@@ -53,7 +62,7 @@ class RtpVideoStreamReceiverFrameTransformerDelegate
private:
SequenceChecker network_sequence_checker_;
- RtpVideoStreamReceiver* receiver_ RTC_GUARDED_BY(network_sequence_checker_);
+ RtpVideoFrameReceiver* receiver_ RTC_GUARDED_BY(network_sequence_checker_);
rtc::scoped_refptr<FrameTransformerInterface> frame_transformer_
RTC_GUARDED_BY(network_sequence_checker_);
rtc::Thread* const network_thread_;
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc
index c481f502a45..a411ca6e9ab 100644
--- a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc
@@ -15,95 +15,42 @@
#include <utility>
#include <vector>
+#include "absl/memory/memory.h"
#include "api/call/transport.h"
#include "call/video_receive_stream.h"
#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
#include "modules/utility/include/process_thread.h"
#include "rtc_base/event.h"
+#include "rtc_base/ref_counted_object.h"
#include "rtc_base/task_utils/to_queued_task.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/mock_frame_transformer.h"
-#include "video/rtp_video_stream_receiver.h"
namespace webrtc {
namespace {
using ::testing::_;
+using ::testing::ElementsAre;
using ::testing::NiceMock;
using ::testing::SaveArg;
-std::unique_ptr<video_coding::RtpFrameObject> CreateRtpFrameObject() {
+std::unique_ptr<video_coding::RtpFrameObject> CreateRtpFrameObject(
+ const RTPVideoHeader& video_header) {
return std::make_unique<video_coding::RtpFrameObject>(
- 0, 0, true, 0, 0, 0, 0, 0, VideoSendTiming(), 0, kVideoCodecGeneric,
- kVideoRotation_0, VideoContentType::UNSPECIFIED, RTPVideoHeader(),
+ 0, 0, true, 0, 0, 0, 0, 0, VideoSendTiming(), 0, video_header.codec,
+ kVideoRotation_0, VideoContentType::UNSPECIFIED, video_header,
absl::nullopt, RtpPacketInfos(), EncodedImageBuffer::Create(0));
}
-class FakeTransport : public Transport {
- public:
- bool SendRtp(const uint8_t* packet,
- size_t length,
- const PacketOptions& options) {
- return true;
- }
- bool SendRtcp(const uint8_t* packet, size_t length) { return true; }
-};
-
-class FakeNackSender : public NackSender {
- public:
- void SendNack(const std::vector<uint16_t>& sequence_numbers) {}
- void SendNack(const std::vector<uint16_t>& sequence_numbers,
- bool buffering_allowed) {}
-};
-
-class FakeOnCompleteFrameCallback
- : public video_coding::OnCompleteFrameCallback {
- public:
- void OnCompleteFrame(
- std::unique_ptr<video_coding::EncodedFrame> frame) override {}
-};
-
-class TestRtpVideoStreamReceiverInitializer {
- public:
- TestRtpVideoStreamReceiverInitializer()
- : test_config_(nullptr),
- test_process_thread_(ProcessThread::Create("TestThread")) {
- test_config_.rtp.remote_ssrc = 1111;
- test_config_.rtp.local_ssrc = 2222;
- test_rtp_receive_statistics_ =
- ReceiveStatistics::Create(Clock::GetRealTimeClock());
- }
-
- protected:
- VideoReceiveStream::Config test_config_;
- FakeTransport fake_transport_;
- FakeNackSender fake_nack_sender_;
- FakeOnCompleteFrameCallback fake_on_complete_frame_callback_;
- std::unique_ptr<ProcessThread> test_process_thread_;
- std::unique_ptr<ReceiveStatistics> test_rtp_receive_statistics_;
-};
+std::unique_ptr<video_coding::RtpFrameObject> CreateRtpFrameObject() {
+ return CreateRtpFrameObject(RTPVideoHeader());
+}
-class TestRtpVideoStreamReceiver : public TestRtpVideoStreamReceiverInitializer,
- public RtpVideoStreamReceiver {
+class TestRtpVideoFrameReceiver : public RtpVideoFrameReceiver {
public:
- TestRtpVideoStreamReceiver()
- : TestRtpVideoStreamReceiverInitializer(),
- RtpVideoStreamReceiver(Clock::GetRealTimeClock(),
- &fake_transport_,
- nullptr,
- nullptr,
- &test_config_,
- test_rtp_receive_statistics_.get(),
- nullptr,
- nullptr,
- test_process_thread_.get(),
- &fake_nack_sender_,
- nullptr,
- &fake_on_complete_frame_callback_,
- nullptr,
- nullptr) {}
- ~TestRtpVideoStreamReceiver() override = default;
+ TestRtpVideoFrameReceiver() {}
+ ~TestRtpVideoFrameReceiver() override = default;
MOCK_METHOD(void,
ManageFrame,
@@ -113,7 +60,7 @@ class TestRtpVideoStreamReceiver : public TestRtpVideoStreamReceiverInitializer,
TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
RegisterTransformedFrameCallbackSinkOnInit) {
- TestRtpVideoStreamReceiver receiver;
+ TestRtpVideoFrameReceiver receiver;
rtc::scoped_refptr<MockFrameTransformer> frame_transformer(
new rtc::RefCountedObject<MockFrameTransformer>());
rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate> delegate(
@@ -127,7 +74,7 @@ TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
UnregisterTransformedFrameSinkCallbackOnReset) {
- TestRtpVideoStreamReceiver receiver;
+ TestRtpVideoFrameReceiver receiver;
rtc::scoped_refptr<MockFrameTransformer> frame_transformer(
new rtc::RefCountedObject<MockFrameTransformer>());
rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate> delegate(
@@ -139,7 +86,7 @@ TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
}
TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, TransformFrame) {
- TestRtpVideoStreamReceiver receiver;
+ TestRtpVideoFrameReceiver receiver;
rtc::scoped_refptr<MockFrameTransformer> frame_transformer(
new rtc::RefCountedObject<testing::NiceMock<MockFrameTransformer>>());
rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate> delegate(
@@ -153,7 +100,7 @@ TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, TransformFrame) {
TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
ManageFrameOnTransformedFrame) {
- TestRtpVideoStreamReceiver receiver;
+ TestRtpVideoFrameReceiver receiver;
rtc::scoped_refptr<MockFrameTransformer> mock_frame_transformer(
new rtc::RefCountedObject<NiceMock<MockFrameTransformer>>());
rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate> delegate =
@@ -177,5 +124,48 @@ TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
rtc::ThreadManager::ProcessAllMessageQueuesForTesting();
}
+TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
+ TransformableFrameMetadataHasCorrectValue) {
+ TestRtpVideoFrameReceiver receiver;
+ rtc::scoped_refptr<MockFrameTransformer> mock_frame_transformer =
+ new rtc::RefCountedObject<NiceMock<MockFrameTransformer>>();
+ rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate> delegate =
+ new rtc::RefCountedObject<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ &receiver, mock_frame_transformer, rtc::Thread::Current(), 1111);
+ delegate->Init();
+ RTPVideoHeader video_header;
+ video_header.width = 1280u;
+ video_header.height = 720u;
+ RTPVideoHeader::GenericDescriptorInfo& generic =
+ video_header.generic.emplace();
+ generic.frame_id = 10;
+ generic.temporal_index = 3;
+ generic.spatial_index = 2;
+ generic.decode_target_indications = {DecodeTargetIndication::kSwitch};
+ generic.dependencies = {5};
+
+ // Check that the transformable frame passed to the frame transformer has the
+ // correct metadata.
+ EXPECT_CALL(*mock_frame_transformer, Transform)
+ .WillOnce(
+ [](std::unique_ptr<TransformableFrameInterface> transformable_frame) {
+ auto frame =
+ absl::WrapUnique(static_cast<TransformableVideoFrameInterface*>(
+ transformable_frame.release()));
+ ASSERT_TRUE(frame);
+ auto metadata = frame->GetMetadata();
+ EXPECT_EQ(metadata.GetWidth(), 1280u);
+ EXPECT_EQ(metadata.GetHeight(), 720u);
+ EXPECT_EQ(metadata.GetFrameId(), 10);
+ EXPECT_EQ(metadata.GetTemporalIndex(), 3);
+ EXPECT_EQ(metadata.GetSpatialIndex(), 2);
+ EXPECT_THAT(metadata.GetFrameDependencies(), ElementsAre(5));
+ EXPECT_THAT(metadata.GetDecodeTargetIndications(),
+ ElementsAre(DecodeTargetIndication::kSwitch));
+ });
+ // The delegate creates a transformable frame from the RtpFrameObject.
+ delegate->TransformFrame(CreateRtpFrameObject(video_header));
+}
+
} // namespace
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_unittest.cc b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_unittest.cc
index 255de54e8be..20d6ae88ad9 100644
--- a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_unittest.cc
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_unittest.cc
@@ -73,37 +73,45 @@ RTPVideoHeader GetGenericVideoHeader(VideoFrameType frame_type) {
class MockTransport : public Transport {
public:
- MOCK_METHOD3(SendRtp,
- bool(const uint8_t* packet,
- size_t length,
- const PacketOptions& options));
- MOCK_METHOD2(SendRtcp, bool(const uint8_t* packet, size_t length));
+ MOCK_METHOD(bool,
+ SendRtp,
+ (const uint8_t*, size_t length, const PacketOptions& options),
+ (override));
+ MOCK_METHOD(bool, SendRtcp, (const uint8_t*, size_t length), (override));
};
class MockNackSender : public NackSender {
public:
- MOCK_METHOD1(SendNack, void(const std::vector<uint16_t>& sequence_numbers));
- MOCK_METHOD2(SendNack,
- void(const std::vector<uint16_t>& sequence_numbers,
- bool buffering_allowed));
+ MOCK_METHOD(void,
+ SendNack,
+ (const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed),
+ (override));
};
class MockKeyFrameRequestSender : public KeyFrameRequestSender {
public:
- MOCK_METHOD0(RequestKeyFrame, void());
+ MOCK_METHOD(void, RequestKeyFrame, (), (override));
};
class MockOnCompleteFrameCallback
: public video_coding::OnCompleteFrameCallback {
public:
- MOCK_METHOD1(DoOnCompleteFrame, void(video_coding::EncodedFrame* frame));
- MOCK_METHOD1(DoOnCompleteFrameFailNullptr,
- void(video_coding::EncodedFrame* frame));
- MOCK_METHOD1(DoOnCompleteFrameFailLength,
- void(video_coding::EncodedFrame* frame));
- MOCK_METHOD1(DoOnCompleteFrameFailBitstream,
- void(video_coding::EncodedFrame* frame));
- void OnCompleteFrame(std::unique_ptr<video_coding::EncodedFrame> frame) {
+ MOCK_METHOD(void, DoOnCompleteFrame, (video_coding::EncodedFrame*), ());
+ MOCK_METHOD(void,
+ DoOnCompleteFrameFailNullptr,
+ (video_coding::EncodedFrame*),
+ ());
+ MOCK_METHOD(void,
+ DoOnCompleteFrameFailLength,
+ (video_coding::EncodedFrame*),
+ ());
+ MOCK_METHOD(void,
+ DoOnCompleteFrameFailBitstream,
+ (video_coding::EncodedFrame*),
+ ());
+ void OnCompleteFrame(
+ std::unique_ptr<video_coding::EncodedFrame> frame) override {
if (!frame) {
DoOnCompleteFrameFailNullptr(nullptr);
return;
@@ -132,7 +140,7 @@ class MockOnCompleteFrameCallback
class MockRtpPacketSink : public RtpPacketSinkInterface {
public:
- MOCK_METHOD1(OnRtpPacket, void(const RtpPacketReceived&));
+ MOCK_METHOD(void, OnRtpPacket, (const RtpPacketReceived&), (override));
};
constexpr uint32_t kSsrc = 111;
@@ -955,8 +963,8 @@ class RtpVideoStreamReceiverDependencyDescriptorTest
FrameDependencyStructure stream_structure;
stream_structure.num_decode_targets = 1;
stream_structure.templates = {
- GenericFrameInfo::Builder().Dtis("S").Build(),
- GenericFrameInfo::Builder().Dtis("S").Fdiffs({1}).Build(),
+ FrameDependencyTemplate().Dtis("S"),
+ FrameDependencyTemplate().Dtis("S").FrameDiffs({1}),
};
return stream_structure;
}
@@ -1102,7 +1110,8 @@ TEST_F(RtpVideoStreamReceiverDependencyDescriptorTest,
}
#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
-TEST_F(RtpVideoStreamReceiverTest, RepeatedSecondarySinkDisallowed) {
+using RtpVideoStreamReceiverDeathTest = RtpVideoStreamReceiverTest;
+TEST_F(RtpVideoStreamReceiverDeathTest, RepeatedSecondarySinkDisallowed) {
MockRtpPacketSink secondary_sink;
rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink);
diff --git a/chromium/third_party/webrtc/video/send_statistics_proxy.cc b/chromium/third_party/webrtc/video/send_statistics_proxy.cc
index f8d768f9d2d..b5bcbe6bf1d 100644
--- a/chromium/third_party/webrtc/video/send_statistics_proxy.cc
+++ b/chromium/third_party/webrtc/video/send_statistics_proxy.cc
@@ -717,9 +717,11 @@ void SendStatisticsProxy::OnSuspendChange(bool is_suspended) {
uma_container_->quality_adapt_timer_.Stop(now_ms);
} else {
// Start adaptation stats if scaling is enabled.
- if (adaptations_.MaskedCpuCounts().resolution_adaptations.has_value())
+ if (adaptation_limitations_.MaskedCpuCounts()
+ .resolution_adaptations.has_value())
uma_container_->cpu_adapt_timer_.Start(now_ms);
- if (adaptations_.MaskedQualityCounts().resolution_adaptations.has_value())
+ if (adaptation_limitations_.MaskedQualityCounts()
+ .resolution_adaptations.has_value())
uma_container_->quality_adapt_timer_.Start(now_ms);
// Stop pause explicitly for stats that may be zero/not updated for some
// time.
@@ -1021,7 +1023,7 @@ void SendStatisticsProxy::OnSendEncodedImage(
}
absl::optional<int> downscales =
- adaptations_.MaskedQualityCounts().resolution_adaptations;
+ adaptation_limitations_.MaskedQualityCounts().resolution_adaptations;
stats_.bw_limited_resolution |=
(downscales.has_value() && downscales.value() > 0);
@@ -1056,7 +1058,8 @@ void SendStatisticsProxy::OnIncomingFrame(int width, int height) {
uma_container_->input_fps_counter_.Add(1);
uma_container_->input_width_counter_.Add(width);
uma_container_->input_height_counter_.Add(height);
- if (adaptations_.MaskedCpuCounts().resolution_adaptations.has_value()) {
+ if (adaptation_limitations_.MaskedCpuCounts()
+ .resolution_adaptations.has_value()) {
uma_container_->cpu_limited_frame_counter_.Add(
stats_.cpu_limited_resolution);
}
@@ -1090,8 +1093,8 @@ void SendStatisticsProxy::OnFrameDropped(DropReason reason) {
void SendStatisticsProxy::ClearAdaptationStats() {
rtc::CritScope lock(&crit_);
- adaptations_.set_cpu_counts(VideoAdaptationCounters());
- adaptations_.set_quality_counts(VideoAdaptationCounters());
+ adaptation_limitations_.set_cpu_counts(VideoAdaptationCounters());
+ adaptation_limitations_.set_quality_counts(VideoAdaptationCounters());
UpdateAdaptationStats();
}
@@ -1099,10 +1102,10 @@ void SendStatisticsProxy::UpdateAdaptationSettings(
VideoStreamEncoderObserver::AdaptationSettings cpu_settings,
VideoStreamEncoderObserver::AdaptationSettings quality_settings) {
rtc::CritScope lock(&crit_);
- adaptations_.UpdateMaskingSettings(cpu_settings, quality_settings);
- SetAdaptTimer(adaptations_.MaskedCpuCounts(),
+ adaptation_limitations_.UpdateMaskingSettings(cpu_settings, quality_settings);
+ SetAdaptTimer(adaptation_limitations_.MaskedCpuCounts(),
&uma_container_->cpu_adapt_timer_);
- SetAdaptTimer(adaptations_.MaskedQualityCounts(),
+ SetAdaptTimer(adaptation_limitations_.MaskedQualityCounts(),
&uma_container_->quality_adapt_timer_);
UpdateAdaptationStats();
}
@@ -1113,9 +1116,10 @@ void SendStatisticsProxy::OnAdaptationChanged(
const VideoAdaptationCounters& quality_counters) {
rtc::CritScope lock(&crit_);
- MaskedAdaptationCounts receiver = adaptations_.MaskedQualityCounts();
- adaptations_.set_cpu_counts(cpu_counters);
- adaptations_.set_quality_counts(quality_counters);
+ MaskedAdaptationCounts receiver =
+ adaptation_limitations_.MaskedQualityCounts();
+ adaptation_limitations_.set_cpu_counts(cpu_counters);
+ adaptation_limitations_.set_quality_counts(quality_counters);
switch (reason) {
case VideoAdaptationReason::kCpu:
++stats_.number_of_cpu_adapt_changes;
@@ -1123,7 +1127,7 @@ void SendStatisticsProxy::OnAdaptationChanged(
case VideoAdaptationReason::kQuality:
TryUpdateInitialQualityResolutionAdaptUp(
receiver.resolution_adaptations,
- adaptations_.MaskedQualityCounts().resolution_adaptations);
+ adaptation_limitations_.MaskedQualityCounts().resolution_adaptations);
++stats_.number_of_quality_adapt_changes;
break;
}
@@ -1131,8 +1135,8 @@ void SendStatisticsProxy::OnAdaptationChanged(
}
void SendStatisticsProxy::UpdateAdaptationStats() {
- auto cpu_counts = adaptations_.MaskedCpuCounts();
- auto quality_counts = adaptations_.MaskedQualityCounts();
+ auto cpu_counts = adaptation_limitations_.MaskedCpuCounts();
+ auto quality_counts = adaptation_limitations_.MaskedQualityCounts();
bool is_cpu_limited = cpu_counts.resolution_adaptations > 0 ||
cpu_counts.num_framerate_reductions > 0;
@@ -1459,6 +1463,16 @@ void SendStatisticsProxy::Adaptations::set_quality_counts(
const VideoAdaptationCounters& quality_counts) {
quality_counts_ = quality_counts;
}
+
+VideoAdaptationCounters SendStatisticsProxy::Adaptations::cpu_counts() const {
+ return cpu_counts_;
+}
+
+VideoAdaptationCounters SendStatisticsProxy::Adaptations::quality_counts()
+ const {
+ return quality_counts_;
+}
+
void SendStatisticsProxy::Adaptations::UpdateMaskingSettings(
VideoStreamEncoderObserver::AdaptationSettings cpu_settings,
VideoStreamEncoderObserver::AdaptationSettings quality_settings) {
diff --git a/chromium/third_party/webrtc/video/send_statistics_proxy.h b/chromium/third_party/webrtc/video/send_statistics_proxy.h
index 1d2fd21cfa0..ff3b786be93 100644
--- a/chromium/third_party/webrtc/video/send_statistics_proxy.h
+++ b/chromium/third_party/webrtc/video/send_statistics_proxy.h
@@ -240,6 +240,9 @@ class SendStatisticsProxy : public VideoStreamEncoderObserver,
void set_cpu_counts(const VideoAdaptationCounters& cpu_counts);
void set_quality_counts(const VideoAdaptationCounters& quality_counts);
+ VideoAdaptationCounters cpu_counts() const;
+ VideoAdaptationCounters quality_counts() const;
+
void UpdateMaskingSettings(AdaptationSettings cpu_settings,
AdaptationSettings quality_settings);
@@ -299,7 +302,7 @@ class SendStatisticsProxy : public VideoStreamEncoderObserver,
bool bw_limited_layers_ RTC_GUARDED_BY(crit_);
// Indicastes if the encoder internally downscales input image.
bool internal_encoder_scaler_ RTC_GUARDED_BY(crit_);
- Adaptations adaptations_ RTC_GUARDED_BY(crit_);
+ Adaptations adaptation_limitations_ RTC_GUARDED_BY(crit_);
struct EncoderChangeEvent {
std::string previous_encoder_implementation;
diff --git a/chromium/third_party/webrtc/video/test/mock_video_stream_encoder.h b/chromium/third_party/webrtc/video/test/mock_video_stream_encoder.h
index 8e429681b8d..c9efc765985 100644
--- a/chromium/third_party/webrtc/video/test/mock_video_stream_encoder.h
+++ b/chromium/third_party/webrtc/video/test/mock_video_stream_encoder.h
@@ -10,6 +10,8 @@
#ifndef VIDEO_TEST_MOCK_VIDEO_STREAM_ENCODER_H_
#define VIDEO_TEST_MOCK_VIDEO_STREAM_ENCODER_H_
+#include <vector>
+
#include "api/video/video_stream_encoder_interface.h"
#include "test/gmock.h"
@@ -17,22 +19,44 @@ namespace webrtc {
class MockVideoStreamEncoder : public VideoStreamEncoderInterface {
public:
- MOCK_METHOD2(SetSource,
- void(rtc::VideoSourceInterface<VideoFrame>*,
- const DegradationPreference&));
- MOCK_METHOD2(SetSink, void(EncoderSink*, bool));
- MOCK_METHOD1(SetStartBitrate, void(int));
- MOCK_METHOD0(SendKeyFrame, void());
- MOCK_METHOD1(OnLossNotification, void(const VideoEncoder::LossNotification&));
- MOCK_METHOD6(OnBitrateUpdated,
- void(DataRate, DataRate, DataRate, uint8_t, int64_t, double));
- MOCK_METHOD1(OnFrame, void(const VideoFrame&));
- MOCK_METHOD1(SetBitrateAllocationObserver,
- void(VideoBitrateAllocationObserver*));
- MOCK_METHOD1(SetFecControllerOverride, void(FecControllerOverride*));
- MOCK_METHOD0(Stop, void());
+ MOCK_METHOD(void,
+ AddAdaptationResource,
+ (rtc::scoped_refptr<Resource>),
+ (override));
+ MOCK_METHOD(std::vector<rtc::scoped_refptr<Resource>>,
+ GetAdaptationResources,
+ (),
+ (override));
+ MOCK_METHOD(void,
+ SetSource,
+ (rtc::VideoSourceInterface<VideoFrame>*,
+ const DegradationPreference&),
+ (override));
+ MOCK_METHOD(void, SetSink, (EncoderSink*, bool), (override));
+ MOCK_METHOD(void, SetStartBitrate, (int), (override));
+ MOCK_METHOD(void, SendKeyFrame, (), (override));
+ MOCK_METHOD(void,
+ OnLossNotification,
+ (const VideoEncoder::LossNotification&),
+ (override));
+ MOCK_METHOD(void,
+ OnBitrateUpdated,
+ (DataRate, DataRate, DataRate, uint8_t, int64_t, double),
+ (override));
+ MOCK_METHOD(void, OnFrame, (const VideoFrame&), (override));
+ MOCK_METHOD(void,
+ SetBitrateAllocationObserver,
+ (VideoBitrateAllocationObserver*),
+ (override));
+ MOCK_METHOD(void,
+ SetFecControllerOverride,
+ (FecControllerOverride*),
+ (override));
+ MOCK_METHOD(void, Stop, (), (override));
- MOCK_METHOD2(MockedConfigureEncoder, void(const VideoEncoderConfig&, size_t));
+ MOCK_METHOD(void,
+ MockedConfigureEncoder,
+ (const VideoEncoderConfig&, size_t));
// gtest generates implicit copy which is not allowed on VideoEncoderConfig,
// so we can't mock ConfigureEncoder directly.
void ConfigureEncoder(VideoEncoderConfig config,
diff --git a/chromium/third_party/webrtc/video/video_quality_test.cc b/chromium/third_party/webrtc/video/video_quality_test.cc
index 94ce268fa92..88270b4b2e3 100644
--- a/chromium/third_party/webrtc/video/video_quality_test.cc
+++ b/chromium/third_party/webrtc/video/video_quality_test.cc
@@ -815,11 +815,6 @@ void VideoQualityTest::SetupVideo(Transport* send_transport,
}
if (params_.call.generic_descriptor) {
- // The generic descriptor is currently behind a field trial, so it needs
- // to be set for this flag to have any effect.
- // TODO(philipel): Remove this check when the experiment is removed.
- RTC_CHECK(field_trial::IsEnabled("WebRTC-GenericDescriptor"));
-
video_send_configs_[video_idx].rtp.extensions.emplace_back(
RtpExtension::kGenericFrameDescriptorUri00,
kGenericFrameDescriptorExtensionId00);
diff --git a/chromium/third_party/webrtc/video/video_receive_stream.h b/chromium/third_party/webrtc/video/video_receive_stream.h
index c1ebf2b600e..8a5136a4b1d 100644
--- a/chromium/third_party/webrtc/video/video_receive_stream.h
+++ b/chromium/third_party/webrtc/video/video_receive_stream.h
@@ -15,7 +15,6 @@
#include <vector>
#include "api/task_queue/task_queue_factory.h"
-#include "api/transport/media/media_transport_interface.h"
#include "api/video/recordable_encoded_frame.h"
#include "call/rtp_packet_sink_interface.h"
#include "call/syncable.h"
diff --git a/chromium/third_party/webrtc/video/video_receive_stream2.cc b/chromium/third_party/webrtc/video/video_receive_stream2.cc
index b1b482da298..9413b72354f 100644
--- a/chromium/third_party/webrtc/video/video_receive_stream2.cc
+++ b/chromium/third_party/webrtc/video/video_receive_stream2.cc
@@ -49,7 +49,7 @@
#include "system_wrappers/include/field_trial.h"
#include "video/call_stats2.h"
#include "video/frame_dumping_decoder.h"
-#include "video/receive_statistics_proxy.h"
+#include "video/receive_statistics_proxy2.h"
namespace webrtc {
@@ -201,7 +201,8 @@ VideoReceiveStream2::VideoReceiveStream2(
rtp_receive_statistics_(ReceiveStatistics::Create(clock_)),
timing_(timing),
video_receiver_(clock_, timing_.get()),
- rtp_video_stream_receiver_(clock_,
+ rtp_video_stream_receiver_(worker_thread_,
+ clock_,
&transport_adapter_,
call_stats->AsRtcpRttStats(),
packet_router,
@@ -232,7 +233,6 @@ VideoReceiveStream2::VideoReceiveStream2(
RTC_DCHECK(call_stats_);
module_process_sequence_checker_.Detach();
- network_sequence_checker_.Detach();
RTC_DCHECK(!config_.decoders.empty());
std::set<int> decoder_payload_types;
@@ -472,8 +472,6 @@ bool VideoReceiveStream2::SetBaseMinimumPlayoutDelayMs(int delay_ms) {
return false;
}
- // TODO(bugs.webrtc.org/11489): Consider posting to worker.
- rtc::CritScope cs(&playout_delay_lock_);
base_minimum_playout_delay_ms_ = delay_ms;
UpdatePlayoutDelays();
return true;
@@ -481,8 +479,6 @@ bool VideoReceiveStream2::SetBaseMinimumPlayoutDelayMs(int delay_ms) {
int VideoReceiveStream2::GetBaseMinimumPlayoutDelayMs() const {
RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
-
- rtc::CritScope cs(&playout_delay_lock_);
return base_minimum_playout_delay_ms_;
}
@@ -522,18 +518,26 @@ void VideoReceiveStream2::SetDepacketizerToDecoderFrameTransformer(
void VideoReceiveStream2::SendNack(
const std::vector<uint16_t>& sequence_numbers,
bool buffering_allowed) {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
RTC_DCHECK(buffering_allowed);
rtp_video_stream_receiver_.RequestPacketRetransmit(sequence_numbers);
}
void VideoReceiveStream2::RequestKeyFrame(int64_t timestamp_ms) {
+ // Running on worker_sequence_checker_.
+ // Called from RtpVideoStreamReceiver (rtp_video_stream_receiver_ is
+ // ultimately responsible).
rtp_video_stream_receiver_.RequestKeyFrame();
- last_keyframe_request_ms_ = timestamp_ms;
+ decode_queue_.PostTask([this, timestamp_ms]() {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+ last_keyframe_request_ms_ = timestamp_ms;
+ });
}
void VideoReceiveStream2::OnCompleteFrame(
std::unique_ptr<video_coding::EncodedFrame> frame) {
- RTC_DCHECK_RUN_ON(&network_sequence_checker_);
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+
// TODO(https://bugs.webrtc.org/9974): Consider removing this workaround.
int64_t time_now_ms = clock_->TimeInMilliseconds();
if (last_complete_frame_time_ms_ > 0 &&
@@ -542,19 +546,13 @@ void VideoReceiveStream2::OnCompleteFrame(
}
last_complete_frame_time_ms_ = time_now_ms;
- // TODO(bugs.webrtc.org/11489): We grab the playout_delay_lock_ lock
- // potentially twice. Consider checking both min/max and posting to worker if
- // there's a change. If we always update playout delays on the worker, we
- // don't need a lock.
const PlayoutDelay& playout_delay = frame->EncodedImage().playout_delay_;
if (playout_delay.min_ms >= 0) {
- rtc::CritScope cs(&playout_delay_lock_);
frame_minimum_playout_delay_ms_ = playout_delay.min_ms;
UpdatePlayoutDelays();
}
if (playout_delay.max_ms >= 0) {
- rtc::CritScope cs(&playout_delay_lock_);
frame_maximum_playout_delay_ms_ = playout_delay.max_ms;
UpdatePlayoutDelays();
}
@@ -602,22 +600,20 @@ void VideoReceiveStream2::SetEstimatedPlayoutNtpTimestampMs(
void VideoReceiveStream2::SetMinimumPlayoutDelay(int delay_ms) {
RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
- // TODO(bugs.webrtc.org/11489): See if we can't get rid of the
- // |playout_delay_lock_|
- rtc::CritScope cs(&playout_delay_lock_);
syncable_minimum_playout_delay_ms_ = delay_ms;
UpdatePlayoutDelays();
}
-int64_t VideoReceiveStream2::GetWaitMs() const {
+int64_t VideoReceiveStream2::GetMaxWaitMs() const {
return keyframe_required_ ? max_wait_for_keyframe_ms_
: max_wait_for_frame_ms_;
}
void VideoReceiveStream2::StartNextDecode() {
+ // Running on the decode thread.
TRACE_EVENT0("webrtc", "VideoReceiveStream2::StartNextDecode");
frame_buffer_->NextFrame(
- GetWaitMs(), keyframe_required_, &decode_queue_,
+ GetMaxWaitMs(), keyframe_required_, &decode_queue_,
/* encoded frame handler */
[this](std::unique_ptr<EncodedFrame> frame, ReturnReason res) {
RTC_DCHECK_EQ(frame == nullptr, res == ReturnReason::kTimeout);
@@ -629,7 +625,12 @@ void VideoReceiveStream2::StartNextDecode() {
if (frame) {
HandleEncodedFrame(std::move(frame));
} else {
- HandleFrameBufferTimeout();
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ worker_thread_->PostTask(ToQueuedTask(
+ task_safety_, [this, now_ms, wait_ms = GetMaxWaitMs()]() {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ HandleFrameBufferTimeout(now_ms, wait_ms);
+ }));
}
StartNextDecode();
});
@@ -649,25 +650,48 @@ void VideoReceiveStream2::HandleEncodedFrame(
}
}
stats_proxy_.OnPreDecode(frame->CodecSpecific()->codecType, qp);
- HandleKeyFrameGeneration(frame->FrameType() == VideoFrameType::kVideoFrameKey,
- now_ms);
+
+ bool force_request_key_frame = false;
+ int64_t decoded_frame_picture_id = -1;
+
+ const bool keyframe_request_is_due =
+ now_ms >= (last_keyframe_request_ms_ + max_wait_for_keyframe_ms_);
+
int decode_result = video_receiver_.Decode(frame.get());
if (decode_result == WEBRTC_VIDEO_CODEC_OK ||
decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) {
keyframe_required_ = false;
frame_decoded_ = true;
- rtp_video_stream_receiver_.FrameDecoded(frame->id.picture_id);
+
+ decoded_frame_picture_id = frame->id.picture_id;
if (decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME)
- RequestKeyFrame(now_ms);
+ force_request_key_frame = true;
} else if (!frame_decoded_ || !keyframe_required_ ||
- (last_keyframe_request_ms_ + max_wait_for_keyframe_ms_ < now_ms)) {
+ keyframe_request_is_due) {
keyframe_required_ = true;
// TODO(philipel): Remove this keyframe request when downstream project
// has been fixed.
- RequestKeyFrame(now_ms);
+ force_request_key_frame = true;
}
+ bool received_frame_is_keyframe =
+ frame->FrameType() == VideoFrameType::kVideoFrameKey;
+
+ worker_thread_->PostTask(ToQueuedTask(
+ task_safety_,
+ [this, now_ms, received_frame_is_keyframe, force_request_key_frame,
+ decoded_frame_picture_id, keyframe_request_is_due]() {
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+
+ if (decoded_frame_picture_id != -1)
+ rtp_video_stream_receiver_.FrameDecoded(decoded_frame_picture_id);
+
+ HandleKeyFrameGeneration(received_frame_is_keyframe, now_ms,
+ force_request_key_frame,
+ keyframe_request_is_due);
+ }));
+
if (encoded_frame_buffer_function_) {
frame->Retain();
encoded_frame_buffer_function_(WebRtcRecordableEncodedFrame(*frame));
@@ -676,48 +700,58 @@ void VideoReceiveStream2::HandleEncodedFrame(
void VideoReceiveStream2::HandleKeyFrameGeneration(
bool received_frame_is_keyframe,
- int64_t now_ms) {
+ int64_t now_ms,
+ bool always_request_key_frame,
+ bool keyframe_request_is_due) {
+ // Running on worker_sequence_checker_.
+
+ bool request_key_frame = always_request_key_frame;
+
// Repeat sending keyframe requests if we've requested a keyframe.
- if (!keyframe_generation_requested_) {
- return;
- }
- if (received_frame_is_keyframe) {
- keyframe_generation_requested_ = false;
- } else if (last_keyframe_request_ms_ + max_wait_for_keyframe_ms_ <= now_ms) {
- if (!IsReceivingKeyFrame(now_ms)) {
- RequestKeyFrame(now_ms);
+ if (keyframe_generation_requested_) {
+ if (received_frame_is_keyframe) {
+ keyframe_generation_requested_ = false;
+ } else if (keyframe_request_is_due) {
+ if (!IsReceivingKeyFrame(now_ms)) {
+ request_key_frame = true;
+ }
+ } else {
+ // It hasn't been long enough since the last keyframe request, do nothing.
}
- } else {
- // It hasn't been long enough since the last keyframe request, do nothing.
+ }
+
+ if (request_key_frame) {
+ // HandleKeyFrameGeneration is initiated from the decode thread -
+ // RequestKeyFrame() triggers a call back to the decode thread.
+ // Perhaps there's a way to avoid that.
+ RequestKeyFrame(now_ms);
}
}
-void VideoReceiveStream2::HandleFrameBufferTimeout() {
- // Running on |decode_queue_|.
- int64_t now_ms = clock_->TimeInMilliseconds();
+void VideoReceiveStream2::HandleFrameBufferTimeout(int64_t now_ms,
+ int64_t wait_ms) {
+ // Running on |worker_sequence_checker_|.
absl::optional<int64_t> last_packet_ms =
rtp_video_stream_receiver_.LastReceivedPacketMs();
// To avoid spamming keyframe requests for a stream that is not active we
// check if we have received a packet within the last 5 seconds.
- bool stream_is_active = last_packet_ms && now_ms - *last_packet_ms < 5000;
- if (!stream_is_active) {
- worker_thread_->PostTask(ToQueuedTask(task_safety_, [this]() {
- RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
- stats_proxy_.OnStreamInactive();
- }));
- }
+ const bool stream_is_active =
+ last_packet_ms && now_ms - *last_packet_ms < 5000;
+ if (!stream_is_active)
+ stats_proxy_.OnStreamInactive();
if (stream_is_active && !IsReceivingKeyFrame(now_ms) &&
(!config_.crypto_options.sframe.require_frame_encryption ||
rtp_video_stream_receiver_.IsDecryptable())) {
- RTC_LOG(LS_WARNING) << "No decodable frame in " << GetWaitMs()
+ RTC_LOG(LS_WARNING) << "No decodable frame in " << wait_ms
<< " ms, requesting keyframe.";
RequestKeyFrame(now_ms);
}
}
bool VideoReceiveStream2::IsReceivingKeyFrame(int64_t timestamp_ms) const {
+ // Running on worker_sequence_checker_.
absl::optional<int64_t> last_keyframe_packet_ms =
rtp_video_stream_receiver_.LastReceivedKeyframePacketMs();
@@ -730,6 +764,7 @@ bool VideoReceiveStream2::IsReceivingKeyFrame(int64_t timestamp_ms) const {
}
void VideoReceiveStream2::UpdatePlayoutDelays() const {
+ // Running on worker_sequence_checker_.
const int minimum_delay_ms =
std::max({frame_minimum_playout_delay_ms_, base_minimum_playout_delay_ms_,
syncable_minimum_playout_delay_ms_});
@@ -752,36 +787,43 @@ VideoReceiveStream2::SetAndGetRecordingState(RecordingState state,
bool generate_key_frame) {
RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
rtc::Event event;
+
+ // Save old state, set the new state.
RecordingState old_state;
- decode_queue_.PostTask([this, &event, &old_state, generate_key_frame,
- state = std::move(state)] {
- RTC_DCHECK_RUN_ON(&decode_queue_);
- // Save old state.
- old_state.callback = std::move(encoded_frame_buffer_function_);
- old_state.keyframe_needed = keyframe_generation_requested_;
- old_state.last_keyframe_request_ms = last_keyframe_request_ms_;
-
- // Set new state.
- encoded_frame_buffer_function_ = std::move(state.callback);
- if (generate_key_frame) {
- RequestKeyFrame(clock_->TimeInMilliseconds());
- keyframe_generation_requested_ = true;
- } else {
- keyframe_generation_requested_ = state.keyframe_needed;
- last_keyframe_request_ms_ = state.last_keyframe_request_ms.value_or(0);
- }
- event.Set();
- });
+
+ decode_queue_.PostTask(
+ [this, &event, &old_state, callback = std::move(state.callback),
+ generate_key_frame,
+ last_keyframe_request = state.last_keyframe_request_ms.value_or(0)] {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+ old_state.callback = std::move(encoded_frame_buffer_function_);
+ encoded_frame_buffer_function_ = std::move(callback);
+
+ old_state.last_keyframe_request_ms = last_keyframe_request_ms_;
+ last_keyframe_request_ms_ = generate_key_frame
+ ? clock_->TimeInMilliseconds()
+ : last_keyframe_request;
+
+ event.Set();
+ });
+
+ old_state.keyframe_needed = keyframe_generation_requested_;
+
+ if (generate_key_frame) {
+ rtp_video_stream_receiver_.RequestKeyFrame();
+ keyframe_generation_requested_ = true;
+ } else {
+ keyframe_generation_requested_ = state.keyframe_needed;
+ }
+
event.Wait(rtc::Event::kForever);
return old_state;
}
void VideoReceiveStream2::GenerateKeyFrame() {
- decode_queue_.PostTask([this]() {
- RTC_DCHECK_RUN_ON(&decode_queue_);
- RequestKeyFrame(clock_->TimeInMilliseconds());
- keyframe_generation_requested_ = true;
- });
+ RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
+ RequestKeyFrame(clock_->TimeInMilliseconds());
+ keyframe_generation_requested_ = true;
}
} // namespace internal
diff --git a/chromium/third_party/webrtc/video/video_receive_stream2.h b/chromium/third_party/webrtc/video/video_receive_stream2.h
index f8cd65dc9db..71b336e587e 100644
--- a/chromium/third_party/webrtc/video/video_receive_stream2.h
+++ b/chromium/third_party/webrtc/video/video_receive_stream2.h
@@ -15,7 +15,6 @@
#include <vector>
#include "api/task_queue/task_queue_factory.h"
-#include "api/transport/media/media_transport_interface.h"
#include "api/units/timestamp.h"
#include "api/video/recordable_encoded_frame.h"
#include "call/rtp_packet_sink_interface.h"
@@ -27,10 +26,11 @@
#include "modules/video_coding/video_receiver2.h"
#include "rtc_base/synchronization/sequence_checker.h"
#include "rtc_base/task_queue.h"
+#include "rtc_base/task_utils/pending_task_safety_flag.h"
#include "system_wrappers/include/clock.h"
#include "video/receive_statistics_proxy2.h"
#include "video/rtp_streams_synchronizer2.h"
-#include "video/rtp_video_stream_receiver.h"
+#include "video/rtp_video_stream_receiver2.h"
#include "video/transport_adapter.h"
#include "video/video_stream_decoder2.h"
@@ -158,24 +158,28 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream,
void GenerateKeyFrame() override;
private:
- int64_t GetWaitMs() const;
+ int64_t GetMaxWaitMs() const RTC_RUN_ON(decode_queue_);
void StartNextDecode() RTC_RUN_ON(decode_queue_);
void HandleEncodedFrame(std::unique_ptr<video_coding::EncodedFrame> frame)
RTC_RUN_ON(decode_queue_);
- void HandleFrameBufferTimeout() RTC_RUN_ON(decode_queue_);
+ void HandleFrameBufferTimeout(int64_t now_ms, int64_t wait_ms)
+ RTC_RUN_ON(worker_sequence_checker_);
void UpdatePlayoutDelays() const
- RTC_EXCLUSIVE_LOCKS_REQUIRED(playout_delay_lock_);
- void RequestKeyFrame(int64_t timestamp_ms) RTC_RUN_ON(decode_queue_);
- void HandleKeyFrameGeneration(bool received_frame_is_keyframe, int64_t now_ms)
- RTC_RUN_ON(decode_queue_);
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_sequence_checker_);
+ void RequestKeyFrame(int64_t timestamp_ms)
+ RTC_RUN_ON(worker_sequence_checker_);
+ void HandleKeyFrameGeneration(bool received_frame_is_keyframe,
+ int64_t now_ms,
+ bool always_request_key_frame,
+ bool keyframe_request_is_due)
+ RTC_RUN_ON(worker_sequence_checker_);
bool IsReceivingKeyFrame(int64_t timestamp_ms) const
- RTC_RUN_ON(decode_queue_);
+ RTC_RUN_ON(worker_sequence_checker_);
void UpdateHistograms();
SequenceChecker worker_sequence_checker_;
SequenceChecker module_process_sequence_checker_;
- SequenceChecker network_sequence_checker_;
TaskQueueFactory* const task_queue_factory_;
@@ -199,7 +203,7 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream,
std::unique_ptr<VCMTiming> timing_; // Jitter buffer experiment.
VideoReceiver2 video_receiver_;
std::unique_ptr<rtc::VideoSinkInterface<VideoFrame>> incoming_video_stream_;
- RtpVideoStreamReceiver rtp_video_stream_receiver_;
+ RtpVideoStreamReceiver2 rtp_video_stream_receiver_;
std::unique_ptr<VideoStreamDecoder> video_stream_decoder_;
RtpStreamsSynchronizer rtp_stream_sync_;
@@ -216,40 +220,43 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream,
// Whenever we are in an undecodable state (stream has just started or due to
// a decoding error) we require a keyframe to restart the stream.
- bool keyframe_required_ = true;
+ bool keyframe_required_ RTC_GUARDED_BY(decode_queue_) = true;
// If we have successfully decoded any frame.
- bool frame_decoded_ = false;
+ bool frame_decoded_ RTC_GUARDED_BY(decode_queue_) = false;
- int64_t last_keyframe_request_ms_ = 0;
- int64_t last_complete_frame_time_ms_ = 0;
+ int64_t last_keyframe_request_ms_ RTC_GUARDED_BY(decode_queue_) = 0;
+ int64_t last_complete_frame_time_ms_
+ RTC_GUARDED_BY(worker_sequence_checker_) = 0;
// Keyframe request intervals are configurable through field trials.
const int max_wait_for_keyframe_ms_;
const int max_wait_for_frame_ms_;
- rtc::CriticalSection playout_delay_lock_;
-
// All of them tries to change current min_playout_delay on |timing_| but
// source of the change request is different in each case. Among them the
// biggest delay is used. -1 means use default value from the |timing_|.
//
// Minimum delay as decided by the RTP playout delay extension.
- int frame_minimum_playout_delay_ms_ RTC_GUARDED_BY(playout_delay_lock_) = -1;
+ int frame_minimum_playout_delay_ms_ RTC_GUARDED_BY(worker_sequence_checker_) =
+ -1;
// Minimum delay as decided by the setLatency function in "webrtc/api".
- int base_minimum_playout_delay_ms_ RTC_GUARDED_BY(playout_delay_lock_) = -1;
- // Minimum delay as decided by the A/V synchronization feature.
- int syncable_minimum_playout_delay_ms_ RTC_GUARDED_BY(playout_delay_lock_) =
+ int base_minimum_playout_delay_ms_ RTC_GUARDED_BY(worker_sequence_checker_) =
-1;
+ // Minimum delay as decided by the A/V synchronization feature.
+ int syncable_minimum_playout_delay_ms_
+ RTC_GUARDED_BY(worker_sequence_checker_) = -1;
// Maximum delay as decided by the RTP playout delay extension.
- int frame_maximum_playout_delay_ms_ RTC_GUARDED_BY(playout_delay_lock_) = -1;
+ int frame_maximum_playout_delay_ms_ RTC_GUARDED_BY(worker_sequence_checker_) =
+ -1;
// Function that is triggered with encoded frames, if not empty.
std::function<void(const RecordableEncodedFrame&)>
encoded_frame_buffer_function_ RTC_GUARDED_BY(decode_queue_);
// Set to true while we're requesting keyframes but not yet received one.
- bool keyframe_generation_requested_ RTC_GUARDED_BY(decode_queue_) = false;
+ bool keyframe_generation_requested_ RTC_GUARDED_BY(worker_sequence_checker_) =
+ false;
// Defined last so they are destroyed before all other members.
rtc::TaskQueue decode_queue_;
diff --git a/chromium/third_party/webrtc/video/video_receive_stream2_unittest.cc b/chromium/third_party/webrtc/video/video_receive_stream2_unittest.cc
new file mode 100644
index 00000000000..a411cec740d
--- /dev/null
+++ b/chromium/third_party/webrtc/video/video_receive_stream2_unittest.cc
@@ -0,0 +1,571 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_receive_stream2.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/task_queue/default_task_queue_factory.h"
+#include "api/test/video/function_video_decoder_factory.h"
+#include "api/video_codecs/video_decoder.h"
+#include "call/rtp_stream_receiver_controller.h"
+#include "common_video/test/utilities.h"
+#include "media/base/fake_video_renderer.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
+#include "modules/utility/include/process_thread.h"
+#include "modules/video_coding/encoded_frame.h"
+#include "rtc_base/critical_section.h"
+#include "rtc_base/event.h"
+#include "system_wrappers/include/clock.h"
+#include "test/fake_decoder.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/run_loop.h"
+#include "test/time_controller/simulated_time_controller.h"
+#include "test/video_decoder_proxy_factory.h"
+#include "video/call_stats2.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+using ::testing::ElementsAreArray;
+using ::testing::Invoke;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+
+constexpr int kDefaultTimeOutMs = 50;
+
+class MockTransport : public Transport {
+ public:
+ MOCK_METHOD(bool,
+ SendRtp,
+ (const uint8_t*, size_t length, const PacketOptions& options),
+ (override));
+ MOCK_METHOD(bool, SendRtcp, (const uint8_t*, size_t length), (override));
+};
+
+class MockVideoDecoder : public VideoDecoder {
+ public:
+ MOCK_METHOD(int32_t,
+ InitDecode,
+ (const VideoCodec*, int32_t number_of_cores),
+ (override));
+ MOCK_METHOD(int32_t,
+ Decode,
+ (const EncodedImage& input,
+ bool missing_frames,
+ int64_t render_time_ms),
+ (override));
+ MOCK_METHOD(int32_t,
+ RegisterDecodeCompleteCallback,
+ (DecodedImageCallback*),
+ (override));
+ MOCK_METHOD(int32_t, Release, (), (override));
+ const char* ImplementationName() const { return "MockVideoDecoder"; }
+};
+
+class FrameObjectFake : public video_coding::EncodedFrame {
+ public:
+ void SetPayloadType(uint8_t payload_type) { _payloadType = payload_type; }
+
+ void SetRotation(const VideoRotation& rotation) { rotation_ = rotation; }
+
+ void SetNtpTime(int64_t ntp_time_ms) { ntp_time_ms_ = ntp_time_ms; }
+
+ int64_t ReceivedTime() const override { return 0; }
+
+ int64_t RenderTime() const override { return _renderTimeMs; }
+};
+
+} // namespace
+
+class VideoReceiveStream2Test : public ::testing::Test {
+ public:
+ VideoReceiveStream2Test()
+ : process_thread_(ProcessThread::Create("TestThread")),
+ task_queue_factory_(CreateDefaultTaskQueueFactory()),
+ config_(&mock_transport_),
+ call_stats_(Clock::GetRealTimeClock(), loop_.task_queue()),
+ h264_decoder_factory_(&mock_h264_video_decoder_),
+ null_decoder_factory_(&mock_null_video_decoder_) {}
+
+ void SetUp() {
+ constexpr int kDefaultNumCpuCores = 2;
+ config_.rtp.remote_ssrc = 1111;
+ config_.rtp.local_ssrc = 2222;
+ config_.renderer = &fake_renderer_;
+ VideoReceiveStream::Decoder h264_decoder;
+ h264_decoder.payload_type = 99;
+ h264_decoder.video_format = SdpVideoFormat("H264");
+ h264_decoder.video_format.parameters.insert(
+ {"sprop-parameter-sets", "Z0IACpZTBYmI,aMljiA=="});
+ h264_decoder.decoder_factory = &h264_decoder_factory_;
+ config_.decoders.push_back(h264_decoder);
+ VideoReceiveStream::Decoder null_decoder;
+ null_decoder.payload_type = 98;
+ null_decoder.video_format = SdpVideoFormat("null");
+ null_decoder.decoder_factory = &null_decoder_factory_;
+ config_.decoders.push_back(null_decoder);
+
+ clock_ = Clock::GetRealTimeClock();
+ timing_ = new VCMTiming(clock_);
+
+ video_receive_stream_ =
+ std::make_unique<webrtc::internal::VideoReceiveStream2>(
+ task_queue_factory_.get(), loop_.task_queue(),
+ &rtp_stream_receiver_controller_, kDefaultNumCpuCores,
+ &packet_router_, config_.Copy(), process_thread_.get(),
+ &call_stats_, clock_, timing_);
+ }
+
+ protected:
+ test::RunLoop loop_;
+ std::unique_ptr<ProcessThread> process_thread_;
+ const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+ VideoReceiveStream::Config config_;
+ internal::CallStats call_stats_;
+ MockVideoDecoder mock_h264_video_decoder_;
+ MockVideoDecoder mock_null_video_decoder_;
+ test::VideoDecoderProxyFactory h264_decoder_factory_;
+ test::VideoDecoderProxyFactory null_decoder_factory_;
+ cricket::FakeVideoRenderer fake_renderer_;
+ MockTransport mock_transport_;
+ PacketRouter packet_router_;
+ RtpStreamReceiverController rtp_stream_receiver_controller_;
+ std::unique_ptr<webrtc::internal::VideoReceiveStream2> video_receive_stream_;
+ Clock* clock_;
+ VCMTiming* timing_;
+};
+
+TEST_F(VideoReceiveStream2Test, CreateFrameFromH264FmtpSpropAndIdr) {
+ constexpr uint8_t idr_nalu[] = {0x05, 0xFF, 0xFF, 0xFF};
+ RtpPacketToSend rtppacket(nullptr);
+ uint8_t* payload = rtppacket.AllocatePayload(sizeof(idr_nalu));
+ memcpy(payload, idr_nalu, sizeof(idr_nalu));
+ rtppacket.SetMarker(true);
+ rtppacket.SetSsrc(1111);
+ rtppacket.SetPayloadType(99);
+ rtppacket.SetSequenceNumber(1);
+ rtppacket.SetTimestamp(0);
+ rtc::Event init_decode_event_;
+ EXPECT_CALL(mock_h264_video_decoder_, InitDecode(_, _))
+ .WillOnce(Invoke([&init_decode_event_](const VideoCodec* config,
+ int32_t number_of_cores) {
+ init_decode_event_.Set();
+ return 0;
+ }));
+ EXPECT_CALL(mock_h264_video_decoder_, RegisterDecodeCompleteCallback(_));
+ video_receive_stream_->Start();
+ EXPECT_CALL(mock_h264_video_decoder_, Decode(_, false, _));
+ RtpPacketReceived parsed_packet;
+ ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size()));
+ rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet);
+ EXPECT_CALL(mock_h264_video_decoder_, Release());
+ // Make sure the decoder thread had a chance to run.
+ init_decode_event_.Wait(kDefaultTimeOutMs);
+}
+
+TEST_F(VideoReceiveStream2Test, PlayoutDelay) {
+ const PlayoutDelay kPlayoutDelayMs = {123, 321};
+ std::unique_ptr<FrameObjectFake> test_frame(new FrameObjectFake());
+ test_frame->id.picture_id = 0;
+ test_frame->SetPlayoutDelay(kPlayoutDelayMs);
+
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+ EXPECT_EQ(kPlayoutDelayMs.min_ms, timing_->min_playout_delay());
+ EXPECT_EQ(kPlayoutDelayMs.max_ms, timing_->max_playout_delay());
+
+ // Check that the biggest minimum delay is chosen.
+ video_receive_stream_->SetMinimumPlayoutDelay(400);
+ EXPECT_EQ(400, timing_->min_playout_delay());
+
+ // Check base minimum delay validation.
+ EXPECT_FALSE(video_receive_stream_->SetBaseMinimumPlayoutDelayMs(12345));
+ EXPECT_FALSE(video_receive_stream_->SetBaseMinimumPlayoutDelayMs(-1));
+ EXPECT_TRUE(video_receive_stream_->SetBaseMinimumPlayoutDelayMs(500));
+ EXPECT_EQ(500, timing_->min_playout_delay());
+
+ // Check that intermediate values are remembered and the biggest remembered
+ // is chosen.
+ video_receive_stream_->SetBaseMinimumPlayoutDelayMs(0);
+ EXPECT_EQ(400, timing_->min_playout_delay());
+
+ video_receive_stream_->SetMinimumPlayoutDelay(0);
+ EXPECT_EQ(123, timing_->min_playout_delay());
+}
+
+TEST_F(VideoReceiveStream2Test, PlayoutDelayPreservesDefaultMaxValue) {
+ const int default_max_playout_latency = timing_->max_playout_delay();
+ const PlayoutDelay kPlayoutDelayMs = {123, -1};
+
+ std::unique_ptr<FrameObjectFake> test_frame(new FrameObjectFake());
+ test_frame->id.picture_id = 0;
+ test_frame->SetPlayoutDelay(kPlayoutDelayMs);
+
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+
+ // Ensure that -1 preserves default maximum value from |timing_|.
+ EXPECT_EQ(kPlayoutDelayMs.min_ms, timing_->min_playout_delay());
+ EXPECT_NE(kPlayoutDelayMs.max_ms, timing_->max_playout_delay());
+ EXPECT_EQ(default_max_playout_latency, timing_->max_playout_delay());
+}
+
+TEST_F(VideoReceiveStream2Test, PlayoutDelayPreservesDefaultMinValue) {
+ const int default_min_playout_latency = timing_->min_playout_delay();
+ const PlayoutDelay kPlayoutDelayMs = {-1, 321};
+
+ std::unique_ptr<FrameObjectFake> test_frame(new FrameObjectFake());
+ test_frame->id.picture_id = 0;
+ test_frame->SetPlayoutDelay(kPlayoutDelayMs);
+
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+
+ // Ensure that -1 preserves default minimum value from |timing_|.
+ EXPECT_NE(kPlayoutDelayMs.min_ms, timing_->min_playout_delay());
+ EXPECT_EQ(kPlayoutDelayMs.max_ms, timing_->max_playout_delay());
+ EXPECT_EQ(default_min_playout_latency, timing_->min_playout_delay());
+}
+
+class VideoReceiveStream2TestWithFakeDecoder : public ::testing::Test {
+ public:
+ VideoReceiveStream2TestWithFakeDecoder()
+ : fake_decoder_factory_(
+ []() { return std::make_unique<test::FakeDecoder>(); }),
+ process_thread_(ProcessThread::Create("TestThread")),
+ task_queue_factory_(CreateDefaultTaskQueueFactory()),
+ config_(&mock_transport_),
+ call_stats_(Clock::GetRealTimeClock(), loop_.task_queue()) {}
+
+ void SetUp() {
+ config_.rtp.remote_ssrc = 1111;
+ config_.rtp.local_ssrc = 2222;
+ config_.renderer = &fake_renderer_;
+ VideoReceiveStream::Decoder fake_decoder;
+ fake_decoder.payload_type = 99;
+ fake_decoder.video_format = SdpVideoFormat("VP8");
+ fake_decoder.decoder_factory = &fake_decoder_factory_;
+ config_.decoders.push_back(fake_decoder);
+ clock_ = Clock::GetRealTimeClock();
+ ReCreateReceiveStream(VideoReceiveStream::RecordingState());
+ }
+
+ void ReCreateReceiveStream(VideoReceiveStream::RecordingState state) {
+ constexpr int kDefaultNumCpuCores = 2;
+ video_receive_stream_ = nullptr;
+ timing_ = new VCMTiming(clock_);
+ video_receive_stream_.reset(new webrtc::internal::VideoReceiveStream2(
+ task_queue_factory_.get(), loop_.task_queue(),
+ &rtp_stream_receiver_controller_, kDefaultNumCpuCores, &packet_router_,
+ config_.Copy(), process_thread_.get(), &call_stats_, clock_, timing_));
+ video_receive_stream_->SetAndGetRecordingState(std::move(state), false);
+ }
+
+ protected:
+ test::RunLoop loop_;
+ test::FunctionVideoDecoderFactory fake_decoder_factory_;
+ std::unique_ptr<ProcessThread> process_thread_;
+ const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
+ VideoReceiveStream::Config config_;
+ internal::CallStats call_stats_;
+ cricket::FakeVideoRenderer fake_renderer_;
+ MockTransport mock_transport_;
+ PacketRouter packet_router_;
+ RtpStreamReceiverController rtp_stream_receiver_controller_;
+ std::unique_ptr<webrtc::internal::VideoReceiveStream2> video_receive_stream_;
+ Clock* clock_;
+ VCMTiming* timing_;
+};
+
+TEST_F(VideoReceiveStream2TestWithFakeDecoder, PassesNtpTime) {
+ const int64_t kNtpTimestamp = 12345;
+ auto test_frame = std::make_unique<FrameObjectFake>();
+ test_frame->SetPayloadType(99);
+ test_frame->id.picture_id = 0;
+ test_frame->SetNtpTime(kNtpTimestamp);
+
+ video_receive_stream_->Start();
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+ EXPECT_TRUE(fake_renderer_.WaitForRenderedFrame(kDefaultTimeOutMs));
+ EXPECT_EQ(kNtpTimestamp, fake_renderer_.ntp_time_ms());
+}
+
+TEST_F(VideoReceiveStream2TestWithFakeDecoder, PassesRotation) {
+ const webrtc::VideoRotation kRotation = webrtc::kVideoRotation_180;
+ auto test_frame = std::make_unique<FrameObjectFake>();
+ test_frame->SetPayloadType(99);
+ test_frame->id.picture_id = 0;
+ test_frame->SetRotation(kRotation);
+
+ video_receive_stream_->Start();
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+ EXPECT_TRUE(fake_renderer_.WaitForRenderedFrame(kDefaultTimeOutMs));
+
+ EXPECT_EQ(kRotation, fake_renderer_.rotation());
+}
+
+TEST_F(VideoReceiveStream2TestWithFakeDecoder, PassesPacketInfos) {
+ auto test_frame = std::make_unique<FrameObjectFake>();
+ test_frame->SetPayloadType(99);
+ test_frame->id.picture_id = 0;
+ RtpPacketInfos packet_infos = CreatePacketInfos(3);
+ test_frame->SetPacketInfos(packet_infos);
+
+ video_receive_stream_->Start();
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+ EXPECT_TRUE(fake_renderer_.WaitForRenderedFrame(kDefaultTimeOutMs));
+
+ EXPECT_THAT(fake_renderer_.packet_infos(), ElementsAreArray(packet_infos));
+}
+
+TEST_F(VideoReceiveStream2TestWithFakeDecoder, RenderedFrameUpdatesGetSources) {
+ constexpr uint32_t kSsrc = 1111;
+ constexpr uint32_t kCsrc = 9001;
+ constexpr uint32_t kRtpTimestamp = 12345;
+
+ // Prepare one video frame with per-packet information.
+ auto test_frame = std::make_unique<FrameObjectFake>();
+ test_frame->SetPayloadType(99);
+ test_frame->id.picture_id = 0;
+ RtpPacketInfos packet_infos;
+ {
+ RtpPacketInfos::vector_type infos;
+
+ RtpPacketInfo info;
+ info.set_ssrc(kSsrc);
+ info.set_csrcs({kCsrc});
+ info.set_rtp_timestamp(kRtpTimestamp);
+
+ info.set_receive_time_ms(clock_->TimeInMilliseconds() - 5000);
+ infos.push_back(info);
+
+ info.set_receive_time_ms(clock_->TimeInMilliseconds() - 3000);
+ infos.push_back(info);
+
+ info.set_receive_time_ms(clock_->TimeInMilliseconds() - 2000);
+ infos.push_back(info);
+
+ info.set_receive_time_ms(clock_->TimeInMilliseconds() - 4000);
+ infos.push_back(info);
+
+ packet_infos = RtpPacketInfos(std::move(infos));
+ }
+ test_frame->SetPacketInfos(packet_infos);
+
+ // Start receive stream.
+ video_receive_stream_->Start();
+ EXPECT_THAT(video_receive_stream_->GetSources(), IsEmpty());
+
+ // Render one video frame.
+ int64_t timestamp_ms_min = clock_->TimeInMilliseconds();
+ video_receive_stream_->OnCompleteFrame(std::move(test_frame));
+ EXPECT_TRUE(fake_renderer_.WaitForRenderedFrame(kDefaultTimeOutMs));
+ int64_t timestamp_ms_max = clock_->TimeInMilliseconds();
+
+ // Verify that the per-packet information is passed to the renderer.
+ EXPECT_THAT(fake_renderer_.packet_infos(), ElementsAreArray(packet_infos));
+
+ // Verify that the per-packet information also updates |GetSources()|.
+ std::vector<RtpSource> sources = video_receive_stream_->GetSources();
+ ASSERT_THAT(sources, SizeIs(2));
+ {
+ auto it = std::find_if(sources.begin(), sources.end(),
+ [](const RtpSource& source) {
+ return source.source_type() == RtpSourceType::SSRC;
+ });
+ ASSERT_NE(it, sources.end());
+
+ EXPECT_EQ(it->source_id(), kSsrc);
+ EXPECT_EQ(it->source_type(), RtpSourceType::SSRC);
+ EXPECT_EQ(it->rtp_timestamp(), kRtpTimestamp);
+ EXPECT_GE(it->timestamp_ms(), timestamp_ms_min);
+ EXPECT_LE(it->timestamp_ms(), timestamp_ms_max);
+ }
+ {
+ auto it = std::find_if(sources.begin(), sources.end(),
+ [](const RtpSource& source) {
+ return source.source_type() == RtpSourceType::CSRC;
+ });
+ ASSERT_NE(it, sources.end());
+
+ EXPECT_EQ(it->source_id(), kCsrc);
+ EXPECT_EQ(it->source_type(), RtpSourceType::CSRC);
+ EXPECT_EQ(it->rtp_timestamp(), kRtpTimestamp);
+ EXPECT_GE(it->timestamp_ms(), timestamp_ms_min);
+ EXPECT_LE(it->timestamp_ms(), timestamp_ms_max);
+ }
+}
+
+std::unique_ptr<FrameObjectFake> MakeFrame(VideoFrameType frame_type,
+ int picture_id) {
+ auto frame = std::make_unique<FrameObjectFake>();
+ frame->SetPayloadType(99);
+ frame->id.picture_id = picture_id;
+ frame->SetFrameType(frame_type);
+ return frame;
+}
+
+TEST_F(VideoReceiveStream2TestWithFakeDecoder,
+ PassesFrameWhenEncodedFramesCallbackSet) {
+ testing::MockFunction<void(const RecordableEncodedFrame&)> callback;
+ video_receive_stream_->Start();
+ // Expect a keyframe request to be generated
+ EXPECT_CALL(mock_transport_, SendRtcp);
+ EXPECT_CALL(callback, Call);
+ video_receive_stream_->SetAndGetRecordingState(
+ VideoReceiveStream::RecordingState(callback.AsStdFunction()), true);
+ video_receive_stream_->OnCompleteFrame(
+ MakeFrame(VideoFrameType::kVideoFrameKey, 0));
+ EXPECT_TRUE(fake_renderer_.WaitForRenderedFrame(kDefaultTimeOutMs));
+ video_receive_stream_->Stop();
+}
+
+TEST_F(VideoReceiveStream2TestWithFakeDecoder,
+ MovesEncodedFrameDispatchStateWhenReCreating) {
+ testing::MockFunction<void(const RecordableEncodedFrame&)> callback;
+ video_receive_stream_->Start();
+ // Expect a key frame request over RTCP.
+ EXPECT_CALL(mock_transport_, SendRtcp).Times(1);
+ video_receive_stream_->SetAndGetRecordingState(
+ VideoReceiveStream::RecordingState(callback.AsStdFunction()), true);
+ video_receive_stream_->Stop();
+ VideoReceiveStream::RecordingState old_state =
+ video_receive_stream_->SetAndGetRecordingState(
+ VideoReceiveStream::RecordingState(), false);
+ ReCreateReceiveStream(std::move(old_state));
+ video_receive_stream_->Stop();
+}
+
+class VideoReceiveStream2TestWithSimulatedClock : public ::testing::Test {
+ public:
+ class FakeDecoder2 : public test::FakeDecoder {
+ public:
+ explicit FakeDecoder2(std::function<void()> decode_callback)
+ : callback_(decode_callback) {}
+
+ int32_t Decode(const EncodedImage& input,
+ bool missing_frames,
+ int64_t render_time_ms) override {
+ int32_t result =
+ FakeDecoder::Decode(input, missing_frames, render_time_ms);
+ callback_();
+ return result;
+ }
+
+ private:
+ std::function<void()> callback_;
+ };
+
+ static VideoReceiveStream::Config GetConfig(
+ Transport* transport,
+ VideoDecoderFactory* decoder_factory,
+ rtc::VideoSinkInterface<webrtc::VideoFrame>* renderer) {
+ VideoReceiveStream::Config config(transport);
+ config.rtp.remote_ssrc = 1111;
+ config.rtp.local_ssrc = 2222;
+ config.renderer = renderer;
+ VideoReceiveStream::Decoder fake_decoder;
+ fake_decoder.payload_type = 99;
+ fake_decoder.video_format = SdpVideoFormat("VP8");
+ fake_decoder.decoder_factory = decoder_factory;
+ config.decoders.push_back(fake_decoder);
+ return config;
+ }
+
+ VideoReceiveStream2TestWithSimulatedClock()
+ : time_controller_(Timestamp::Millis(4711)),
+ fake_decoder_factory_([this] {
+ return std::make_unique<FakeDecoder2>([this] { OnFrameDecoded(); });
+ }),
+ process_thread_(time_controller_.CreateProcessThread("ProcessThread")),
+ config_(GetConfig(&mock_transport_,
+ &fake_decoder_factory_,
+ &fake_renderer_)),
+ call_stats_(time_controller_.GetClock(), loop_.task_queue()),
+ video_receive_stream_(time_controller_.GetTaskQueueFactory(),
+ loop_.task_queue(),
+ &rtp_stream_receiver_controller_,
+ /*num_cores=*/2,
+ &packet_router_,
+ config_.Copy(),
+ process_thread_.get(),
+ &call_stats_,
+ time_controller_.GetClock(),
+ new VCMTiming(time_controller_.GetClock())) {
+ video_receive_stream_.Start();
+ }
+
+ void OnFrameDecoded() { event_->Set(); }
+
+ void PassEncodedFrameAndWait(
+ std::unique_ptr<video_coding::EncodedFrame> frame) {
+ event_ = std::make_unique<rtc::Event>();
+ // This call will eventually end up in the Decoded method where the
+ // event is set.
+ video_receive_stream_.OnCompleteFrame(std::move(frame));
+ event_->Wait(rtc::Event::kForever);
+ }
+
+ protected:
+ GlobalSimulatedTimeController time_controller_;
+ test::RunLoop loop_;
+ test::FunctionVideoDecoderFactory fake_decoder_factory_;
+ std::unique_ptr<ProcessThread> process_thread_;
+ MockTransport mock_transport_;
+ cricket::FakeVideoRenderer fake_renderer_;
+ VideoReceiveStream::Config config_;
+ internal::CallStats call_stats_;
+ PacketRouter packet_router_;
+ RtpStreamReceiverController rtp_stream_receiver_controller_;
+ webrtc::internal::VideoReceiveStream2 video_receive_stream_;
+ std::unique_ptr<rtc::Event> event_;
+};
+
+TEST_F(VideoReceiveStream2TestWithSimulatedClock,
+ RequestsKeyFramesUntilKeyFrameReceived) {
+ auto tick = TimeDelta::Millis(
+ internal::VideoReceiveStream2::kMaxWaitForKeyFrameMs / 2);
+ EXPECT_CALL(mock_transport_, SendRtcp).Times(1).WillOnce(Invoke([this]() {
+ loop_.Quit();
+ return 0;
+ }));
+ video_receive_stream_.GenerateKeyFrame();
+ PassEncodedFrameAndWait(MakeFrame(VideoFrameType::kVideoFrameDelta, 0));
+ time_controller_.AdvanceTime(tick);
+ PassEncodedFrameAndWait(MakeFrame(VideoFrameType::kVideoFrameDelta, 1));
+ loop_.Run();
+ testing::Mock::VerifyAndClearExpectations(&mock_transport_);
+
+ // T+200ms: still no key frame received, expect key frame request sent again.
+ EXPECT_CALL(mock_transport_, SendRtcp).Times(1).WillOnce(Invoke([this]() {
+ loop_.Quit();
+ return 0;
+ }));
+ time_controller_.AdvanceTime(tick);
+ PassEncodedFrameAndWait(MakeFrame(VideoFrameType::kVideoFrameDelta, 2));
+ loop_.Run();
+ testing::Mock::VerifyAndClearExpectations(&mock_transport_);
+
+ // T+200ms: now send a key frame - we should not observe new key frame
+ // requests after this.
+ EXPECT_CALL(mock_transport_, SendRtcp).Times(0);
+ PassEncodedFrameAndWait(MakeFrame(VideoFrameType::kVideoFrameKey, 3));
+ time_controller_.AdvanceTime(2 * tick);
+ PassEncodedFrameAndWait(MakeFrame(VideoFrameType::kVideoFrameDelta, 4));
+ loop_.PostTask([this]() { loop_.Quit(); });
+ loop_.Run();
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/video_receive_stream_unittest.cc b/chromium/third_party/webrtc/video/video_receive_stream_unittest.cc
index 54896e89d8a..07032fe4684 100644
--- a/chromium/third_party/webrtc/video/video_receive_stream_unittest.cc
+++ b/chromium/third_party/webrtc/video/video_receive_stream_unittest.cc
@@ -49,24 +49,30 @@ constexpr int kDefaultTimeOutMs = 50;
class MockTransport : public Transport {
public:
- MOCK_METHOD3(SendRtp,
- bool(const uint8_t* packet,
- size_t length,
- const PacketOptions& options));
- MOCK_METHOD2(SendRtcp, bool(const uint8_t* packet, size_t length));
+ MOCK_METHOD(bool,
+ SendRtp,
+ (const uint8_t*, size_t length, const PacketOptions& options),
+ (override));
+ MOCK_METHOD(bool, SendRtcp, (const uint8_t*, size_t length), (override));
};
class MockVideoDecoder : public VideoDecoder {
public:
- MOCK_METHOD2(InitDecode,
- int32_t(const VideoCodec* config, int32_t number_of_cores));
- MOCK_METHOD3(Decode,
- int32_t(const EncodedImage& input,
- bool missing_frames,
- int64_t render_time_ms));
- MOCK_METHOD1(RegisterDecodeCompleteCallback,
- int32_t(DecodedImageCallback* callback));
- MOCK_METHOD0(Release, int32_t(void));
+ MOCK_METHOD(int32_t,
+ InitDecode,
+ (const VideoCodec*, int32_t number_of_cores),
+ (override));
+ MOCK_METHOD(int32_t,
+ Decode,
+ (const EncodedImage& input,
+ bool missing_frames,
+ int64_t render_time_ms),
+ (override));
+ MOCK_METHOD(int32_t,
+ RegisterDecodeCompleteCallback,
+ (DecodedImageCallback*),
+ (override));
+ MOCK_METHOD(int32_t, Release, (), (override));
const char* ImplementationName() const { return "MockVideoDecoder"; }
};
diff --git a/chromium/third_party/webrtc/video/video_send_stream.cc b/chromium/third_party/webrtc/video/video_send_stream.cc
index bc9a0cd5f31..30ed86dbd18 100644
--- a/chromium/third_party/webrtc/video/video_send_stream.cc
+++ b/chromium/third_party/webrtc/video/video_send_stream.cc
@@ -179,6 +179,18 @@ void VideoSendStream::Stop() {
worker_queue_->PostTask([send_stream] { send_stream->Stop(); });
}
+void VideoSendStream::AddAdaptationResource(
+ rtc::scoped_refptr<Resource> resource) {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ video_stream_encoder_->AddAdaptationResource(resource);
+}
+
+std::vector<rtc::scoped_refptr<Resource>>
+VideoSendStream::GetAdaptationResources() {
+ RTC_DCHECK_RUN_ON(&thread_checker_);
+ return video_stream_encoder_->GetAdaptationResources();
+}
+
void VideoSendStream::SetSource(
rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
const DegradationPreference& degradation_preference) {
diff --git a/chromium/third_party/webrtc/video/video_send_stream.h b/chromium/third_party/webrtc/video/video_send_stream.h
index addaee49c25..78d8926e96e 100644
--- a/chromium/third_party/webrtc/video/video_send_stream.h
+++ b/chromium/third_party/webrtc/video/video_send_stream.h
@@ -79,6 +79,9 @@ class VideoSendStream : public webrtc::VideoSendStream {
void Start() override;
void Stop() override;
+ void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) override;
+ std::vector<rtc::scoped_refptr<Resource>> GetAdaptationResources() override;
+
void SetSource(rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
const DegradationPreference& degradation_preference) override;
diff --git a/chromium/third_party/webrtc/video/video_send_stream_impl_unittest.cc b/chromium/third_party/webrtc/video/video_send_stream_impl_unittest.cc
index a0f1201cbd8..bb702ba270d 100644
--- a/chromium/third_party/webrtc/video/video_send_stream_impl_unittest.cc
+++ b/chromium/third_party/webrtc/video/video_send_stream_impl_unittest.cc
@@ -61,33 +61,45 @@ std::string GetAlrProbingExperimentString() {
}
class MockRtpVideoSender : public RtpVideoSenderInterface {
public:
- MOCK_METHOD1(RegisterProcessThread, void(ProcessThread*));
- MOCK_METHOD0(DeRegisterProcessThread, void());
- MOCK_METHOD1(SetActive, void(bool));
- MOCK_METHOD1(SetActiveModules, void(const std::vector<bool>));
- MOCK_METHOD0(IsActive, bool());
- MOCK_METHOD1(OnNetworkAvailability, void(bool));
- MOCK_CONST_METHOD0(GetRtpStates, std::map<uint32_t, RtpState>());
- MOCK_CONST_METHOD0(GetRtpPayloadStates,
- std::map<uint32_t, RtpPayloadState>());
- MOCK_METHOD2(DeliverRtcp, void(const uint8_t*, size_t));
- MOCK_METHOD1(OnBitrateAllocationUpdated, void(const VideoBitrateAllocation&));
- MOCK_METHOD3(OnEncodedImage,
- EncodedImageCallback::Result(const EncodedImage&,
- const CodecSpecificInfo*,
- const RTPFragmentationHeader*));
- MOCK_METHOD1(OnTransportOverheadChanged, void(size_t));
- MOCK_METHOD1(OnOverheadChanged, void(size_t));
- MOCK_METHOD2(OnBitrateUpdated, void(BitrateAllocationUpdate, int));
- MOCK_CONST_METHOD0(GetPayloadBitrateBps, uint32_t());
- MOCK_CONST_METHOD0(GetProtectionBitrateBps, uint32_t());
- MOCK_METHOD3(SetEncodingData, void(size_t, size_t, size_t));
- MOCK_CONST_METHOD2(GetSentRtpPacketInfos,
- std::vector<RtpSequenceNumberMap::Info>(
- uint32_t ssrc,
- rtc::ArrayView<const uint16_t> sequence_numbers));
-
- MOCK_METHOD1(SetFecAllowed, void(bool fec_allowed));
+ MOCK_METHOD(void, RegisterProcessThread, (ProcessThread*), (override));
+ MOCK_METHOD(void, DeRegisterProcessThread, (), (override));
+ MOCK_METHOD(void, SetActive, (bool), (override));
+ MOCK_METHOD(void, SetActiveModules, (const std::vector<bool>), (override));
+ MOCK_METHOD(bool, IsActive, (), (override));
+ MOCK_METHOD(void, OnNetworkAvailability, (bool), (override));
+ MOCK_METHOD((std::map<uint32_t, RtpState>),
+ GetRtpStates,
+ (),
+ (const, override));
+ MOCK_METHOD((std::map<uint32_t, RtpPayloadState>),
+ GetRtpPayloadStates,
+ (),
+ (const, override));
+ MOCK_METHOD(void, DeliverRtcp, (const uint8_t*, size_t), (override));
+ MOCK_METHOD(void,
+ OnBitrateAllocationUpdated,
+ (const VideoBitrateAllocation&),
+ (override));
+ MOCK_METHOD(EncodedImageCallback::Result,
+ OnEncodedImage,
+ (const EncodedImage&,
+ const CodecSpecificInfo*,
+ const RTPFragmentationHeader*),
+ (override));
+ MOCK_METHOD(void, OnTransportOverheadChanged, (size_t), (override));
+ MOCK_METHOD(void,
+ OnBitrateUpdated,
+ (BitrateAllocationUpdate, int),
+ (override));
+ MOCK_METHOD(uint32_t, GetPayloadBitrateBps, (), (const, override));
+ MOCK_METHOD(uint32_t, GetProtectionBitrateBps, (), (const, override));
+ MOCK_METHOD(void, SetEncodingData, (size_t, size_t, size_t), (override));
+ MOCK_METHOD(std::vector<RtpSequenceNumberMap::Info>,
+ GetSentRtpPacketInfos,
+ (uint32_t ssrc, rtc::ArrayView<const uint16_t> sequence_numbers),
+ (const, override));
+
+ MOCK_METHOD(void, SetFecAllowed, (bool fec_allowed), (override));
};
BitrateAllocationUpdate CreateAllocation(int bitrate_bps) {
diff --git a/chromium/third_party/webrtc/video/video_send_stream_tests.cc b/chromium/third_party/webrtc/video/video_send_stream_tests.cc
index e38653831b8..09d7abc062e 100644
--- a/chromium/third_party/webrtc/video/video_send_stream_tests.cc
+++ b/chromium/third_party/webrtc/video/video_send_stream_tests.cc
@@ -25,10 +25,10 @@
#include "call/simulated_network.h"
#include "call/video_send_stream.h"
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
-#include "modules/rtp_rtcp/include/rtp_rtcp.h"
#include "modules/rtp_rtcp/source/rtcp_sender.h"
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "modules/video_coding/codecs/vp9/include/vp9.h"
@@ -948,7 +948,7 @@ void VideoSendStreamTest::TestNackRetransmission(
non_padding_sequence_numbers_.end() - kNackedPacketsAtOnceCount,
non_padding_sequence_numbers_.end());
- RtpRtcp::Configuration config;
+ RtpRtcpInterface::Configuration config;
config.clock = Clock::GetRealTimeClock();
config.outgoing_transport = transport_adapter_.get();
config.rtcp_report_interval_ms = kRtcpIntervalMs;
@@ -1164,7 +1164,7 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
kVideoSendSsrcs[0], rtp_packet.SequenceNumber(),
packets_lost_, // Cumulative lost.
loss_ratio); // Loss percent.
- RtpRtcp::Configuration config;
+ RtpRtcpInterface::Configuration config;
config.clock = Clock::GetRealTimeClock();
config.receive_statistics = &lossy_receive_stats;
config.outgoing_transport = transport_adapter_.get();
@@ -1416,7 +1416,7 @@ TEST_F(VideoSendStreamTest, SuspendBelowMinBitrate) {
RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_) {
FakeReceiveStatistics receive_stats(kVideoSendSsrcs[0],
last_sequence_number_, rtp_count_, 0);
- RtpRtcp::Configuration config;
+ RtpRtcpInterface::Configuration config;
config.clock = clock_;
config.receive_statistics = &receive_stats;
config.outgoing_transport = transport_adapter_.get();
@@ -1627,12 +1627,18 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) {
static const int kRembRespectedBitrateBps = 100000;
class BitrateObserver : public test::SendTest {
public:
- BitrateObserver()
+ explicit BitrateObserver(TaskQueueBase* task_queue)
: SendTest(kDefaultTimeoutMs),
+ task_queue_(task_queue),
retranmission_rate_limiter_(Clock::GetRealTimeClock(), 1000),
stream_(nullptr),
bitrate_capped_(false) {}
+ ~BitrateObserver() override {
+ // Make sure we free |rtp_rtcp_| in the same context as we constructed it.
+ SendTask(RTC_FROM_HERE, task_queue_, [this]() { rtp_rtcp_ = nullptr; });
+ }
+
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
if (RtpHeaderParser::IsRtcp(packet, length))
@@ -1667,11 +1673,11 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) {
VideoSendStream* send_stream,
const std::vector<VideoReceiveStream*>& receive_streams) override {
stream_ = send_stream;
- RtpRtcp::Configuration config;
+ RtpRtcpInterface::Configuration config;
config.clock = Clock::GetRealTimeClock();
config.outgoing_transport = feedback_transport_.get();
config.retransmission_rate_limiter = &retranmission_rate_limiter_;
- rtp_rtcp_ = RtpRtcp::Create(config);
+ rtp_rtcp_ = ModuleRtpRtcpImpl2::Create(config);
rtp_rtcp_->SetRTCPStatus(RtcpMode::kReducedSize);
}
@@ -1690,12 +1696,13 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) {
<< "Timeout while waiting for low bitrate stats after REMB.";
}
- std::unique_ptr<RtpRtcp> rtp_rtcp_;
+ TaskQueueBase* const task_queue_;
+ std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp_;
std::unique_ptr<internal::TransportAdapter> feedback_transport_;
RateLimiter retranmission_rate_limiter_;
VideoSendStream* stream_;
bool bitrate_capped_;
- } test;
+ } test(task_queue());
RunBaseTest(&test);
}
@@ -2476,29 +2483,34 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {
released_(false),
encoder_factory_(this) {}
- bool IsReleased() {
+ bool IsReleased() RTC_LOCKS_EXCLUDED(crit_) {
rtc::CritScope lock(&crit_);
return released_;
}
- bool IsReadyForEncode() {
+ bool IsReadyForEncode() RTC_LOCKS_EXCLUDED(crit_) {
rtc::CritScope lock(&crit_);
- return initialized_ && callback_registered_;
+ return IsReadyForEncodeLocked();
}
- size_t num_releases() {
+ size_t num_releases() RTC_LOCKS_EXCLUDED(crit_) {
rtc::CritScope lock(&crit_);
return num_releases_;
}
private:
+ bool IsReadyForEncodeLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_) {
+ return initialized_ && callback_registered_;
+ }
+
void SetFecControllerOverride(
FecControllerOverride* fec_controller_override) override {
// Ignored.
}
int32_t InitEncode(const VideoCodec* codecSettings,
- const Settings& settings) override {
+ const Settings& settings) override
+ RTC_LOCKS_EXCLUDED(crit_) {
rtc::CritScope lock(&crit_);
EXPECT_FALSE(initialized_);
initialized_ = true;
@@ -2515,16 +2527,16 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {
}
int32_t RegisterEncodeCompleteCallback(
- EncodedImageCallback* callback) override {
+ EncodedImageCallback* callback) override RTC_LOCKS_EXCLUDED(crit_) {
rtc::CritScope lock(&crit_);
EXPECT_TRUE(initialized_);
callback_registered_ = true;
return 0;
}
- int32_t Release() override {
+ int32_t Release() override RTC_LOCKS_EXCLUDED(crit_) {
rtc::CritScope lock(&crit_);
- EXPECT_TRUE(IsReadyForEncode());
+ EXPECT_TRUE(IsReadyForEncodeLocked());
EXPECT_FALSE(released_);
initialized_ = false;
callback_registered_ = false;
diff --git a/chromium/third_party/webrtc/video/video_source_sink_controller.cc b/chromium/third_party/webrtc/video/video_source_sink_controller.cc
index a649adc68c1..7c24eadef58 100644
--- a/chromium/third_party/webrtc/video/video_source_sink_controller.cc
+++ b/chromium/third_party/webrtc/video/video_source_sink_controller.cc
@@ -14,10 +14,28 @@
#include <limits>
#include <utility>
+#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/strings/string_builder.h"
namespace webrtc {
+namespace {
+
+std::string WantsToString(const rtc::VideoSinkWants& wants) {
+ rtc::StringBuilder ss;
+
+ ss << "max_fps=" << wants.max_framerate_fps
+ << " max_pixel_count=" << wants.max_pixel_count << " target_pixel_count="
+ << (wants.target_pixel_count.has_value()
+ ? std::to_string(wants.target_pixel_count.value())
+ : "null");
+
+ return ss.Release();
+}
+
+} // namespace
+
VideoSourceSinkController::VideoSourceSinkController(
rtc::VideoSinkInterface<VideoFrame>* sink,
rtc::VideoSourceInterface<VideoFrame>* source)
@@ -46,7 +64,9 @@ void VideoSourceSinkController::PushSourceSinkSettings() {
rtc::CritScope lock(&crit_);
if (!source_)
return;
- source_->AddOrUpdateSink(sink_, CurrentSettingsToSinkWants());
+ rtc::VideoSinkWants wants = CurrentSettingsToSinkWants();
+ RTC_LOG(INFO) << "Pushing SourceSink restrictions: " << WantsToString(wants);
+ source_->AddOrUpdateSink(sink_, wants);
}
VideoSourceRestrictions VideoSourceSinkController::restrictions() const {
diff --git a/chromium/third_party/webrtc/video/video_source_sink_controller.h b/chromium/third_party/webrtc/video/video_source_sink_controller.h
index 68fef3f0719..665493aa3d1 100644
--- a/chromium/third_party/webrtc/video/video_source_sink_controller.h
+++ b/chromium/third_party/webrtc/video/video_source_sink_controller.h
@@ -11,6 +11,8 @@
#ifndef VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_
#define VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_
+#include <string>
+
#include "absl/types/optional.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
diff --git a/chromium/third_party/webrtc/video/video_source_sink_controller_unittest.cc b/chromium/third_party/webrtc/video/video_source_sink_controller_unittest.cc
index c4e2ea11d2f..66881cd0238 100644
--- a/chromium/third_party/webrtc/video/video_source_sink_controller_unittest.cc
+++ b/chromium/third_party/webrtc/video/video_source_sink_controller_unittest.cc
@@ -30,8 +30,8 @@ class MockVideoSinkWithVideoFrame : public rtc::VideoSinkInterface<VideoFrame> {
public:
~MockVideoSinkWithVideoFrame() override {}
- MOCK_METHOD1(OnFrame, void(const VideoFrame& frame));
- MOCK_METHOD0(OnDiscardedFrame, void());
+ MOCK_METHOD(void, OnFrame, (const VideoFrame& frame), (override));
+ MOCK_METHOD(void, OnDiscardedFrame, (), (override));
};
class MockVideoSourceWithVideoFrame
@@ -39,10 +39,15 @@ class MockVideoSourceWithVideoFrame
public:
~MockVideoSourceWithVideoFrame() override {}
- MOCK_METHOD2(AddOrUpdateSink,
- void(rtc::VideoSinkInterface<VideoFrame>*,
- const rtc::VideoSinkWants&));
- MOCK_METHOD1(RemoveSink, void(rtc::VideoSinkInterface<VideoFrame>*));
+ MOCK_METHOD(void,
+ AddOrUpdateSink,
+ (rtc::VideoSinkInterface<VideoFrame>*,
+ const rtc::VideoSinkWants&),
+ (override));
+ MOCK_METHOD(void,
+ RemoveSink,
+ (rtc::VideoSinkInterface<VideoFrame>*),
+ (override));
};
} // namespace
diff --git a/chromium/third_party/webrtc/video/video_stream_decoder_impl_unittest.cc b/chromium/third_party/webrtc/video/video_stream_decoder_impl_unittest.cc
index 44e914001d4..a45a12ccae1 100644
--- a/chromium/third_party/webrtc/video/video_stream_decoder_impl_unittest.cc
+++ b/chromium/third_party/webrtc/video/video_stream_decoder_impl_unittest.cc
@@ -27,21 +27,25 @@ using ::testing::Return;
class MockVideoStreamDecoderCallbacks
: public VideoStreamDecoderInterface::Callbacks {
public:
- MOCK_METHOD0(OnNonDecodableState, void());
- MOCK_METHOD1(OnContinuousUntil,
- void(const video_coding::VideoLayerFrameId& key));
- MOCK_METHOD1(OnEncodedFrame, void(const video_coding::EncodedFrame& frame));
- MOCK_METHOD3(OnDecodedFrame,
- void(VideoFrame decodedImage,
- absl::optional<int> decode_time_ms,
- absl::optional<int> qp));
+ MOCK_METHOD(void, OnNonDecodableState, (), (override));
+ MOCK_METHOD(void,
+ OnContinuousUntil,
+ (const video_coding::VideoLayerFrameId& key),
+ (override));
+ MOCK_METHOD(void,
+ OnDecodedFrame,
+ (VideoFrame decodedImage,
+ absl::optional<int> decode_time_ms,
+ absl::optional<int> qp),
+ (override));
};
class StubVideoDecoder : public VideoDecoder {
public:
- MOCK_METHOD2(InitDecode,
- int32_t(const VideoCodec* codec_settings,
- int32_t number_of_cores));
+ MOCK_METHOD(int32_t,
+ InitDecode,
+ (const VideoCodec*, int32_t number_of_cores),
+ (override));
int32_t Decode(const EncodedImage& input_image,
bool missing_frames,
@@ -57,10 +61,12 @@ class StubVideoDecoder : public VideoDecoder {
return ret_code;
}
- MOCK_METHOD3(DecodeCall,
- int32_t(const EncodedImage& input_image,
- bool missing_frames,
- int64_t render_time_ms));
+ MOCK_METHOD(int32_t,
+ DecodeCall,
+ (const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms),
+ ());
int32_t Release() override { return 0; }
diff --git a/chromium/third_party/webrtc/video/video_stream_encoder.cc b/chromium/third_party/webrtc/video/video_stream_encoder.cc
index 92ab5fc5c33..0ed73a3e636 100644
--- a/chromium/third_party/webrtc/video/video_stream_encoder.cc
+++ b/chromium/third_party/webrtc/video/video_stream_encoder.cc
@@ -251,7 +251,6 @@ VideoStreamEncoder::VideoStreamEncoder(
next_frame_types_(1, VideoFrameType::kVideoFrameDelta),
frame_encode_metadata_writer_(this),
experiment_groups_(GetExperimentGroups()),
- next_frame_id_(0),
encoder_switch_experiment_(ParseEncoderSwitchFieldTrial()),
automatic_animation_detection_experiment_(
ParseAutomatincAnimationDetectionFieldTrial()),
@@ -261,6 +260,8 @@ VideoStreamEncoder::VideoStreamEncoder(
std::make_unique<ResourceAdaptationProcessor>(
&input_state_provider_,
encoder_stats_observer)),
+ adaptation_constraints_(),
+ adaptation_listeners_(),
stream_resource_manager_(&input_state_provider_,
encoder_stats_observer,
clock_,
@@ -283,21 +284,29 @@ VideoStreamEncoder::VideoStreamEncoder(
rtc::Event initialize_processor_event;
resource_adaptation_queue_.PostTask([this, &initialize_processor_event] {
RTC_DCHECK_RUN_ON(&resource_adaptation_queue_);
- resource_adaptation_processor_->InitializeOnResourceAdaptationQueue();
+ resource_adaptation_processor_->SetResourceAdaptationQueue(
+ resource_adaptation_queue_.Get());
stream_resource_manager_.SetAdaptationProcessor(
resource_adaptation_processor_.get());
- resource_adaptation_processor_->AddAdaptationListener(
+ resource_adaptation_processor_->AddRestrictionsListener(
&stream_resource_manager_);
- resource_adaptation_processor_->AddAdaptationListener(this);
+ resource_adaptation_processor_->AddRestrictionsListener(this);
+
// Add the stream resource manager's resources to the processor.
- for (Resource* resource : stream_resource_manager_.MappedResources())
+ adaptation_constraints_ = stream_resource_manager_.AdaptationConstraints();
+ adaptation_listeners_ = stream_resource_manager_.AdaptationListeners();
+ for (auto& resource : stream_resource_manager_.MappedResources()) {
resource_adaptation_processor_->AddResource(resource);
+ }
+ for (auto* constraint : adaptation_constraints_) {
+ resource_adaptation_processor_->AddAdaptationConstraint(constraint);
+ }
+ for (auto* listener : adaptation_listeners_) {
+ resource_adaptation_processor_->AddAdaptationListener(listener);
+ }
initialize_processor_event.Set();
});
initialize_processor_event.Wait(rtc::Event::kForever);
-
- for (auto& state : encoder_buffer_state_)
- state.fill(std::numeric_limits<int64_t>::max());
}
VideoStreamEncoder::~VideoStreamEncoder() {
@@ -315,12 +324,17 @@ void VideoStreamEncoder::Stop() {
&shutdown_adaptation_processor_event] {
RTC_DCHECK_RUN_ON(&resource_adaptation_queue_);
if (resource_adaptation_processor_) {
- resource_adaptation_processor_->StopResourceAdaptation();
- for (Resource* resource : stream_resource_manager_.MappedResources()) {
+ for (auto& resource : stream_resource_manager_.MappedResources()) {
resource_adaptation_processor_->RemoveResource(resource);
}
- resource_adaptation_processor_->RemoveAdaptationListener(this);
- resource_adaptation_processor_->RemoveAdaptationListener(
+ for (auto* constraint : adaptation_constraints_) {
+ resource_adaptation_processor_->RemoveAdaptationConstraint(constraint);
+ }
+ for (auto* listener : adaptation_listeners_) {
+ resource_adaptation_processor_->RemoveAdaptationListener(listener);
+ }
+ resource_adaptation_processor_->RemoveRestrictionsListener(this);
+ resource_adaptation_processor_->RemoveRestrictionsListener(
&stream_resource_manager_);
stream_resource_manager_.SetAdaptationProcessor(nullptr);
resource_adaptation_processor_.reset();
@@ -361,6 +375,53 @@ void VideoStreamEncoder::SetFecControllerOverride(
});
}
+void VideoStreamEncoder::AddAdaptationResource(
+ rtc::scoped_refptr<Resource> resource) {
+ // Map any externally added resources as kCpu for the sake of stats reporting.
+ // TODO(hbos): Make the manager map any unknown resources to kCpu and get rid
+ // of this MapResourceToReason() call.
+ rtc::Event map_resource_event;
+ encoder_queue_.PostTask([this, resource, &map_resource_event] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ stream_resource_manager_.MapResourceToReason(resource,
+ VideoAdaptationReason::kCpu);
+ map_resource_event.Set();
+ });
+ map_resource_event.Wait(rtc::Event::kForever);
+
+ // Add the resource to the processor.
+ rtc::Event add_resource_event;
+ resource_adaptation_queue_.PostTask([this, resource, &add_resource_event] {
+ RTC_DCHECK_RUN_ON(&resource_adaptation_queue_);
+ if (!resource_adaptation_processor_) {
+ // The VideoStreamEncoder was stopped and the processor destroyed before
+ // this task had a chance to execute. No action needed.
+ return;
+ }
+ resource_adaptation_processor_->AddResource(resource);
+ add_resource_event.Set();
+ });
+ add_resource_event.Wait(rtc::Event::kForever);
+}
+
+std::vector<rtc::scoped_refptr<Resource>>
+VideoStreamEncoder::GetAdaptationResources() {
+ std::vector<rtc::scoped_refptr<Resource>> resources;
+ rtc::Event event;
+ resource_adaptation_queue_.PostTask([this, &resources, &event] {
+ RTC_DCHECK_RUN_ON(&resource_adaptation_queue_);
+ if (!resource_adaptation_processor_) {
+ // The VideoStreamEncoder was stopped and the processor destroyed before
+ // this task had a chance to execute. No action needed.
+ return;
+ }
+ resources = resource_adaptation_processor_->GetResources();
+ event.Set();
+ });
+ event.Wait(rtc::Event::kForever);
+ return resources;
+}
+
void VideoStreamEncoder::SetSource(
rtc::VideoSourceInterface<VideoFrame>* source,
const DegradationPreference& degradation_preference) {
@@ -722,16 +783,6 @@ void VideoStreamEncoder::ReconfigureEncoder() {
// invoked later in this method.)
stream_resource_manager_.StopManagedResources();
stream_resource_manager_.StartEncodeUsageResource();
- resource_adaptation_queue_.PostTask([this] {
- RTC_DCHECK_RUN_ON(&resource_adaptation_queue_);
- if (!resource_adaptation_processor_) {
- // The VideoStreamEncoder was stopped and the processor destroyed before
- // this task had a chance to execute. No action needed.
- return;
- }
- // Ensures started. If already started this is a NO-OP.
- resource_adaptation_processor_->StartResourceAdaptation();
- });
pending_encoder_creation_ = false;
}
@@ -1506,48 +1557,8 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
simulcast_id = encoded_image.SpatialIndex().value_or(0);
}
- std::unique_ptr<CodecSpecificInfo> codec_info_copy;
- {
- rtc::CritScope cs(&encoded_image_lock_);
-
- if (codec_specific_info && codec_specific_info->generic_frame_info) {
- codec_info_copy =
- std::make_unique<CodecSpecificInfo>(*codec_specific_info);
- GenericFrameInfo& generic_info = *codec_info_copy->generic_frame_info;
- generic_info.frame_id = next_frame_id_++;
-
- if (encoder_buffer_state_.size() <= static_cast<size_t>(simulcast_id)) {
- RTC_LOG(LS_ERROR) << "At most " << encoder_buffer_state_.size()
- << " simulcast streams supported.";
- } else {
- std::array<int64_t, kMaxEncoderBuffers>& state =
- encoder_buffer_state_[simulcast_id];
- for (const CodecBufferUsage& buffer : generic_info.encoder_buffers) {
- if (state.size() <= static_cast<size_t>(buffer.id)) {
- RTC_LOG(LS_ERROR)
- << "At most " << state.size() << " encoder buffers supported.";
- break;
- }
-
- if (buffer.referenced) {
- int64_t diff = generic_info.frame_id - state[buffer.id];
- if (diff <= 0) {
- RTC_LOG(LS_ERROR) << "Invalid frame diff: " << diff << ".";
- } else if (absl::c_find(generic_info.frame_diffs, diff) ==
- generic_info.frame_diffs.end()) {
- generic_info.frame_diffs.push_back(diff);
- }
- }
-
- if (buffer.updated)
- state[buffer.id] = generic_info.frame_id;
- }
- }
- }
- }
-
EncodedImageCallback::Result result = sink_->OnEncodedImage(
- image_copy, codec_info_copy ? codec_info_copy.get() : codec_specific_info,
+ image_copy, codec_specific_info,
fragmentation_copy ? fragmentation_copy.get() : fragmentation);
// We are only interested in propagating the meta-data about the image, not
@@ -1750,6 +1761,9 @@ void VideoStreamEncoder::OnVideoSourceRestrictionsUpdated(
const VideoAdaptationCounters& adaptation_counters,
rtc::scoped_refptr<Resource> reason) {
RTC_DCHECK_RUN_ON(&resource_adaptation_queue_);
+ std::string resource_name = reason ? reason->Name() : "<null>";
+ RTC_LOG(INFO) << "Updating sink restrictions from " << resource_name << " to "
+ << restrictions.ToString();
video_source_sink_controller_.SetRestrictions(std::move(restrictions));
video_source_sink_controller_.PushSourceSinkSettings();
}
@@ -2027,7 +2041,8 @@ void VideoStreamEncoder::InjectAdaptationResource(
});
map_resource_event.Wait(rtc::Event::kForever);
- resource_adaptation_queue_.PostTask([this, resource] {
+ rtc::Event add_resource_event;
+ resource_adaptation_queue_.PostTask([this, resource, &add_resource_event] {
RTC_DCHECK_RUN_ON(&resource_adaptation_queue_);
if (!resource_adaptation_processor_) {
// The VideoStreamEncoder was stopped and the processor destroyed before
@@ -2035,7 +2050,44 @@ void VideoStreamEncoder::InjectAdaptationResource(
return;
}
resource_adaptation_processor_->AddResource(resource);
+ add_resource_event.Set();
});
+ add_resource_event.Wait(rtc::Event::kForever);
+}
+
+void VideoStreamEncoder::InjectAdaptationConstraint(
+ AdaptationConstraint* adaptation_constraint) {
+ rtc::Event event;
+ resource_adaptation_queue_.PostTask([this, adaptation_constraint, &event] {
+ RTC_DCHECK_RUN_ON(&resource_adaptation_queue_);
+ if (!resource_adaptation_processor_) {
+ // The VideoStreamEncoder was stopped and the processor destroyed before
+ // this task had a chance to execute. No action needed.
+ return;
+ }
+ adaptation_constraints_.push_back(adaptation_constraint);
+ resource_adaptation_processor_->AddAdaptationConstraint(
+ adaptation_constraint);
+ event.Set();
+ });
+ event.Wait(rtc::Event::kForever);
+}
+
+void VideoStreamEncoder::InjectAdaptationListener(
+ AdaptationListener* adaptation_listener) {
+ rtc::Event event;
+ resource_adaptation_queue_.PostTask([this, adaptation_listener, &event] {
+ RTC_DCHECK_RUN_ON(&resource_adaptation_queue_);
+ if (!resource_adaptation_processor_) {
+ // The VideoStreamEncoder was stopped and the processor destroyed before
+ // this task had a chance to execute. No action needed.
+ return;
+ }
+ adaptation_listeners_.push_back(adaptation_listener);
+ resource_adaptation_processor_->AddAdaptationListener(adaptation_listener);
+ event.Set();
+ });
+ event.Wait(rtc::Event::kForever);
}
rtc::scoped_refptr<QualityScalerResource>
@@ -2044,26 +2096,27 @@ VideoStreamEncoder::quality_scaler_resource_for_testing() {
return stream_resource_manager_.quality_scaler_resource_for_testing();
}
-void VideoStreamEncoder::AddAdaptationListenerForTesting(
- ResourceAdaptationProcessorListener* adaptation_listener) {
+void VideoStreamEncoder::AddRestrictionsListenerForTesting(
+ VideoSourceRestrictionsListener* restrictions_listener) {
rtc::Event event;
- resource_adaptation_queue_.PostTask([this, adaptation_listener, &event] {
+ resource_adaptation_queue_.PostTask([this, restrictions_listener, &event] {
RTC_DCHECK_RUN_ON(&resource_adaptation_queue_);
RTC_DCHECK(resource_adaptation_processor_);
- resource_adaptation_processor_->AddAdaptationListener(adaptation_listener);
+ resource_adaptation_processor_->AddRestrictionsListener(
+ restrictions_listener);
event.Set();
});
event.Wait(rtc::Event::kForever);
}
-void VideoStreamEncoder::RemoveAdaptationListenerForTesting(
- ResourceAdaptationProcessorListener* adaptation_listener) {
+void VideoStreamEncoder::RemoveRestrictionsListenerForTesting(
+ VideoSourceRestrictionsListener* restrictions_listener) {
rtc::Event event;
- resource_adaptation_queue_.PostTask([this, adaptation_listener, &event] {
+ resource_adaptation_queue_.PostTask([this, restrictions_listener, &event] {
RTC_DCHECK_RUN_ON(&resource_adaptation_queue_);
RTC_DCHECK(resource_adaptation_processor_);
- resource_adaptation_processor_->RemoveAdaptationListener(
- adaptation_listener);
+ resource_adaptation_processor_->RemoveRestrictionsListener(
+ restrictions_listener);
event.Set();
});
event.Wait(rtc::Event::kForever);
diff --git a/chromium/third_party/webrtc/video/video_stream_encoder.h b/chromium/third_party/webrtc/video/video_stream_encoder.h
index 13b2bdf46bd..68b264deac3 100644
--- a/chromium/third_party/webrtc/video/video_stream_encoder.h
+++ b/chromium/third_party/webrtc/video/video_stream_encoder.h
@@ -17,6 +17,7 @@
#include <string>
#include <vector>
+#include "api/adaptation/resource.h"
#include "api/units/data_rate.h"
#include "api/video/video_bitrate_allocator.h"
#include "api/video/video_rotation.h"
@@ -26,6 +27,8 @@
#include "api/video/video_stream_encoder_settings.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/adaptation_constraint.h"
+#include "call/adaptation/adaptation_listener.h"
#include "call/adaptation/resource_adaptation_processor_interface.h"
#include "call/adaptation/video_source_restrictions.h"
#include "call/adaptation/video_stream_input_state_provider.h"
@@ -44,6 +47,7 @@
#include "video/encoder_bitrate_adjuster.h"
#include "video/frame_encode_metadata_writer.h"
#include "video/video_source_sink_controller.h"
+
namespace webrtc {
// VideoStreamEncoder represent a video encoder that accepts raw video frames as
@@ -56,7 +60,7 @@ namespace webrtc {
// Call Stop() when done.
class VideoStreamEncoder : public VideoStreamEncoderInterface,
private EncodedImageCallback,
- public ResourceAdaptationProcessorListener {
+ public VideoSourceRestrictionsListener {
public:
VideoStreamEncoder(Clock* clock,
uint32_t number_of_cores,
@@ -66,6 +70,9 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
TaskQueueFactory* task_queue_factory);
~VideoStreamEncoder() override;
+ void AddAdaptationResource(rtc::scoped_refptr<Resource> resource) override;
+ std::vector<rtc::scoped_refptr<Resource>> GetAdaptationResources() override;
+
void SetSource(rtc::VideoSourceInterface<VideoFrame>* source,
const DegradationPreference& degradation_preference) override;
@@ -118,16 +125,17 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
// Used for injected test resources.
// TODO(eshr): Move all adaptation tests out of VideoStreamEncoder tests.
void InjectAdaptationResource(rtc::scoped_refptr<Resource> resource,
- VideoAdaptationReason reason)
- RTC_RUN_ON(&encoder_queue_);
+ VideoAdaptationReason reason);
+ void InjectAdaptationConstraint(AdaptationConstraint* adaptation_constraint);
+ void InjectAdaptationListener(AdaptationListener* adaptation_listener);
rtc::scoped_refptr<QualityScalerResource>
quality_scaler_resource_for_testing();
- void AddAdaptationListenerForTesting(
- ResourceAdaptationProcessorListener* adaptation_listener);
- void RemoveAdaptationListenerForTesting(
- ResourceAdaptationProcessorListener* adaptation_listener);
+ void AddRestrictionsListenerForTesting(
+ VideoSourceRestrictionsListener* restrictions_listener);
+ void RemoveRestrictionsListenerForTesting(
+ VideoSourceRestrictionsListener* restrictions_listener);
private:
class VideoFrameInfo {
@@ -341,17 +349,6 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
// experiment group numbers incremented by 1.
const std::array<uint8_t, 2> experiment_groups_;
- // TODO(philipel): Remove this lock and run on |encoder_queue_| instead.
- rtc::CriticalSection encoded_image_lock_;
-
- int64_t next_frame_id_ RTC_GUARDED_BY(encoded_image_lock_);
-
- // This array is used as a map from simulcast id to an encoder's buffer
- // state. For every buffer of the encoder we keep track of the last frame id
- // that updated that buffer.
- std::array<std::array<int64_t, kMaxEncoderBuffers>, kMaxSimulcastStreams>
- encoder_buffer_state_ RTC_GUARDED_BY(encoded_image_lock_);
-
struct EncoderSwitchExperiment {
struct Thresholds {
absl::optional<DataRate> bitrate;
@@ -417,6 +414,10 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
std::unique_ptr<ResourceAdaptationProcessorInterface>
resource_adaptation_processor_
RTC_GUARDED_BY(&resource_adaptation_queue_);
+ std::vector<AdaptationConstraint*> adaptation_constraints_
+ RTC_GUARDED_BY(&resource_adaptation_queue_);
+ std::vector<AdaptationListener*> adaptation_listeners_
+ RTC_GUARDED_BY(&resource_adaptation_queue_);
// Handles input, output and stats reporting related to VideoStreamEncoder
// specific resources, such as "encode usage percent" measurements and "QP
// scaling". Also involved with various mitigations such as inital frame
diff --git a/chromium/third_party/webrtc/video/video_stream_encoder_unittest.cc b/chromium/third_party/webrtc/video/video_stream_encoder_unittest.cc
index 1c334fc3b3a..e963619e607 100644
--- a/chromium/third_party/webrtc/video/video_stream_encoder_unittest.cc
+++ b/chromium/third_party/webrtc/video/video_stream_encoder_unittest.cc
@@ -26,6 +26,8 @@
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/vp8_temporal_layers.h"
#include "api/video_codecs/vp8_temporal_layers_factory.h"
+#include "call/adaptation/test/fake_adaptation_constraint.h"
+#include "call/adaptation/test/fake_adaptation_listener.h"
#include "call/adaptation/test/fake_resource.h"
#include "common_video/h264/h264_common.h"
#include "common_video/include/video_frame_buffer.h"
@@ -34,6 +36,7 @@
#include "modules/video_coding/utility/quality_scaler.h"
#include "modules/video_coding/utility/simulcast_rate_allocator.h"
#include "rtc_base/fake_clock.h"
+#include "rtc_base/gunit.h"
#include "rtc_base/logging.h"
#include "rtc_base/ref_counted_object.h"
#include "system_wrappers/include/field_trial.h"
@@ -52,7 +55,12 @@ namespace webrtc {
using ::testing::_;
using ::testing::AllOf;
+using ::testing::Eq;
using ::testing::Field;
+using ::testing::Ge;
+using ::testing::Gt;
+using ::testing::Le;
+using ::testing::Lt;
using ::testing::Matcher;
using ::testing::NiceMock;
using ::testing::Return;
@@ -180,12 +188,12 @@ class FakeQualityScalerQpUsageHandlerCallback
absl::optional<bool> clear_qp_samples_result_;
};
-class VideoSourceRestrictionsUpdatedListener
- : public ResourceAdaptationProcessorListener {
+class FakeVideoSourceRestrictionsListener
+ : public VideoSourceRestrictionsListener {
public:
- VideoSourceRestrictionsUpdatedListener()
+ FakeVideoSourceRestrictionsListener()
: was_restrictions_updated_(false), restrictions_updated_event_() {}
- ~VideoSourceRestrictionsUpdatedListener() override {
+ ~FakeVideoSourceRestrictionsListener() override {
RTC_DCHECK(was_restrictions_updated_);
}
@@ -193,7 +201,7 @@ class VideoSourceRestrictionsUpdatedListener
return &restrictions_updated_event_;
}
- // ResourceAdaptationProcessorListener implementation.
+ // VideoSourceRestrictionsListener implementation.
void OnVideoSourceRestrictionsUpdated(
VideoSourceRestrictions restrictions,
const VideoAdaptationCounters& adaptation_counters,
@@ -207,6 +215,96 @@ class VideoSourceRestrictionsUpdatedListener
rtc::Event restrictions_updated_event_;
};
+auto WantsFps(Matcher<int> fps_matcher) {
+ return Field("max_framerate_fps", &rtc::VideoSinkWants::max_framerate_fps,
+ fps_matcher);
+}
+
+auto WantsMaxPixels(Matcher<int> max_pixel_matcher) {
+ return Field("max_pixel_count", &rtc::VideoSinkWants::max_pixel_count,
+ AllOf(max_pixel_matcher, Gt(0)));
+}
+
+auto ResolutionMax() {
+ return AllOf(
+ WantsMaxPixels(Eq(std::numeric_limits<int>::max())),
+ Field("target_pixel_count", &rtc::VideoSinkWants::target_pixel_count,
+ Eq(absl::nullopt)));
+}
+
+auto FpsMax() {
+ return WantsFps(Eq(kDefaultFramerate));
+}
+
+auto FpsUnlimited() {
+ return WantsFps(Eq(std::numeric_limits<int>::max()));
+}
+
+auto FpsMatchesResolutionMax(Matcher<int> fps_matcher) {
+ return AllOf(WantsFps(fps_matcher), ResolutionMax());
+}
+
+auto FpsMaxResolutionMatches(Matcher<int> pixel_matcher) {
+ return AllOf(FpsMax(), WantsMaxPixels(pixel_matcher));
+}
+
+auto FpsMaxResolutionMax() {
+ return AllOf(FpsMax(), ResolutionMax());
+}
+
+auto UnlimitedSinkWants() {
+ return AllOf(FpsUnlimited(), ResolutionMax());
+}
+
+auto FpsInRangeForPixelsInBalanced(int last_frame_pixels) {
+ Matcher<int> fps_range_matcher;
+
+ if (last_frame_pixels <= 320 * 240) {
+ fps_range_matcher = AllOf(Ge(7), Le(10));
+ } else if (last_frame_pixels <= 480 * 270) {
+ fps_range_matcher = AllOf(Ge(10), Le(15));
+ } else if (last_frame_pixels <= 640 * 480) {
+ fps_range_matcher = Ge(15);
+ } else {
+ fps_range_matcher = Eq(kDefaultFramerate);
+ }
+ return Field("max_framerate_fps", &rtc::VideoSinkWants::max_framerate_fps,
+ fps_range_matcher);
+}
+
+auto FpsEqResolutionEqTo(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(WantsFps(Eq(other_wants.max_framerate_fps)),
+ WantsMaxPixels(Eq(other_wants.max_pixel_count)));
+}
+
+auto FpsMaxResolutionLt(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(FpsMax(), WantsMaxPixels(Lt(other_wants.max_pixel_count)));
+}
+
+auto FpsMaxResolutionGt(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(FpsMax(), WantsMaxPixels(Gt(other_wants.max_pixel_count)));
+}
+
+auto FpsLtResolutionEq(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(WantsFps(Lt(other_wants.max_framerate_fps)),
+ WantsMaxPixels(Eq(other_wants.max_pixel_count)));
+}
+
+auto FpsGtResolutionEq(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(WantsFps(Gt(other_wants.max_framerate_fps)),
+ WantsMaxPixels(Eq(other_wants.max_pixel_count)));
+}
+
+auto FpsEqResolutionLt(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(WantsFps(Eq(other_wants.max_framerate_fps)),
+ WantsMaxPixels(Lt(other_wants.max_pixel_count)));
+}
+
+auto FpsEqResolutionGt(const rtc::VideoSinkWants& other_wants) {
+ return AllOf(WantsFps(Eq(other_wants.max_framerate_fps)),
+ WantsMaxPixels(Gt(other_wants.max_pixel_count)));
+}
+
class VideoStreamEncoderUnderTest : public VideoStreamEncoder {
public:
VideoStreamEncoderUnderTest(SendStatisticsProxy* stats_proxy,
@@ -220,25 +318,25 @@ class VideoStreamEncoderUnderTest : public VideoStreamEncoder {
overuse_detector_proxy_ =
new CpuOveruseDetectorProxy(stats_proxy)),
task_queue_factory),
- fake_cpu_resource_(new FakeResource("FakeResource[CPU]")),
- fake_quality_resource_(new FakeResource("FakeResource[QP]")) {
- fake_cpu_resource_->Initialize(encoder_queue(),
- resource_adaptation_queue());
- fake_quality_resource_->Initialize(encoder_queue(),
- resource_adaptation_queue());
+ fake_cpu_resource_(FakeResource::Create("FakeResource[CPU]")),
+ fake_quality_resource_(FakeResource::Create("FakeResource[QP]")),
+ fake_adaptation_constraint_("FakeAdaptationConstraint"),
+ fake_adaptation_listener_() {
InjectAdaptationResource(fake_quality_resource_,
VideoAdaptationReason::kQuality);
InjectAdaptationResource(fake_cpu_resource_, VideoAdaptationReason::kCpu);
+ InjectAdaptationConstraint(&fake_adaptation_constraint_);
+ InjectAdaptationListener(&fake_adaptation_listener_);
}
void SetSourceAndWaitForRestrictionsUpdated(
rtc::VideoSourceInterface<VideoFrame>* source,
const DegradationPreference& degradation_preference) {
- VideoSourceRestrictionsUpdatedListener listener;
- AddAdaptationListenerForTesting(&listener);
+ FakeVideoSourceRestrictionsListener listener;
+ AddRestrictionsListenerForTesting(&listener);
SetSource(source, degradation_preference);
listener.restrictions_updated_event()->Wait(5000);
- RemoveAdaptationListenerForTesting(&listener);
+ RemoveRestrictionsListenerForTesting(&listener);
}
void SetSourceAndWaitForFramerateUpdated(
@@ -283,7 +381,7 @@ class VideoStreamEncoderUnderTest : public VideoStreamEncoder {
void TriggerCpuOveruse() {
rtc::Event event;
resource_adaptation_queue()->PostTask([this, &event] {
- fake_cpu_resource_->set_usage_state(ResourceUsageState::kOveruse);
+ fake_cpu_resource_->SetUsageState(ResourceUsageState::kOveruse);
event.Set();
});
ASSERT_TRUE(event.Wait(5000));
@@ -291,7 +389,7 @@ class VideoStreamEncoderUnderTest : public VideoStreamEncoder {
void TriggerCpuUnderuse() {
rtc::Event event;
resource_adaptation_queue()->PostTask([this, &event] {
- fake_cpu_resource_->set_usage_state(ResourceUsageState::kUnderuse);
+ fake_cpu_resource_->SetUsageState(ResourceUsageState::kUnderuse);
event.Set();
});
ASSERT_TRUE(event.Wait(5000));
@@ -301,7 +399,7 @@ class VideoStreamEncoderUnderTest : public VideoStreamEncoder {
void TriggerQualityLow() {
rtc::Event event;
resource_adaptation_queue()->PostTask([this, &event] {
- fake_quality_resource_->set_usage_state(ResourceUsageState::kOveruse);
+ fake_quality_resource_->SetUsageState(ResourceUsageState::kOveruse);
event.Set();
});
ASSERT_TRUE(event.Wait(5000));
@@ -309,7 +407,7 @@ class VideoStreamEncoderUnderTest : public VideoStreamEncoder {
void TriggerQualityHigh() {
rtc::Event event;
resource_adaptation_queue()->PostTask([this, &event] {
- fake_quality_resource_->set_usage_state(ResourceUsageState::kUnderuse);
+ fake_quality_resource_->SetUsageState(ResourceUsageState::kUnderuse);
event.Set();
});
ASSERT_TRUE(event.Wait(5000));
@@ -334,6 +432,8 @@ class VideoStreamEncoderUnderTest : public VideoStreamEncoder {
CpuOveruseDetectorProxy* overuse_detector_proxy_;
rtc::scoped_refptr<FakeResource> fake_cpu_resource_;
rtc::scoped_refptr<FakeResource> fake_quality_resource_;
+ FakeAdaptationConstraint fake_adaptation_constraint_;
+ FakeAdaptationListener fake_adaptation_listener_;
};
class VideoStreamFactory
@@ -458,9 +558,9 @@ class AdaptingFrameForwarder : public test::FrameForwarder {
void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
const rtc::VideoSinkWants& wants) override {
rtc::CritScope cs(&crit_);
- last_wants_ = sink_wants();
+ last_wants_ = sink_wants_locked();
adapter_.OnSinkWants(wants);
- test::FrameForwarder::AddOrUpdateSink(sink, wants);
+ test::FrameForwarder::AddOrUpdateSinkLocked(sink, wants);
}
cricket::VideoAdapter adapter_;
bool adaptation_enabled_ RTC_GUARDED_BY(crit_);
@@ -507,16 +607,24 @@ class MockableSendStatisticsProxy : public SendStatisticsProxy {
class MockBitrateObserver : public VideoBitrateAllocationObserver {
public:
- MOCK_METHOD1(OnBitrateAllocationUpdated, void(const VideoBitrateAllocation&));
+ MOCK_METHOD(void,
+ OnBitrateAllocationUpdated,
+ (const VideoBitrateAllocation&),
+ (override));
};
class MockEncoderSelector
: public VideoEncoderFactory::EncoderSelectorInterface {
public:
- MOCK_METHOD1(OnCurrentEncoder, void(const SdpVideoFormat& format));
- MOCK_METHOD1(OnAvailableBitrate,
- absl::optional<SdpVideoFormat>(const DataRate& rate));
- MOCK_METHOD0(OnEncoderBroken, absl::optional<SdpVideoFormat>());
+ MOCK_METHOD(void,
+ OnCurrentEncoder,
+ (const SdpVideoFormat& format),
+ (override));
+ MOCK_METHOD(absl::optional<SdpVideoFormat>,
+ OnAvailableBitrate,
+ (const DataRate& rate),
+ (override));
+ MOCK_METHOD(absl::optional<SdpVideoFormat>, OnEncoderBroken, (), (override));
};
} // namespace
@@ -689,106 +797,6 @@ class VideoStreamEncoderTest : public ::testing::Test {
WaitForEncodedFrame(1);
}
- void VerifyNoLimitation(const rtc::VideoSinkWants& wants) {
- EXPECT_EQ(std::numeric_limits<int>::max(), wants.max_framerate_fps);
- EXPECT_EQ(std::numeric_limits<int>::max(), wants.max_pixel_count);
- EXPECT_FALSE(wants.target_pixel_count);
- }
-
- void VerifyFpsEqResolutionEq(const rtc::VideoSinkWants& wants1,
- const rtc::VideoSinkWants& wants2) {
- EXPECT_EQ(wants1.max_framerate_fps, wants2.max_framerate_fps);
- EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count);
- }
-
- void VerifyFpsMaxResolutionMax(const rtc::VideoSinkWants& wants) {
- EXPECT_EQ(kDefaultFramerate, wants.max_framerate_fps);
- EXPECT_EQ(std::numeric_limits<int>::max(), wants.max_pixel_count);
- EXPECT_FALSE(wants.target_pixel_count);
- }
-
- void VerifyFpsMaxResolutionLt(const rtc::VideoSinkWants& wants1,
- const rtc::VideoSinkWants& wants2) {
- EXPECT_EQ(kDefaultFramerate, wants1.max_framerate_fps);
- EXPECT_LT(wants1.max_pixel_count, wants2.max_pixel_count);
- EXPECT_GT(wants1.max_pixel_count, 0);
- }
-
- void VerifyFpsMaxResolutionGt(const rtc::VideoSinkWants& wants1,
- const rtc::VideoSinkWants& wants2) {
- EXPECT_EQ(kDefaultFramerate, wants1.max_framerate_fps);
- EXPECT_GT(wants1.max_pixel_count, wants2.max_pixel_count);
- }
-
- void VerifyFpsMaxResolutionEq(const rtc::VideoSinkWants& wants1,
- const rtc::VideoSinkWants& wants2) {
- EXPECT_EQ(kDefaultFramerate, wants1.max_framerate_fps);
- EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count);
- }
-
- void VerifyFpsLtResolutionEq(const rtc::VideoSinkWants& wants1,
- const rtc::VideoSinkWants& wants2) {
- EXPECT_LT(wants1.max_framerate_fps, wants2.max_framerate_fps);
- EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count);
- }
-
- void VerifyFpsGtResolutionEq(const rtc::VideoSinkWants& wants1,
- const rtc::VideoSinkWants& wants2) {
- EXPECT_GT(wants1.max_framerate_fps, wants2.max_framerate_fps);
- EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count);
- }
-
- void VerifyFpsEqResolutionLt(const rtc::VideoSinkWants& wants1,
- const rtc::VideoSinkWants& wants2) {
- EXPECT_EQ(wants1.max_framerate_fps, wants2.max_framerate_fps);
- EXPECT_LT(wants1.max_pixel_count, wants2.max_pixel_count);
- EXPECT_GT(wants1.max_pixel_count, 0);
- }
-
- void VerifyFpsEqResolutionGt(const rtc::VideoSinkWants& wants1,
- const rtc::VideoSinkWants& wants2) {
- EXPECT_EQ(wants1.max_framerate_fps, wants2.max_framerate_fps);
- EXPECT_GT(wants1.max_pixel_count, wants2.max_pixel_count);
- }
-
- void VerifyFpsMaxResolutionLt(const rtc::VideoSinkWants& wants,
- int pixel_count) {
- EXPECT_EQ(kDefaultFramerate, wants.max_framerate_fps);
- EXPECT_LT(wants.max_pixel_count, pixel_count);
- EXPECT_GT(wants.max_pixel_count, 0);
- }
-
- void VerifyFpsLtResolutionMax(const rtc::VideoSinkWants& wants, int fps) {
- EXPECT_LT(wants.max_framerate_fps, fps);
- EXPECT_EQ(std::numeric_limits<int>::max(), wants.max_pixel_count);
- EXPECT_FALSE(wants.target_pixel_count);
- }
-
- void VerifyFpsEqResolutionMax(const rtc::VideoSinkWants& wants,
- int expected_fps) {
- EXPECT_EQ(expected_fps, wants.max_framerate_fps);
- EXPECT_EQ(std::numeric_limits<int>::max(), wants.max_pixel_count);
- EXPECT_FALSE(wants.target_pixel_count);
- }
-
- void VerifyBalancedModeFpsRange(const rtc::VideoSinkWants& wants,
- int last_frame_pixels) {
- // Balanced mode should always scale FPS to the desired range before
- // attempting to scale resolution.
- int fps_limit = wants.max_framerate_fps;
- if (last_frame_pixels <= 320 * 240) {
- EXPECT_LE(7, fps_limit);
- EXPECT_LE(fps_limit, 10);
- } else if (last_frame_pixels <= 480 * 270) {
- EXPECT_LE(10, fps_limit);
- EXPECT_LE(fps_limit, 15);
- } else if (last_frame_pixels <= 640 * 480) {
- EXPECT_LE(15, fps_limit);
- } else {
- EXPECT_EQ(kDefaultFramerate, fps_limit);
- }
- }
-
void WaitForEncodedFrame(int64_t expected_ntp_time) {
sink_.WaitForEncodedFrame(expected_ntp_time);
fake_clock_.AdvanceTime(TimeDelta::Seconds(1) / max_framerate_);
@@ -1864,7 +1872,7 @@ TEST_F(VideoStreamEncoderTest, TestCpuDowngrades_BalancedMode) {
DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_stream_encoder_->SetSource(&video_source_,
webrtc::DegradationPreference::BALANCED);
- VerifyNoLimitation(video_source_.sink_wants());
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -1889,9 +1897,10 @@ TEST_F(VideoStreamEncoderTest, TestCpuDowngrades_BalancedMode) {
t += frame_interval_ms;
video_stream_encoder_->TriggerCpuOveruse();
- VerifyBalancedModeFpsRange(
+ EXPECT_THAT(
video_source_.sink_wants(),
- *video_source_.last_sent_width() * *video_source_.last_sent_height());
+ FpsInRangeForPixelsInBalanced(*video_source_.last_sent_width() *
+ *video_source_.last_sent_height()));
} while (video_source_.sink_wants().max_pixel_count <
last_wants.max_pixel_count ||
video_source_.sink_wants().max_framerate_fps <
@@ -1924,16 +1933,17 @@ TEST_F(VideoStreamEncoderTest, TestCpuDowngrades_BalancedMode) {
t += frame_interval_ms;
video_stream_encoder_->TriggerCpuUnderuse();
- VerifyBalancedModeFpsRange(
+ EXPECT_THAT(
video_source_.sink_wants(),
- *video_source_.last_sent_width() * *video_source_.last_sent_height());
+ FpsInRangeForPixelsInBalanced(*video_source_.last_sent_width() *
+ *video_source_.last_sent_height()));
EXPECT_TRUE(video_source_.sink_wants().max_pixel_count >
last_wants.max_pixel_count ||
video_source_.sink_wants().max_framerate_fps >
last_wants.max_framerate_fps);
}
- VerifyFpsMaxResolutionMax(video_source_.sink_wants());
+ EXPECT_THAT(video_source_.sink_wants(), FpsMaxResolutionMax());
stats_proxy_->ResetMockStats();
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
@@ -1949,7 +1959,7 @@ TEST_F(VideoStreamEncoderTest,
DataRate::BitsPerSec(kTargetBitrateBps),
DataRate::BitsPerSec(kTargetBitrateBps),
DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
- VerifyNoLimitation(video_source_.sink_wants());
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
const int kFrameWidth = 1280;
const int kFrameHeight = 720;
@@ -1966,7 +1976,7 @@ TEST_F(VideoStreamEncoderTest,
video_source_.set_adaptation_enabled(true);
video_stream_encoder_->SetSource(
&video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
- VerifyNoLimitation(video_source_.sink_wants());
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
video_source_.IncomingCapturedFrame(
CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
sink_.WaitForEncodedFrame(ntp_time);
@@ -1990,7 +2000,7 @@ TEST_F(VideoStreamEncoderTest,
sink_.WaitForEncodedFrame(ntp_time);
ntp_time += 100;
- video_stream_encoder_->SetSource(
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
&video_source_, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
// Give the encoder queue time to process the change in degradation preference
// by waiting for an encoded frame.
@@ -2022,8 +2032,9 @@ TEST_F(VideoStreamEncoderTest,
EXPECT_EQ(video_source_.sink_wants().max_pixel_count, pixel_count);
EXPECT_EQ(video_source_.sink_wants().max_framerate_fps, kInputFps);
- // Change the degradation preference back. CPU underuse should now adapt.
- video_stream_encoder_->SetSource(
+ // Change the degradation preference back. CPU underuse should not adapt since
+ // QP is most limited.
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
&video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
video_source_.IncomingCapturedFrame(
CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
@@ -2042,7 +2053,15 @@ TEST_F(VideoStreamEncoderTest,
CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
sink_.WaitForEncodedFrame(ntp_time);
ntp_time += kFrameIntervalMs;
- EXPECT_EQ(video_source_.sink_wants().max_framerate_fps, kInputFps);
+ EXPECT_EQ(video_source_.sink_wants().max_framerate_fps, restricted_fps);
+
+ // Trigger QP underuse, fps should return to normal.
+ video_stream_encoder_->TriggerQualityHigh();
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(ntp_time, kFrameWidth, kFrameHeight));
+ sink_.WaitForEncodedFrame(ntp_time);
+ ntp_time += kFrameIntervalMs;
+ EXPECT_THAT(video_source_.sink_wants(), FpsMax());
video_stream_encoder_->Stop();
}
@@ -2052,7 +2071,7 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
DataRate::BitsPerSec(kTargetBitrateBps),
DataRate::BitsPerSec(kTargetBitrateBps),
DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
- VerifyNoLimitation(video_source_.sink_wants());
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
const int kFrameWidth = 1280;
const int kFrameHeight = 720;
@@ -2089,7 +2108,7 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
sink_.WaitForEncodedFrame(frame_timestamp);
frame_timestamp += kFrameIntervalMs;
// Initially no degradation registered.
- VerifyFpsMaxResolutionMax(new_video_source.sink_wants());
+ EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax());
// Force an input frame rate to be available, or the adaptation call won't
// know what framerate to adapt form.
@@ -2119,7 +2138,7 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
CreateFrame(frame_timestamp, kFrameWidth, kFrameWidth));
sink_.WaitForEncodedFrame(frame_timestamp);
frame_timestamp += kFrameIntervalMs;
- VerifyFpsMaxResolutionMax(new_video_source.sink_wants());
+ EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax());
video_stream_encoder_->TriggerCpuOveruse();
new_video_source.IncomingCapturedFrame(
@@ -2128,7 +2147,7 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
frame_timestamp += kFrameIntervalMs;
// Still no degradation.
- VerifyFpsMaxResolutionMax(new_video_source.sink_wants());
+ EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax());
// Calling SetSource with resolution scaling enabled apply the old SinkWants.
video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
@@ -2455,7 +2474,7 @@ TEST_F(VideoStreamEncoderTest,
// Set new degradation preference should clear restrictions since we changed
// from BALANCED.
- video_stream_encoder_->SetSource(
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
&source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
WaitForEncodedFrame(sequence++);
@@ -2479,8 +2498,8 @@ TEST_F(VideoStreamEncoderTest,
EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
// Back to BALANCED, should clear the restrictions again.
- video_stream_encoder_->SetSource(&source,
- webrtc::DegradationPreference::BALANCED);
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
+ &source, webrtc::DegradationPreference::BALANCED);
source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
WaitForEncodedFrame(sequence++);
stats = stats_proxy_->GetStats();
@@ -2638,7 +2657,7 @@ TEST_F(VideoStreamEncoderTest,
DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Expect no scaling to begin with.
- VerifyNoLimitation(video_source_.sink_wants());
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
WaitForEncodedFrame(1);
@@ -2695,13 +2714,14 @@ TEST_F(VideoStreamEncoderTest,
source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
WaitForEncodedFrame(1);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
// Trigger adapt down, expect scaled down resolution.
video_stream_encoder_->TriggerCpuOveruse();
- VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
const int kLastMaxPixelCount = source.sink_wants().max_pixel_count;
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -2729,11 +2749,12 @@ TEST_F(VideoStreamEncoderTest, SkipsSameOrLargerAdaptDownRequest_BalancedMode) {
webrtc::DegradationPreference::BALANCED);
source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
sink_.WaitForEncodedFrame(1);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
// Trigger adapt down, expect scaled down resolution.
video_stream_encoder_->TriggerQualityLow();
- VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
const int kLastMaxPixelCount = source.sink_wants().max_pixel_count;
@@ -2773,13 +2794,13 @@ TEST_F(VideoStreamEncoderTest,
source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
// Trigger adapt up, expect no change.
video_stream_encoder_->TriggerCpuUnderuse();
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -2802,13 +2823,13 @@ TEST_F(VideoStreamEncoderTest,
source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
// Trigger adapt up, expect no change.
video_stream_encoder_->TriggerCpuUnderuse();
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -2830,14 +2851,14 @@ TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_BalancedMode) {
source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
sink_.WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt up, expect no change.
video_stream_encoder_->TriggerQualityHigh();
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -2860,14 +2881,14 @@ TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_DisabledMode) {
source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
sink_.WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt up, expect no change.
video_stream_encoder_->TriggerQualityHigh();
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -2892,7 +2913,7 @@ TEST_F(VideoStreamEncoderTest,
source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
WaitForEncodedFrame(1);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -2900,13 +2921,14 @@ TEST_F(VideoStreamEncoderTest,
video_stream_encoder_->TriggerQualityLow();
source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
WaitForEncodedFrame(2);
- VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt up, expect no restriction.
video_stream_encoder_->TriggerQualityHigh();
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -2931,33 +2953,35 @@ TEST_F(VideoStreamEncoderTest,
// Expect no scaling to begin with (preference: MAINTAIN_FRAMERATE).
video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
sink_.WaitForEncodedFrame(1);
- VerifyFpsMaxResolutionMax(video_source_.sink_wants());
+ EXPECT_THAT(video_source_.sink_wants(), FpsMaxResolutionMax());
// Trigger adapt down, expect scaled down resolution.
video_stream_encoder_->TriggerQualityLow();
video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
sink_.WaitForEncodedFrame(2);
- VerifyFpsMaxResolutionLt(video_source_.sink_wants(), kWidth * kHeight);
+ EXPECT_THAT(video_source_.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
// Enable MAINTAIN_RESOLUTION preference.
test::FrameForwarder new_video_source;
- video_stream_encoder_->SetSource(
+ video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated(
&new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
// Give the encoder queue time to process the change in degradation preference
// by waiting for an encoded frame.
new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
sink_.WaitForEncodedFrame(3);
- VerifyFpsMaxResolutionMax(new_video_source.sink_wants());
+ EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax());
// Trigger adapt down, expect reduced framerate.
video_stream_encoder_->TriggerQualityLow();
new_video_source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
sink_.WaitForEncodedFrame(4);
- VerifyFpsLtResolutionMax(new_video_source.sink_wants(), kInputFps);
+ EXPECT_THAT(new_video_source.sink_wants(),
+ FpsMatchesResolutionMax(Lt(kInputFps)));
// Trigger adapt up, expect no restriction.
video_stream_encoder_->TriggerQualityHigh();
- VerifyFpsMaxResolutionMax(new_video_source.sink_wants());
+ EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax());
video_stream_encoder_->Stop();
}
@@ -3019,7 +3043,7 @@ TEST_F(VideoStreamEncoderTest,
int64_t timestamp_ms = kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -3028,7 +3052,8 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -3037,7 +3062,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -3046,7 +3071,8 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -3055,7 +3081,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -3080,7 +3106,7 @@ TEST_F(VideoStreamEncoderTest,
int64_t timestamp_ms = kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -3089,7 +3115,8 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -3098,7 +3125,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -3107,7 +3134,8 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -3116,7 +3144,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
sink_.WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -3160,7 +3188,7 @@ TEST_F(VideoStreamEncoderTest, AdaptUpIfBwEstimateIsHigherThanMinBitrate) {
// Trigger adapt up. Higher resolution should not be requested duo to lack
// of bitrate.
video_stream_encoder_->TriggerQualityHigh();
- VerifyFpsMaxResolutionLt(source.sink_wants(), 1280 * 720);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMatches(Lt(1280 * 720)));
// Increase bitrate.
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
@@ -3171,7 +3199,7 @@ TEST_F(VideoStreamEncoderTest, AdaptUpIfBwEstimateIsHigherThanMinBitrate) {
// Trigger adapt up. Higher resolution should be requested.
video_stream_encoder_->TriggerQualityHigh();
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
video_stream_encoder_->Stop();
}
@@ -3198,7 +3226,7 @@ TEST_F(VideoStreamEncoderTest, DropFirstFramesIfBwEstimateIsTooLow) {
int64_t timestamp_ms = kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720));
ExpectDroppedFrame();
- VerifyFpsMaxResolutionLt(source.sink_wants(), 1280 * 720);
+ EXPECT_TRUE_WAIT(source.sink_wants().max_pixel_count < 1280 * 720, 5000);
// Insert 720p frame. It should be downscaled and encoded.
timestamp_ms += kFrameIntervalMs;
@@ -3256,7 +3284,7 @@ TEST_F(BalancedDegradationTest, AdaptDownReturnsFalseIfFpsDiffLtThreshold) {
stats_proxy_->SetMockStats(stats);
InsertFrameAndWaitForEncoded();
- VerifyFpsMaxResolutionMax(source_.sink_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
// Trigger adapt down, expect scaled down framerate (640x360@24fps).
// Fps diff (input-requested:0) < threshold, expect adapting down not to clear
@@ -3264,7 +3292,7 @@ TEST_F(BalancedDegradationTest, AdaptDownReturnsFalseIfFpsDiffLtThreshold) {
EXPECT_FALSE(
video_stream_encoder_
->TriggerQualityScalerHighQpAndReturnIfQpSamplesShouldBeCleared());
- VerifyFpsEqResolutionMax(source_.sink_wants(), 24);
+ EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(24)));
video_stream_encoder_->Stop();
}
@@ -3282,7 +3310,7 @@ TEST_F(BalancedDegradationTest, AdaptDownReturnsTrueIfFpsDiffGeThreshold) {
stats_proxy_->SetMockStats(stats);
InsertFrameAndWaitForEncoded();
- VerifyFpsMaxResolutionMax(source_.sink_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
// Trigger adapt down, expect scaled down framerate (640x360@24fps).
// Fps diff (input-requested:1) == threshold, expect adapting down to clear QP
@@ -3290,7 +3318,7 @@ TEST_F(BalancedDegradationTest, AdaptDownReturnsTrueIfFpsDiffGeThreshold) {
EXPECT_TRUE(
video_stream_encoder_
->TriggerQualityScalerHighQpAndReturnIfQpSamplesShouldBeCleared());
- VerifyFpsEqResolutionMax(source_.sink_wants(), 24);
+ EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(24)));
video_stream_encoder_->Stop();
}
@@ -3304,11 +3332,11 @@ TEST_F(BalancedDegradationTest, AdaptDownUsesCodecSpecificFps) {
EXPECT_EQ(kVideoCodecVP8, video_encoder_config_.codec_type);
InsertFrameAndWaitForEncoded();
- VerifyFpsMaxResolutionMax(source_.sink_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
// Trigger adapt down, expect scaled down framerate (640x360@22fps).
video_stream_encoder_->TriggerQualityLow();
- VerifyFpsEqResolutionMax(source_.sink_wants(), 22);
+ EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(22)));
video_stream_encoder_->Stop();
}
@@ -3324,25 +3352,25 @@ TEST_F(BalancedDegradationTest, NoAdaptUpIfBwEstimateIsLessThanMinBitrate) {
OnBitrateUpdated(kTooLowMinBitrateBps);
InsertFrameAndWaitForEncoded();
- VerifyFpsMaxResolutionMax(source_.sink_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt down, expect scaled down framerate (640x360@14fps).
video_stream_encoder_->TriggerQualityLow();
InsertFrameAndWaitForEncoded();
- VerifyFpsEqResolutionMax(source_.sink_wants(), 14);
+ EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(14)));
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt down, expect scaled down resolution (480x270@14fps).
video_stream_encoder_->TriggerQualityLow();
InsertFrameAndWaitForEncoded();
- VerifyFpsEqResolutionLt(source_.sink_wants(), source_.last_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsEqResolutionLt(source_.last_wants()));
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt down, expect scaled down framerate (480x270@10fps).
video_stream_encoder_->TriggerQualityLow();
InsertFrameAndWaitForEncoded();
- VerifyFpsLtResolutionEq(source_.sink_wants(), source_.last_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsLtResolutionEq(source_.last_wants()));
EXPECT_EQ(source_.sink_wants().max_framerate_fps, 10);
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -3369,7 +3397,7 @@ TEST_F(BalancedDegradationTest,
SetupTest();
OnBitrateUpdated(kLowTargetBitrateBps);
- VerifyNoLimitation(source_.sink_wants());
+ EXPECT_THAT(source_.sink_wants(), UnlimitedSinkWants());
// Insert frame, expect scaled down:
// framerate (640x360@24fps) -> resolution (480x270@24fps).
@@ -3404,31 +3432,31 @@ TEST_F(BalancedDegradationTest,
OnBitrateUpdated(kTooLowMinResolutionBitrateBps);
InsertFrameAndWaitForEncoded();
- VerifyFpsMaxResolutionMax(source_.sink_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt down, expect scaled down framerate (640x360@14fps).
video_stream_encoder_->TriggerQualityLow();
InsertFrameAndWaitForEncoded();
- VerifyFpsEqResolutionMax(source_.sink_wants(), 14);
+ EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(14)));
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt down, expect scaled down resolution (480x270@14fps).
video_stream_encoder_->TriggerQualityLow();
InsertFrameAndWaitForEncoded();
- VerifyFpsEqResolutionLt(source_.sink_wants(), source_.last_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsEqResolutionLt(source_.last_wants()));
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt down, expect scaled down framerate (480x270@10fps).
video_stream_encoder_->TriggerQualityLow();
InsertFrameAndWaitForEncoded();
- VerifyFpsLtResolutionEq(source_.sink_wants(), source_.last_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsLtResolutionEq(source_.last_wants()));
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt up, expect upscaled fps (no bitrate limit) (480x270@14fps).
video_stream_encoder_->TriggerQualityHigh();
InsertFrameAndWaitForEncoded();
- VerifyFpsGtResolutionEq(source_.sink_wants(), source_.last_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsGtResolutionEq(source_.last_wants()));
EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt up, expect no upscale in res (target bitrate < min bitrate).
@@ -3440,7 +3468,7 @@ TEST_F(BalancedDegradationTest,
OnBitrateUpdated(kResolutionMinBitrateBps);
video_stream_encoder_->TriggerQualityHigh();
InsertFrameAndWaitForEncoded();
- VerifyFpsEqResolutionGt(source_.sink_wants(), source_.last_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsEqResolutionGt(source_.last_wants()));
EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes);
video_stream_encoder_->Stop();
@@ -3460,25 +3488,25 @@ TEST_F(BalancedDegradationTest,
OnBitrateUpdated(kTooLowMinBitrateBps);
InsertFrameAndWaitForEncoded();
- VerifyFpsMaxResolutionMax(source_.sink_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax());
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt down, expect scaled down framerate (640x360@14fps).
video_stream_encoder_->TriggerQualityLow();
InsertFrameAndWaitForEncoded();
- VerifyFpsEqResolutionMax(source_.sink_wants(), 14);
+ EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(14)));
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt down, expect scaled down resolution (480x270@14fps).
video_stream_encoder_->TriggerQualityLow();
InsertFrameAndWaitForEncoded();
- VerifyFpsEqResolutionLt(source_.sink_wants(), source_.last_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsEqResolutionLt(source_.last_wants()));
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt down, expect scaled down framerate (480x270@10fps).
video_stream_encoder_->TriggerQualityLow();
InsertFrameAndWaitForEncoded();
- VerifyFpsLtResolutionEq(source_.sink_wants(), source_.last_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsLtResolutionEq(source_.last_wants()));
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt up, expect no upscale (target bitrate < min bitrate).
@@ -3490,7 +3518,7 @@ TEST_F(BalancedDegradationTest,
OnBitrateUpdated(kMinBitrateBps);
video_stream_encoder_->TriggerQualityHigh();
InsertFrameAndWaitForEncoded();
- VerifyFpsGtResolutionEq(source_.sink_wants(), source_.last_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsGtResolutionEq(source_.last_wants()));
EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt up, expect no upscale in res (target bitrate < min bitrate).
@@ -3503,7 +3531,7 @@ TEST_F(BalancedDegradationTest,
OnBitrateUpdated(kResolutionMinBitrateBps);
video_stream_encoder_->TriggerQualityHigh();
InsertFrameAndWaitForEncoded();
- VerifyFpsEqResolutionGt(source_.sink_wants(), source_.last_wants());
+ EXPECT_THAT(source_.sink_wants(), FpsEqResolutionGt(source_.last_wants()));
EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes);
video_stream_encoder_->Stop();
@@ -3527,7 +3555,7 @@ TEST_F(VideoStreamEncoderTest,
int64_t timestamp_ms = kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -3538,7 +3566,8 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -3549,7 +3578,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -3560,7 +3589,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -3571,7 +3600,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
rtc::VideoSinkWants last_wants = source.sink_wants();
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
@@ -3583,68 +3612,74 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionEq(source.sink_wants(), last_wants);
+ EXPECT_THAT(source.sink_wants(), FpsMax());
+ EXPECT_EQ(source.sink_wants().max_pixel_count, last_wants.max_pixel_count);
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
- // Trigger cpu adapt up, expect upscaled resolution (480x270).
- video_stream_encoder_->TriggerCpuUnderuse();
+ // Trigger quality adapt up, expect upscaled resolution (480x270).
+ video_stream_encoder_->TriggerQualityHigh();
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
- EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
- // Trigger cpu adapt up, expect upscaled resolution (640x360).
+ // Trigger quality and cpu adapt up since both are most limited, expect
+ // upscaled resolution (640x360).
video_stream_encoder_->TriggerCpuUnderuse();
+ video_stream_encoder_->TriggerQualityHigh();
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
- EXPECT_EQ(5, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
- // Trigger cpu adapt up, expect upscaled resolution (960x540).
+ // Trigger quality and cpu adapt up since both are most limited, expect
+ // upscaled resolution (960x540).
video_stream_encoder_->TriggerCpuUnderuse();
+ video_stream_encoder_->TriggerQualityHigh();
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
last_wants = source.sink_wants();
- EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
- EXPECT_EQ(6, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(5, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
- // Trigger cpu adapt up, no cpu downgrades, expect no change (960x540).
+ // Trigger cpu adapt up, expect no change since not most limited (960x540).
+ // However the stats will change since the CPU resource is no longer limited.
video_stream_encoder_->TriggerCpuUnderuse();
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsEqResolutionEq(source.sink_wants(), last_wants);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(last_wants));
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(6, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger quality adapt up, expect no restriction (1280x720).
video_stream_encoder_->TriggerQualityHigh();
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_EQ(6, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes);
video_stream_encoder_->Stop();
}
@@ -4015,7 +4050,8 @@ TEST_F(VideoStreamEncoderTest, DropsFramesAndScalesWhenBitrateIsTooLow) {
ExpectDroppedFrame();
// Expect the sink_wants to specify a scaled frame.
- EXPECT_LT(video_source_.sink_wants().max_pixel_count, kWidth * kHeight);
+ EXPECT_TRUE_WAIT(
+ video_source_.sink_wants().max_pixel_count < kWidth * kHeight, 5000);
int last_pixel_count = video_source_.sink_wants().max_pixel_count;
@@ -4026,7 +4062,8 @@ TEST_F(VideoStreamEncoderTest, DropsFramesAndScalesWhenBitrateIsTooLow) {
// Expect to drop this frame, the wait should time out.
ExpectDroppedFrame();
- EXPECT_LT(video_source_.sink_wants().max_pixel_count, last_pixel_count);
+ EXPECT_TRUE_WAIT(
+ video_source_.sink_wants().max_pixel_count < last_pixel_count, 5000);
video_stream_encoder_->Stop();
}
@@ -4141,7 +4178,8 @@ TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenBweDrops) {
ExpectDroppedFrame();
// Expect the sink_wants to specify a scaled frame.
- EXPECT_LT(video_source_.sink_wants().max_pixel_count, kWidth * kHeight);
+ EXPECT_TRUE_WAIT(
+ video_source_.sink_wants().max_pixel_count < kWidth * kHeight, 5000);
video_stream_encoder_->Stop();
}
@@ -4176,7 +4214,8 @@ TEST_F(VideoStreamEncoderTest, RampsUpInQualityWhenBwIsHigh) {
int64_t timestamp_ms = kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
ExpectDroppedFrame();
- EXPECT_LT(source.sink_wants().max_pixel_count, kWidth * kHeight);
+ EXPECT_TRUE_WAIT(source.sink_wants().max_pixel_count < kWidth * kHeight,
+ 5000);
// Increase bitrate to encoder max.
video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
@@ -4200,7 +4239,7 @@ TEST_F(VideoStreamEncoderTest, RampsUpInQualityWhenBwIsHigh) {
// The ramp-up code involves the adaptation queue, give it time to execute.
// TODO(hbos): Can we await an appropriate event instead?
video_stream_encoder_->WaitUntilAdaptationTaskQueueIsIdle();
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
// Frame should not be adapted.
timestamp_ms += kFrameIntervalMs;
@@ -4224,14 +4263,14 @@ TEST_F(VideoStreamEncoderTest,
test::FrameForwarder source;
video_stream_encoder_->SetSource(
&source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
- VerifyNoLimitation(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
// Trigger adapt down, too small frame, expect no change.
source.IncomingCapturedFrame(CreateFrame(1, kTooSmallWidth, kTooSmallHeight));
WaitForEncodedFrame(1);
video_stream_encoder_->TriggerCpuOveruse();
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
@@ -4252,7 +4291,7 @@ TEST_F(VideoStreamEncoderTest,
test::FrameForwarder source;
video_stream_encoder_->SetSource(&source,
webrtc::DegradationPreference::BALANCED);
- VerifyNoLimitation(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
@@ -4260,7 +4299,7 @@ TEST_F(VideoStreamEncoderTest,
source.IncomingCapturedFrame(CreateFrame(1, kTooSmallWidth, kTooSmallHeight));
WaitForEncodedFrame(1);
video_stream_encoder_->TriggerQualityLow();
- VerifyFpsEqResolutionMax(source.sink_wants(), kFpsLimit);
+ EXPECT_THAT(source.sink_wants(), FpsMatchesResolutionMax(Eq(kFpsLimit)));
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4269,7 +4308,7 @@ TEST_F(VideoStreamEncoderTest,
source.IncomingCapturedFrame(CreateFrame(2, kTooSmallWidth, kTooSmallHeight));
WaitForEncodedFrame(2);
video_stream_encoder_->TriggerQualityLow();
- VerifyFpsEqResolutionMax(source.sink_wants(), kFpsLimit);
+ EXPECT_THAT(source.sink_wants(), FpsMatchesResolutionMax(Eq(kFpsLimit)));
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4465,7 +4504,8 @@ TEST_F(VideoStreamEncoderTest, DoesntAdaptDownPastMinFramerate) {
} while (video_source_.sink_wants().max_framerate_fps <
last_wants.max_framerate_fps);
- VerifyFpsEqResolutionMax(video_source_.sink_wants(), kMinFramerateFps);
+ EXPECT_THAT(video_source_.sink_wants(),
+ FpsMatchesResolutionMax(Eq(kMinFramerateFps)));
video_stream_encoder_->Stop();
}
@@ -4489,7 +4529,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4499,7 +4539,8 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4509,7 +4550,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4519,7 +4560,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4529,7 +4570,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsEqResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4539,7 +4580,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4549,7 +4590,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsEqResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(6, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4559,7 +4600,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
rtc::VideoSinkWants last_wants = source.sink_wants();
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
@@ -4570,17 +4611,17 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsEqResolutionEq(source.sink_wants(), last_wants);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(last_wants));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(7, stats_proxy_->GetStats().number_of_quality_adapt_changes);
- // Trigger adapt down, expect expect increased fps (320x180@10fps).
+ // Trigger adapt up, expect increased fps (320x180@10fps).
video_stream_encoder_->TriggerQualityHigh();
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsGtResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(8, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4590,7 +4631,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsEqResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(9, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4600,7 +4641,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsGtResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(10, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4610,7 +4651,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsEqResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(11, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4620,7 +4661,9 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMax());
+ EXPECT_EQ(source.sink_wants().max_pixel_count,
+ source.last_wants().max_pixel_count);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(12, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4630,7 +4673,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(13, stats_proxy_->GetStats().number_of_quality_adapt_changes);
@@ -4640,15 +4683,15 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_EQ(14, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt up, expect no change.
video_stream_encoder_->TriggerQualityHigh();
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_EQ(14, stats_proxy_->GetStats().number_of_quality_adapt_changes);
video_stream_encoder_->Stop();
@@ -4672,7 +4715,7 @@ TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) {
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
@@ -4685,7 +4728,8 @@ TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) {
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight);
+ EXPECT_THAT(source.sink_wants(),
+ FpsMaxResolutionMatches(Lt(kWidth * kHeight)));
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
@@ -4698,7 +4742,7 @@ TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) {
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants()));
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
@@ -4711,59 +4755,77 @@ TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) {
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants());
- EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
- // Trigger cpu adapt up, expect increased fps (640x360@30fps).
- video_stream_encoder_->TriggerCpuUnderuse();
+ // Trigger cpu adapt up, expect no change since QP is most limited.
+ {
+ // Store current sink wants since we expect no change and if there is no
+ // change then last_wants() is not updated.
+ auto previous_sink_wants = source.sink_wants();
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(previous_sink_wants));
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ }
+
+ // Trigger quality adapt up, expect increased fps (640x360@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionEq(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
- EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
- // Trigger quality adapt up, expect upscaled resolution (960x540@30fps).
+ // Trigger quality adapt up and Cpu adapt up since both are most limited,
+ // expect increased resolution (960x540@30fps).
video_stream_encoder_->TriggerQualityHigh();
+ video_stream_encoder_->TriggerCpuUnderuse();
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
- EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
- // Trigger cpu adapt up, expect no restriction (1280x720fps@30fps).
+ // Trigger quality adapt up and Cpu adapt up since both are most limited,
+ // expect no restriction (1280x720@30fps).
+ video_stream_encoder_->TriggerQualityHigh();
video_stream_encoder_->TriggerCpuUnderuse();
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants());
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants()));
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt up, expect no change.
video_stream_encoder_->TriggerQualityHigh();
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes);
video_stream_encoder_->Stop();
}
@@ -4788,7 +4850,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(kWidth, kHeight);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
@@ -4801,7 +4863,7 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsEqResolutionMax(source.sink_wants(), kFpsLimit);
+ EXPECT_THAT(source.sink_wants(), FpsMatchesResolutionMax(Eq(kFpsLimit)));
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
@@ -4814,45 +4876,60 @@ TEST_F(VideoStreamEncoderTest,
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsEqResolutionLt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants()));
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
- EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
+ EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
- // Trigger cpu adapt up, expect upscaled resolution (640x360@15fps).
- video_stream_encoder_->TriggerCpuUnderuse();
+ // Trigger cpu adapt up, expect no change because quality is most limited.
+ {
+ auto previous_sink_wants = source.sink_wants();
+ // Store current sink wants since we expect no change and if there is no
+ // change then last_wants() is not updated.
+ video_stream_encoder_->TriggerCpuUnderuse();
+ timestamp_ms += kFrameIntervalMs;
+ source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
+ WaitForEncodedFrame(timestamp_ms);
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(previous_sink_wants));
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ }
+
+ // Trigger quality adapt up, expect upscaled resolution (640x360@15fps).
+ video_stream_encoder_->TriggerQualityHigh();
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsEqResolutionGt(source.sink_wants(), source.last_wants());
+ EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants()));
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
- EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
- EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate);
+ EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
+ EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
- // Trigger quality adapt up, expect increased fps (640x360@30fps).
+ // Trigger quality and cpu adapt up, expect increased fps (640x360@30fps).
video_stream_encoder_->TriggerQualityHigh();
+ video_stream_encoder_->TriggerCpuUnderuse();
timestamp_ms += kFrameIntervalMs;
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight));
WaitForEncodedFrame(timestamp_ms);
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution);
EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate);
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
// Trigger adapt up, expect no change.
video_stream_encoder_->TriggerQualityHigh();
- VerifyFpsMaxResolutionMax(source.sink_wants());
+ EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax());
EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes);
- EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes);
+ EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes);
video_stream_encoder_->Stop();
}
@@ -5518,10 +5595,12 @@ TEST_F(VideoStreamEncoderTest, EncoderRatesPropagatedOnReconfigure) {
}
struct MockEncoderSwitchRequestCallback : public EncoderSwitchRequestCallback {
- MOCK_METHOD0(RequestEncoderFallback, void());
- MOCK_METHOD1(RequestEncoderSwitch, void(const Config& conf));
- MOCK_METHOD1(RequestEncoderSwitch,
- void(const webrtc::SdpVideoFormat& format));
+ MOCK_METHOD(void, RequestEncoderFallback, (), (override));
+ MOCK_METHOD(void, RequestEncoderSwitch, (const Config& conf), (override));
+ MOCK_METHOD(void,
+ RequestEncoderSwitch,
+ (const webrtc::SdpVideoFormat& format),
+ (override));
};
TEST_F(VideoStreamEncoderTest, BitrateEncoderSwitch) {
@@ -5845,7 +5924,7 @@ TEST_F(VideoStreamEncoderTest, AutomaticAnimationDetection) {
DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_stream_encoder_->SetSource(&video_source_,
webrtc::DegradationPreference::BALANCED);
- VerifyNoLimitation(video_source_.sink_wants());
+ EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants());
VideoFrame frame = CreateFrame(1, kWidth, kHeight);
frame.set_update_rect(VideoFrame::UpdateRect{0, 0, kWidth, kHeight});
@@ -5864,7 +5943,7 @@ TEST_F(VideoStreamEncoderTest, AutomaticAnimationDetection) {
rtc::VideoSinkWants expected;
expected.max_framerate_fps = kFramerateFps;
expected.max_pixel_count = 1280 * 720 + 1;
- VerifyFpsEqResolutionLt(video_source_.sink_wants(), expected);
+ EXPECT_THAT(video_source_.sink_wants(), FpsEqResolutionLt(expected));
// Pass one frame with no known update.
// Resolution cap should be removed immediately.
@@ -5877,7 +5956,8 @@ TEST_F(VideoStreamEncoderTest, AutomaticAnimationDetection) {
WaitForEncodedFrame(timestamp_ms);
// Resolution should be unlimited now.
- VerifyFpsEqResolutionMax(video_source_.sink_wants(), kFramerateFps);
+ EXPECT_THAT(video_source_.sink_wants(),
+ FpsMatchesResolutionMax(Eq(kFramerateFps)));
video_stream_encoder_->Stop();
}