author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-07-16 11:45:35 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-07-17 08:59:23 +0000
commit    552906b0f222c5d5dd11b9fd73829d510980461a (patch)
tree      3a11e6ed0538a81dd83b20cf3a4783e297f26d91 /chromium/third_party/webrtc/video
parent    1b05827804eaf047779b597718c03e7d38344261 (diff)
download  qtwebengine-chromium-552906b0f222c5d5dd11b9fd73829d510980461a.tar.gz
BASELINE: Update Chromium to 83.0.4103.122
Change-Id: Ie3a82f5bb0076eec2a7c6a6162326b4301ee291e
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/third_party/webrtc/video')
-rw-r--r--  chromium/third_party/webrtc/video/BUILD.gn | 105
-rw-r--r--  chromium/third_party/webrtc/video/OWNERS | 5
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/BUILD.gn | 88
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/OWNERS | 2
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/adaptation_counters.cc | 38
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/adaptation_counters.h | 38
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/adaptation_counters_unittest.cc | 50
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/encode_usage_resource.cc | 85
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/encode_usage_resource.h | 66
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/overuse_frame_detector.cc (renamed from chromium/third_party/webrtc/video/overuse_frame_detector.cc) | 17
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/overuse_frame_detector.h (renamed from chromium/third_party/webrtc/video/overuse_frame_detector.h) | 12
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/overuse_frame_detector_unittest.cc (renamed from chromium/third_party/webrtc/video/overuse_frame_detector_unittest.cc) | 54
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.cc | 84
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.h | 60
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor.cc | 752
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor.h | 225
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor_unittest.cc | 98
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/video_stream_adapter.cc | 622
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/video_stream_adapter.h | 229
-rw-r--r--  chromium/third_party/webrtc/video/adaptation/video_stream_adapter_unittest.cc | 766
-rw-r--r--  chromium/third_party/webrtc/video/buffered_frame_decryptor.cc | 16
-rw-r--r--  chromium/third_party/webrtc/video/buffered_frame_decryptor_unittest.cc | 5
-rw-r--r--  chromium/third_party/webrtc/video/call_stats_unittest.cc | 9
-rw-r--r--  chromium/third_party/webrtc/video/encoder_bitrate_adjuster.cc | 19
-rw-r--r--  chromium/third_party/webrtc/video/encoder_bitrate_adjuster_unittest.cc | 9
-rw-r--r--  chromium/third_party/webrtc/video/encoder_overshoot_detector_unittest.cc | 12
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/call_operation_tests.cc | 19
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/fec_tests.cc | 90
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/frame_encryption_tests.cc | 112
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/histogram_tests.cc | 233
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/multi_codec_receive_tests.cc | 18
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/multi_stream_tester.cc | 5
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/network_state_tests.cc | 7
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/retransmission_tests.cc | 50
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/rtp_rtcp_tests.cc | 24
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/ssrc_tests.cc | 74
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/stats_tests.cc | 168
-rw-r--r--  chromium/third_party/webrtc/video/end_to_end_tests/transport_feedback_tests.cc | 83
-rw-r--r--  chromium/third_party/webrtc/video/full_stack_tests.cc | 324
-rw-r--r--  chromium/third_party/webrtc/video/pc_full_stack_tests.cc | 288
-rw-r--r--  chromium/third_party/webrtc/video/picture_id_tests.cc | 71
-rw-r--r--  chromium/third_party/webrtc/video/receive_statistics_proxy_unittest.cc | 474
-rw-r--r--  chromium/third_party/webrtc/video/rtp_streams_synchronizer.cc | 32
-rw-r--r--  chromium/third_party/webrtc/video/rtp_streams_synchronizer.h | 5
-rw-r--r--  chromium/third_party/webrtc/video/rtp_video_stream_receiver.cc | 388
-rw-r--r--  chromium/third_party/webrtc/video/rtp_video_stream_receiver.h | 42
-rw-r--r--  chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc | 146
-rw-r--r--  chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h | 68
-rw-r--r--  chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc | 207
-rw-r--r--  chromium/third_party/webrtc/video/rtp_video_stream_receiver_unittest.cc | 370
-rw-r--r--  chromium/third_party/webrtc/video/send_delay_stats_unittest.cc | 8
-rw-r--r--  chromium/third_party/webrtc/video/send_statistics_proxy.cc | 101
-rw-r--r--  chromium/third_party/webrtc/video/send_statistics_proxy.h | 4
-rw-r--r--  chromium/third_party/webrtc/video/send_statistics_proxy_unittest.cc | 678
-rw-r--r--  chromium/third_party/webrtc/video/stream_synchronization.cc | 85
-rw-r--r--  chromium/third_party/webrtc/video/stream_synchronization.h | 31
-rw-r--r--  chromium/third_party/webrtc/video/stream_synchronization_unittest.cc | 457
-rw-r--r--  chromium/third_party/webrtc/video/test/mock_video_stream_encoder.h | 4
-rw-r--r--  chromium/third_party/webrtc/video/video_analyzer.cc | 117
-rw-r--r--  chromium/third_party/webrtc/video/video_analyzer.h | 16
-rw-r--r--  chromium/third_party/webrtc/video/video_quality_test.cc | 57
-rw-r--r--  chromium/third_party/webrtc/video/video_quality_test.h | 5
-rw-r--r--  chromium/third_party/webrtc/video/video_receive_stream.cc | 13
-rw-r--r--  chromium/third_party/webrtc/video/video_receive_stream.h | 4
-rw-r--r--  chromium/third_party/webrtc/video/video_receive_stream_unittest.cc | 6
-rw-r--r--  chromium/third_party/webrtc/video/video_replay.cc | 524
-rw-r--r--  chromium/third_party/webrtc/video/video_send_stream.cc | 19
-rw-r--r--  chromium/third_party/webrtc/video/video_send_stream_impl.cc | 110
-rw-r--r--  chromium/third_party/webrtc/video/video_send_stream_impl.h | 1
-rw-r--r--  chromium/third_party/webrtc/video/video_send_stream_impl_unittest.cc | 144
-rw-r--r--  chromium/third_party/webrtc/video/video_send_stream_tests.cc | 511
-rw-r--r--  chromium/third_party/webrtc/video/video_source_sink_controller.cc | 139
-rw-r--r--  chromium/third_party/webrtc/video/video_source_sink_controller.h | 75
-rw-r--r--  chromium/third_party/webrtc/video/video_source_sink_controller_unittest.cc | 145
-rw-r--r--  chromium/third_party/webrtc/video/video_stream_decoder_impl.cc | 272
-rw-r--r--  chromium/third_party/webrtc/video/video_stream_decoder_impl.h | 90
-rw-r--r--  chromium/third_party/webrtc/video/video_stream_decoder_impl_unittest.cc | 232
-rw-r--r--  chromium/third_party/webrtc/video/video_stream_encoder.cc | 1325
-rw-r--r--  chromium/third_party/webrtc/video/video_stream_encoder.h | 158
-rw-r--r--  chromium/third_party/webrtc/video/video_stream_encoder_unittest.cc | 1120
80 files changed, 8611 insertions, 4604 deletions
diff --git a/chromium/third_party/webrtc/video/BUILD.gn b/chromium/third_party/webrtc/video/BUILD.gn
index 6c8565b15ab..14109c34944 100644
--- a/chromium/third_party/webrtc/video/BUILD.gn
+++ b/chromium/third_party/webrtc/video/BUILD.gn
@@ -28,6 +28,8 @@ rtc_library("video") {
"rtp_streams_synchronizer.h",
"rtp_video_stream_receiver.cc",
"rtp_video_stream_receiver.h",
+ "rtp_video_stream_receiver_frame_transformer_delegate.cc",
+ "rtp_video_stream_receiver_frame_transformer_delegate.h",
"send_delay_stats.cc",
"send_delay_stats.h",
"send_statistics_proxy.cc",
@@ -54,6 +56,7 @@ rtc_library("video") {
":frame_dumping_decoder",
"../api:array_view",
"../api:fec_controller_api",
+ "../api:frame_transformer_interface",
"../api:libjingle_peerconnection_api",
"../api:rtp_parameters",
"../api:scoped_refptr",
@@ -98,6 +101,7 @@ rtc_library("video") {
"../modules/video_processing",
"../rtc_base:checks",
"../rtc_base:rate_limiter",
+ "../rtc_base:rtc_base",
"../rtc_base:rtc_base_approved",
"../rtc_base:rtc_numerics",
"../rtc_base:rtc_task_queue",
@@ -110,7 +114,6 @@ rtc_library("video") {
"../rtc_base/experiments:quality_scaling_experiment",
"../rtc_base/experiments:rate_control_settings",
"../rtc_base/synchronization:sequence_checker",
- "../rtc_base/system:fallthrough",
"../rtc_base/system:thread_registry",
"../rtc_base/task_utils:repeating_task",
"../rtc_base/task_utils:to_queued_task",
@@ -119,7 +122,9 @@ rtc_library("video") {
"../system_wrappers:field_trial",
"../system_wrappers:metrics",
"//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/memory",
+ "//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
@@ -182,13 +187,15 @@ rtc_library("video_stream_encoder_impl") {
"encoder_overshoot_detector.h",
"frame_encode_metadata_writer.cc",
"frame_encode_metadata_writer.h",
- "overuse_frame_detector.cc",
- "overuse_frame_detector.h",
+ "video_source_sink_controller.cc",
+ "video_source_sink_controller.h",
"video_stream_encoder.cc",
"video_stream_encoder.h",
]
deps = [
+ "../api:rtp_parameters",
+ "../api/task_queue:task_queue",
"../api/units:data_rate",
"../api/video:encoded_image",
"../api/video:video_bitrate_allocation",
@@ -200,6 +207,7 @@ rtc_library("video_stream_encoder_impl") {
"../api/video:video_rtp_headers",
"../api/video:video_stream_encoder",
"../api/video_codecs:video_codecs_api",
+ "../call/adaptation:resource_adaptation",
"../common_video",
"../modules:module_api_public",
"../modules/video_coding",
@@ -223,11 +231,12 @@ rtc_library("video_stream_encoder_impl") {
"../rtc_base/experiments:quality_scaling_experiment",
"../rtc_base/experiments:rate_control_settings",
"../rtc_base/synchronization:sequence_checker",
- "../rtc_base/system:fallthrough",
"../rtc_base/task_utils:repeating_task",
"../system_wrappers",
"../system_wrappers:field_trial",
+ "adaptation:video_adaptation",
"//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/types:optional",
]
}
@@ -235,9 +244,7 @@ rtc_library("video_stream_encoder_impl") {
if (rtc_include_tests) {
rtc_library("video_mocks") {
testonly = true
- sources = [
- "test/mock_video_stream_encoder.h",
- ]
+ sources = [ "test/mock_video_stream_encoder.h" ]
deps = [
"../api/video:video_stream_encoder",
"../test:test_support",
@@ -259,7 +266,9 @@ if (rtc_include_tests) {
]
deps = [
":frame_dumping_decoder",
+ "../api:create_frame_generator",
"../api:fec_controller_api",
+ "../api:frame_generator_api",
"../api:libjingle_peerconnection_api",
"../api:rtc_event_log_output_file",
"../api:test_dependency_factory",
@@ -284,6 +293,7 @@ if (rtc_include_tests) {
"../modules/audio_device:windows_core_audio_utility",
"../modules/audio_mixer:audio_mixer_impl",
"../modules/rtp_rtcp",
+ "../modules/rtp_rtcp:rtp_rtcp_format",
"../modules/video_coding",
"../modules/video_coding:video_coding_utility",
"../modules/video_coding:webrtc_h264",
@@ -320,9 +330,7 @@ if (rtc_include_tests) {
rtc_library("video_full_stack_tests") {
testonly = true
- sources = [
- "full_stack_tests.cc",
- ]
+ sources = [ "full_stack_tests.cc" ]
deps = [
":video_quality_test",
"../api:simulated_network_api",
@@ -347,9 +355,7 @@ if (rtc_include_tests) {
rtc_library("video_pc_full_stack_tests") {
testonly = true
- sources = [
- "pc_full_stack_tests.cc",
- ]
+ sources = [ "pc_full_stack_tests.cc" ]
deps = [
"../api:create_network_emulation_manager",
"../api:create_peerconnection_quality_test_fixture",
@@ -398,31 +404,21 @@ if (rtc_include_tests) {
if (is_mac) {
mac_app_bundle("video_loopback") {
testonly = true
- sources = [
- "video_loopback_main.mm",
- ]
+ sources = [ "video_loopback_main.mm" ]
info_plist = "../test/mac/Info.plist"
- deps = [
- ":video_loopback_lib",
- ]
+ deps = [ ":video_loopback_lib" ]
}
} else {
rtc_executable("video_loopback") {
testonly = true
- sources = [
- "video_loopback_main.cc",
- ]
- deps = [
- ":video_loopback_lib",
- ]
+ sources = [ "video_loopback_main.cc" ]
+ deps = [ ":video_loopback_lib" ]
}
}
rtc_executable("screenshare_loopback") {
testonly = true
- sources = [
- "screenshare_loopback.cc",
- ]
+ sources = [ "screenshare_loopback.cc" ]
deps = [
":video_quality_test",
@@ -449,9 +445,7 @@ if (rtc_include_tests) {
rtc_executable("sv_loopback") {
testonly = true
- sources = [
- "sv_loopback.cc",
- ]
+ sources = [ "sv_loopback.cc" ]
deps = [
":video_quality_test",
"../api:libjingle_peerconnection_api",
@@ -475,41 +469,6 @@ if (rtc_include_tests) {
]
}
- rtc_executable("video_replay") {
- testonly = true
- sources = [
- "video_replay.cc",
- ]
- deps = [
- "../api/rtc_event_log",
- "../api/task_queue:default_task_queue_factory",
- "../api/test/video:function_video_factory",
- "../api/video_codecs:video_codecs_api",
- "../call:call_interfaces",
- "../common_video",
- "../media:rtc_internal_video_codecs",
- "../rtc_base:checks",
- "../rtc_base:rtc_json",
- "../rtc_base:stringutils",
- "../rtc_base:timeutils",
- "../system_wrappers",
- "../test:call_config_utils",
- "../test:encoder_settings",
- "../test:fake_video_codecs",
- "../test:null_transport",
- "../test:rtp_test_utils",
- "../test:run_test",
- "../test:run_test_interface",
- "../test:test_common",
- "../test:test_renderer",
- "../test:test_support",
- "../test:video_test_common",
- "../test:video_test_support",
- "//third_party/abseil-cpp/absl/flags:flag",
- "//third_party/abseil-cpp/absl/flags:parse",
- ]
- }
-
# TODO(pbos): Rename test suite.
rtc_library("video_tests") {
testonly = true
@@ -542,13 +501,13 @@ if (rtc_include_tests) {
"end_to_end_tests/stats_tests.cc",
"end_to_end_tests/transport_feedback_tests.cc",
"frame_encode_metadata_writer_unittest.cc",
- "overuse_frame_detector_unittest.cc",
"picture_id_tests.cc",
"quality_limitation_reason_tracker_unittest.cc",
"quality_scaling_tests.cc",
"quality_threshold_unittest.cc",
"receive_statistics_proxy_unittest.cc",
"report_block_stats_unittest.cc",
+ "rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc",
"rtp_video_stream_receiver_unittest.cc",
"send_delay_stats_unittest.cc",
"send_statistics_proxy_unittest.cc",
@@ -557,21 +516,28 @@ if (rtc_include_tests) {
"video_receive_stream_unittest.cc",
"video_send_stream_impl_unittest.cc",
"video_send_stream_tests.cc",
+ "video_source_sink_controller_unittest.cc",
+ "video_stream_decoder_impl_unittest.cc",
"video_stream_encoder_unittest.cc",
]
deps = [
":video",
":video_mocks",
+ ":video_stream_decoder_impl",
":video_stream_encoder_impl",
+ "../api:create_frame_generator",
"../api:fake_frame_decryptor",
"../api:fake_frame_encryptor",
+ "../api:frame_generator_api",
"../api:libjingle_peerconnection_api",
"../api:mock_fec_controller_override",
"../api:mock_frame_decryptor",
+ "../api:mock_video_encoder",
"../api:rtp_headers",
"../api:rtp_parameters",
"../api:scoped_refptr",
"../api:simulated_network_api",
+ "../api:transport_api",
"../api/crypto:options",
"../api/rtc_event_log",
"../api/task_queue",
@@ -598,6 +564,8 @@ if (rtc_include_tests) {
"../call:simulated_network",
"../call:simulated_packet_receiver",
"../call:video_stream_api",
+ "../call/adaptation:resource_adaptation",
+ "../call/adaptation:resource_adaptation_test_utilities",
"../common_video",
"../common_video/test:utilities",
"../media:rtc_audio_video",
@@ -641,6 +609,8 @@ if (rtc_include_tests) {
"../test:fake_video_codecs",
"../test:field_trial",
"../test:fileutils",
+ "../test:frame_utils",
+ "../test:mock_transport",
"../test:null_transport",
"../test:perf_test",
"../test:rtp_test_utils",
@@ -648,6 +618,7 @@ if (rtc_include_tests) {
"../test:test_support",
"../test:video_test_common",
"../test/time_controller",
+ "adaptation:video_adaptation",
"//testing/gtest",
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/memory",
diff --git a/chromium/third_party/webrtc/video/OWNERS b/chromium/third_party/webrtc/video/OWNERS
index 65b660602b7..f76cf9009a4 100644
--- a/chromium/third_party/webrtc/video/OWNERS
+++ b/chromium/third_party/webrtc/video/OWNERS
@@ -3,8 +3,3 @@ ilnik@webrtc.org
mflodman@webrtc.org
sprang@webrtc.org
stefan@webrtc.org
-
-# These are for the common case of adding or renaming files. If you're doing
-# structural changes, please get a review from a reviewer in this file.
-per-file *.gn=*
-per-file *.gni=*
diff --git a/chromium/third_party/webrtc/video/adaptation/BUILD.gn b/chromium/third_party/webrtc/video/adaptation/BUILD.gn
new file mode 100644
index 00000000000..fd611e23842
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/BUILD.gn
@@ -0,0 +1,88 @@
+# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_library("video_adaptation") {
+ sources = [
+ "adaptation_counters.cc",
+ "adaptation_counters.h",
+ "encode_usage_resource.cc",
+ "encode_usage_resource.h",
+ "overuse_frame_detector.cc",
+ "overuse_frame_detector.h",
+ "quality_scaler_resource.cc",
+ "quality_scaler_resource.h",
+ "resource_adaptation_processor.cc",
+ "resource_adaptation_processor.h",
+ "video_stream_adapter.cc",
+ "video_stream_adapter.h",
+ ]
+
+ deps = [
+ "../../api:rtp_parameters",
+ "../../api/task_queue:task_queue",
+ "../../api/video:video_frame",
+ "../../api/video:video_stream_encoder",
+ "../../api/video_codecs:video_codecs_api",
+ "../../call/adaptation:resource_adaptation",
+ "../../modules/video_coding:video_coding_utility",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:macromagic",
+ "../../rtc_base:rtc_base_approved",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:timeutils",
+ "../../rtc_base/experiments:balanced_degradation_settings",
+ "../../rtc_base/experiments:field_trial_parser",
+ "../../rtc_base/experiments:quality_rampup_experiment",
+ "../../rtc_base/experiments:quality_scaler_settings",
+ "../../rtc_base/synchronization:sequence_checker",
+ "../../rtc_base/task_utils:repeating_task",
+ "../../system_wrappers:field_trial",
+ "../../system_wrappers:system_wrappers",
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
+
+if (rtc_include_tests) {
+ rtc_library("video_adaptation_tests") {
+ testonly = true
+
+ defines = []
+ sources = [
+ "adaptation_counters_unittest.cc",
+ "overuse_frame_detector_unittest.cc",
+ "resource_adaptation_processor_unittest.cc",
+ "video_stream_adapter_unittest.cc",
+ ]
+ deps = [
+ ":video_adaptation",
+ "../../api/video:encoded_image",
+ "../../api/video:video_frame_i420",
+ "../../api/video_codecs:video_codecs_api",
+ "../../call/adaptation:resource_adaptation",
+ "../../modules/video_coding:video_coding_utility",
+ "../../rtc_base:checks",
+ "../../rtc_base:logging",
+ "../../rtc_base:rtc_base_approved",
+ "../../rtc_base:rtc_base_tests_utils",
+ "../../rtc_base:rtc_event",
+ "../../rtc_base:rtc_numerics",
+ "../../rtc_base:task_queue_for_test",
+ "../../test:field_trial",
+ "//test:rtc_expect_death",
+ "//test:test_support",
+ "//testing/gtest",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+ }
+}
diff --git a/chromium/third_party/webrtc/video/adaptation/OWNERS b/chromium/third_party/webrtc/video/adaptation/OWNERS
new file mode 100644
index 00000000000..e4bec4aebba
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/OWNERS
@@ -0,0 +1,2 @@
+eshr@google.com
+hbos@webrtc.org
diff --git a/chromium/third_party/webrtc/video/adaptation/adaptation_counters.cc b/chromium/third_party/webrtc/video/adaptation/adaptation_counters.cc
new file mode 100644
index 00000000000..085743a90a8
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/adaptation_counters.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/adaptation_counters.h"
+
+namespace webrtc {
+
+bool AdaptationCounters::operator==(const AdaptationCounters& rhs) const {
+ return fps_adaptations == rhs.fps_adaptations &&
+ resolution_adaptations == rhs.resolution_adaptations;
+}
+
+bool AdaptationCounters::operator!=(const AdaptationCounters& rhs) const {
+ return !(rhs == *this);
+}
+
+AdaptationCounters AdaptationCounters::operator+(
+ const AdaptationCounters& other) const {
+ return AdaptationCounters(
+ resolution_adaptations + other.resolution_adaptations,
+ fps_adaptations + other.fps_adaptations);
+}
+
+AdaptationCounters AdaptationCounters::operator-(
+ const AdaptationCounters& other) const {
+ return AdaptationCounters(
+ resolution_adaptations - other.resolution_adaptations,
+ fps_adaptations - other.fps_adaptations);
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/adaptation/adaptation_counters.h b/chromium/third_party/webrtc/video/adaptation/adaptation_counters.h
new file mode 100644
index 00000000000..0cb17dd8755
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/adaptation_counters.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_ADAPTATION_COUNTERS_H_
+#define VIDEO_ADAPTATION_ADAPTATION_COUNTERS_H_
+
+namespace webrtc {
+
+// Counts the number of adaptations that have resulted from resource overuse.
+// Today we can adapt resolution and fps.
+struct AdaptationCounters {
+ AdaptationCounters() : resolution_adaptations(0), fps_adaptations(0) {}
+ AdaptationCounters(int resolution_adaptations, int fps_adaptations)
+ : resolution_adaptations(resolution_adaptations),
+ fps_adaptations(fps_adaptations) {}
+
+ int Total() const { return fps_adaptations + resolution_adaptations; }
+
+ bool operator==(const AdaptationCounters& rhs) const;
+ bool operator!=(const AdaptationCounters& rhs) const;
+
+ AdaptationCounters operator+(const AdaptationCounters& other) const;
+ AdaptationCounters operator-(const AdaptationCounters& other) const;
+
+ int resolution_adaptations;
+ int fps_adaptations;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_ADAPTATION_COUNTERS_H_
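
For orientation, a minimal usage sketch of the counter arithmetic defined above (hypothetical values and function name; not part of the patch):

#include "video/adaptation/adaptation_counters.h"

// Hypothetical example: per-reason counters combine with operator+, and
// Total() sums the fps and resolution steps, as defined in the header above.
void AdaptationCountersExample() {
  webrtc::AdaptationCounters cpu(/*resolution_adaptations=*/2,
                                 /*fps_adaptations=*/1);
  webrtc::AdaptationCounters quality(/*resolution_adaptations=*/1,
                                     /*fps_adaptations=*/0);
  webrtc::AdaptationCounters total = cpu + quality;  // {3, 1}
  int total_steps = total.Total();                   // 4
  (void)total_steps;
}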
diff --git a/chromium/third_party/webrtc/video/adaptation/adaptation_counters_unittest.cc b/chromium/third_party/webrtc/video/adaptation/adaptation_counters_unittest.cc
new file mode 100644
index 00000000000..7522a40ebfa
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/adaptation_counters_unittest.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/adaptation_counters.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(AdaptationCountersTest, Addition) {
+ AdaptationCounters a{0, 0};
+ AdaptationCounters b{1, 2};
+ AdaptationCounters total = a + b;
+ EXPECT_EQ(1, total.resolution_adaptations);
+ EXPECT_EQ(2, total.fps_adaptations);
+}
+
+TEST(AdaptationCountersTest, Subtraction) {
+ AdaptationCounters a{0, 1};
+ AdaptationCounters b{2, 1};
+ AdaptationCounters diff = a - b;
+ EXPECT_EQ(-2, diff.resolution_adaptations);
+ EXPECT_EQ(0, diff.fps_adaptations);
+}
+
+TEST(AdaptationCountersTest, Equality) {
+ AdaptationCounters a{1, 2};
+ AdaptationCounters b{2, 1};
+ EXPECT_EQ(a, a);
+ EXPECT_NE(a, b);
+}
+
+TEST(AdaptationCountersTest, SelfAdditionSubtraction) {
+ AdaptationCounters a{1, 0};
+ AdaptationCounters b{0, 1};
+
+ EXPECT_EQ(a, a + b - b);
+ EXPECT_EQ(a, b + a - b);
+ EXPECT_EQ(a, a - b + b);
+ EXPECT_EQ(a, b - b + a);
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.cc b/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.cc
new file mode 100644
index 00000000000..385a8b91822
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/encode_usage_resource.h"
+
+#include <limits>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+EncodeUsageResource::EncodeUsageResource(
+ std::unique_ptr<OveruseFrameDetector> overuse_detector)
+ : overuse_detector_(std::move(overuse_detector)),
+ is_started_(false),
+ target_frame_rate_(absl::nullopt) {
+ RTC_DCHECK(overuse_detector_);
+}
+
+void EncodeUsageResource::StartCheckForOveruse(CpuOveruseOptions options) {
+ RTC_DCHECK(!is_started_);
+ overuse_detector_->StartCheckForOveruse(TaskQueueBase::Current(),
+ std::move(options), this);
+ is_started_ = true;
+ overuse_detector_->OnTargetFramerateUpdated(TargetFrameRateAsInt());
+}
+
+void EncodeUsageResource::StopCheckForOveruse() {
+ overuse_detector_->StopCheckForOveruse();
+ is_started_ = false;
+}
+
+void EncodeUsageResource::SetTargetFrameRate(
+ absl::optional<double> target_frame_rate) {
+ if (target_frame_rate == target_frame_rate_)
+ return;
+ target_frame_rate_ = target_frame_rate;
+ if (is_started_)
+ overuse_detector_->OnTargetFramerateUpdated(TargetFrameRateAsInt());
+}
+
+void EncodeUsageResource::OnEncodeStarted(const VideoFrame& cropped_frame,
+ int64_t time_when_first_seen_us) {
+ // TODO(hbos): Rename FrameCaptured() to something more appropriate (e.g.
+ // "OnEncodeStarted"?) or revise usage.
+ overuse_detector_->FrameCaptured(cropped_frame, time_when_first_seen_us);
+}
+
+void EncodeUsageResource::OnEncodeCompleted(
+ uint32_t timestamp,
+ int64_t time_sent_in_us,
+ int64_t capture_time_us,
+ absl::optional<int> encode_duration_us) {
+ // TODO(hbos): Rename FrameSent() to something more appropriate (e.g.
+ // "OnEncodeCompleted"?).
+ overuse_detector_->FrameSent(timestamp, time_sent_in_us, capture_time_us,
+ encode_duration_us);
+}
+
+void EncodeUsageResource::AdaptUp(AdaptReason reason) {
+ RTC_DCHECK_EQ(reason, AdaptReason::kCpu);
+ OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
+}
+
+bool EncodeUsageResource::AdaptDown(AdaptReason reason) {
+ RTC_DCHECK_EQ(reason, AdaptReason::kCpu);
+ return OnResourceUsageStateMeasured(ResourceUsageState::kOveruse) !=
+ ResourceListenerResponse::kQualityScalerShouldIncreaseFrequency;
+}
+
+int EncodeUsageResource::TargetFrameRateAsInt() {
+ return target_frame_rate_.has_value()
+ ? static_cast<int>(target_frame_rate_.value())
+ : std::numeric_limits<int>::max();
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.h b/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.h
new file mode 100644
index 00000000000..e626c2f50ec
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/encode_usage_resource.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_ENCODE_USAGE_RESOURCE_H_
+#define VIDEO_ADAPTATION_ENCODE_USAGE_RESOURCE_H_
+
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "call/adaptation/resource.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+#include "video/adaptation/overuse_frame_detector.h"
+
+namespace webrtc {
+
+// Handles interaction with the OveruseDetector.
+// TODO(hbos): Add unittests specific to this class; it is currently only tested
+// indirectly by usage in the ResourceAdaptationProcessor (which is only tested
+// because of its usage in VideoStreamEncoder); all tests are currently in
+// video_stream_encoder_unittest.cc.
+// TODO(https://crbug.com/webrtc/11222): Move this class to the
+// video/adaptation/ subdirectory.
+class EncodeUsageResource : public Resource,
+ public AdaptationObserverInterface {
+ public:
+ explicit EncodeUsageResource(
+ std::unique_ptr<OveruseFrameDetector> overuse_detector);
+
+ void StartCheckForOveruse(CpuOveruseOptions options);
+ void StopCheckForOveruse();
+
+ void SetTargetFrameRate(absl::optional<double> target_frame_rate);
+ void OnEncodeStarted(const VideoFrame& cropped_frame,
+ int64_t time_when_first_seen_us);
+ void OnEncodeCompleted(uint32_t timestamp,
+ int64_t time_sent_in_us,
+ int64_t capture_time_us,
+ absl::optional<int> encode_duration_us);
+
+ // AdaptationObserverInterface implementation.
+ // TODO(https://crbug.com/webrtc/11222, 11172): This resource also needs to
+ // signal when it's stable to support multi-stream aware modules.
+ void AdaptUp(AdaptReason reason) override;
+ bool AdaptDown(AdaptReason reason) override;
+
+ std::string name() const override { return "EncoderUsageResource"; }
+
+ private:
+ int TargetFrameRateAsInt();
+
+ const std::unique_ptr<OveruseFrameDetector> overuse_detector_;
+ bool is_started_;
+ absl::optional<double> target_frame_rate_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_ENCODE_USAGE_RESOURCE_H_
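
A minimal lifecycle sketch for the class above, assuming a WebRTC checkout at this revision; the wrapper function and the detector argument are illustrative, and default-constructed CpuOveruseOptions stand in for real tuning:

#include <memory>
#include <utility>

#include "video/adaptation/encode_usage_resource.h"

// Sketch only. StartCheckForOveruse() captures TaskQueueBase::Current(), so
// this must run on a task queue (in the real code, the encoder queue).
void EncodeUsageResourceSketch(
    std::unique_ptr<webrtc::OveruseFrameDetector> detector) {
  webrtc::EncodeUsageResource resource(std::move(detector));
  resource.StartCheckForOveruse(webrtc::CpuOveruseOptions());
  resource.SetTargetFrameRate(30.0);  // Forwarded to the detector as an int.
  // Per frame: OnEncodeStarted(cropped_frame, time_when_first_seen_us), then
  // OnEncodeCompleted(timestamp, time_sent_in_us, capture_time_us, duration).
  resource.StopCheckForOveruse();
}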
diff --git a/chromium/third_party/webrtc/video/overuse_frame_detector.cc b/chromium/third_party/webrtc/video/adaptation/overuse_frame_detector.cc
index 429dbc4f320..64b67687e9e 100644
--- a/chromium/third_party/webrtc/video/overuse_frame_detector.cc
+++ b/chromium/third_party/webrtc/video/adaptation/overuse_frame_detector.cc
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "video/overuse_frame_detector.h"
+#include "video/adaptation/overuse_frame_detector.h"
#include <math.h>
#include <stdio.h>
@@ -540,7 +540,7 @@ OveruseFrameDetector::OveruseFrameDetector(
OveruseFrameDetector::~OveruseFrameDetector() {}
void OveruseFrameDetector::StartCheckForOveruse(
- rtc::TaskQueue* task_queue,
+ TaskQueueBase* task_queue_base,
const CpuOveruseOptions& options,
AdaptationObserverInterface* overuse_observer) {
RTC_DCHECK_RUN_ON(&task_checker_);
@@ -549,10 +549,10 @@ void OveruseFrameDetector::StartCheckForOveruse(
SetOptions(options);
check_overuse_task_ = RepeatingTaskHandle::DelayedStart(
- task_queue->Get(), TimeDelta::ms(kTimeToFirstCheckForOveruseMs),
+ task_queue_base, TimeDelta::Millis(kTimeToFirstCheckForOveruseMs),
[this, overuse_observer] {
CheckForOveruse(overuse_observer);
- return TimeDelta::ms(kCheckForOveruseIntervalMs);
+ return TimeDelta::Millis(kCheckForOveruseIntervalMs);
});
}
void OveruseFrameDetector::StopCheckForOveruse() {
@@ -677,9 +677,10 @@ void OveruseFrameDetector::CheckForOveruse(
in_quick_rampup_ ? kQuickRampUpDelayMs : current_rampup_delay_ms_;
RTC_LOG(LS_VERBOSE) << " Frame stats: "
- << " encode usage " << *encode_usage_percent_
- << " overuse detections " << num_overuse_detections_
- << " rampup delay " << rampup_delay;
+ " encode usage "
+ << *encode_usage_percent_ << " overuse detections "
+ << num_overuse_detections_ << " rampup delay "
+ << rampup_delay;
}
void OveruseFrameDetector::SetOptions(const CpuOveruseOptions& options) {
diff --git a/chromium/third_party/webrtc/video/overuse_frame_detector.h b/chromium/third_party/webrtc/video/adaptation/overuse_frame_detector.h
index 20e097111b0..e8c667dfdce 100644
--- a/chromium/third_party/webrtc/video/overuse_frame_detector.h
+++ b/chromium/third_party/webrtc/video/adaptation/overuse_frame_detector.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@@ -8,20 +8,20 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VIDEO_OVERUSE_FRAME_DETECTOR_H_
-#define VIDEO_OVERUSE_FRAME_DETECTOR_H_
+#ifndef VIDEO_ADAPTATION_OVERUSE_FRAME_DETECTOR_H_
+#define VIDEO_ADAPTATION_OVERUSE_FRAME_DETECTOR_H_
#include <list>
#include <memory>
#include "absl/types/optional.h"
+#include "api/task_queue/task_queue_base.h"
#include "api/video/video_stream_encoder_observer.h"
#include "modules/video_coding/utility/quality_scaler.h"
#include "rtc_base/constructor_magic.h"
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/numerics/exp_filter.h"
#include "rtc_base/synchronization/sequence_checker.h"
-#include "rtc_base/task_queue.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "rtc_base/thread_annotations.h"
@@ -58,7 +58,7 @@ class OveruseFrameDetector {
virtual ~OveruseFrameDetector();
// Start to periodically check for overuse.
- void StartCheckForOveruse(rtc::TaskQueue* task_queue,
+ void StartCheckForOveruse(TaskQueueBase* task_queue_base,
const CpuOveruseOptions& options,
AdaptationObserverInterface* overuse_observer);
@@ -155,4 +155,4 @@ class OveruseFrameDetector {
} // namespace webrtc
-#endif // VIDEO_OVERUSE_FRAME_DETECTOR_H_
+#endif // VIDEO_ADAPTATION_OVERUSE_FRAME_DETECTOR_H_
diff --git a/chromium/third_party/webrtc/video/overuse_frame_detector_unittest.cc b/chromium/third_party/webrtc/video/adaptation/overuse_frame_detector_unittest.cc
index 7d8217ccf9e..5ace2f2be8f 100644
--- a/chromium/third_party/webrtc/video/overuse_frame_detector_unittest.cc
+++ b/chromium/third_party/webrtc/video/adaptation/overuse_frame_detector_unittest.cc
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "video/overuse_frame_detector.h"
+#include "video/adaptation/overuse_frame_detector.h"
#include <memory>
@@ -49,11 +49,11 @@ class CpuOveruseObserverImpl : public AdaptationObserverInterface {
CpuOveruseObserverImpl() : overuse_(0), normaluse_(0) {}
virtual ~CpuOveruseObserverImpl() {}
- bool AdaptDown(AdaptReason) {
+ bool AdaptDown(AdaptReason) override {
++overuse_;
return true;
}
- void AdaptUp(AdaptReason) { ++normaluse_; }
+ void AdaptUp(AdaptReason) override { ++normaluse_; }
int overuse_;
int normaluse_;
@@ -109,10 +109,10 @@ class OveruseFrameDetectorTest : public ::testing::Test,
frame.set_timestamp(timestamp);
int64_t capture_time_us = rtc::TimeMicros();
overuse_detector_->FrameCaptured(frame, capture_time_us);
- clock_.AdvanceTime(TimeDelta::us(delay_us));
+ clock_.AdvanceTime(TimeDelta::Micros(delay_us));
overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(),
capture_time_us, delay_us);
- clock_.AdvanceTime(TimeDelta::us(interval_us - delay_us));
+ clock_.AdvanceTime(TimeDelta::Micros(interval_us - delay_us));
timestamp += interval_us * 90 / 1000;
}
}
@@ -138,7 +138,7 @@ class OveruseFrameDetectorTest : public ::testing::Test,
int max_delay_us = 0;
for (int delay_us : delays_us) {
if (delay_us > max_delay_us) {
- clock_.AdvanceTime(TimeDelta::us(delay_us - max_delay_us));
+ clock_.AdvanceTime(TimeDelta::Micros(delay_us - max_delay_us));
max_delay_us = delay_us;
}
@@ -146,7 +146,7 @@ class OveruseFrameDetectorTest : public ::testing::Test,
capture_time_us, delay_us);
}
overuse_detector_->CheckForOveruse(observer_);
- clock_.AdvanceTime(TimeDelta::us(interval_us - max_delay_us));
+ clock_.AdvanceTime(TimeDelta::Micros(interval_us - max_delay_us));
timestamp += interval_us * 90 / 1000;
}
}
@@ -171,7 +171,7 @@ class OveruseFrameDetectorTest : public ::testing::Test,
int interval_us = random.Rand(min_interval_us, max_interval_us);
int64_t capture_time_us = rtc::TimeMicros();
overuse_detector_->FrameCaptured(frame, capture_time_us);
- clock_.AdvanceTime(TimeDelta::us(delay_us));
+ clock_.AdvanceTime(TimeDelta::Micros(delay_us));
overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(),
capture_time_us,
absl::optional<int>(delay_us));
@@ -179,7 +179,7 @@ class OveruseFrameDetectorTest : public ::testing::Test,
overuse_detector_->CheckForOveruse(observer_);
// Avoid turning clock backwards.
if (interval_us > delay_us)
- clock_.AdvanceTime(TimeDelta::us(interval_us - delay_us));
+ clock_.AdvanceTime(TimeDelta::Micros(interval_us - delay_us));
timestamp += interval_us * 90 / 1000;
}
@@ -276,7 +276,7 @@ TEST_F(OveruseFrameDetectorTest, TriggerUnderuseWithMinProcessCount) {
kProcessTimeUs);
overuse_detector_->CheckForOveruse(&overuse_observer);
EXPECT_EQ(0, overuse_observer.normaluse_);
- clock_.AdvanceTime(TimeDelta::us(kProcessIntervalUs));
+ clock_.AdvanceTime(TimeDelta::Micros(kProcessIntervalUs));
overuse_detector_->CheckForOveruse(&overuse_observer);
EXPECT_EQ(1, overuse_observer.normaluse_);
}
@@ -352,14 +352,14 @@ TEST_F(OveruseFrameDetectorTest, MinFrameSamplesBeforeUpdating) {
kProcessTimeUs);
EXPECT_EQ(InitialUsage(), UsagePercent());
// Pass time far enough to digest all previous samples.
- clock_.AdvanceTime(TimeDelta::seconds(1));
+ clock_.AdvanceTime(TimeDelta::Seconds(1));
InsertAndSendFramesWithInterval(1, kFrameIntervalUs, kWidth, kHeight,
kProcessTimeUs);
// The last sample has not been processed here.
EXPECT_EQ(InitialUsage(), UsagePercent());
// Pass time far enough to digest all previous samples, 41 in total.
- clock_.AdvanceTime(TimeDelta::seconds(1));
+ clock_.AdvanceTime(TimeDelta::Seconds(1));
InsertAndSendFramesWithInterval(1, kFrameIntervalUs, kWidth, kHeight,
kProcessTimeUs);
EXPECT_NE(InitialUsage(), UsagePercent());
@@ -387,7 +387,7 @@ TEST_F(OveruseFrameDetectorTest, MeasuresMultipleConcurrentSamples) {
frame.set_timestamp(static_cast<uint32_t>(i));
int64_t capture_time_us = rtc::TimeMicros();
overuse_detector_->FrameCaptured(frame, capture_time_us);
- clock_.AdvanceTime(TimeDelta::us(kIntervalUs));
+ clock_.AdvanceTime(TimeDelta::Micros(kIntervalUs));
if (i > kNumFramesEncodingDelay) {
overuse_detector_->FrameSent(
static_cast<uint32_t>(i - kNumFramesEncodingDelay), rtc::TimeMicros(),
@@ -415,14 +415,14 @@ TEST_F(OveruseFrameDetectorTest, UpdatesExistingSamples) {
int64_t capture_time_us = rtc::TimeMicros();
overuse_detector_->FrameCaptured(frame, capture_time_us);
// Encode and send first parts almost instantly.
- clock_.AdvanceTime(TimeDelta::ms(1));
+ clock_.AdvanceTime(TimeDelta::Millis(1));
overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(), capture_time_us,
rtc::kNumMicrosecsPerMillisec);
// Encode heavier part, resulting in >85% usage total.
- clock_.AdvanceTime(TimeDelta::us(kDelayUs) - TimeDelta::ms(1));
+ clock_.AdvanceTime(TimeDelta::Micros(kDelayUs) - TimeDelta::Millis(1));
overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(), capture_time_us,
kDelayUs);
- clock_.AdvanceTime(TimeDelta::us(kIntervalUs - kDelayUs));
+ clock_.AdvanceTime(TimeDelta::Micros(kIntervalUs - kDelayUs));
timestamp += kIntervalUs * 90 / 1000;
overuse_detector_->CheckForOveruse(observer_);
}
@@ -433,7 +433,8 @@ TEST_F(OveruseFrameDetectorTest, RunOnTqNormalUsage) {
queue.SendTask(
[&] {
- overuse_detector_->StartCheckForOveruse(&queue, options_, observer_);
+ overuse_detector_->StartCheckForOveruse(queue.Get(), options_,
+ observer_);
},
RTC_FROM_HERE);
@@ -680,7 +681,7 @@ class OveruseFrameDetectorTest2 : public OveruseFrameDetectorTest {
overuse_detector_->FrameSent(0 /* ignored timestamp */,
0 /* ignored send_time_us */,
capture_time_us, delay_us);
- clock_.AdvanceTime(TimeDelta::us(interval_us));
+ clock_.AdvanceTime(TimeDelta::Micros(interval_us));
}
}
@@ -707,7 +708,7 @@ class OveruseFrameDetectorTest2 : public OveruseFrameDetectorTest {
capture_time_us, delay_us);
overuse_detector_->CheckForOveruse(observer_);
- clock_.AdvanceTime(TimeDelta::us(interval_us));
+ clock_.AdvanceTime(TimeDelta::Micros(interval_us));
}
}
@@ -757,7 +758,7 @@ TEST_F(OveruseFrameDetectorTest2, TriggerUnderuseWithMinProcessCount) {
kProcessTimeUs);
overuse_detector_->CheckForOveruse(&overuse_observer);
EXPECT_EQ(0, overuse_observer.normaluse_);
- clock_.AdvanceTime(TimeDelta::us(kProcessIntervalUs));
+ clock_.AdvanceTime(TimeDelta::Micros(kProcessIntervalUs));
overuse_detector_->CheckForOveruse(&overuse_observer);
EXPECT_EQ(1, overuse_observer.normaluse_);
}
@@ -868,7 +869,7 @@ TEST_F(OveruseFrameDetectorTest2, MeasuresMultipleConcurrentSamples) {
frame.set_timestamp(static_cast<uint32_t>(i));
int64_t capture_time_us = rtc::TimeMicros();
overuse_detector_->FrameCaptured(frame, capture_time_us);
- clock_.AdvanceTime(TimeDelta::us(kIntervalUs));
+ clock_.AdvanceTime(TimeDelta::Micros(kIntervalUs));
if (i > kNumFramesEncodingDelay) {
overuse_detector_->FrameSent(
static_cast<uint32_t>(i - kNumFramesEncodingDelay), rtc::TimeMicros(),
@@ -896,14 +897,14 @@ TEST_F(OveruseFrameDetectorTest2, UpdatesExistingSamples) {
int64_t capture_time_us = rtc::TimeMicros();
overuse_detector_->FrameCaptured(frame, capture_time_us);
// Encode and send first parts almost instantly.
- clock_.AdvanceTime(TimeDelta::ms(1));
+ clock_.AdvanceTime(TimeDelta::Millis(1));
overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(), capture_time_us,
rtc::kNumMicrosecsPerMillisec);
// Encode heavier part, resulting in >85% usage total.
- clock_.AdvanceTime(TimeDelta::us(kDelayUs) - TimeDelta::ms(1));
+ clock_.AdvanceTime(TimeDelta::Micros(kDelayUs) - TimeDelta::Millis(1));
overuse_detector_->FrameSent(timestamp, rtc::TimeMicros(), capture_time_us,
kDelayUs);
- clock_.AdvanceTime(TimeDelta::us(kIntervalUs - kDelayUs));
+ clock_.AdvanceTime(TimeDelta::Micros(kIntervalUs - kDelayUs));
timestamp += kIntervalUs * 90 / 1000;
overuse_detector_->CheckForOveruse(observer_);
}
@@ -914,7 +915,8 @@ TEST_F(OveruseFrameDetectorTest2, RunOnTqNormalUsage) {
queue.SendTask(
[&] {
- overuse_detector_->StartCheckForOveruse(&queue, options_, observer_);
+ overuse_detector_->StartCheckForOveruse(queue.Get(), options_,
+ observer_);
},
RTC_FROM_HERE);
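
Much of the churn in the test diff above is a single mechanical rename of the TimeDelta factory functions; a condensed before/after sketch (illustrative function name):

#include "api/units/time_delta.h"

// Before this update: TimeDelta::us(..), TimeDelta::ms(..), TimeDelta::seconds(..)
// After this update:  TimeDelta::Micros(..), TimeDelta::Millis(..), TimeDelta::Seconds(..)
void TimeDeltaRenameSketch() {
  webrtc::TimeDelta elapsed = webrtc::TimeDelta::Micros(250) +
                              webrtc::TimeDelta::Millis(1) +
                              webrtc::TimeDelta::Seconds(1);
  (void)elapsed;  // 1.00125 seconds.
}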
diff --git a/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.cc b/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.cc
new file mode 100644
index 00000000000..c4d83abc3b0
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/quality_scaler_resource.h"
+
+#include <utility>
+
+namespace webrtc {
+
+QualityScalerResource::QualityScalerResource() : quality_scaler_(nullptr) {}
+
+bool QualityScalerResource::is_started() const {
+ return quality_scaler_.get();
+}
+
+void QualityScalerResource::StartCheckForOveruse(
+ VideoEncoder::QpThresholds qp_thresholds) {
+ RTC_DCHECK(!is_started());
+ quality_scaler_ =
+ std::make_unique<QualityScaler>(this, std::move(qp_thresholds));
+}
+
+void QualityScalerResource::StopCheckForOveruse() {
+ quality_scaler_.reset();
+}
+
+void QualityScalerResource::SetQpThresholds(
+ VideoEncoder::QpThresholds qp_thresholds) {
+ RTC_DCHECK(is_started());
+ quality_scaler_->SetQpThresholds(std::move(qp_thresholds));
+}
+
+bool QualityScalerResource::QpFastFilterLow() {
+ RTC_DCHECK(is_started());
+ return quality_scaler_->QpFastFilterLow();
+}
+
+void QualityScalerResource::OnEncodeCompleted(const EncodedImage& encoded_image,
+ int64_t time_sent_in_us) {
+ if (quality_scaler_ && encoded_image.qp_ >= 0) {
+ quality_scaler_->ReportQp(encoded_image.qp_, time_sent_in_us);
+ } else if (!quality_scaler_) {
+ // TODO(webrtc:11553): This is a workaround to ensure that all limitations
+ // imposed by the quality scaler are removed once the quality scaler is
+ // disabled mid-call.
+ // Instead it should be done at a higher layer in the same way for all
+ // resources.
+ OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
+ }
+}
+
+void QualityScalerResource::OnFrameDropped(
+ EncodedImageCallback::DropReason reason) {
+ if (!quality_scaler_)
+ return;
+ switch (reason) {
+ case EncodedImageCallback::DropReason::kDroppedByMediaOptimizations:
+ quality_scaler_->ReportDroppedFrameByMediaOpt();
+ break;
+ case EncodedImageCallback::DropReason::kDroppedByEncoder:
+ quality_scaler_->ReportDroppedFrameByEncoder();
+ break;
+ }
+}
+
+void QualityScalerResource::AdaptUp(AdaptReason reason) {
+ RTC_DCHECK_EQ(reason, AdaptReason::kQuality);
+ OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse);
+}
+
+bool QualityScalerResource::AdaptDown(AdaptReason reason) {
+ RTC_DCHECK_EQ(reason, AdaptReason::kQuality);
+ return OnResourceUsageStateMeasured(ResourceUsageState::kOveruse) !=
+ ResourceListenerResponse::kQualityScalerShouldIncreaseFrequency;
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.h b/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.h
new file mode 100644
index 00000000000..7708710dd53
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/quality_scaler_resource.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_QUALITY_SCALER_RESOURCE_H_
+#define VIDEO_ADAPTATION_QUALITY_SCALER_RESOURCE_H_
+
+#include <memory>
+#include <string>
+
+#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/resource.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+
+namespace webrtc {
+
+// Handles interaction with the QualityScaler.
+// TODO(hbos): Add unittests specific to this class; it is currently only tested
+// indirectly by usage in the ResourceAdaptationProcessor (which is only tested
+// because of its usage in VideoStreamEncoder); all tests are currently in
+// video_stream_encoder_unittest.cc.
+// TODO(https://crbug.com/webrtc/11222): Move this class to the
+// video/adaptation/ subdirectory.
+class QualityScalerResource : public Resource,
+ public AdaptationObserverInterface {
+ public:
+ QualityScalerResource();
+
+ bool is_started() const;
+
+ void StartCheckForOveruse(VideoEncoder::QpThresholds qp_thresholds);
+ void StopCheckForOveruse();
+
+ void SetQpThresholds(VideoEncoder::QpThresholds qp_thresholds);
+ bool QpFastFilterLow();
+ void OnEncodeCompleted(const EncodedImage& encoded_image,
+ int64_t time_sent_in_us);
+ void OnFrameDropped(EncodedImageCallback::DropReason reason);
+
+ // AdaptationObserverInterface implementation.
+ // TODO(https://crbug.com/webrtc/11222, 11172): This resource also needs to
+ // signal when it's stable to support multi-stream aware modules.
+ void AdaptUp(AdaptReason reason) override;
+ bool AdaptDown(AdaptReason reason) override;
+
+ std::string name() const override { return "QualityScalerResource"; }
+
+ private:
+ std::unique_ptr<QualityScaler> quality_scaler_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_QUALITY_SCALER_RESOURCE_H_
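
A minimal sketch of the QP-reporting flow through the wrapper above (illustrative function and threshold values; the real code runs this on the encoder task queue):

#include "rtc_base/time_utils.h"
#include "video/adaptation/quality_scaler_resource.h"

// Sketch only: start QP-based checking, feed one encoded frame, stop.
void QualityScalerResourceSketch(const webrtc::EncodedImage& encoded_image) {
  webrtc::QualityScalerResource resource;
  resource.StartCheckForOveruse(
      webrtc::VideoEncoder::QpThresholds(/*low=*/24, /*high=*/37));
  // ReportQp() is forwarded to the QualityScaler when encoded_image.qp_ >= 0.
  resource.OnEncodeCompleted(encoded_image, rtc::TimeMicros());
  resource.StopCheckForOveruse();
}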
diff --git a/chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor.cc b/chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor.cc
new file mode 100644
index 00000000000..1a0930040a7
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor.cc
@@ -0,0 +1,752 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/resource_adaptation_processor.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/video/video_source_interface.h"
+#include "call/adaptation/resource.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/time_utils.h"
+
+namespace webrtc {
+
+const int kDefaultInputPixelsWidth = 176;
+const int kDefaultInputPixelsHeight = 144;
+
+namespace {
+
+bool IsResolutionScalingEnabled(DegradationPreference degradation_preference) {
+ return degradation_preference == DegradationPreference::MAINTAIN_FRAMERATE ||
+ degradation_preference == DegradationPreference::BALANCED;
+}
+
+bool IsFramerateScalingEnabled(DegradationPreference degradation_preference) {
+ return degradation_preference == DegradationPreference::MAINTAIN_RESOLUTION ||
+ degradation_preference == DegradationPreference::BALANCED;
+}
+
+// Returns modified restrictions where any constraints that don't apply to the
+// degradation preference are cleared.
+VideoSourceRestrictions ApplyDegradationPreference(
+ VideoSourceRestrictions source_restrictions,
+ DegradationPreference degradation_preference) {
+ switch (degradation_preference) {
+ case DegradationPreference::BALANCED:
+ break;
+ case DegradationPreference::MAINTAIN_FRAMERATE:
+ source_restrictions.set_max_frame_rate(absl::nullopt);
+ break;
+ case DegradationPreference::MAINTAIN_RESOLUTION:
+ source_restrictions.set_max_pixels_per_frame(absl::nullopt);
+ source_restrictions.set_target_pixels_per_frame(absl::nullopt);
+ break;
+ case DegradationPreference::DISABLED:
+ source_restrictions.set_max_pixels_per_frame(absl::nullopt);
+ source_restrictions.set_target_pixels_per_frame(absl::nullopt);
+ source_restrictions.set_max_frame_rate(absl::nullopt);
+ }
+ return source_restrictions;
+}
+
+// Returns AdaptationCounters where constraints that don't apply to the
+// degradation preference are cleared. This behaviour must reflect that of
+// ApplyDegradationPreference for SourceRestrictions. Any change to that
+// method must also be made to this one.
+AdaptationCounters ApplyDegradationPreference(
+ AdaptationCounters counters,
+ DegradationPreference degradation_preference) {
+ switch (degradation_preference) {
+ case DegradationPreference::BALANCED:
+ break;
+ case DegradationPreference::MAINTAIN_FRAMERATE:
+ counters.fps_adaptations = 0;
+ break;
+ case DegradationPreference::MAINTAIN_RESOLUTION:
+ counters.resolution_adaptations = 0;
+ break;
+ case DegradationPreference::DISABLED:
+ counters.resolution_adaptations = 0;
+ counters.fps_adaptations = 0;
+ break;
+ default:
+ RTC_NOTREACHED();
+ }
+ return counters;
+}
+
+} // namespace
+
+class ResourceAdaptationProcessor::InitialFrameDropper {
+ public:
+ explicit InitialFrameDropper(QualityScalerResource* quality_scaler_resource)
+ : quality_scaler_resource_(quality_scaler_resource),
+ quality_scaler_settings_(QualityScalerSettings::ParseFromFieldTrials()),
+ has_seen_first_bwe_drop_(false),
+ set_start_bitrate_(DataRate::Zero()),
+ set_start_bitrate_time_ms_(0),
+ initial_framedrop_(0) {
+ RTC_DCHECK(quality_scaler_resource_);
+ }
+
+ // Output signal.
+ bool DropInitialFrames() const {
+ return initial_framedrop_ < kMaxInitialFramedrop;
+ }
+
+ // Input signals.
+ void SetStartBitrate(DataRate start_bitrate, int64_t now_ms) {
+ set_start_bitrate_ = start_bitrate;
+ set_start_bitrate_time_ms_ = now_ms;
+ }
+
+ void SetTargetBitrate(DataRate target_bitrate, int64_t now_ms) {
+ if (set_start_bitrate_ > DataRate::Zero() && !has_seen_first_bwe_drop_ &&
+ quality_scaler_resource_->is_started() &&
+ quality_scaler_settings_.InitialBitrateIntervalMs() &&
+ quality_scaler_settings_.InitialBitrateFactor()) {
+ int64_t diff_ms = now_ms - set_start_bitrate_time_ms_;
+ if (diff_ms <
+ quality_scaler_settings_.InitialBitrateIntervalMs().value() &&
+ (target_bitrate <
+ (set_start_bitrate_ *
+ quality_scaler_settings_.InitialBitrateFactor().value()))) {
+ RTC_LOG(LS_INFO) << "Reset initial_framedrop_. Start bitrate: "
+ << set_start_bitrate_.bps()
+ << ", target bitrate: " << target_bitrate.bps();
+ initial_framedrop_ = 0;
+ has_seen_first_bwe_drop_ = true;
+ }
+ }
+ }
+
+ void OnFrameDroppedDueToSize() { ++initial_framedrop_; }
+
+ void OnMaybeEncodeFrame() { initial_framedrop_ = kMaxInitialFramedrop; }
+
+ void OnQualityScalerSettingsUpdated() {
+ if (quality_scaler_resource_->is_started()) {
+ // Restart frame drops due to size.
+ initial_framedrop_ = 0;
+ } else {
+ // Quality scaling disabled so we shouldn't drop initial frames.
+ initial_framedrop_ = kMaxInitialFramedrop;
+ }
+ }
+
+ private:
+ // The maximum number of frames to drop at beginning of stream to try and
+ // achieve desired bitrate.
+ static const int kMaxInitialFramedrop = 4;
+
+ const QualityScalerResource* quality_scaler_resource_;
+ const QualityScalerSettings quality_scaler_settings_;
+ bool has_seen_first_bwe_drop_;
+ DataRate set_start_bitrate_;
+ int64_t set_start_bitrate_time_ms_;
+ // Counts how many frames we've dropped in the initial framedrop phase.
+ int initial_framedrop_;
+};
+
+ResourceAdaptationProcessor::ResourceAdaptationProcessor(
+ Clock* clock,
+ bool experiment_cpu_load_estimator,
+ std::unique_ptr<OveruseFrameDetector> overuse_detector,
+ VideoStreamEncoderObserver* encoder_stats_observer,
+ ResourceAdaptationProcessorListener* adaptation_listener)
+ : adaptation_listener_(adaptation_listener),
+ clock_(clock),
+ state_(State::kStopped),
+ experiment_cpu_load_estimator_(experiment_cpu_load_estimator),
+ has_input_video_(false),
+ degradation_preference_(DegradationPreference::DISABLED),
+ stream_adapter_(std::make_unique<VideoStreamAdapter>()),
+ encode_usage_resource_(
+ std::make_unique<EncodeUsageResource>(std::move(overuse_detector))),
+ quality_scaler_resource_(std::make_unique<QualityScalerResource>()),
+ initial_frame_dropper_(std::make_unique<InitialFrameDropper>(
+ quality_scaler_resource_.get())),
+ quality_scaling_experiment_enabled_(QualityScalingExperiment::Enabled()),
+ last_input_frame_size_(absl::nullopt),
+ target_frame_rate_(absl::nullopt),
+ encoder_target_bitrate_bps_(absl::nullopt),
+ quality_rampup_done_(false),
+ quality_rampup_experiment_(QualityRampupExperiment::ParseSettings()),
+ encoder_settings_(absl::nullopt),
+ encoder_stats_observer_(encoder_stats_observer),
+ active_counts_() {
+ RTC_DCHECK(adaptation_listener_);
+ RTC_DCHECK(encoder_stats_observer_);
+ AddResource(encode_usage_resource_.get(),
+ AdaptationObserverInterface::AdaptReason::kCpu);
+ AddResource(quality_scaler_resource_.get(),
+ AdaptationObserverInterface::AdaptReason::kQuality);
+}
+
+ResourceAdaptationProcessor::~ResourceAdaptationProcessor() {
+ RTC_DCHECK_EQ(state_, State::kStopped);
+}
+
+void ResourceAdaptationProcessor::StartResourceAdaptation(
+ ResourceAdaptationProcessorListener* adaptation_listener) {
+ RTC_DCHECK_EQ(state_, State::kStopped);
+ RTC_DCHECK(encoder_settings_.has_value());
+ // TODO(https://crbug.com/webrtc/11222): Rethink when the adaptation listener
+ // should be passed in and why. If resources are separated from modules then
+ // those resources may be started or stopped separately from the module.
+ RTC_DCHECK_EQ(adaptation_listener, adaptation_listener_);
+ encode_usage_resource_->StartCheckForOveruse(GetCpuOveruseOptions());
+ for (auto& resource_and_reason : resources_) {
+ resource_and_reason.resource->RegisterListener(this);
+ }
+ state_ = State::kStarted;
+}
+
+void ResourceAdaptationProcessor::StopResourceAdaptation() {
+ encode_usage_resource_->StopCheckForOveruse();
+ quality_scaler_resource_->StopCheckForOveruse();
+ for (auto& resource_and_reason : resources_) {
+ resource_and_reason.resource->UnregisterListener(this);
+ }
+ state_ = State::kStopped;
+}
+
+void ResourceAdaptationProcessor::AddResource(Resource* resource) {
+ return AddResource(resource, AdaptationObserverInterface::AdaptReason::kCpu);
+}
+
+void ResourceAdaptationProcessor::AddResource(
+ Resource* resource,
+ AdaptationObserverInterface::AdaptReason reason) {
+ RTC_DCHECK(resource);
+ RTC_DCHECK(absl::c_find_if(resources_,
+ [resource](const ResourceAndReason& r) {
+ return r.resource == resource;
+ }) == resources_.end())
+ << "Resource " << resource->name() << " already was inserted";
+ resources_.emplace_back(resource, reason);
+}
+
+void ResourceAdaptationProcessor::SetHasInputVideo(bool has_input_video) {
+ // While false, OnResourceUnderuse() and OnResourceOveruse() are NO-OPS.
+ has_input_video_ = has_input_video;
+}
+
+void ResourceAdaptationProcessor::SetDegradationPreference(
+ DegradationPreference degradation_preference) {
+ degradation_preference_ = degradation_preference;
+ if (stream_adapter_->SetDegradationPreference(degradation_preference) ==
+ VideoStreamAdapter::SetDegradationPreferenceResult::
+ kRestrictionsCleared) {
+ active_counts_.fill(AdaptationCounters());
+ }
+ MaybeUpdateVideoSourceRestrictions();
+}
+
+void ResourceAdaptationProcessor::SetEncoderSettings(
+ EncoderSettings encoder_settings) {
+ encoder_settings_ = std::move(encoder_settings);
+
+ quality_rampup_experiment_.SetMaxBitrate(
+ LastInputFrameSizeOrDefault(),
+ encoder_settings_->video_codec().maxBitrate);
+ MaybeUpdateTargetFrameRate();
+}
+
+void ResourceAdaptationProcessor::SetStartBitrate(DataRate start_bitrate) {
+ if (!start_bitrate.IsZero())
+ encoder_target_bitrate_bps_ = start_bitrate.bps();
+  initial_frame_dropper_->SetStartBitrate(start_bitrate,
+                                          clock_->TimeInMilliseconds());
+}
+
+void ResourceAdaptationProcessor::SetTargetBitrate(DataRate target_bitrate) {
+ if (!target_bitrate.IsZero())
+ encoder_target_bitrate_bps_ = target_bitrate.bps();
+ initial_frame_dropper_->SetTargetBitrate(target_bitrate,
+ clock_->TimeInMilliseconds());
+}
+
+void ResourceAdaptationProcessor::SetEncoderRates(
+ const VideoEncoder::RateControlParameters& encoder_rates) {
+ encoder_rates_ = encoder_rates;
+}
+
+void ResourceAdaptationProcessor::ResetVideoSourceRestrictions() {
+ stream_adapter_->ClearRestrictions();
+ active_counts_.fill(AdaptationCounters());
+ MaybeUpdateVideoSourceRestrictions();
+}
+
+void ResourceAdaptationProcessor::OnFrame(const VideoFrame& frame) {
+ last_input_frame_size_ = frame.size();
+}
+
+void ResourceAdaptationProcessor::OnFrameDroppedDueToSize() {
+ AdaptationCounters counters_before = stream_adapter_->adaptation_counters();
+ OnResourceOveruse(AdaptationObserverInterface::AdaptReason::kQuality);
+ if (degradation_preference() == DegradationPreference::BALANCED &&
+ stream_adapter_->adaptation_counters().fps_adaptations >
+ counters_before.fps_adaptations) {
+ // Adapt framerate in same step as resolution.
+ OnResourceOveruse(AdaptationObserverInterface::AdaptReason::kQuality);
+ }
+ if (stream_adapter_->adaptation_counters().resolution_adaptations >
+ counters_before.resolution_adaptations) {
+ encoder_stats_observer_->OnInitialQualityResolutionAdaptDown();
+ }
+ initial_frame_dropper_->OnFrameDroppedDueToSize();
+}
+
+void ResourceAdaptationProcessor::OnEncodeStarted(
+ const VideoFrame& cropped_frame,
+ int64_t time_when_first_seen_us) {
+ encode_usage_resource_->OnEncodeStarted(cropped_frame,
+ time_when_first_seen_us);
+}
+
+void ResourceAdaptationProcessor::OnEncodeCompleted(
+ const EncodedImage& encoded_image,
+ int64_t time_sent_in_us,
+ absl::optional<int> encode_duration_us) {
+ // Inform |encode_usage_resource_| of the encode completed event.
+ uint32_t timestamp = encoded_image.Timestamp();
+ int64_t capture_time_us =
+ encoded_image.capture_time_ms_ * rtc::kNumMicrosecsPerMillisec;
+ encode_usage_resource_->OnEncodeCompleted(
+ timestamp, time_sent_in_us, capture_time_us, encode_duration_us);
+ // Inform |quality_scaler_resource_| of the encode completed event.
+ quality_scaler_resource_->OnEncodeCompleted(encoded_image, time_sent_in_us);
+}
+
+void ResourceAdaptationProcessor::OnFrameDropped(
+ EncodedImageCallback::DropReason reason) {
+ quality_scaler_resource_->OnFrameDropped(reason);
+}
+
+bool ResourceAdaptationProcessor::DropInitialFrames() const {
+ return initial_frame_dropper_->DropInitialFrames();
+}
+
+void ResourceAdaptationProcessor::OnMaybeEncodeFrame() {
+ initial_frame_dropper_->OnMaybeEncodeFrame();
+ MaybePerformQualityRampupExperiment();
+}
+
+void ResourceAdaptationProcessor::UpdateQualityScalerSettings(
+ absl::optional<VideoEncoder::QpThresholds> qp_thresholds) {
+ if (qp_thresholds.has_value()) {
+ quality_scaler_resource_->StopCheckForOveruse();
+ quality_scaler_resource_->StartCheckForOveruse(qp_thresholds.value());
+ } else {
+ quality_scaler_resource_->StopCheckForOveruse();
+ }
+ initial_frame_dropper_->OnQualityScalerSettingsUpdated();
+}
+
+void ResourceAdaptationProcessor::ConfigureQualityScaler(
+ const VideoEncoder::EncoderInfo& encoder_info) {
+ const auto scaling_settings = encoder_info.scaling_settings;
+ const bool quality_scaling_allowed =
+ IsResolutionScalingEnabled(degradation_preference_) &&
+ scaling_settings.thresholds;
+
+ // TODO(https://crbug.com/webrtc/11222): Should this move to
+ // QualityScalerResource?
+ if (quality_scaling_allowed) {
+ if (!quality_scaler_resource_->is_started()) {
+ // Quality scaler has not already been configured.
+
+ // Use experimental thresholds if available.
+ absl::optional<VideoEncoder::QpThresholds> experimental_thresholds;
+ if (quality_scaling_experiment_enabled_) {
+ experimental_thresholds = QualityScalingExperiment::GetQpThresholds(
+ GetVideoCodecTypeOrGeneric(encoder_settings_));
+ }
+ UpdateQualityScalerSettings(experimental_thresholds
+ ? *experimental_thresholds
+ : *(scaling_settings.thresholds));
+ }
+ } else {
+ UpdateQualityScalerSettings(absl::nullopt);
+ }
+
+ // Set the qp-thresholds to the balanced settings if balanced mode.
+ if (degradation_preference_ == DegradationPreference::BALANCED &&
+ quality_scaler_resource_->is_started()) {
+ absl::optional<VideoEncoder::QpThresholds> thresholds =
+ stream_adapter_->balanced_settings().GetQpThresholds(
+ GetVideoCodecTypeOrGeneric(encoder_settings_),
+ LastInputFrameSizeOrDefault());
+ if (thresholds) {
+ quality_scaler_resource_->SetQpThresholds(*thresholds);
+ }
+ }
+
+ encoder_stats_observer_->OnAdaptationChanged(
+ VideoStreamEncoderObserver::AdaptationReason::kNone,
+ GetActiveCounts(AdaptationObserverInterface::AdaptReason::kCpu),
+ GetActiveCounts(AdaptationObserverInterface::AdaptReason::kQuality));
+}
+
+ResourceListenerResponse
+ResourceAdaptationProcessor::OnResourceUsageStateMeasured(
+ const Resource& resource) {
+ const auto& registered_resource =
+ absl::c_find_if(resources_, [&resource](const ResourceAndReason& r) {
+ return r.resource == &resource;
+ });
+ RTC_DCHECK(registered_resource != resources_.end())
+ << resource.name() << " not found.";
+
+ const AdaptationObserverInterface::AdaptReason reason =
+ registered_resource->reason;
+ switch (resource.usage_state()) {
+ case ResourceUsageState::kOveruse:
+ return OnResourceOveruse(reason);
+ case ResourceUsageState::kStable:
+ // Do nothing.
+ //
+      // This module has two resources: |encode_usage_resource_| and
+ // |quality_scaler_resource_|. A smarter adaptation module might not
+ // attempt to adapt up unless ALL resources were underused, but this
+ // module acts on each resource's measurement in isolation - without
+ // taking the current usage of any other resource into account.
+ return ResourceListenerResponse::kNothing;
+ case ResourceUsageState::kUnderuse:
+ OnResourceUnderuse(reason);
+ return ResourceListenerResponse::kNothing;
+ }
+}
+
+void ResourceAdaptationProcessor::OnResourceUnderuse(
+ AdaptationObserverInterface::AdaptReason reason) {
+ // We can't adapt up if we're already at the highest setting.
+ // Note that this only includes counts relevant to the current degradation
+  // preference. For example, if we previously adapted resolution but now
+  // prefer adapting fps, only the fps adaptations are counted, not the
+  // earlier resolution adaptations.
+ //
+ // TODO(https://crbug.com/webrtc/11394): Checking the counts for reason should
+ // be replaced with checking the overuse state of all resources. This is
+  // effectively trying to infer if the Resource specified by |reason| is OK
+ // with adapting up by looking at active counters. If the relevant Resources
+ // simply told us this directly we wouldn't have to depend on stats counters
+ // to abort VideoStreamAdapter::GetAdaptationUp(). This may be possible by
+ // peeking the next restrictions (VideoStreamAdapter::PeekNextRestrictions()),
+ // and asking the Resource: "Can we apply these restrictions without
+ // overusing?" or if there is a ResourceUsageState::kStable.
+ int num_downgrades = ApplyDegradationPreference(active_counts_[reason],
+ degradation_preference_)
+ .Total();
+ RTC_DCHECK_GE(num_downgrades, 0);
+ if (num_downgrades == 0)
+ return;
+ // Update video input states and encoder settings for accurate adaptation.
+ stream_adapter_->SetInput(GetVideoInputMode(), LastInputFrameSizeOrDefault(),
+ encoder_stats_observer_->GetInputFrameRate(),
+ encoder_settings_, encoder_target_bitrate_bps_);
+ // Should we adapt, and if so: how?
+ Adaptation adaptation = stream_adapter_->GetAdaptationUp(reason);
+ if (adaptation.status() != Adaptation::Status::kValid)
+ return;
+ // Apply adaptation.
+ stream_adapter_->ApplyAdaptation(adaptation);
+ // Update VideoSourceRestrictions based on adaptation. This also informs the
+ // |adaptation_listener_|.
+ MaybeUpdateVideoSourceRestrictions();
+ // Stats and logging.
+ UpdateAdaptationStats(reason);
+ RTC_LOG(LS_INFO) << ActiveCountsToString();
+}
+
+ResourceListenerResponse ResourceAdaptationProcessor::OnResourceOveruse(
+ AdaptationObserverInterface::AdaptReason reason) {
+ if (!has_input_video_)
+ return ResourceListenerResponse::kQualityScalerShouldIncreaseFrequency;
+ // Update video input states and encoder settings for accurate adaptation.
+ stream_adapter_->SetInput(GetVideoInputMode(), LastInputFrameSizeOrDefault(),
+ encoder_stats_observer_->GetInputFrameRate(),
+ encoder_settings_, encoder_target_bitrate_bps_);
+ // Should we adapt, and if so: how?
+ Adaptation adaptation = stream_adapter_->GetAdaptationDown();
+ if (adaptation.min_pixel_limit_reached())
+ encoder_stats_observer_->OnMinPixelLimitReached();
+ if (adaptation.status() != Adaptation::Status::kValid)
+ return ResourceListenerResponse::kNothing;
+ // Apply adaptation.
+ ResourceListenerResponse response =
+ stream_adapter_->ApplyAdaptation(adaptation);
+ // Update VideoSourceRestrictions based on adaptation. This also informs the
+ // |adaptation_listener_|.
+ MaybeUpdateVideoSourceRestrictions();
+ // Stats and logging.
+ UpdateAdaptationStats(reason);
+  RTC_LOG(LS_INFO) << ActiveCountsToString();
+ return response;
+}
+
+// TODO(pbos): Lower these thresholds (to closer to 100%) when we handle
+// pipelining encoders better (multiple input frames before something comes
+// out). This should effectively turn off CPU adaptations for systems that
+// remotely cope with the load right now.
+CpuOveruseOptions ResourceAdaptationProcessor::GetCpuOveruseOptions() const {
+ // This is already ensured by the only caller of this method:
+ // StartResourceAdaptation().
+ RTC_DCHECK(encoder_settings_.has_value());
+ CpuOveruseOptions options;
+ // Hardware accelerated encoders are assumed to be pipelined; give them
+ // additional overuse time.
+ if (encoder_settings_->encoder_info().is_hardware_accelerated) {
+ options.low_encode_usage_threshold_percent = 150;
+ options.high_encode_usage_threshold_percent = 200;
+ }
+ if (experiment_cpu_load_estimator_) {
+ options.filter_time_ms = 5 * rtc::kNumMillisecsPerSec;
+ }
+ return options;
+}
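+
+// For reference (values taken from the code above): a hardware-accelerated
+// encoder gets low/high encode-usage thresholds of 150%/200% instead of the
+// defaults, and the CPU load estimator experiment sets filter_time_ms to
+// 5000 ms.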
+
+int ResourceAdaptationProcessor::LastInputFrameSizeOrDefault() const {
+ // The dependency on this hardcoded resolution is inherited from old code,
+ // which used this resolution as a stand-in for not knowing the resolution
+ // yet.
+  // TODO(hbos): Can we simply DCHECK has_value() before usage instead? Such a
+  // DCHECK passed all the tests, but adding it changes the requirements of
+ // this class (= not being allowed to call OnResourceUnderuse() or
+ // OnResourceOveruse() before OnFrame()) and deserves a standalone CL.
+ return last_input_frame_size_.value_or(kDefaultInputPixelsWidth *
+ kDefaultInputPixelsHeight);
+}
+
+void ResourceAdaptationProcessor::MaybeUpdateVideoSourceRestrictions() {
+ VideoSourceRestrictions new_restrictions = ApplyDegradationPreference(
+ stream_adapter_->source_restrictions(), degradation_preference_);
+ if (video_source_restrictions_ != new_restrictions) {
+ video_source_restrictions_ = std::move(new_restrictions);
+ adaptation_listener_->OnVideoSourceRestrictionsUpdated(
+ video_source_restrictions_);
+ MaybeUpdateTargetFrameRate();
+ }
+}
+
+void ResourceAdaptationProcessor::MaybeUpdateTargetFrameRate() {
+ absl::optional<double> codec_max_frame_rate =
+ encoder_settings_.has_value()
+ ? absl::optional<double>(
+ encoder_settings_->video_codec().maxFramerate)
+ : absl::nullopt;
+ // The current target framerate is the maximum frame rate as specified by
+ // the current codec configuration or any limit imposed by the adaptation
+ // module. This is used to make sure overuse detection doesn't needlessly
+ // trigger in low and/or variable framerate scenarios.
+ absl::optional<double> target_frame_rate =
+ ApplyDegradationPreference(stream_adapter_->source_restrictions(),
+ degradation_preference_)
+ .max_frame_rate();
+ if (!target_frame_rate.has_value() ||
+ (codec_max_frame_rate.has_value() &&
+ codec_max_frame_rate.value() < target_frame_rate.value())) {
+ target_frame_rate = codec_max_frame_rate;
+ }
+ encode_usage_resource_->SetTargetFrameRate(target_frame_rate);
+}
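+
+// Worked example (hypothetical numbers): with a codec maxFramerate of 30 and
+// an adaptation restriction of max_frame_rate = 20, the target frame rate
+// becomes 20; with no restriction in place, the codec maximum of 30 is used.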
+
+void ResourceAdaptationProcessor::OnAdaptationCountChanged(
+ const AdaptationCounters& adaptation_count,
+ AdaptationCounters* active_count,
+ AdaptationCounters* other_active) {
+ RTC_DCHECK(active_count);
+ RTC_DCHECK(other_active);
+ const int active_total = active_count->Total();
+ const int other_total = other_active->Total();
+ const AdaptationCounters prev_total = *active_count + *other_active;
+ const AdaptationCounters delta = adaptation_count - prev_total;
+
+ RTC_DCHECK_EQ(
+ std::abs(delta.resolution_adaptations) + std::abs(delta.fps_adaptations),
+ 1)
+ << "Adaptation took more than one step!";
+
+ if (delta.resolution_adaptations > 0) {
+ ++active_count->resolution_adaptations;
+ } else if (delta.resolution_adaptations < 0) {
+ if (active_count->resolution_adaptations == 0) {
+ RTC_DCHECK_GT(active_count->fps_adaptations, 0) << "No downgrades left";
+ RTC_DCHECK_GT(other_active->resolution_adaptations, 0)
+ << "No resolution adaptation to borrow from";
+ // Lend an fps adaptation to other and take one resolution adaptation.
+ --active_count->fps_adaptations;
+ ++other_active->fps_adaptations;
+ --other_active->resolution_adaptations;
+ } else {
+ --active_count->resolution_adaptations;
+ }
+ }
+ if (delta.fps_adaptations > 0) {
+ ++active_count->fps_adaptations;
+ } else if (delta.fps_adaptations < 0) {
+ if (active_count->fps_adaptations == 0) {
+ RTC_DCHECK_GT(active_count->resolution_adaptations, 0)
+ << "No downgrades left";
+ RTC_DCHECK_GT(other_active->fps_adaptations, 0)
+ << "No fps adaptation to borrow from";
+ // Lend a resolution adaptation to other and take one fps adaptation.
+ --active_count->resolution_adaptations;
+ ++other_active->resolution_adaptations;
+ --other_active->fps_adaptations;
+ } else {
+ --active_count->fps_adaptations;
+ }
+ }
+
+ RTC_DCHECK(*active_count + *other_active == adaptation_count);
+ RTC_DCHECK_EQ(other_active->Total(), other_total);
+ RTC_DCHECK_EQ(active_count->Total(), active_total + delta.Total());
+ RTC_DCHECK_GE(active_count->resolution_adaptations, 0);
+ RTC_DCHECK_GE(active_count->fps_adaptations, 0);
+ RTC_DCHECK_GE(other_active->resolution_adaptations, 0);
+ RTC_DCHECK_GE(other_active->fps_adaptations, 0);
+}
+
+// TODO(nisse): Delete, once AdaptReason and AdaptationReason are merged.
+void ResourceAdaptationProcessor::UpdateAdaptationStats(
+ AdaptationObserverInterface::AdaptReason reason) {
+ // Update active counts
+ AdaptationCounters& active_count = active_counts_[reason];
+ AdaptationCounters& other_active = active_counts_[(reason + 1) % 2];
+ const AdaptationCounters total_counts =
+ stream_adapter_->adaptation_counters();
+
+ OnAdaptationCountChanged(total_counts, &active_count, &other_active);
+
+ switch (reason) {
+ case AdaptationObserverInterface::AdaptReason::kCpu:
+ encoder_stats_observer_->OnAdaptationChanged(
+ VideoStreamEncoderObserver::AdaptationReason::kCpu,
+ GetActiveCounts(AdaptationObserverInterface::AdaptReason::kCpu),
+ GetActiveCounts(AdaptationObserverInterface::AdaptReason::kQuality));
+ break;
+ case AdaptationObserverInterface::AdaptReason::kQuality:
+ encoder_stats_observer_->OnAdaptationChanged(
+ VideoStreamEncoderObserver::AdaptationReason::kQuality,
+ GetActiveCounts(AdaptationObserverInterface::AdaptReason::kCpu),
+ GetActiveCounts(AdaptationObserverInterface::AdaptReason::kQuality));
+ break;
+ }
+}
+
+VideoStreamEncoderObserver::AdaptationSteps
+ResourceAdaptationProcessor::GetActiveCounts(
+ AdaptationObserverInterface::AdaptReason reason) {
+  // TODO(https://crbug.com/webrtc/11392): Ideally this should be moved out of
+ // this class and into the encoder_stats_observer_.
+ const AdaptationCounters counters = active_counts_[reason];
+
+ VideoStreamEncoderObserver::AdaptationSteps counts =
+ VideoStreamEncoderObserver::AdaptationSteps();
+ counts.num_resolution_reductions = counters.resolution_adaptations;
+ counts.num_framerate_reductions = counters.fps_adaptations;
+ switch (reason) {
+ case AdaptationObserverInterface::AdaptReason::kCpu:
+ if (!IsFramerateScalingEnabled(degradation_preference_))
+ counts.num_framerate_reductions = absl::nullopt;
+ if (!IsResolutionScalingEnabled(degradation_preference_))
+ counts.num_resolution_reductions = absl::nullopt;
+ break;
+ case AdaptationObserverInterface::AdaptReason::kQuality:
+ if (!IsFramerateScalingEnabled(degradation_preference_) ||
+ !quality_scaler_resource_->is_started()) {
+ counts.num_framerate_reductions = absl::nullopt;
+ }
+ if (!IsResolutionScalingEnabled(degradation_preference_) ||
+ !quality_scaler_resource_->is_started()) {
+ counts.num_resolution_reductions = absl::nullopt;
+ }
+ break;
+ }
+ return counts;
+}
+
+VideoStreamAdapter::VideoInputMode
+ResourceAdaptationProcessor::GetVideoInputMode() const {
+ if (!has_input_video_)
+ return VideoStreamAdapter::VideoInputMode::kNoVideo;
+ return (encoder_settings_.has_value() &&
+ encoder_settings_->encoder_config().content_type ==
+ VideoEncoderConfig::ContentType::kScreen)
+ ? VideoStreamAdapter::VideoInputMode::kScreenshareVideo
+ : VideoStreamAdapter::VideoInputMode::kNormalVideo;
+}
+
+void ResourceAdaptationProcessor::MaybePerformQualityRampupExperiment() {
+ if (!quality_scaler_resource_->is_started())
+ return;
+
+ if (quality_rampup_done_)
+ return;
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ uint32_t bw_kbps = encoder_rates_.has_value()
+ ? encoder_rates_.value().bandwidth_allocation.kbps()
+ : 0;
+
+ bool try_quality_rampup = false;
+ if (quality_rampup_experiment_.BwHigh(now_ms, bw_kbps)) {
+ // Verify that encoder is at max bitrate and the QP is low.
+ if (encoder_settings_ &&
+ encoder_target_bitrate_bps_.value_or(0) ==
+ encoder_settings_->video_codec().maxBitrate * 1000 &&
+ quality_scaler_resource_->QpFastFilterLow()) {
+ try_quality_rampup = true;
+ }
+ }
+ // TODO(https://crbug.com/webrtc/11392): See if we can rely on the total
+ // counts or the stats, and not the active counts.
+ const AdaptationCounters& qp_counts =
+ std::get<AdaptationObserverInterface::kQuality>(active_counts_);
+ const AdaptationCounters& cpu_counts =
+ std::get<AdaptationObserverInterface::kCpu>(active_counts_);
+ if (try_quality_rampup && qp_counts.resolution_adaptations > 0 &&
+ cpu_counts.Total() == 0) {
+ RTC_LOG(LS_INFO) << "Reset quality limitations.";
+ ResetVideoSourceRestrictions();
+ quality_rampup_done_ = true;
+ }
+}
+
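+// Example output (hypothetical counts):
+//   "Downgrade counts: fps: {quality:0 cpu:1}, resolution {quality:0 cpu:0}"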
+std::string ResourceAdaptationProcessor::ActiveCountsToString() const {
+ rtc::StringBuilder ss;
+
+ ss << "Downgrade counts: fps: {";
+ for (size_t reason = 0; reason < active_counts_.size(); ++reason) {
+ ss << (reason ? " cpu" : "quality") << ":";
+ ss << active_counts_[reason].fps_adaptations;
+ }
+ ss << "}, resolution {";
+ for (size_t reason = 0; reason < active_counts_.size(); ++reason) {
+ ss << (reason ? " cpu" : "quality") << ":";
+ ss << active_counts_[reason].resolution_adaptations;
+ }
+ ss << "}";
+
+ return ss.Release();
+}
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor.h b/chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor.h
new file mode 100644
index 00000000000..589860db5f1
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_H_
+#define VIDEO_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/rtp_parameters.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_source_interface.h"
+#include "api/video/video_stream_encoder_observer.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_config.h"
+#include "call/adaptation/resource.h"
+#include "call/adaptation/resource_adaptation_processor_interface.h"
+#include "rtc_base/experiments/quality_rampup_experiment.h"
+#include "rtc_base/experiments/quality_scaler_settings.h"
+#include "rtc_base/strings/string_builder.h"
+#include "system_wrappers/include/clock.h"
+#include "video/adaptation/adaptation_counters.h"
+#include "video/adaptation/encode_usage_resource.h"
+#include "video/adaptation/overuse_frame_detector.h"
+#include "video/adaptation/quality_scaler_resource.h"
+#include "video/adaptation/video_stream_adapter.h"
+
+namespace webrtc {
+
+// The assumed input frame size if we have not yet received a frame.
+// TODO(hbos): This is 144p - why are we assuming super low quality? Seems like
+// a bad heuristic.
+extern const int kDefaultInputPixelsWidth;
+extern const int kDefaultInputPixelsHeight;
+
+// This class is used by the VideoStreamEncoder and is responsible for adapting
+// resolution up or down based on encode usage percent. It keeps track of video
+// source settings, adaptation counters and may get influenced by
+// VideoStreamEncoder's quality scaler through AdaptUp() and AdaptDown() calls.
+//
+// This class is single-threaded. The caller is responsible for ensuring safe
+// usage.
+// TODO(hbos): Add unittests specific to this class; it is currently only
+// tested indirectly in video_stream_encoder_unittest.cc and other tests
+// exercising VideoStreamEncoder.
+class ResourceAdaptationProcessor : public ResourceAdaptationProcessorInterface,
+ public ResourceListener {
+ public:
+ // The processor can be constructed on any sequence, but must be initialized
+ // and used on a single sequence, e.g. the encoder queue.
+ ResourceAdaptationProcessor(
+ Clock* clock,
+ bool experiment_cpu_load_estimator,
+ std::unique_ptr<OveruseFrameDetector> overuse_detector,
+ VideoStreamEncoderObserver* encoder_stats_observer,
+ ResourceAdaptationProcessorListener* adaptation_listener);
+ ~ResourceAdaptationProcessor() override;
+
+ DegradationPreference degradation_preference() const {
+ return degradation_preference_;
+ }
+
+ // ResourceAdaptationProcessorInterface implementation.
+ void StartResourceAdaptation(
+ ResourceAdaptationProcessorListener* adaptation_listener) override;
+ void StopResourceAdaptation() override;
+ // Uses a default AdaptReason of kCpu.
+ void AddResource(Resource* resource) override;
+ void AddResource(Resource* resource,
+ AdaptationObserverInterface::AdaptReason reason);
+ void SetHasInputVideo(bool has_input_video) override;
+ void SetDegradationPreference(
+ DegradationPreference degradation_preference) override;
+ void SetEncoderSettings(EncoderSettings encoder_settings) override;
+ void SetStartBitrate(DataRate start_bitrate) override;
+ void SetTargetBitrate(DataRate target_bitrate) override;
+ void SetEncoderRates(
+ const VideoEncoder::RateControlParameters& encoder_rates) override;
+
+ void OnFrame(const VideoFrame& frame) override;
+ void OnFrameDroppedDueToSize() override;
+ void OnMaybeEncodeFrame() override;
+ void OnEncodeStarted(const VideoFrame& cropped_frame,
+ int64_t time_when_first_seen_us) override;
+ void OnEncodeCompleted(const EncodedImage& encoded_image,
+ int64_t time_sent_in_us,
+ absl::optional<int> encode_duration_us) override;
+ void OnFrameDropped(EncodedImageCallback::DropReason reason) override;
+
+ // TODO(hbos): Is dropping initial frames really just a special case of "don't
+ // encode frames right now"? Can this be part of VideoSourceRestrictions,
+ // which handles the output of the rest of the encoder settings? This is
+ // something we'll need to support for "disable video due to overuse", not
+ // initial frames.
+ bool DropInitialFrames() const;
+
+ // TODO(eshr): This can be made private if we configure on
+  // SetDegradationPreference and SetEncoderSettings.
+ // (https://crbug.com/webrtc/11338)
+ void ConfigureQualityScaler(const VideoEncoder::EncoderInfo& encoder_info);
+
+ // ResourceUsageListener implementation.
+ ResourceListenerResponse OnResourceUsageStateMeasured(
+ const Resource& resource) override;
+
+ // For reasons of adaptation and statistics, we not only count the total
+ // number of adaptations, but we also count the number of adaptations per
+ // reason.
+ // This method takes the new total number of adaptations and allocates that to
+ // the "active" count - number of adaptations for the current reason.
+ // The "other" count is the number of adaptations for the other reason.
+ // This must be called for each adaptation step made.
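+  // A sketch of the bookkeeping (hypothetical counters, mirroring the unit
+  // tests): starting from zero, a new total of {resolution:0, fps:1} with
+  // kCpu as the active reason yields an active count of {0, 1}; when adapting
+  // up and the active reason has no matching count left, one step is
+  // "borrowed" from the other reason's counters.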
+ static void OnAdaptationCountChanged(
+ const AdaptationCounters& adaptation_count,
+ AdaptationCounters* active_count,
+ AdaptationCounters* other_active);
+
+ private:
+ class InitialFrameDropper;
+
+ enum class State { kStopped, kStarted };
+
+ // Performs the adaptation by getting the next target, applying it and
+ // informing listeners of the new VideoSourceRestriction and adapt counters.
+ void OnResourceUnderuse(AdaptationObserverInterface::AdaptReason reason);
+ ResourceListenerResponse OnResourceOveruse(
+ AdaptationObserverInterface::AdaptReason reason);
+
+ CpuOveruseOptions GetCpuOveruseOptions() const;
+ int LastInputFrameSizeOrDefault() const;
+ VideoStreamEncoderObserver::AdaptationSteps GetActiveCounts(
+ AdaptationObserverInterface::AdaptReason reason);
+ VideoStreamAdapter::VideoInputMode GetVideoInputMode() const;
+
+ // Makes |video_source_restrictions_| up-to-date and informs the
+ // |adaptation_listener_| if restrictions are changed, allowing the listener
+ // to reconfigure the source accordingly.
+ void MaybeUpdateVideoSourceRestrictions();
+ // Calculates an up-to-date value of the target frame rate and informs the
+ // |encode_usage_resource_| of the new value.
+ void MaybeUpdateTargetFrameRate();
+
+ // Use nullopt to disable quality scaling.
+ void UpdateQualityScalerSettings(
+ absl::optional<VideoEncoder::QpThresholds> qp_thresholds);
+
+ void UpdateAdaptationStats(AdaptationObserverInterface::AdaptReason reason);
+
+ // Checks to see if we should execute the quality rampup experiment. The
+  // experiment resets all video restrictions at the start of the call if the
+  // bandwidth estimate is high enough.
+ // TODO(https://crbug.com/webrtc/11222) Move experiment details into an inner
+ // class.
+ void MaybePerformQualityRampupExperiment();
+ void ResetVideoSourceRestrictions();
+
+ std::string ActiveCountsToString() const;
+
+ ResourceAdaptationProcessorListener* const adaptation_listener_;
+ Clock* clock_;
+ State state_;
+ const bool experiment_cpu_load_estimator_;
+ // The restrictions that |adaptation_listener_| is informed of.
+ VideoSourceRestrictions video_source_restrictions_;
+ bool has_input_video_;
+ // TODO(https://crbug.com/webrtc/11393): DegradationPreference has mostly
+ // moved to VideoStreamAdapter. Move it entirely and delete it from this
+ // class. If the responsibility of generating next steps for adaptations is
+  // owned by the adapter, this class has no business relying on implementation
+ // details of the adapter.
+ DegradationPreference degradation_preference_;
+ // Keeps track of source restrictions that this adaptation processor outputs.
+ const std::unique_ptr<VideoStreamAdapter> stream_adapter_;
+ const std::unique_ptr<EncodeUsageResource> encode_usage_resource_;
+ const std::unique_ptr<QualityScalerResource> quality_scaler_resource_;
+ const std::unique_ptr<InitialFrameDropper> initial_frame_dropper_;
+ const bool quality_scaling_experiment_enabled_;
+ absl::optional<int> last_input_frame_size_;
+ absl::optional<double> target_frame_rate_;
+ // This is the last non-zero target bitrate for the encoder.
+ absl::optional<uint32_t> encoder_target_bitrate_bps_;
+ absl::optional<VideoEncoder::RateControlParameters> encoder_rates_;
+ bool quality_rampup_done_;
+ QualityRampupExperiment quality_rampup_experiment_;
+ absl::optional<EncoderSettings> encoder_settings_;
+ VideoStreamEncoderObserver* const encoder_stats_observer_;
+
+ // Ties a resource to a reason for statistical reporting. This AdaptReason is
+ // also used by this module to make decisions about how to adapt up/down.
+ struct ResourceAndReason {
+ ResourceAndReason(Resource* resource,
+ AdaptationObserverInterface::AdaptReason reason)
+ : resource(resource), reason(reason) {}
+ virtual ~ResourceAndReason() = default;
+
+ Resource* const resource;
+ const AdaptationObserverInterface::AdaptReason reason;
+ };
+ std::vector<ResourceAndReason> resources_;
+  // One AdaptationCounters entry per adapt reason, tracking the number of
+  // times we have adapted for that reason. The sum of active_counts_ MUST
+  // always equal the total adaptation provided by the VideoSourceRestrictions.
+  // These counters are used to decide whether the video resolution or
+  // framerate is currently restricted, and if so why, on a per degradation
+  // preference basis.
+  // TODO(https://crbug.com/webrtc/11392): Move all active count logic to
+  // encoder_stats_observer_.
+ std::array<AdaptationCounters, AdaptationObserverInterface::kScaleReasonSize>
+ active_counts_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_H_
diff --git a/chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor_unittest.cc b/chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor_unittest.cc
new file mode 100644
index 00000000000..40a44db0619
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/resource_adaptation_processor_unittest.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/resource_adaptation_processor.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "video/adaptation/adaptation_counters.h"
+
+namespace webrtc {
+
+TEST(ResourceAdaptationProcessorTest, FirstAdaptationDown_Fps) {
+ AdaptationCounters cpu;
+ AdaptationCounters qp;
+ AdaptationCounters total(0, 1);
+
+ ResourceAdaptationProcessor::OnAdaptationCountChanged(total, &cpu, &qp);
+ AdaptationCounters expected_cpu(0, 1);
+ AdaptationCounters expected_qp;
+ EXPECT_EQ(expected_cpu, cpu);
+ EXPECT_EQ(expected_qp, qp);
+}
+
+TEST(ResourceAdaptationProcessorTest, FirstAdaptationDown_Resolution) {
+ AdaptationCounters cpu;
+ AdaptationCounters qp;
+ AdaptationCounters total(1, 0);
+
+ ResourceAdaptationProcessor::OnAdaptationCountChanged(total, &cpu, &qp);
+ AdaptationCounters expected_cpu(1, 0);
+ AdaptationCounters expected_qp;
+ EXPECT_EQ(expected_cpu, cpu);
+ EXPECT_EQ(expected_qp, qp);
+}
+
+TEST(ResourceAdaptationProcessorTest, LastAdaptUp_Fps) {
+ AdaptationCounters cpu(0, 1);
+ AdaptationCounters qp;
+ AdaptationCounters total;
+
+ ResourceAdaptationProcessor::OnAdaptationCountChanged(total, &cpu, &qp);
+ AdaptationCounters expected_cpu;
+ AdaptationCounters expected_qp;
+ EXPECT_EQ(expected_cpu, cpu);
+ EXPECT_EQ(expected_qp, qp);
+}
+
+TEST(ResourceAdaptationProcessorTest, LastAdaptUp_Resolution) {
+ AdaptationCounters cpu(1, 0);
+ AdaptationCounters qp;
+ AdaptationCounters total;
+
+ ResourceAdaptationProcessor::OnAdaptationCountChanged(total, &cpu, &qp);
+ AdaptationCounters expected_cpu;
+ AdaptationCounters expected_qp;
+ EXPECT_EQ(expected_cpu, cpu);
+ EXPECT_EQ(expected_qp, qp);
+}
+
+TEST(ResourceAdaptationProcessorTest, AdaptUpWithBorrow_Resolution) {
+ AdaptationCounters cpu(0, 1);
+ AdaptationCounters qp(1, 0);
+ AdaptationCounters total(0, 1);
+
+ // CPU adaptation for resolution, but no resolution adaptation left from CPU.
+ // We then borrow the resolution adaptation from qp, and give qp the fps
+ // adaptation from CPU.
+ ResourceAdaptationProcessor::OnAdaptationCountChanged(total, &cpu, &qp);
+
+ AdaptationCounters expected_cpu(0, 0);
+ AdaptationCounters expected_qp(0, 1);
+ EXPECT_EQ(expected_cpu, cpu);
+ EXPECT_EQ(expected_qp, qp);
+}
+
+TEST(ResourceAdaptationProcessorTest, AdaptUpWithBorrow_Fps) {
+ AdaptationCounters cpu(1, 0);
+ AdaptationCounters qp(0, 1);
+ AdaptationCounters total(1, 0);
+
+ // CPU adaptation for fps, but no fps adaptation left from CPU. We then borrow
+ // the fps adaptation from qp, and give qp the resolution adaptation from CPU.
+ ResourceAdaptationProcessor::OnAdaptationCountChanged(total, &cpu, &qp);
+
+ AdaptationCounters expected_cpu(0, 0);
+ AdaptationCounters expected_qp(1, 0);
+ EXPECT_EQ(expected_cpu, cpu);
+ EXPECT_EQ(expected_qp, qp);
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/adaptation/video_stream_adapter.cc b/chromium/third_party/webrtc/video/adaptation/video_stream_adapter.cc
new file mode 100644
index 00000000000..7a35b64e22c
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/video_stream_adapter.cc
@@ -0,0 +1,622 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/video_stream_adapter.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/video_encoder.h"
+#include "rtc_base/constructor_magic.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+const int kMinFrameRateFps = 2;
+
+namespace {
+
+int MinPixelsPerFrame(const absl::optional<EncoderSettings>& encoder_settings) {
+ return encoder_settings.has_value()
+ ? encoder_settings->encoder_info()
+ .scaling_settings.min_pixels_per_frame
+ : kDefaultMinPixelsPerFrame;
+}
+
+// Generate suggested higher and lower frame rates and resolutions, to be
+// applied to the VideoSourceRestrictor. These are used in "maintain-resolution"
+// and "maintain-framerate". The "balanced" degradation preference also makes
+// use of BalancedDegradationPreference when generating suggestions. The
+// VideoSourceRestrictor decidedes whether or not a proposed adaptation is
+// valid.
+
+// For frame rate, the steps we take are 2/3 (down) and 3/2 (up).
+int GetLowerFrameRateThan(int fps) {
+ RTC_DCHECK(fps != std::numeric_limits<int>::max());
+ return (fps * 2) / 3;
+}
+// TODO(hbos): Use absl::optional<> instead?
+int GetHigherFrameRateThan(int fps) {
+ return fps != std::numeric_limits<int>::max()
+ ? (fps * 3) / 2
+ : std::numeric_limits<int>::max();
+}
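+
+// Worked example (hypothetical rates): GetLowerFrameRateThan(30) == 20 and
+// GetHigherFrameRateThan(20) == 30. Integer division makes the round trip
+// inexact for other rates, e.g. 25 -> 16 (down) -> 24 (up).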
+
+// For resolution, the steps we take are 3/5 (down) and 5/3 (up).
+// Notice the asymmetry of which restriction property is set depending on if
+// we are adapting up or down:
+// - VideoSourceRestrictor::DecreaseResolution() sets the max_pixels_per_frame()
+// to the desired target and target_pixels_per_frame() to null.
+// - VideoSourceRestrictor::IncreaseResolutionTo() sets the
+// target_pixels_per_frame() to the desired target, and max_pixels_per_frame()
+// is set according to VideoSourceRestrictor::GetIncreasedMaxPixelsWanted().
+int GetLowerResolutionThan(int pixel_count) {
+ RTC_DCHECK(pixel_count != std::numeric_limits<int>::max());
+ return (pixel_count * 3) / 5;
+}
+// TODO(hbos): Use absl::optional<> instead?
+int GetHigherResolutionThan(int pixel_count) {
+ return pixel_count != std::numeric_limits<int>::max()
+ ? (pixel_count * 5) / 3
+ : std::numeric_limits<int>::max();
+}
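+
+// Worked example (hypothetical resolution): 1280x720 is 921600 pixels;
+// GetLowerResolutionThan(921600) == 552960 and
+// GetHigherResolutionThan(552960) == 921600, an exact round trip here since
+// the products divide evenly.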
+
+// One of the conditions used in VideoStreamAdapter::GetAdaptationUp().
+// TODO(hbos): Whether or not we can adapt up due to encoder settings and
+// bitrate should be expressed as a bandwidth-related Resource.
+bool CanAdaptUpResolution(
+ const absl::optional<EncoderSettings>& encoder_settings,
+ absl::optional<uint32_t> encoder_target_bitrate_bps,
+ int input_pixels) {
+ uint32_t bitrate_bps = encoder_target_bitrate_bps.value_or(0);
+ absl::optional<VideoEncoder::ResolutionBitrateLimits> bitrate_limits =
+ encoder_settings.has_value()
+ ? encoder_settings->encoder_info()
+ .GetEncoderBitrateLimitsForResolution(
+ GetHigherResolutionThan(input_pixels))
+ : absl::nullopt;
+ if (!bitrate_limits.has_value() || bitrate_bps == 0) {
+ return true; // No limit configured or bitrate provided.
+ }
+ RTC_DCHECK_GE(bitrate_limits->frame_size_pixels, input_pixels);
+ return bitrate_bps >=
+ static_cast<uint32_t>(bitrate_limits->min_start_bitrate_bps);
+}
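+
+// For example (hypothetical limits): if the encoder reports a
+// min_start_bitrate_bps of 500000 for the next-higher resolution and the
+// current target bitrate is 400000 bps, this returns false and the
+// resolution adapt-up is rejected as bitrate-constrained.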
+
+} // namespace
+
+Adaptation::Step::Step(StepType type, int target)
+ : type(type), target(target) {}
+
+Adaptation::Adaptation(int validation_id, Step step)
+ : validation_id_(validation_id),
+ status_(Status::kValid),
+ step_(std::move(step)),
+ min_pixel_limit_reached_(false) {}
+
+Adaptation::Adaptation(int validation_id,
+ Step step,
+ bool min_pixel_limit_reached)
+ : validation_id_(validation_id),
+ status_(Status::kValid),
+ step_(std::move(step)),
+ min_pixel_limit_reached_(min_pixel_limit_reached) {}
+
+Adaptation::Adaptation(int validation_id, Status invalid_status)
+ : validation_id_(validation_id),
+ status_(invalid_status),
+ step_(absl::nullopt),
+ min_pixel_limit_reached_(false) {
+ RTC_DCHECK_NE(status_, Status::kValid);
+}
+
+Adaptation::Adaptation(int validation_id,
+ Status invalid_status,
+ bool min_pixel_limit_reached)
+ : validation_id_(validation_id),
+ status_(invalid_status),
+ step_(absl::nullopt),
+ min_pixel_limit_reached_(min_pixel_limit_reached) {
+ RTC_DCHECK_NE(status_, Status::kValid);
+}
+
+Adaptation::Status Adaptation::status() const {
+ return status_;
+}
+
+bool Adaptation::min_pixel_limit_reached() const {
+ return min_pixel_limit_reached_;
+}
+
+const Adaptation::Step& Adaptation::step() const {
+ RTC_DCHECK_EQ(status_, Status::kValid);
+ return step_.value();
+}
+
+// VideoSourceRestrictor is responsible for keeping track of current
+// VideoSourceRestrictions.
+class VideoStreamAdapter::VideoSourceRestrictor {
+ public:
+ VideoSourceRestrictor() {}
+
+ VideoSourceRestrictions source_restrictions() const {
+ return source_restrictions_;
+ }
+ const AdaptationCounters& adaptation_counters() const { return adaptations_; }
+ void ClearRestrictions() {
+ source_restrictions_ = VideoSourceRestrictions();
+ adaptations_ = AdaptationCounters();
+ }
+
+ void SetMinPixelsPerFrame(int min_pixels_per_frame) {
+ min_pixels_per_frame_ = min_pixels_per_frame;
+ }
+
+ bool CanDecreaseResolutionTo(int target_pixels) {
+ int max_pixels_per_frame = rtc::dchecked_cast<int>(
+ source_restrictions_.max_pixels_per_frame().value_or(
+ std::numeric_limits<int>::max()));
+ return target_pixels < max_pixels_per_frame &&
+ target_pixels >= min_pixels_per_frame_;
+ }
+
+ bool CanIncreaseResolutionTo(int target_pixels) {
+ int max_pixels_wanted = GetIncreasedMaxPixelsWanted(target_pixels);
+ int max_pixels_per_frame = rtc::dchecked_cast<int>(
+ source_restrictions_.max_pixels_per_frame().value_or(
+ std::numeric_limits<int>::max()));
+ return max_pixels_wanted > max_pixels_per_frame;
+ }
+
+ bool CanDecreaseFrameRateTo(int max_frame_rate) {
+ const int fps_wanted = std::max(kMinFrameRateFps, max_frame_rate);
+ return fps_wanted < rtc::dchecked_cast<int>(
+ source_restrictions_.max_frame_rate().value_or(
+ std::numeric_limits<int>::max()));
+ }
+
+ bool CanIncreaseFrameRateTo(int max_frame_rate) {
+ return max_frame_rate > rtc::dchecked_cast<int>(
+ source_restrictions_.max_frame_rate().value_or(
+ std::numeric_limits<int>::max()));
+ }
+
+ void ApplyAdaptationStep(
+ const Adaptation::Step& step,
+ DegradationPreference effective_degradation_preference) {
+ switch (step.type) {
+ case Adaptation::StepType::kIncreaseResolution:
+ IncreaseResolutionTo(step.target);
+ break;
+ case Adaptation::StepType::kDecreaseResolution:
+ DecreaseResolutionTo(step.target);
+ break;
+ case Adaptation::StepType::kIncreaseFrameRate:
+ IncreaseFrameRateTo(step.target);
+ // TODO(https://crbug.com/webrtc/11222): Don't adapt in two steps.
+ // GetAdaptationUp() should tell us the correct value, but BALANCED
+ // logic in DecrementFramerate() makes it hard to predict whether this
+ // will be the last step. Remove the dependency on
+ // adaptation_counters().
+ if (effective_degradation_preference ==
+ DegradationPreference::BALANCED &&
+ adaptation_counters().fps_adaptations == 0 &&
+ step.target != std::numeric_limits<int>::max()) {
+ RTC_LOG(LS_INFO) << "Removing framerate down-scaling setting.";
+ IncreaseFrameRateTo(std::numeric_limits<int>::max());
+ }
+ break;
+ case Adaptation::StepType::kDecreaseFrameRate:
+ DecreaseFrameRateTo(step.target);
+ break;
+ }
+ }
+
+ private:
+ static int GetIncreasedMaxPixelsWanted(int target_pixels) {
+ if (target_pixels == std::numeric_limits<int>::max())
+ return std::numeric_limits<int>::max();
+ // When we decrease resolution, we go down to at most 3/5 of current pixels.
+    // Thus to increase resolution, we need 5/3 to get back to where we started.
+ // When going up, the desired max_pixels_per_frame() has to be significantly
+ // higher than the target because the source's native resolutions might not
+ // match the target. We pick 12/5 of the target.
+ //
+ // (This value was historically 4 times the old target, which is (3/5)*4 of
+ // the new target - or 12/5 - assuming the target is adjusted according to
+ // the above steps.)
+ RTC_DCHECK(target_pixels != std::numeric_limits<int>::max());
+ return (target_pixels * 12) / 5;
+ }
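+
+  // Worked example for GetIncreasedMaxPixelsWanted (hypothetical target): for
+  // a target of 921600 pixels (1280x720), max_pixels_wanted is
+  // (921600 * 12) / 5 == 2211840, leaving headroom for sources whose native
+  // resolutions do not match the target.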
+
+ void DecreaseResolutionTo(int target_pixels) {
+ RTC_DCHECK(CanDecreaseResolutionTo(target_pixels));
+ RTC_LOG(LS_INFO) << "Scaling down resolution, max pixels: "
+ << target_pixels;
+ source_restrictions_.set_max_pixels_per_frame(
+ target_pixels != std::numeric_limits<int>::max()
+ ? absl::optional<size_t>(target_pixels)
+ : absl::nullopt);
+ source_restrictions_.set_target_pixels_per_frame(absl::nullopt);
+ ++adaptations_.resolution_adaptations;
+ }
+
+ void IncreaseResolutionTo(int target_pixels) {
+ RTC_DCHECK(CanIncreaseResolutionTo(target_pixels));
+ int max_pixels_wanted = GetIncreasedMaxPixelsWanted(target_pixels);
+ RTC_LOG(LS_INFO) << "Scaling up resolution, max pixels: "
+ << max_pixels_wanted;
+ source_restrictions_.set_max_pixels_per_frame(
+ max_pixels_wanted != std::numeric_limits<int>::max()
+ ? absl::optional<size_t>(max_pixels_wanted)
+ : absl::nullopt);
+ source_restrictions_.set_target_pixels_per_frame(
+ max_pixels_wanted != std::numeric_limits<int>::max()
+ ? absl::optional<size_t>(target_pixels)
+ : absl::nullopt);
+ --adaptations_.resolution_adaptations;
+ RTC_DCHECK_GE(adaptations_.resolution_adaptations, 0);
+ }
+
+ void DecreaseFrameRateTo(int max_frame_rate) {
+ RTC_DCHECK(CanDecreaseFrameRateTo(max_frame_rate));
+ max_frame_rate = std::max(kMinFrameRateFps, max_frame_rate);
+ RTC_LOG(LS_INFO) << "Scaling down framerate: " << max_frame_rate;
+ source_restrictions_.set_max_frame_rate(
+ max_frame_rate != std::numeric_limits<int>::max()
+ ? absl::optional<double>(max_frame_rate)
+ : absl::nullopt);
+ ++adaptations_.fps_adaptations;
+ }
+
+ void IncreaseFrameRateTo(int max_frame_rate) {
+ RTC_DCHECK(CanIncreaseFrameRateTo(max_frame_rate));
+ RTC_LOG(LS_INFO) << "Scaling up framerate: " << max_frame_rate;
+ source_restrictions_.set_max_frame_rate(
+ max_frame_rate != std::numeric_limits<int>::max()
+ ? absl::optional<double>(max_frame_rate)
+ : absl::nullopt);
+ --adaptations_.fps_adaptations;
+ RTC_DCHECK_GE(adaptations_.fps_adaptations, 0);
+ }
+
+ // Needed by CanDecreaseResolutionTo().
+ int min_pixels_per_frame_ = 0;
+ // Current State.
+ VideoSourceRestrictions source_restrictions_;
+ AdaptationCounters adaptations_;
+};
+
+// static
+VideoStreamAdapter::AdaptationRequest::Mode
+VideoStreamAdapter::AdaptationRequest::GetModeFromAdaptationAction(
+ Adaptation::StepType step_type) {
+ switch (step_type) {
+ case Adaptation::StepType::kIncreaseResolution:
+ return AdaptationRequest::Mode::kAdaptUp;
+ case Adaptation::StepType::kDecreaseResolution:
+ return AdaptationRequest::Mode::kAdaptDown;
+ case Adaptation::StepType::kIncreaseFrameRate:
+ return AdaptationRequest::Mode::kAdaptUp;
+ case Adaptation::StepType::kDecreaseFrameRate:
+ return AdaptationRequest::Mode::kAdaptDown;
+ }
+}
+
+VideoStreamAdapter::VideoStreamAdapter()
+ : source_restrictor_(std::make_unique<VideoSourceRestrictor>()),
+ balanced_settings_(),
+ adaptation_validation_id_(0),
+ degradation_preference_(DegradationPreference::DISABLED),
+ input_mode_(VideoInputMode::kNoVideo),
+ input_pixels_(0),
+ input_fps_(0),
+ encoder_settings_(absl::nullopt),
+ encoder_target_bitrate_bps_(absl::nullopt),
+ last_adaptation_request_(absl::nullopt) {}
+
+VideoStreamAdapter::~VideoStreamAdapter() {}
+
+VideoSourceRestrictions VideoStreamAdapter::source_restrictions() const {
+ return source_restrictor_->source_restrictions();
+}
+
+const AdaptationCounters& VideoStreamAdapter::adaptation_counters() const {
+ return source_restrictor_->adaptation_counters();
+}
+
+const BalancedDegradationSettings& VideoStreamAdapter::balanced_settings()
+ const {
+ return balanced_settings_;
+}
+
+void VideoStreamAdapter::ClearRestrictions() {
+ // Invalidate any previously returned Adaptation.
+ ++adaptation_validation_id_;
+ source_restrictor_->ClearRestrictions();
+ last_adaptation_request_.reset();
+}
+
+VideoStreamAdapter::SetDegradationPreferenceResult
+VideoStreamAdapter::SetDegradationPreference(
+ DegradationPreference degradation_preference) {
+ if (degradation_preference_ == degradation_preference)
+ return SetDegradationPreferenceResult::kRestrictionsNotCleared;
+ // Invalidate any previously returned Adaptation.
+ ++adaptation_validation_id_;
+ bool did_clear = false;
+ if (degradation_preference == DegradationPreference::BALANCED ||
+ degradation_preference_ == DegradationPreference::BALANCED) {
+ ClearRestrictions();
+ did_clear = true;
+ }
+ degradation_preference_ = degradation_preference;
+ return did_clear ? SetDegradationPreferenceResult::kRestrictionsCleared
+ : SetDegradationPreferenceResult::kRestrictionsNotCleared;
+}
+
+void VideoStreamAdapter::SetInput(
+ VideoInputMode input_mode,
+ int input_pixels,
+ int input_fps,
+ absl::optional<EncoderSettings> encoder_settings,
+ absl::optional<uint32_t> encoder_target_bitrate_bps) {
+ // Invalidate any previously returned Adaptation.
+ ++adaptation_validation_id_;
+ input_mode_ = input_mode;
+ input_pixels_ = input_pixels;
+ input_fps_ = input_fps;
+ encoder_settings_ = encoder_settings;
+ encoder_target_bitrate_bps_ = encoder_target_bitrate_bps;
+ source_restrictor_->SetMinPixelsPerFrame(
+ MinPixelsPerFrame(encoder_settings_));
+}
+
+Adaptation VideoStreamAdapter::GetAdaptationUp(
+ AdaptationObserverInterface::AdaptReason reason) const {
+ // Don't adapt if we don't have sufficient input.
+ if (input_mode_ == VideoInputMode::kNoVideo) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kInsufficientInput);
+ }
+ // Don't adapt if we're awaiting a previous adaptation to have an effect.
+ bool last_adaptation_was_up =
+ last_adaptation_request_ &&
+ last_adaptation_request_->mode_ == AdaptationRequest::Mode::kAdaptUp;
+ if (last_adaptation_was_up &&
+ degradation_preference_ == DegradationPreference::MAINTAIN_FRAMERATE &&
+ input_pixels_ <= last_adaptation_request_->input_pixel_count_) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kAwaitingPreviousAdaptation);
+ }
+ // Don't adapt if BalancedDegradationSettings applies and determines this will
+ // exceed bitrate constraints.
+ if (reason == AdaptationObserverInterface::AdaptReason::kQuality &&
+ EffectiveDegradationPreference() == DegradationPreference::BALANCED &&
+ !balanced_settings_.CanAdaptUp(
+ GetVideoCodecTypeOrGeneric(encoder_settings_), input_pixels_,
+ encoder_target_bitrate_bps_.value_or(0))) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kIsBitrateConstrained);
+ }
+
+ // Maybe propose targets based on degradation preference.
+ switch (EffectiveDegradationPreference()) {
+ case DegradationPreference::BALANCED: {
+ // Attempt to increase target frame rate.
+ int target_fps = balanced_settings_.MaxFps(
+ GetVideoCodecTypeOrGeneric(encoder_settings_), input_pixels_);
+ if (source_restrictor_->CanIncreaseFrameRateTo(target_fps)) {
+ return Adaptation(
+ adaptation_validation_id_,
+ Adaptation::Step(Adaptation::StepType::kIncreaseFrameRate,
+ target_fps));
+ }
+ // Fall-through to maybe-adapting resolution, unless |balanced_settings_|
+ // forbids it based on bitrate.
+ if (reason == AdaptationObserverInterface::AdaptReason::kQuality &&
+ !balanced_settings_.CanAdaptUpResolution(
+ GetVideoCodecTypeOrGeneric(encoder_settings_), input_pixels_,
+ encoder_target_bitrate_bps_.value_or(0))) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kIsBitrateConstrained);
+ }
+ // Scale up resolution.
+ ABSL_FALLTHROUGH_INTENDED;
+ }
+ case DegradationPreference::MAINTAIN_FRAMERATE: {
+ // Don't adapt resolution if CanAdaptUpResolution() forbids it based on
+ // bitrate and limits specified by encoder capabilities.
+ if (reason == AdaptationObserverInterface::AdaptReason::kQuality &&
+ !CanAdaptUpResolution(encoder_settings_, encoder_target_bitrate_bps_,
+ input_pixels_)) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kIsBitrateConstrained);
+ }
+ // Attempt to increase pixel count.
+ int target_pixels = input_pixels_;
+ if (source_restrictor_->adaptation_counters().resolution_adaptations ==
+ 1) {
+ RTC_LOG(LS_INFO) << "Removing resolution down-scaling setting.";
+ target_pixels = std::numeric_limits<int>::max();
+ }
+ target_pixels = GetHigherResolutionThan(target_pixels);
+ if (!source_restrictor_->CanIncreaseResolutionTo(target_pixels)) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kLimitReached);
+ }
+ return Adaptation(
+ adaptation_validation_id_,
+ Adaptation::Step(Adaptation::StepType::kIncreaseResolution,
+ target_pixels));
+ }
+ case DegradationPreference::MAINTAIN_RESOLUTION: {
+ // Scale up framerate.
+ int target_fps = input_fps_;
+ if (source_restrictor_->adaptation_counters().fps_adaptations == 1) {
+ RTC_LOG(LS_INFO) << "Removing framerate down-scaling setting.";
+ target_fps = std::numeric_limits<int>::max();
+ }
+ target_fps = GetHigherFrameRateThan(target_fps);
+ if (!source_restrictor_->CanIncreaseFrameRateTo(target_fps)) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kLimitReached);
+ }
+ return Adaptation(
+ adaptation_validation_id_,
+ Adaptation::Step(Adaptation::StepType::kIncreaseFrameRate,
+ target_fps));
+ }
+ case DegradationPreference::DISABLED:
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kAdaptationDisabled);
+ }
+}
+
+Adaptation VideoStreamAdapter::GetAdaptationDown() const {
+ // Don't adapt if we don't have sufficient input or adaptation is disabled.
+ if (input_mode_ == VideoInputMode::kNoVideo) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kInsufficientInput);
+ }
+ if (degradation_preference_ == DegradationPreference::DISABLED) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kAdaptationDisabled);
+ }
+ bool last_adaptation_was_down =
+ last_adaptation_request_ &&
+ last_adaptation_request_->mode_ == AdaptationRequest::Mode::kAdaptDown;
+ if (EffectiveDegradationPreference() ==
+ DegradationPreference::MAINTAIN_RESOLUTION) {
+ // TODO(hbos): This usage of |last_adaptation_was_down| looks like a mistake
+ // - delete it.
+ if (input_fps_ <= 0 ||
+ (last_adaptation_was_down && input_fps_ < kMinFrameRateFps)) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kInsufficientInput);
+ }
+ }
+ // Don't adapt if we're awaiting a previous adaptation to have an effect.
+ if (last_adaptation_was_down &&
+ degradation_preference_ == DegradationPreference::MAINTAIN_FRAMERATE &&
+ input_pixels_ >= last_adaptation_request_->input_pixel_count_) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kAwaitingPreviousAdaptation);
+ }
+
+ // Maybe propose targets based on degradation preference.
+ switch (EffectiveDegradationPreference()) {
+ case DegradationPreference::BALANCED: {
+ // Try scale down framerate, if lower.
+ int target_fps = balanced_settings_.MinFps(
+ GetVideoCodecTypeOrGeneric(encoder_settings_), input_pixels_);
+ if (source_restrictor_->CanDecreaseFrameRateTo(target_fps)) {
+ return Adaptation(
+ adaptation_validation_id_,
+ Adaptation::Step(Adaptation::StepType::kDecreaseFrameRate,
+ target_fps));
+ }
+ // Scale down resolution.
+ ABSL_FALLTHROUGH_INTENDED;
+ }
+ case DegradationPreference::MAINTAIN_FRAMERATE: {
+ // Scale down resolution.
+ int target_pixels = GetLowerResolutionThan(input_pixels_);
+ bool min_pixel_limit_reached =
+ target_pixels < MinPixelsPerFrame(encoder_settings_);
+ if (!source_restrictor_->CanDecreaseResolutionTo(target_pixels)) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kLimitReached,
+ min_pixel_limit_reached);
+ }
+ return Adaptation(
+ adaptation_validation_id_,
+ Adaptation::Step(Adaptation::StepType::kDecreaseResolution,
+ target_pixels),
+ min_pixel_limit_reached);
+ }
+ case DegradationPreference::MAINTAIN_RESOLUTION: {
+ int target_fps = GetLowerFrameRateThan(input_fps_);
+ if (!source_restrictor_->CanDecreaseFrameRateTo(target_fps)) {
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kLimitReached);
+ }
+ return Adaptation(
+ adaptation_validation_id_,
+ Adaptation::Step(Adaptation::StepType::kDecreaseFrameRate,
+ target_fps));
+ }
+ case DegradationPreference::DISABLED:
+ RTC_NOTREACHED();
+ return Adaptation(adaptation_validation_id_,
+ Adaptation::Status::kAdaptationDisabled);
+ }
+}
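+
+// A worked sketch of the downward steps above, with illustrative numbers (the
+// exact factors come from GetLowerResolutionThan() and
+// GetLowerFrameRateThan()): with 1280x720 input, one resolution step down
+// targets (921600 * 3) / 5 = 552960 pixels; with 30 fps input, one framerate
+// step down targets (30 * 2) / 3 = 20 fps, matching the unit tests.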
+
+VideoSourceRestrictions VideoStreamAdapter::PeekNextRestrictions(
+ const Adaptation& adaptation) const {
+ RTC_DCHECK_EQ(adaptation.validation_id_, adaptation_validation_id_);
+ if (adaptation.status() != Adaptation::Status::kValid)
+ return source_restrictor_->source_restrictions();
+ VideoSourceRestrictor restrictor_copy = *source_restrictor_;
+ restrictor_copy.ApplyAdaptationStep(adaptation.step(),
+ EffectiveDegradationPreference());
+ return restrictor_copy.source_restrictions();
+}
+
+ResourceListenerResponse VideoStreamAdapter::ApplyAdaptation(
+ const Adaptation& adaptation) {
+ RTC_DCHECK_EQ(adaptation.validation_id_, adaptation_validation_id_);
+ if (adaptation.status() != Adaptation::Status::kValid) {
+ return ResourceListenerResponse::kNothing;
+ }
+ // Remember the input pixels and fps of this adaptation. Used to avoid
+ // adapting again before this adaptation has had an effect.
+ last_adaptation_request_.emplace(AdaptationRequest{
+ input_pixels_, input_fps_,
+ AdaptationRequest::GetModeFromAdaptationAction(adaptation.step().type)});
+ // Adapt!
+ source_restrictor_->ApplyAdaptationStep(adaptation.step(),
+ EffectiveDegradationPreference());
+  // In BALANCED, if the target FPS of a decrease-framerate step is close to,
+  // or higher than, the input FPS (the decrease is smaller than the configured
+  // minimum fps difference), we tell the QualityScaler to increase its
+  // frequency.
+ // TODO(hbos): Don't have QualityScaler-specific logic here. If the
+ // QualityScaler wants to add special logic depending on what effects
+ // adaptation had, it should listen to changes to the VideoSourceRestrictions
+ // instead.
+ if (EffectiveDegradationPreference() == DegradationPreference::BALANCED &&
+ adaptation.step().type == Adaptation::StepType::kDecreaseFrameRate) {
+ absl::optional<int> min_diff = balanced_settings_.MinFpsDiff(input_pixels_);
+ if (min_diff && input_fps_ > 0) {
+ int fps_diff = input_fps_ - adaptation.step().target;
+ if (fps_diff < min_diff.value()) {
+ return ResourceListenerResponse::kQualityScalerShouldIncreaseFrequency;
+ }
+ }
+ }
+ return ResourceListenerResponse::kNothing;
+}
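+
+// A sketch of the BALANCED fps-diff check above, with assumed numbers: if
+// input_fps_ is 30, the decrease-framerate step targets 24, and MinFpsDiff()
+// returns 10, then fps_diff = 30 - 24 = 6 < 10, so the QualityScaler is asked
+// to increase its frequency.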
+
+DegradationPreference VideoStreamAdapter::EffectiveDegradationPreference()
+ const {
+ // Balanced mode for screenshare works via automatic animation detection:
+ // Resolution is capped for fullscreen animated content.
+  // Adaptation is done only via framerate downgrade.
+ // Thus effective degradation preference is MAINTAIN_RESOLUTION.
+ return (input_mode_ == VideoInputMode::kScreenshareVideo &&
+ degradation_preference_ == DegradationPreference::BALANCED)
+ ? DegradationPreference::MAINTAIN_RESOLUTION
+ : degradation_preference_;
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/adaptation/video_stream_adapter.h b/chromium/third_party/webrtc/video/adaptation/video_stream_adapter.h
new file mode 100644
index 00000000000..9e0a25563da
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/video_stream_adapter.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_ADAPTATION_VIDEO_STREAM_ADAPTER_H_
+#define VIDEO_ADAPTATION_VIDEO_STREAM_ADAPTER_H_
+
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/rtp_parameters.h"
+#include "call/adaptation/encoder_settings.h"
+#include "call/adaptation/resource.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "modules/video_coding/utility/quality_scaler.h"
+#include "rtc_base/experiments/balanced_degradation_settings.h"
+#include "video/adaptation/adaptation_counters.h"
+
+namespace webrtc {
+
+extern const int kMinFrameRateFps;
+
+class VideoStreamAdapter;
+
+// Represents one step that the VideoStreamAdapter can take when adapting the
+// VideoSourceRestrictions up or down. Or, if adaptation is not valid, provides
+// a Status code indicating the reason for not adapting.
+class Adaptation final {
+ public:
+ enum class Status {
+ // Applying this adaptation will have an effect. All other Status codes
+ // indicate that adaptation is not possible and why.
+ kValid,
+ // Cannot adapt. DegradationPreference is DISABLED.
+ // TODO(hbos): Don't support DISABLED, it doesn't exist in the spec and it
+ // causes all adaptation to be ignored, even QP-scaling.
+ kAdaptationDisabled,
+ // Cannot adapt. Adaptation is refused because we don't have video, the
+ // input frame rate is not known yet or is less than the minimum allowed
+ // (below the limit).
+ kInsufficientInput,
+ // Cannot adapt. The minimum or maximum adaptation has already been reached.
+ // There are no more steps to take.
+ kLimitReached,
+ // Cannot adapt. The resolution or frame rate requested by a recent
+ // adaptation has not yet been reflected in the input resolution or frame
+ // rate; adaptation is refused to avoid "double-adapting".
+ // TODO(hbos): Can this be rephrased as a resource usage measurement
+ // cooldown mechanism? In a multi-stream setup, we need to wait before
+ // adapting again across streams. The best way to achieve this is probably
+ // to not act on racy resource usage measurements, regardless of individual
+ // adapters. When this logic is moved or replaced then remove this enum
+ // value.
+ kAwaitingPreviousAdaptation,
+ // Cannot adapt. The adaptation that would have been proposed by the adapter
+ // violates bitrate constraints and is therefore rejected.
+ // TODO(hbos): This is a version of being resource limited, except in order
+ // to know if we are constrained we need to have a proposed adaptation in
+ // mind, thus the resource alone cannot determine this in isolation.
+ // Proposal: ask resources for permission to apply a proposed adaptation.
+ // This allows rejecting a given resolution or frame rate based on bitrate
+ // limits without coupling it with the adapter's proposal logic. When this
+ // is done, remove this enum value.
+ kIsBitrateConstrained,
+ };
+
+ // The status of this Adaptation. To find out how this Adaptation affects
+ // VideoSourceRestrictions, see VideoStreamAdapter::PeekNextRestrictions().
+ Status status() const;
+ // Used for stats reporting.
+ bool min_pixel_limit_reached() const;
+
+ private:
+ // The adapter needs to know about step type and step target in order to
+ // construct and perform an Adaptation, which is a detail we do not want to
+ // expose to the public interface.
+ friend class VideoStreamAdapter;
+
+ enum class StepType {
+ kIncreaseResolution,
+ kDecreaseResolution,
+ kIncreaseFrameRate,
+ kDecreaseFrameRate,
+ };
+
+ struct Step {
+ Step(StepType type, int target);
+ const StepType type;
+ const int target; // Pixel or frame rate depending on |type|.
+ };
+
+ // Constructs with a valid adaptation Step. Status is kValid.
+ Adaptation(int validation_id, Step step);
+ Adaptation(int validation_id, Step step, bool min_pixel_limit_reached);
+ // Constructor when adaptation is not valid. Status MUST NOT be kValid.
+ Adaptation(int validation_id, Status invalid_status);
+ Adaptation(int validation_id,
+ Status invalid_status,
+ bool min_pixel_limit_reached);
+
+ const Step& step() const; // Only callable if |status_| is kValid.
+
+ // An Adaptation can become invalidated if the state of VideoStreamAdapter is
+ // modified before the Adaptation is applied. To guard against this, this ID
+ // has to match VideoStreamAdapter::adaptation_validation_id_ when applied.
+ const int validation_id_;
+ const Status status_;
+ const absl::optional<Step> step_; // Only present if |status_| is kValid.
+ const bool min_pixel_limit_reached_;
+};
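+
+// A sketch of the validation rule (mirroring the death tests): an Adaptation
+// must be used before the adapter's input or degradation preference changes,
+// or its validation ID no longer matches:
+//
+//   Adaptation adaptation = adapter.GetAdaptationDown();
+//   adapter.SetInput(...);                // Invalidates |adaptation|.
+//   adapter.ApplyAdaptation(adaptation);  // DCHECK failure in debug builds.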
+
+// Owns the VideoSourceRestrictions for a single stream and is responsible for
+// adapting it up or down when told to do so. This class serves the following
+// purposes:
+// 1. Keep track of a stream's restrictions.
+// 2. Provide valid ways to adapt up or down the stream's restrictions.
+// 3. Modify the stream's restrictions in one of the valid ways.
+class VideoStreamAdapter {
+ public:
+ enum class SetDegradationPreferenceResult {
+ kRestrictionsNotCleared,
+ kRestrictionsCleared,
+ };
+
+ enum class VideoInputMode {
+ kNoVideo,
+ kNormalVideo,
+ kScreenshareVideo,
+ };
+
+ VideoStreamAdapter();
+ ~VideoStreamAdapter();
+
+ VideoSourceRestrictions source_restrictions() const;
+ const AdaptationCounters& adaptation_counters() const;
+ // TODO(hbos): Can we get rid of any external dependencies on
+  // BalancedDegradationSettings? How the adapter generates possible next
+ // steps for adaptation should be an implementation detail. Can the relevant
+ // information be inferred from AdaptationTargetOrReason?
+ const BalancedDegradationSettings& balanced_settings() const;
+ void ClearRestrictions();
+
+ // TODO(hbos): Setting the degradation preference should not clear
+  // restrictions! This is not defined in the spec and is unexpected; there is
+  // a tiny risk that people would discover and rely on this behavior.
+ SetDegradationPreferenceResult SetDegradationPreference(
+ DegradationPreference degradation_preference);
+  // The adaptation logic depends on these inputs.
+ void SetInput(VideoInputMode input_mode,
+ int input_pixels,
+ int input_fps,
+ absl::optional<EncoderSettings> encoder_settings,
+ absl::optional<uint32_t> encoder_target_bitrate_bps);
+
+ // Returns an adaptation that we are guaranteed to be able to apply, or a
+ // status code indicating the reason why we cannot adapt.
+ Adaptation GetAdaptationUp(
+ AdaptationObserverInterface::AdaptReason reason) const;
+ Adaptation GetAdaptationDown() const;
+ // Returns the restrictions that result from applying the adaptation, without
+ // actually applying it. If the adaptation is not valid, current restrictions
+ // are returned.
+ VideoSourceRestrictions PeekNextRestrictions(
+ const Adaptation& adaptation) const;
+  // Updates source_restrictions() according to the Adaptation.
+ // TODO(hbos): Delete ResourceListenerResponse!
+ ResourceListenerResponse ApplyAdaptation(const Adaptation& adaptation);
+
+ private:
+ class VideoSourceRestrictor;
+
+ // The input frame rate and resolution at the time of an adaptation in the
+ // direction described by |mode_| (up or down).
+ // TODO(https://crbug.com/webrtc/11393): Can this be renamed? Can this be
+ // merged with AdaptationTarget?
+ struct AdaptationRequest {
+ // The pixel count produced by the source at the time of the adaptation.
+ int input_pixel_count_;
+ // Framerate received from the source at the time of the adaptation.
+ int framerate_fps_;
+ // Indicates if request was to adapt up or down.
+ enum class Mode { kAdaptUp, kAdaptDown } mode_;
+
+ // This is a static method rather than an anonymous namespace function due
+    // to namespace visibility.
+ static Mode GetModeFromAdaptationAction(Adaptation::StepType step_type);
+ };
+
+ // Reinterprets "balanced + screenshare" as "maintain-resolution".
+ // TODO(hbos): Don't do this. This is not what "balanced" means. If the
+ // application wants to maintain resolution it should set that degradation
+ // preference rather than depend on non-standard behaviors.
+ DegradationPreference EffectiveDegradationPreference() const;
+
+  // Owner and modifier of the VideoSourceRestrictions of this stream adapter.
+ const std::unique_ptr<VideoSourceRestrictor> source_restrictor_;
+ // Decides the next adaptation target in DegradationPreference::BALANCED.
+ const BalancedDegradationSettings balanced_settings_;
+ // To guard against applying adaptations that have become invalidated, an
+ // Adaptation that is applied has to have a matching validation ID.
+ int adaptation_validation_id_;
+ // When deciding the next target up or down, different strategies are used
+ // depending on the DegradationPreference.
+ // https://w3c.github.io/mst-content-hint/#dom-rtcdegradationpreference
+ DegradationPreference degradation_preference_;
+ VideoInputMode input_mode_;
+ int input_pixels_;
+ int input_fps_;
+ absl::optional<EncoderSettings> encoder_settings_;
+ absl::optional<uint32_t> encoder_target_bitrate_bps_;
+ // The input frame rate, resolution and adaptation direction of the last
+  // ApplyAdaptation(). Used to avoid adapting twice if a recent
+ // adaptation has not had an effect on the input frame rate or resolution yet.
+ // TODO(hbos): Can we implement a more general "cooldown" mechanism of
+  // resources instead? If we have already adapted, it seems like we should wait
+ // a while before adapting again, so that we are not acting on usage
+ // measurements that are made obsolete/unreliable by an "ongoing" adaptation.
+ absl::optional<AdaptationRequest> last_adaptation_request_;
+};
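+
+// A minimal usage sketch, mirroring the unit tests:
+//
+//   VideoStreamAdapter adapter;
+//   adapter.SetDegradationPreference(
+//       DegradationPreference::MAINTAIN_FRAMERATE);
+//   adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo,
+//                    /*input_pixels=*/1280 * 720, /*input_fps=*/30,
+//                    absl::nullopt, absl::nullopt);
+//   Adaptation adaptation = adapter.GetAdaptationDown();
+//   if (adaptation.status() == Adaptation::Status::kValid)
+//     adapter.ApplyAdaptation(adaptation);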
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_VIDEO_STREAM_ADAPTER_H_
diff --git a/chromium/third_party/webrtc/video/adaptation/video_stream_adapter_unittest.cc b/chromium/third_party/webrtc/video/adaptation/video_stream_adapter_unittest.cc
new file mode 100644
index 00000000000..46f662bcd2c
--- /dev/null
+++ b/chromium/third_party/webrtc/video/adaptation/video_stream_adapter_unittest.cc
@@ -0,0 +1,766 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/adaptation/video_stream_adapter.h"
+
+#include <string>
+#include <utility>
+
+#include "absl/types/optional.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "api/video_codecs/video_encoder_config.h"
+#include "call/adaptation/encoder_settings.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "rtc_base/string_encode.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/rtc_expect_death.h"
+
+namespace webrtc {
+
+namespace {
+
+// GetAdaptationUp() requires an AdaptReason. This is only used in edge cases,
+// so most tests don't care what reason is used.
+const auto kReasonDontCare = AdaptationObserverInterface::AdaptReason::kQuality;
+
+const int kBalancedHighResolutionPixels = 1280 * 720;
+const int kBalancedHighFrameRateFps = 30;
+
+const int kBalancedMediumResolutionPixels = 640 * 480;
+const int kBalancedMediumFrameRateFps = 20;
+
+const int kBalancedLowResolutionPixels = 320 * 240;
+const int kBalancedLowFrameRateFps = 10;
+
+std::string BalancedFieldTrialConfig() {
+ return "WebRTC-Video-BalancedDegradationSettings/pixels:" +
+ rtc::ToString(kBalancedLowResolutionPixels) + "|" +
+ rtc::ToString(kBalancedMediumResolutionPixels) + "|" +
+ rtc::ToString(kBalancedHighResolutionPixels) +
+ ",fps:" + rtc::ToString(kBalancedLowFrameRateFps) + "|" +
+ rtc::ToString(kBalancedMediumFrameRateFps) + "|" +
+ rtc::ToString(kBalancedHighFrameRateFps) + "/";
+}
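+
+// With the constants above, the resulting field trial string is:
+// "WebRTC-Video-BalancedDegradationSettings/"
+// "pixels:76800|307200|921600,fps:10|20|30/"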
+
+// Responsible for adjusting the inputs to VideoStreamAdapter (SetInput), such
+// as pixels and frame rate, according to the most recent source restrictions.
+// This helps tests that apply adaptations multiple times: if the input is not
+// adjusted between adaptations, the subsequent adaptations fail with
+// kAwaitingPreviousAdaptation.
+class FakeVideoStream {
+ public:
+ FakeVideoStream(VideoStreamAdapter* adapter,
+ VideoStreamAdapter::VideoInputMode input_mode,
+ int input_pixels,
+ int input_fps,
+ absl::optional<EncoderSettings> encoder_settings,
+ absl::optional<uint32_t> encoder_target_bitrate_bps)
+ : adapter_(adapter),
+ input_mode_(std::move(input_mode)),
+ input_pixels_(input_pixels),
+ input_fps_(input_fps),
+ encoder_settings_(std::move(encoder_settings)),
+ encoder_target_bitrate_bps_(std::move(encoder_target_bitrate_bps)) {
+ adapter_->SetInput(input_mode_, input_pixels_, input_fps_,
+ encoder_settings_, encoder_target_bitrate_bps_);
+ }
+
+ int input_pixels() const { return input_pixels_; }
+ int input_fps() const { return input_fps_; }
+
+ // Performs ApplyAdaptation() followed by SetInput() with input pixels and
+ // frame rate adjusted according to the resulting restrictions.
+ void ApplyAdaptation(Adaptation adaptation) {
+ adapter_->ApplyAdaptation(adaptation);
+ // Update input pixels and fps according to the resulting restrictions.
+ auto restrictions = adapter_->source_restrictions();
+ if (restrictions.target_pixels_per_frame().has_value()) {
+ RTC_DCHECK(!restrictions.max_pixels_per_frame().has_value() ||
+ restrictions.max_pixels_per_frame().value() >=
+ restrictions.target_pixels_per_frame().value());
+ input_pixels_ = restrictions.target_pixels_per_frame().value();
+ } else if (restrictions.max_pixels_per_frame().has_value()) {
+ input_pixels_ = restrictions.max_pixels_per_frame().value();
+ }
+ if (restrictions.max_frame_rate().has_value()) {
+ input_fps_ = restrictions.max_frame_rate().value();
+ }
+ adapter_->SetInput(input_mode_, input_pixels_, input_fps_,
+ encoder_settings_, encoder_target_bitrate_bps_);
+ }
+
+ private:
+ VideoStreamAdapter* adapter_;
+ VideoStreamAdapter::VideoInputMode input_mode_;
+ int input_pixels_;
+ int input_fps_;
+ absl::optional<EncoderSettings> encoder_settings_;
+ absl::optional<uint32_t> encoder_target_bitrate_bps_;
+};
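+
+// Typical use in the tests below: construct the stream, then feed adaptations
+// through it so that the input tracks the resulting restrictions:
+//
+//   FakeVideoStream fake_stream(
+//       &adapter, VideoStreamAdapter::VideoInputMode::kNormalVideo,
+//       1280 * 720, 30, absl::nullopt, absl::nullopt);
+//   fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());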
+
+EncoderSettings EncoderSettingsWithMinPixelsPerFrame(int min_pixels_per_frame) {
+ VideoEncoder::EncoderInfo encoder_info;
+ encoder_info.scaling_settings.min_pixels_per_frame = min_pixels_per_frame;
+ return EncoderSettings(std::move(encoder_info), VideoEncoderConfig(),
+ VideoCodec());
+}
+
+EncoderSettings EncoderSettingsWithBitrateLimits(int resolution_pixels,
+ int min_start_bitrate_bps) {
+ VideoEncoder::EncoderInfo encoder_info;
+ // For bitrate limits, we only care about the next resolution up's
+ // min_start_bitrate_bps. (...Why do we look at start bitrate and not min
+ // bitrate?)
+ encoder_info.resolution_bitrate_limits.emplace_back(
+ resolution_pixels,
+ /* min_start_bitrate_bps */ min_start_bitrate_bps,
+ /* min_bitrate_bps */ 0,
+ /* max_bitrate_bps */ 0);
+ return EncoderSettings(std::move(encoder_info), VideoEncoderConfig(),
+ VideoCodec());
+}
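+
+// For example, in the BitrateConstrained test below these limits are created
+// with resolution_pixels = 1280 * 720 and min_start_bitrate_bps = 1000, while
+// the encoder target bitrate is set to 999 bps, one short of what is needed
+// for adapting back up to that resolution.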
+
+} // namespace
+
+TEST(VideoStreamAdapterTest, NoRestrictionsByDefault) {
+ VideoStreamAdapter adapter;
+ EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions());
+ EXPECT_EQ(0, adapter.adaptation_counters().Total());
+}
+
+TEST(VideoStreamAdapterTest, MaintainFramerate_DecreasesPixelsToThreeFifths) {
+ const int kInputPixels = 1280 * 720;
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ kInputPixels, 30, absl::nullopt, absl::nullopt);
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ EXPECT_FALSE(adaptation.min_pixel_limit_reached());
+ adapter.ApplyAdaptation(adaptation);
+ EXPECT_EQ(static_cast<size_t>((kInputPixels * 3) / 5),
+ adapter.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt, adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations);
+}
+
+TEST(VideoStreamAdapterTest, MaintainFramerate_DecreasesPixelsToLimitReached) {
+ const int kMinPixelsPerFrame = 640 * 480;
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ kMinPixelsPerFrame + 1, 30,
+ EncoderSettingsWithMinPixelsPerFrame(kMinPixelsPerFrame),
+ absl::nullopt);
+ // Even though we are above kMinPixelsPerFrame, because adapting down would
+ // have exceeded the limit, we are said to have reached the limit already.
+ // This differs from the frame rate adaptation logic, which would have clamped
+ // to the limit in the first step and reported kLimitReached in the second
+ // step.
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kLimitReached, adaptation.status());
+ EXPECT_TRUE(adaptation.min_pixel_limit_reached());
+}
+
+TEST(VideoStreamAdapterTest, MaintainFramerate_IncreasePixelsToFiveThirds) {
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ FakeVideoStream fake_stream(&adapter,
+ VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ 1280 * 720, 30, absl::nullopt, absl::nullopt);
+ // Go down twice, ensuring going back up is still a restricted resolution.
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(2, adapter.adaptation_counters().resolution_adaptations);
+ int input_pixels = fake_stream.input_pixels();
+ // Go up once. The target is 5/3 and the max is 12/5 of the target.
+ const int target = (input_pixels * 5) / 3;
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationUp(kReasonDontCare));
+ EXPECT_EQ(static_cast<size_t>((target * 12) / 5),
+ adapter.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(static_cast<size_t>(target),
+ adapter.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt, adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations);
+}
+
+TEST(VideoStreamAdapterTest, MaintainFramerate_IncreasePixelsToUnrestricted) {
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ FakeVideoStream fake_stream(&adapter,
+ VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ 1280 * 720, 30, absl::nullopt, absl::nullopt);
+ // We are unrestricted by default and should not be able to adapt up.
+ EXPECT_EQ(Adaptation::Status::kLimitReached,
+ adapter.GetAdaptationUp(kReasonDontCare).status());
+ // If we go down once and then back up we should not have any restrictions.
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations);
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationUp(kReasonDontCare));
+ EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions());
+ EXPECT_EQ(0, adapter.adaptation_counters().Total());
+}
+
+TEST(VideoStreamAdapterTest, MaintainResolution_DecreasesFpsToTwoThirds) {
+ const int kInputFps = 30;
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo, 1280 * 720,
+ kInputFps, absl::nullopt, absl::nullopt);
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ adapter.ApplyAdaptation(adaptation);
+ EXPECT_EQ(absl::nullopt,
+ adapter.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>((kInputFps * 2) / 3),
+ adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+}
+
+TEST(VideoStreamAdapterTest, MaintainResolution_DecreasesFpsToLimitReached) {
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+ FakeVideoStream fake_stream(
+ &adapter, VideoStreamAdapter::VideoInputMode::kNormalVideo, 1280 * 720,
+ kMinFrameRateFps + 1, absl::nullopt, absl::nullopt);
+ // If we are not yet at the limit and the next step would exceed it, the step
+ // is clamped such that we end up exactly on the limit.
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(static_cast<double>(kMinFrameRateFps),
+ adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+ // Having reached the limit, the next adaptation down is not valid.
+ EXPECT_EQ(Adaptation::Status::kLimitReached,
+ adapter.GetAdaptationDown().status());
+}
+
+TEST(VideoStreamAdapterTest, MaintainResolution_IncreaseFpsToThreeHalves) {
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+ FakeVideoStream fake_stream(&adapter,
+ VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ 1280 * 720, 30, absl::nullopt, absl::nullopt);
+ // Go down twice, ensuring going back up is still a restricted frame rate.
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(2, adapter.adaptation_counters().fps_adaptations);
+ int input_fps = fake_stream.input_fps();
+ // Go up once. The target is 3/2 of the input.
+ Adaptation adaptation = adapter.GetAdaptationUp(kReasonDontCare);
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(absl::nullopt,
+ adapter.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>((input_fps * 3) / 2),
+ adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+}
+
+TEST(VideoStreamAdapterTest, MaintainResolution_IncreaseFpsToUnrestricted) {
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+ FakeVideoStream fake_stream(&adapter,
+ VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ 1280 * 720, 30, absl::nullopt, absl::nullopt);
+ // We are unrestricted by default and should not be able to adapt up.
+ EXPECT_EQ(Adaptation::Status::kLimitReached,
+ adapter.GetAdaptationUp(kReasonDontCare).status());
+ // If we go down once and then back up we should not have any restrictions.
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationUp(kReasonDontCare));
+ EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions());
+ EXPECT_EQ(0, adapter.adaptation_counters().Total());
+}
+
+TEST(VideoStreamAdapterTest, Balanced_DecreaseFrameRate) {
+ webrtc::test::ScopedFieldTrials balanced_field_trials(
+ BalancedFieldTrialConfig());
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::BALANCED);
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ kBalancedMediumResolutionPixels, kBalancedHighFrameRateFps,
+ absl::nullopt, absl::nullopt);
+ // If our frame rate is higher than the frame rate associated with our
+ // resolution we should try to adapt to the frame rate associated with our
+ // resolution: kBalancedMediumFrameRateFps.
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ adapter.ApplyAdaptation(adaptation);
+ EXPECT_EQ(absl::nullopt,
+ adapter.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>(kBalancedMediumFrameRateFps),
+ adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(0, adapter.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+}
+
+TEST(VideoStreamAdapterTest, Balanced_DecreaseResolution) {
+ webrtc::test::ScopedFieldTrials balanced_field_trials(
+ BalancedFieldTrialConfig());
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::BALANCED);
+ FakeVideoStream fake_stream(
+ &adapter, VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ kBalancedHighResolutionPixels, kBalancedHighFrameRateFps, absl::nullopt,
+ absl::nullopt);
+ // If we are not below the current resolution's frame rate limit, we should
+ // adapt resolution according to "maintain-framerate" logic (three fifths).
+ //
+ // However, since we are unlimited at the start and input frame rate is not
+ // below kBalancedHighFrameRateFps, we first restrict the frame rate to
+ // kBalancedHighFrameRateFps even though that is our current frame rate. This
+ // does prevent the source from going higher, though, so it's technically not
+ // a NO-OP.
+ {
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ }
+ EXPECT_EQ(absl::nullopt,
+ adapter.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>(kBalancedHighFrameRateFps),
+ adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(0, adapter.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+ // Verify "maintain-framerate" logic the second time we adapt: Frame rate
+  // restrictions remain the same and resolution goes down.
+ {
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ }
+ constexpr size_t kReducedPixelsFirstStep =
+ static_cast<size_t>((kBalancedHighResolutionPixels * 3) / 5);
+ EXPECT_EQ(kReducedPixelsFirstStep,
+ adapter.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>(kBalancedHighFrameRateFps),
+ adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+ // If we adapt again, because the balanced settings' proposed frame rate is
+ // still kBalancedHighFrameRateFps, "maintain-framerate" will trigger again.
+ static_assert(kReducedPixelsFirstStep > kBalancedMediumResolutionPixels,
+ "The reduced resolution is still greater than the next lower "
+ "balanced setting resolution");
+ constexpr size_t kReducedPixelsSecondStep = (kReducedPixelsFirstStep * 3) / 5;
+ {
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ }
+ EXPECT_EQ(kReducedPixelsSecondStep,
+ adapter.source_restrictions().max_pixels_per_frame());
+ EXPECT_EQ(absl::nullopt,
+ adapter.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(static_cast<double>(kBalancedHighFrameRateFps),
+ adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(2, adapter.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+}
+
+// Testing when to adapt frame rate and when to adapt resolution is quite
+// entangled, so this test covers both cases.
+//
+// There is an asymmetry: when we adapt down we do it in one order, but when we
+// adapt up we don't simply reverse that order. Instead we always try to adapt
+// the frame rate first, according to the balanced settings' configuration, and
+// only when the target frame rate is already achieved do we adjust the
+// resolution.
+TEST(VideoStreamAdapterTest, Balanced_IncreaseFrameRateAndResolution) {
+ webrtc::test::ScopedFieldTrials balanced_field_trials(
+ BalancedFieldTrialConfig());
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::BALANCED);
+ FakeVideoStream fake_stream(
+ &adapter, VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ kBalancedHighResolutionPixels, kBalancedHighFrameRateFps, absl::nullopt,
+ absl::nullopt);
+ // The desired starting point of this test is having adapted frame rate twice.
+ // This requires performing a number of adaptations.
+ constexpr size_t kReducedPixelsFirstStep =
+ static_cast<size_t>((kBalancedHighResolutionPixels * 3) / 5);
+ constexpr size_t kReducedPixelsSecondStep = (kReducedPixelsFirstStep * 3) / 5;
+ constexpr size_t kReducedPixelsThirdStep = (kReducedPixelsSecondStep * 3) / 5;
+ static_assert(kReducedPixelsFirstStep > kBalancedMediumResolutionPixels,
+ "The first pixel reduction is greater than the balanced "
+ "settings' medium pixel configuration");
+ static_assert(kReducedPixelsSecondStep > kBalancedMediumResolutionPixels,
+ "The second pixel reduction is greater than the balanced "
+ "settings' medium pixel configuration");
+ static_assert(kReducedPixelsThirdStep <= kBalancedMediumResolutionPixels,
+ "The third pixel reduction is NOT greater than the balanced "
+ "settings' medium pixel configuration");
+ // The first adaptation should affect the frame rate: See
+ // Balanced_DecreaseResolution for explanation why.
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(static_cast<double>(kBalancedHighFrameRateFps),
+ adapter.source_restrictions().max_frame_rate());
+  // The next three adaptations affect the resolution, because we have to reach
+ // kBalancedMediumResolutionPixels before a lower frame rate is considered by
+ // BalancedDegradationSettings. The number three is derived from the
+ // static_asserts above.
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(kReducedPixelsFirstStep,
+ adapter.source_restrictions().max_pixels_per_frame());
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(kReducedPixelsSecondStep,
+ adapter.source_restrictions().max_pixels_per_frame());
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(kReducedPixelsThirdStep,
+ adapter.source_restrictions().max_pixels_per_frame());
+ // Thus, the next adaptation will reduce frame rate to
+ // kBalancedMediumFrameRateFps.
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(static_cast<double>(kBalancedMediumFrameRateFps),
+ adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(3, adapter.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(2, adapter.adaptation_counters().fps_adaptations);
+ // Adapt up!
+ // While our resolution is in the medium-range, the frame rate associated with
+ // the next resolution configuration up ("high") is kBalancedHighFrameRateFps
+ // and "balanced" prefers adapting frame rate if not already applied.
+ {
+ Adaptation adaptation = adapter.GetAdaptationUp(kReasonDontCare);
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(static_cast<double>(kBalancedHighFrameRateFps),
+ adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(3, adapter.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+ }
+ // Now that we have already achieved the next frame rate up, we act according
+ // to "maintain-framerate". We go back up in resolution. Due to rounding
+ // errors we don't end up back at kReducedPixelsSecondStep. Rather we get to
+ // kReducedPixelsSecondStepUp, which is off by one compared to
+ // kReducedPixelsSecondStep.
+ constexpr size_t kReducedPixelsSecondStepUp =
+ (kReducedPixelsThirdStep * 5) / 3;
+ {
+ Adaptation adaptation = adapter.GetAdaptationUp(kReasonDontCare);
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(kReducedPixelsSecondStepUp,
+ adapter.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(2, adapter.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+ }
+ // Now that our resolution is back in the high-range, the next frame rate to
+ // try out is "unlimited".
+ {
+ Adaptation adaptation = adapter.GetAdaptationUp(kReasonDontCare);
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(absl::nullopt, adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(2, adapter.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(0, adapter.adaptation_counters().fps_adaptations);
+ }
+ // Now only adapting resolution remains.
+ constexpr size_t kReducedPixelsFirstStepUp =
+ (kReducedPixelsSecondStepUp * 5) / 3;
+ {
+ Adaptation adaptation = adapter.GetAdaptationUp(kReasonDontCare);
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(kReducedPixelsFirstStepUp,
+ adapter.source_restrictions().target_pixels_per_frame());
+ EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations);
+ EXPECT_EQ(0, adapter.adaptation_counters().fps_adaptations);
+ }
+ // The last step up should make us entirely unrestricted.
+ {
+ Adaptation adaptation = adapter.GetAdaptationUp(kReasonDontCare);
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions());
+ EXPECT_EQ(0, adapter.adaptation_counters().Total());
+ }
+}
+
+TEST(VideoStreamAdapterTest, Balanced_LimitReached) {
+ webrtc::test::ScopedFieldTrials balanced_field_trials(
+ BalancedFieldTrialConfig());
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::BALANCED);
+ FakeVideoStream fake_stream(
+ &adapter, VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ kBalancedLowResolutionPixels, kBalancedLowFrameRateFps, absl::nullopt,
+ absl::nullopt);
+ // Attempting to adapt up while unrestricted should result in kLimitReached.
+ EXPECT_EQ(Adaptation::Status::kLimitReached,
+ adapter.GetAdaptationUp(kReasonDontCare).status());
+  // Adapting down once results in a restricted frame rate; in this case we reach
+ // the lowest possible frame rate immediately: kBalancedLowFrameRateFps.
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(static_cast<double>(kBalancedLowFrameRateFps),
+ adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+ // Any further adaptation must follow "maintain-framerate" rules (these are
+ // covered in more depth by the MaintainFramerate tests). This test does not
+ // assert exactly how resolution is adjusted, only that resolution always
+ // decreases and that we eventually reach kLimitReached.
+ size_t previous_resolution = kBalancedLowResolutionPixels;
+ bool did_reach_limit = false;
+ // If we have not reached the limit within 5 adaptations something is wrong...
+ for (int i = 0; i < 5; i++) {
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ if (adaptation.status() == Adaptation::Status::kLimitReached) {
+ did_reach_limit = true;
+ break;
+ }
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_LT(adapter.source_restrictions().max_pixels_per_frame().value(),
+ previous_resolution);
+ previous_resolution =
+ adapter.source_restrictions().max_pixels_per_frame().value();
+ }
+ EXPECT_TRUE(did_reach_limit);
+ // Frame rate restrictions are the same as before.
+ EXPECT_EQ(static_cast<double>(kBalancedLowFrameRateFps),
+ adapter.source_restrictions().max_frame_rate());
+ EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations);
+}
+
+TEST(VideoStreamAdapterTest, AdaptationDisabled) {
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::DISABLED);
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo, 1280 * 720,
+ 30, absl::nullopt, absl::nullopt);
+ EXPECT_EQ(Adaptation::Status::kAdaptationDisabled,
+ adapter.GetAdaptationDown().status());
+ EXPECT_EQ(Adaptation::Status::kAdaptationDisabled,
+ adapter.GetAdaptationUp(kReasonDontCare).status());
+}
+
+TEST(VideoStreamAdapterTest, InsufficientInput) {
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+  // Having no video is insufficient input in either direction.
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNoVideo, 1280 * 720, 30,
+ absl::nullopt, absl::nullopt);
+ EXPECT_EQ(Adaptation::Status::kInsufficientInput,
+ adapter.GetAdaptationDown().status());
+ EXPECT_EQ(Adaptation::Status::kInsufficientInput,
+ adapter.GetAdaptationUp(kReasonDontCare).status());
+  // An input frame rate of zero is insufficient when adapting down.
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo, 1280 * 720,
+ 0, absl::nullopt, absl::nullopt);
+ EXPECT_EQ(Adaptation::Status::kInsufficientInput,
+ adapter.GetAdaptationDown().status());
+}
+
+// kAwaitingPreviousAdaptation is only supported in "maintain-framerate".
+TEST(VideoStreamAdapterTest, MaintainFramerate_AwaitingPreviousAdaptationDown) {
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo, 1280 * 720,
+ 30, absl::nullopt, absl::nullopt);
+ // Adapt down once, but don't update the input.
+ adapter.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations);
+ {
+ // Having performed the adaptation, but not updated the input based on the
+ // new restrictions, adapting again in the same direction will not work.
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kAwaitingPreviousAdaptation,
+ adaptation.status());
+ }
+}
+
+// kAwaitingPreviousAdaptation is only supported in "maintain-framerate".
+TEST(VideoStreamAdapterTest, MaintainFramerate_AwaitingPreviousAdaptationUp) {
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ FakeVideoStream fake_stream(&adapter,
+ VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ 1280 * 720, 30, absl::nullopt, absl::nullopt);
+  // Perform two adaptations down so that adapting up twice is possible.
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(2, adapter.adaptation_counters().resolution_adaptations);
+ // Adapt up once, but don't update the input.
+ adapter.ApplyAdaptation(adapter.GetAdaptationUp(kReasonDontCare));
+ EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations);
+ {
+ // Having performed the adaptation, but not updated the input based on the
+ // new restrictions, adapting again in the same direction will not work.
+ Adaptation adaptation = adapter.GetAdaptationUp(kReasonDontCare);
+ EXPECT_EQ(Adaptation::Status::kAwaitingPreviousAdaptation,
+ adaptation.status());
+ }
+}
+
+// TODO(hbos): Also add BitrateConstrained test coverage for the BALANCED
+// degradation preference.
+TEST(VideoStreamAdapterTest, BitrateConstrained_MaintainFramerate) {
+ const int kInputPixels = 1280 * 720;
+ const int kBitrateLimit = 1000;
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ FakeVideoStream fake_stream(
+ &adapter, VideoStreamAdapter::VideoInputMode::kNormalVideo, kInputPixels,
+ 30, EncoderSettingsWithBitrateLimits(kInputPixels, kBitrateLimit),
+ // The target bitrate is one less than necessary
+ // to adapt up.
+ kBitrateLimit - 1);
+ // Adapt down so that it would be possible to adapt up if we weren't bitrate
+  // constrained.
+ fake_stream.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations);
+ // Adapting up for reason kQuality should not work because this exceeds the
+ // bitrate limit.
+ // TODO(hbos): Why would the reason matter? If the signal was kCpu then the
+ // current code allows us to violate this bitrate constraint. This does not
+ // make any sense: either we are limited or we are not, end of story.
+ EXPECT_EQ(
+ Adaptation::Status::kIsBitrateConstrained,
+ adapter
+ .GetAdaptationUp(AdaptationObserverInterface::AdaptReason::kQuality)
+ .status());
+}
+
+TEST(VideoStreamAdapterTest, PeekNextRestrictions) {
+ VideoStreamAdapter adapter;
+ // Any non-disabled DegradationPreference will do.
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ FakeVideoStream fake_stream(&adapter,
+ VideoStreamAdapter::VideoInputMode::kNormalVideo,
+ 1280 * 720, 30, absl::nullopt, absl::nullopt);
+ // When adaptation is not possible.
+ {
+ Adaptation adaptation = adapter.GetAdaptationUp(kReasonDontCare);
+ EXPECT_EQ(Adaptation::Status::kLimitReached, adaptation.status());
+ EXPECT_EQ(adapter.PeekNextRestrictions(adaptation),
+ adapter.source_restrictions());
+ }
+ // When we adapt down.
+ {
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ VideoSourceRestrictions next_restrictions =
+ adapter.PeekNextRestrictions(adaptation);
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(next_restrictions, adapter.source_restrictions());
+ }
+ // When we adapt up.
+ {
+ Adaptation adaptation = adapter.GetAdaptationUp(kReasonDontCare);
+ EXPECT_EQ(Adaptation::Status::kValid, adaptation.status());
+ VideoSourceRestrictions next_restrictions =
+ adapter.PeekNextRestrictions(adaptation);
+ fake_stream.ApplyAdaptation(adaptation);
+ EXPECT_EQ(next_restrictions, adapter.source_restrictions());
+ }
+}
+
+// This test covers non-standard behavior. If the application desires
+// "maintain-resolution" it should ask for it rather than relying on this
+// behavior, which should become unsupported.
+TEST(VideoStreamAdapterTest, BalancedScreenshareBehavesLikeMaintainResolution) {
+ const int kInputPixels = 1280 * 720;
+ const int kInputFps = 30;
+ VideoStreamAdapter balanced_adapter;
+ balanced_adapter.SetDegradationPreference(DegradationPreference::BALANCED);
+ balanced_adapter.SetInput(
+ VideoStreamAdapter::VideoInputMode::kScreenshareVideo, kInputPixels,
+ kInputFps, absl::nullopt, absl::nullopt);
+ VideoStreamAdapter maintain_resolution_adapter;
+ maintain_resolution_adapter.SetDegradationPreference(
+ DegradationPreference::MAINTAIN_RESOLUTION);
+ maintain_resolution_adapter.SetInput(
+ VideoStreamAdapter::VideoInputMode::kNormalVideo, kInputPixels, kInputFps,
+ absl::nullopt, absl::nullopt);
+ EXPECT_EQ(balanced_adapter.source_restrictions(),
+ maintain_resolution_adapter.source_restrictions());
+ balanced_adapter.ApplyAdaptation(balanced_adapter.GetAdaptationDown());
+ maintain_resolution_adapter.ApplyAdaptation(
+ maintain_resolution_adapter.GetAdaptationDown());
+ EXPECT_EQ(balanced_adapter.source_restrictions(),
+ maintain_resolution_adapter.source_restrictions());
+}
+
+TEST(VideoStreamAdapterTest,
+ SetDegradationPreferenceToOrFromBalancedClearsRestrictions) {
+ VideoStreamAdapter adapter;
+ EXPECT_EQ(VideoStreamAdapter::SetDegradationPreferenceResult::
+ kRestrictionsNotCleared,
+ adapter.SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE));
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo, 1280 * 720,
+ 30, absl::nullopt, absl::nullopt);
+ adapter.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_NE(VideoSourceRestrictions(), adapter.source_restrictions());
+ EXPECT_NE(0, adapter.adaptation_counters().Total());
+ // Changing from non-balanced to balanced clears the restrictions.
+ EXPECT_EQ(
+ VideoStreamAdapter::SetDegradationPreferenceResult::kRestrictionsCleared,
+ adapter.SetDegradationPreference(DegradationPreference::BALANCED));
+ EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions());
+ EXPECT_EQ(0, adapter.adaptation_counters().Total());
+ // Apply adaptation again.
+ adapter.ApplyAdaptation(adapter.GetAdaptationDown());
+ EXPECT_NE(VideoSourceRestrictions(), adapter.source_restrictions());
+ EXPECT_NE(0, adapter.adaptation_counters().Total());
+ // Changing from balanced to non-balanced clears the restrictions.
+ EXPECT_EQ(
+ VideoStreamAdapter::SetDegradationPreferenceResult::kRestrictionsCleared,
+ adapter.SetDegradationPreference(
+ DegradationPreference::MAINTAIN_RESOLUTION));
+ EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions());
+ EXPECT_EQ(0, adapter.adaptation_counters().Total());
+}
+
+// Death tests.
+// Disabled on Android because death tests misbehave on Android, see
+// base/test/gtest_util.h.
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+TEST(VideoStreamAdapterDeathTest,
+ SetDegradationPreferenceInvalidatesAdaptations) {
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE);
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo, 1280 * 720,
+ 30, absl::nullopt, absl::nullopt);
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+ EXPECT_DEATH(adapter.ApplyAdaptation(adaptation), "");
+}
+
+TEST(VideoStreamAdapterDeathTest, SetInputInvalidatesAdaptations) {
+ VideoStreamAdapter adapter;
+ adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION);
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo, 1280 * 720,
+ 30, absl::nullopt, absl::nullopt);
+ Adaptation adaptation = adapter.GetAdaptationDown();
+ adapter.SetInput(VideoStreamAdapter::VideoInputMode::kNormalVideo, 1280 * 720,
+ 31, absl::nullopt, absl::nullopt);
+ EXPECT_DEATH(adapter.PeekNextRestrictions(adaptation), "");
+}
+
+#endif // RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/buffered_frame_decryptor.cc b/chromium/third_party/webrtc/video/buffered_frame_decryptor.cc
index 41eddea17ee..fc9dff5b026 100644
--- a/chromium/third_party/webrtc/video/buffered_frame_decryptor.cc
+++ b/chromium/third_party/webrtc/video/buffered_frame_decryptor.cc
@@ -11,9 +11,11 @@
#include "video/buffered_frame_decryptor.h"
#include <utility>
+#include <vector>
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "modules/video_coding/frame_object.h"
#include "rtc_base/logging.h"
-#include "rtc_base/system/fallthrough.h"
#include "system_wrappers/include/field_trial.h"
namespace webrtc {
@@ -22,7 +24,7 @@ BufferedFrameDecryptor::BufferedFrameDecryptor(
OnDecryptedFrameCallback* decrypted_frame_callback,
OnDecryptionStatusChangeCallback* decryption_status_change_callback)
: generic_descriptor_auth_experiment_(
- field_trial::IsEnabled("WebRTC-GenericDescriptorAuth")),
+ !field_trial::IsDisabled("WebRTC-GenericDescriptorAuth")),
decrypted_frame_callback_(decrypted_frame_callback),
decryption_status_change_callback_(decryption_status_change_callback) {}
@@ -61,9 +63,7 @@ BufferedFrameDecryptor::FrameDecision BufferedFrameDecryptor::DecryptFrame(
return FrameDecision::kStash;
}
// When using encryption we expect the frame to have the generic descriptor.
- absl::optional<RtpGenericFrameDescriptor> descriptor =
- frame->GetGenericFrameDescriptor();
- if (!descriptor) {
+ if (frame->GetRtpVideoHeader().generic == absl::nullopt) {
RTC_LOG(LS_ERROR) << "No generic frame descriptor found dropping frame.";
return FrameDecision::kDrop;
}
@@ -76,10 +76,10 @@ BufferedFrameDecryptor::FrameDecision BufferedFrameDecryptor::DecryptFrame(
rtc::ArrayView<uint8_t> inline_decrypted_bitstream(frame->data(),
max_plaintext_byte_size);
- // Only enable authenticating the header if the field trial is enabled.
- rtc::ArrayView<const uint8_t> additional_data;
+ // Enable authenticating the header if the field trial isn't disabled.
+ std::vector<uint8_t> additional_data;
if (generic_descriptor_auth_experiment_) {
- additional_data = descriptor->GetByteRepresentation();
+ additional_data = RtpDescriptorAuthentication(frame->GetRtpVideoHeader());
}
// Attempt to decrypt the video frame.
diff --git a/chromium/third_party/webrtc/video/buffered_frame_decryptor_unittest.cc b/chromium/third_party/webrtc/video/buffered_frame_decryptor_unittest.cc
index 1b21acfb850..bbc08b0da34 100644
--- a/chromium/third_party/webrtc/video/buffered_frame_decryptor_unittest.cc
+++ b/chromium/third_party/webrtc/video/buffered_frame_decryptor_unittest.cc
@@ -57,6 +57,8 @@ class BufferedFrameDecryptorTest : public ::testing::Test,
std::unique_ptr<video_coding::RtpFrameObject> CreateRtpFrameObject(
bool key_frame) {
seq_num_++;
+ RTPVideoHeader rtp_video_header;
+ rtp_video_header.generic.emplace();
// clang-format off
return std::make_unique<video_coding::RtpFrameObject>(
@@ -73,9 +75,8 @@ class BufferedFrameDecryptorTest : public ::testing::Test,
kVideoCodecGeneric,
kVideoRotation_0,
VideoContentType::UNSPECIFIED,
- RTPVideoHeader(),
+ rtp_video_header,
/*color_space=*/absl::nullopt,
- RtpGenericFrameDescriptor(),
RtpPacketInfos(),
EncodedImageBuffer::Create(/*size=*/0));
// clang-format on
diff --git a/chromium/third_party/webrtc/video/call_stats_unittest.cc b/chromium/third_party/webrtc/video/call_stats_unittest.cc
index 85b9eb951d2..c560ccbee6b 100644
--- a/chromium/third_party/webrtc/video/call_stats_unittest.cc
+++ b/chromium/third_party/webrtc/video/call_stats_unittest.cc
@@ -315,10 +315,11 @@ TEST_F(CallStatsTest, ProducesHistogramMetrics) {
process_thread_->Stop();
call_stats_.UpdateHistogramsForTest();
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.AverageRoundTripTimeInMilliseconds"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.AverageRoundTripTimeInMilliseconds", kRtt));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.AverageRoundTripTimeInMilliseconds"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AverageRoundTripTimeInMilliseconds",
+ kRtt));
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/encoder_bitrate_adjuster.cc b/chromium/third_party/webrtc/video/encoder_bitrate_adjuster.cc
index 5af9128a860..45d88875e34 100644
--- a/chromium/third_party/webrtc/video/encoder_bitrate_adjuster.cc
+++ b/chromium/third_party/webrtc/video/encoder_bitrate_adjuster.cc
@@ -109,7 +109,7 @@ VideoBitrateAllocation EncoderBitrateAdjuster::AdjustRateAllocation(
LayerRateInfo& layer_info = layer_infos.back();
layer_info.target_rate =
- DataRate::bps(rates.bitrate.GetSpatialLayerSum(si));
+ DataRate::BitsPerSec(rates.bitrate.GetSpatialLayerSum(si));
// Adjustment is done per spatial layer only (not per temporal layer).
if (frames_since_layout_change_ < kMinFramesSinceLayoutChange) {
@@ -186,8 +186,8 @@ VideoBitrateAllocation EncoderBitrateAdjuster::AdjustRateAllocation(
// Available link headroom that can be used to fill wanted overshoot.
DataRate available_headroom = DataRate::Zero();
if (utilize_bandwidth_headroom_) {
- available_headroom =
- rates.bandwidth_allocation - DataRate::bps(rates.bitrate.get_sum_bps());
+ available_headroom = rates.bandwidth_allocation -
+ DataRate::BitsPerSec(rates.bitrate.get_sum_bps());
}
// All wanted overshoots are satisfied in the same proportion based on
@@ -214,7 +214,7 @@ VideoBitrateAllocation EncoderBitrateAdjuster::AdjustRateAllocation(
if (min_bitrates_bps_[si] > 0 &&
layer_info.target_rate > DataRate::Zero() &&
- DataRate::bps(min_bitrates_bps_[si]) < layer_info.target_rate) {
+ DataRate::BitsPerSec(min_bitrates_bps_[si]) < layer_info.target_rate) {
// Make sure rate adjuster doesn't push target bitrate below minimum.
utilization_factor =
std::min(utilization_factor, layer_info.target_rate.bps<double>() /
@@ -236,7 +236,7 @@ VideoBitrateAllocation EncoderBitrateAdjuster::AdjustRateAllocation(
// Populate the adjusted allocation with determined utilization factor.
if (active_tls_[si] == 1 &&
layer_info.target_rate >
- DataRate::bps(rates.bitrate.GetBitrate(si, 0))) {
+ DataRate::BitsPerSec(rates.bitrate.GetBitrate(si, 0))) {
// Bitrate allocation indicates temporal layer usage, but encoder
// does not seem to support it. Pipe all bitrate into a single
// overshoot detector.
@@ -282,8 +282,15 @@ VideoBitrateAllocation EncoderBitrateAdjuster::AdjustRateAllocation(
(ti == 0 ? 0 : current_fps_allocation_[si][ti - 1])) /
VideoEncoder::EncoderInfo::kMaxFramerateFraction;
+ if (fps_fraction <= 0.0) {
+ RTC_LOG(LS_WARNING)
+ << "Encoder config has temporal layer with non-zero bitrate "
+ "allocation but zero framerate allocation.";
+ continue;
+ }
+
overshoot_detectors_[si][ti]->SetTargetRate(
- DataRate::bps(layer_bitrate_bps),
+ DataRate::BitsPerSec(layer_bitrate_bps),
fps_fraction * rates.framerate_fps, now_ms);
}
}
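Two distinct changes in the encoder_bitrate_adjuster.cc hunks above: the deprecated lowercase unit factories are replaced by their CamelCase successors, and a new guard skips (with a warning) any temporal layer whose bitrate allocation is non-zero but whose framerate allocation is zero, rather than feeding a zero fps fraction into the overshoot detector. The factory rename on its own, as a sketch:

// Old spellings in comments; replacements are the CamelCase factories on the
// api/units types.
DataRate rate = DataRate::BitsPerSec(500000);   // was DataRate::bps(500000)
TimeDelta frame = TimeDelta::Seconds(1) / 30;   // was TimeDelta::seconds(1)
TimeDelta wait = TimeDelta::Millis(5);          // was TimeDelta::ms(5)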
diff --git a/chromium/third_party/webrtc/video/encoder_bitrate_adjuster_unittest.cc b/chromium/third_party/webrtc/video/encoder_bitrate_adjuster_unittest.cc
index b7cdfd35f29..a521f1de78a 100644
--- a/chromium/third_party/webrtc/video/encoder_bitrate_adjuster_unittest.cc
+++ b/chromium/third_party/webrtc/video/encoder_bitrate_adjuster_unittest.cc
@@ -34,7 +34,7 @@ class EncoderBitrateAdjusterTest : public ::testing::Test {
static_assert(kSequenceLength % 2 == 0, "Sequence length must be even.");
EncoderBitrateAdjusterTest()
- : target_bitrate_(DataRate::bps(kDefaultBitrateBps)),
+ : target_bitrate_(DataRate::BitsPerSec(kDefaultBitrateBps)),
target_framerate_fps_(kDefaultFrameRateFps),
tl_pattern_idx_{},
sequence_idx_{} {}
@@ -106,7 +106,7 @@ class EncoderBitrateAdjusterTest : public ::testing::Test {
const int64_t start_us = rtc::TimeMicros();
while (rtc::TimeMicros() <
start_us + (duration_ms * rtc::kNumMicrosecsPerMillisec)) {
- clock_.AdvanceTime(TimeDelta::seconds(1) / target_framerate_fps_);
+ clock_.AdvanceTime(TimeDelta::Seconds(1) / target_framerate_fps_);
for (size_t si = 0; si < NumSpatialLayers(); ++si) {
const std::vector<int>& tl_pattern =
kTlPatterns[NumTemporalLayers(si) - 1];
@@ -478,7 +478,8 @@ TEST_F(EncoderBitrateAdjusterTest, HeadroomAllowsOvershootToMediaRate) {
current_adjusted_allocation_ =
adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
current_input_allocation_, target_framerate_fps_,
- DataRate::bps(current_input_allocation_.get_sum_bps() * 1.1)));
+ DataRate::BitsPerSec(current_input_allocation_.get_sum_bps() *
+ 1.1)));
ExpectNear(current_input_allocation_, current_adjusted_allocation_, 0.01);
}
}
@@ -520,7 +521,7 @@ TEST_F(EncoderBitrateAdjusterTest, DontExceedMediaRateEvenWithHeadroom) {
current_adjusted_allocation_ =
adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
current_input_allocation_, target_framerate_fps_,
- DataRate::bps(current_input_allocation_.get_sum_bps() * 2)));
+ DataRate::BitsPerSec(current_input_allocation_.get_sum_bps() * 2)));
ExpectNear(MultiplyAllocation(current_input_allocation_, 1 / 1.1),
current_adjusted_allocation_, 0.015);
}
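For context on the two headroom tests touched above: RateControlParameters carries the input allocation, the target frame rate, and the bandwidth allocation, and the tests set the latter above the media rate to model link headroom. Condensed from the hunks, using the fixture's names:

// 10% link headroom above the media rate; the adjuster may spend it on
// overshoot but (per the second test) never exceeds the media rate itself.
current_adjusted_allocation_ =
    adjuster_->AdjustRateAllocation(VideoEncoder::RateControlParameters(
        current_input_allocation_, target_framerate_fps_,
        DataRate::BitsPerSec(current_input_allocation_.get_sum_bps() * 1.1)));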
diff --git a/chromium/third_party/webrtc/video/encoder_overshoot_detector_unittest.cc b/chromium/third_party/webrtc/video/encoder_overshoot_detector_unittest.cc
index 7170f490615..a3c44eb0133 100644
--- a/chromium/third_party/webrtc/video/encoder_overshoot_detector_unittest.cc
+++ b/chromium/third_party/webrtc/video/encoder_overshoot_detector_unittest.cc
@@ -23,7 +23,7 @@ class EncoderOvershootDetectorTest : public ::testing::Test {
static constexpr double kDefaultFrameRateFps = 15;
EncoderOvershootDetectorTest()
: detector_(kWindowSizeMs),
- target_bitrate_(DataRate::bps(kDefaultBitrateBps)),
+ target_bitrate_(DataRate::BitsPerSec(kDefaultBitrateBps)),
target_framerate_fps_(kDefaultFrameRateFps) {}
protected:
@@ -40,14 +40,14 @@ class EncoderOvershootDetectorTest : public ::testing::Test {
if (rtc::TimeMillis() == 0) {
// Encode a first frame which by definition has no overuse factor.
detector_.OnEncodedFrame(frame_size_bytes, rtc::TimeMillis());
- clock_.AdvanceTime(TimeDelta::seconds(1) / target_framerate_fps_);
+ clock_.AdvanceTime(TimeDelta::Seconds(1) / target_framerate_fps_);
}
int64_t runtime_us = 0;
while (runtime_us < test_duration_ms * 1000) {
detector_.OnEncodedFrame(frame_size_bytes, rtc::TimeMillis());
runtime_us += rtc::kNumMicrosecsPerSec / target_framerate_fps_;
- clock_.AdvanceTime(TimeDelta::seconds(1) / target_framerate_fps_);
+ clock_.AdvanceTime(TimeDelta::Seconds(1) / target_framerate_fps_);
}
// At constant utilization, both network and media utilization should be
@@ -81,7 +81,7 @@ TEST_F(EncoderOvershootDetectorTest, NoUtilizationIfNoRate) {
detector_.GetNetworkRateUtilizationFactor(rtc::TimeMillis()).has_value());
detector_.OnEncodedFrame(frame_size_bytes, rtc::TimeMillis());
- clock_.AdvanceTime(TimeDelta::ms(time_interval_ms));
+ clock_.AdvanceTime(TimeDelta::Millis(time_interval_ms));
EXPECT_TRUE(
detector_.GetNetworkRateUtilizationFactor(rtc::TimeMillis()).has_value());
}
@@ -111,7 +111,7 @@ TEST_F(EncoderOvershootDetectorTest, ConstantOvershootVaryingRates) {
RunConstantUtilizationTest(1.2, 1.2, 0.01, kWindowSizeMs);
target_framerate_fps_ /= 2;
RunConstantUtilizationTest(1.2, 1.2, 0.01, kWindowSizeMs / 2);
- target_bitrate_ = DataRate::bps(target_bitrate_.bps() / 2);
+ target_bitrate_ = DataRate::BitsPerSec(target_bitrate_.bps() / 2);
RunConstantUtilizationTest(1.2, 1.2, 0.01, kWindowSizeMs / 2);
}
@@ -147,7 +147,7 @@ TEST_F(EncoderOvershootDetectorTest, PartialOvershoot) {
int i = 0;
while (runtime_us < kWindowSizeMs * rtc::kNumMicrosecsPerMillisec) {
runtime_us += rtc::kNumMicrosecsPerSec / target_framerate_fps_;
- clock_.AdvanceTime(TimeDelta::seconds(1) / target_framerate_fps_);
+ clock_.AdvanceTime(TimeDelta::Seconds(1) / target_framerate_fps_);
int frame_size_bytes = (i++ % 4 < 2) ? (ideal_frame_size_bytes * 120) / 100
: (ideal_frame_size_bytes * 80) / 100;
detector_.OnEncodedFrame(frame_size_bytes, rtc::TimeMillis());
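These tests step a fake clock by exactly one frame interval per encoded frame. A self-contained sketch of that pattern, assuming a ScopedFakeClock fixture from rtc_base/fake_clock.h and the detector's window-size constructor seen in the hunk; names here are illustrative:

EncoderOvershootDetector detector(/*window_size_ms=*/3000);
rtc::ScopedFakeClock clock;
const double fps = 15.0;
const int frame_size_bytes = 1000;  // arbitrary example size
for (int i = 0; i < 30; ++i) {
  // Report one encoded frame, then advance simulated time by one frame
  // interval so rtc::TimeMillis() tracks the nominal capture rate.
  detector.OnEncodedFrame(frame_size_bytes, rtc::TimeMillis());
  clock.AdvanceTime(TimeDelta::Seconds(1) / fps);
}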
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/call_operation_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/call_operation_tests.cc
index b38062bb809..fcf7660c3da 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/call_operation_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/call_operation_tests.cc
@@ -9,6 +9,9 @@
*/
#include <memory>
+
+#include "api/test/create_frame_generator.h"
+#include "api/test/frame_generator_interface.h"
#include "api/test/simulated_network.h"
#include "call/fake_network_pipe.h"
#include "call/simulated_network.h"
@@ -16,7 +19,7 @@
#include "system_wrappers/include/sleep.h"
#include "test/call_test.h"
#include "test/field_trial.h"
-#include "test/frame_generator.h"
+#include "test/frame_forwarder.h"
#include "test/gtest.h"
#include "test/null_transport.h"
@@ -125,13 +128,13 @@ TEST_F(CallOperationEndToEndTest, RendersSingleDelayedFrame) {
// Create frames that are smaller than the send width/height, this is
// done to check that the callbacks are done after processing video.
- std::unique_ptr<test::FrameGenerator> frame_generator(
- test::FrameGenerator::CreateSquareGenerator(
- kWidth, kHeight, absl::nullopt, absl::nullopt));
+ std::unique_ptr<test::FrameGeneratorInterface> frame_generator(
+ test::CreateSquareFrameGenerator(kWidth, kHeight, absl::nullopt,
+ absl::nullopt));
GetVideoSendStream()->SetSource(
&frame_forwarder, DegradationPreference::MAINTAIN_FRAMERATE);
- test::FrameGenerator::VideoFrameData frame_data =
+ test::FrameGeneratorInterface::VideoFrameData frame_data =
frame_generator->NextFrame();
VideoFrame frame = VideoFrame::Builder()
.set_video_frame_buffer(frame_data.buffer)
@@ -163,7 +166,7 @@ TEST_F(CallOperationEndToEndTest, TransmitsFirstFrame) {
rtc::Event event_;
} renderer;
- std::unique_ptr<test::FrameGenerator> frame_generator;
+ std::unique_ptr<test::FrameGeneratorInterface> frame_generator;
test::FrameForwarder frame_forwarder;
std::unique_ptr<test::DirectTransport> sender_transport;
@@ -197,11 +200,11 @@ TEST_F(CallOperationEndToEndTest, TransmitsFirstFrame) {
CreateVideoStreams();
Start();
- frame_generator = test::FrameGenerator::CreateSquareGenerator(
+ frame_generator = test::CreateSquareFrameGenerator(
kDefaultWidth, kDefaultHeight, absl::nullopt, absl::nullopt);
GetVideoSendStream()->SetSource(
&frame_forwarder, DegradationPreference::MAINTAIN_FRAMERATE);
- test::FrameGenerator::VideoFrameData frame_data =
+ test::FrameGeneratorInterface::VideoFrameData frame_data =
frame_generator->NextFrame();
VideoFrame frame = VideoFrame::Builder()
.set_video_frame_buffer(frame_data.buffer)
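The call_operation hunks migrate from the concrete test::FrameGenerator to the FrameGeneratorInterface returned by the factory functions in api/test/create_frame_generator.h. The new usage, condensed into a sketch:

// The factory returns the interface type; NextFrame() yields a VideoFrameData
// whose buffer feeds a VideoFrame::Builder.
std::unique_ptr<test::FrameGeneratorInterface> frame_generator =
    test::CreateSquareFrameGenerator(/*width=*/320, /*height=*/180,
                                     absl::nullopt, absl::nullopt);
test::FrameGeneratorInterface::VideoFrameData frame_data =
    frame_generator->NextFrame();
VideoFrame frame = VideoFrame::Builder()
                       .set_video_frame_buffer(frame_data.buffer)
                       .build();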
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/fec_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/fec_tests.cc
index b20ec29b736..c55c4dbee16 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/fec_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/fec_tests.cc
@@ -18,6 +18,7 @@
#include "media/engine/internal_decoder_factory.h"
#include "modules/include/module_common_types_public.h"
#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "test/call_test.h"
#include "test/field_trial.h"
@@ -59,19 +60,19 @@ TEST_F(FecEndToEndTest, ReceivesUlpfec) {
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock(&crit_);
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
- EXPECT_TRUE(header.payloadType == kVideoSendPayloadType ||
- header.payloadType == kRedPayloadType)
+ EXPECT_TRUE(rtp_packet.PayloadType() == kVideoSendPayloadType ||
+ rtp_packet.PayloadType() == kRedPayloadType)
<< "Unknown payload type received.";
- EXPECT_EQ(kVideoSendSsrcs[0], header.ssrc) << "Unknown SSRC received.";
+ EXPECT_EQ(kVideoSendSsrcs[0], rtp_packet.Ssrc())
+ << "Unknown SSRC received.";
// Parse RED header.
int encapsulated_payload_type = -1;
- if (header.payloadType == kRedPayloadType) {
- encapsulated_payload_type =
- static_cast<int>(packet[header.headerLength]);
+ if (rtp_packet.PayloadType() == kRedPayloadType) {
+ encapsulated_payload_type = rtp_packet.payload()[0];
EXPECT_TRUE(encapsulated_payload_type == kVideoSendPayloadType ||
encapsulated_payload_type == kUlpfecPayloadType)
@@ -87,8 +88,8 @@ TEST_F(FecEndToEndTest, ReceivesUlpfec) {
// corresponding timestamps that were dropped.
if (num_packets_sent_++ > 100 && random_.Rand(1, 100) <= 5) {
if (encapsulated_payload_type == kVideoSendPayloadType) {
- dropped_sequence_numbers_.insert(header.sequenceNumber);
- dropped_timestamps_.insert(header.timestamp);
+ dropped_sequence_numbers_.insert(rtp_packet.SequenceNumber());
+ dropped_timestamps_.insert(rtp_packet.Timestamp());
}
return DROP_PACKET;
}
@@ -169,35 +170,40 @@ class FlexfecRenderObserver : public test::EndToEndTest,
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock(&crit_);
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
-
- EXPECT_TRUE(header.payloadType ==
- test::CallTest::kFakeVideoSendPayloadType ||
- header.payloadType == test::CallTest::kFlexfecPayloadType ||
- (enable_nack_ &&
- header.payloadType == test::CallTest::kSendRtxPayloadType))
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+
+ EXPECT_TRUE(
+ rtp_packet.PayloadType() == test::CallTest::kFakeVideoSendPayloadType ||
+ rtp_packet.PayloadType() == test::CallTest::kFlexfecPayloadType ||
+ (enable_nack_ &&
+ rtp_packet.PayloadType() == test::CallTest::kSendRtxPayloadType))
<< "Unknown payload type received.";
EXPECT_TRUE(
- header.ssrc == test::CallTest::kVideoSendSsrcs[0] ||
- header.ssrc == test::CallTest::kFlexfecSendSsrc ||
- (enable_nack_ && header.ssrc == test::CallTest::kSendRtxSsrcs[0]))
+ rtp_packet.Ssrc() == test::CallTest::kVideoSendSsrcs[0] ||
+ rtp_packet.Ssrc() == test::CallTest::kFlexfecSendSsrc ||
+ (enable_nack_ && rtp_packet.Ssrc() == test::CallTest::kSendRtxSsrcs[0]))
<< "Unknown SSRC received.";
// To reduce test flakiness, always let FlexFEC packets through.
- if (header.payloadType == test::CallTest::kFlexfecPayloadType) {
- EXPECT_EQ(test::CallTest::kFlexfecSendSsrc, header.ssrc);
+ if (rtp_packet.PayloadType() == test::CallTest::kFlexfecPayloadType) {
+ EXPECT_EQ(test::CallTest::kFlexfecSendSsrc, rtp_packet.Ssrc());
return SEND_PACKET;
}
// To reduce test flakiness, always let RTX packets through.
- if (header.payloadType == test::CallTest::kSendRtxPayloadType) {
- EXPECT_EQ(test::CallTest::kSendRtxSsrcs[0], header.ssrc);
+ if (rtp_packet.PayloadType() == test::CallTest::kSendRtxPayloadType) {
+ EXPECT_EQ(test::CallTest::kSendRtxSsrcs[0], rtp_packet.Ssrc());
+
+ if (rtp_packet.payload_size() == 0) {
+ // Pure padding packet.
+ return SEND_PACKET;
+ }
// Parse RTX header.
uint16_t original_sequence_number =
- ByteReader<uint16_t>::ReadBigEndian(&packet[header.headerLength]);
+ ByteReader<uint16_t>::ReadBigEndian(rtp_packet.payload().data());
// From the perspective of FEC, a retransmitted packet is no longer
// dropped, so remove it from list of dropped packets.
@@ -205,7 +211,7 @@ class FlexfecRenderObserver : public test::EndToEndTest,
dropped_sequence_numbers_.find(original_sequence_number);
if (seq_num_it != dropped_sequence_numbers_.end()) {
dropped_sequence_numbers_.erase(seq_num_it);
- auto ts_it = dropped_timestamps_.find(header.timestamp);
+ auto ts_it = dropped_timestamps_.find(rtp_packet.Timestamp());
EXPECT_NE(ts_it, dropped_timestamps_.end());
dropped_timestamps_.erase(ts_it);
}
@@ -216,11 +222,12 @@ class FlexfecRenderObserver : public test::EndToEndTest,
// Simulate 5% video packet loss after rampup period. Record the
// corresponding timestamps that were dropped.
if (num_packets_sent_++ > 100 && random_.Rand(1, 100) <= 5) {
- EXPECT_EQ(test::CallTest::kFakeVideoSendPayloadType, header.payloadType);
- EXPECT_EQ(test::CallTest::kVideoSendSsrcs[0], header.ssrc);
+ EXPECT_EQ(test::CallTest::kFakeVideoSendPayloadType,
+ rtp_packet.PayloadType());
+ EXPECT_EQ(test::CallTest::kVideoSendSsrcs[0], rtp_packet.Ssrc());
- dropped_sequence_numbers_.insert(header.sequenceNumber);
- dropped_timestamps_.insert(header.timestamp);
+ dropped_sequence_numbers_.insert(rtp_packet.SequenceNumber());
+ dropped_timestamps_.insert(rtp_packet.Timestamp());
return DROP_PACKET;
}
@@ -354,26 +361,25 @@ TEST_F(FecEndToEndTest, ReceivedUlpfecPacketsNotNacked) {
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock_(&crit_);
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
int encapsulated_payload_type = -1;
- if (header.payloadType == kRedPayloadType) {
- encapsulated_payload_type =
- static_cast<int>(packet[header.headerLength]);
+ if (rtp_packet.PayloadType() == kRedPayloadType) {
+ encapsulated_payload_type = rtp_packet.payload()[0];
if (encapsulated_payload_type != kFakeVideoSendPayloadType)
EXPECT_EQ(kUlpfecPayloadType, encapsulated_payload_type);
} else {
- EXPECT_EQ(kFakeVideoSendPayloadType, header.payloadType);
+ EXPECT_EQ(kFakeVideoSendPayloadType, rtp_packet.PayloadType());
}
if (has_last_sequence_number_ &&
- !IsNewerSequenceNumber(header.sequenceNumber,
+ !IsNewerSequenceNumber(rtp_packet.SequenceNumber(),
last_sequence_number_)) {
// Drop retransmitted packets.
return DROP_PACKET;
}
- last_sequence_number_ = header.sequenceNumber;
+ last_sequence_number_ = rtp_packet.SequenceNumber();
has_last_sequence_number_ = true;
bool ulpfec_packet = encapsulated_payload_type == kUlpfecPayloadType;
@@ -384,14 +390,14 @@ TEST_F(FecEndToEndTest, ReceivedUlpfecPacketsNotNacked) {
case kDropEveryOtherPacketUntilUlpfec:
if (ulpfec_packet) {
state_ = kDropAllMediaPacketsUntilUlpfec;
- } else if (header.sequenceNumber % 2 == 0) {
+ } else if (rtp_packet.SequenceNumber() % 2 == 0) {
return DROP_PACKET;
}
break;
case kDropAllMediaPacketsUntilUlpfec:
if (!ulpfec_packet)
return DROP_PACKET;
- ulpfec_sequence_number_ = header.sequenceNumber;
+ ulpfec_sequence_number_ = rtp_packet.SequenceNumber();
state_ = kDropOneMediaPacket;
break;
case kDropOneMediaPacket:
@@ -410,7 +416,7 @@ TEST_F(FecEndToEndTest, ReceivedUlpfecPacketsNotNacked) {
break;
case kVerifyUlpfecPacketNotInNackList:
// Continue to drop packets. Make sure no frame can be decoded.
- if (ulpfec_packet || header.sequenceNumber % 2 == 0)
+ if (ulpfec_packet || rtp_packet.SequenceNumber() % 2 == 0)
return DROP_PACKET;
break;
}
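The fec_tests hunks are one instance of a migration repeated through the rest of this patch: raw RTPHeader parsing is replaced by RtpPacket (modules/rtp_rtcp/source/rtp_packet.h), whose accessors replace field reads and whose payload() view makes RED/RTX payload inspection explicit. The recurring shape, as a sketch:

RtpPacket rtp_packet;
if (rtp_packet.Parse(packet, length)) {
  uint8_t payload_type = rtp_packet.PayloadType();  // was header.payloadType
  uint32_t ssrc = rtp_packet.Ssrc();                // was header.ssrc
  uint16_t seq = rtp_packet.SequenceNumber();       // was header.sequenceNumber
  uint32_t timestamp = rtp_packet.Timestamp();      // was header.timestamp
  // Replaces: length == header.headerLength + header.paddingLength.
  bool padding_only = rtp_packet.payload_size() == 0;
  if (!padding_only) {
    // First payload byte, e.g. the RED-encapsulated payload type.
    uint8_t first_byte = rtp_packet.payload()[0];
  }
}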
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/frame_encryption_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/frame_encryption_tests.cc
index 85ad7dd2cb3..44ac3ecfdfb 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/frame_encryption_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/frame_encryption_tests.cc
@@ -11,79 +11,81 @@
#include "api/test/fake_frame_decryptor.h"
#include "api/test/fake_frame_encryptor.h"
#include "media/engine/internal_decoder_factory.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "test/call_test.h"
-#include "test/field_trial.h"
#include "test/gtest.h"
namespace webrtc {
namespace {
+
+using FrameEncryptionEndToEndTest = test::CallTest;
+
enum : int { // The first valid value is 1.
kGenericDescriptorExtensionId = 1,
};
-} // namespace
-class FrameEncryptionEndToEndTest : public test::CallTest {
+class DecryptedFrameObserver : public test::EndToEndTest,
+ public rtc::VideoSinkInterface<VideoFrame> {
public:
- FrameEncryptionEndToEndTest() {
- RegisterRtpExtension(RtpExtension(RtpExtension::kGenericFrameDescriptorUri,
- kGenericDescriptorExtensionId));
- }
+ DecryptedFrameObserver()
+ : EndToEndTest(test::CallTest::kDefaultTimeoutMs),
+ encoder_factory_([] { return VP8Encoder::Create(); }) {}
private:
- // GenericDescriptor is required for FrameEncryption to work.
- test::ScopedFieldTrials field_trials_{"WebRTC-GenericDescriptor/Enabled/"};
-};
-
-// Validates that payloads cannot be sent without a frame encryptor and frame
-// decryptor attached.
-TEST_F(FrameEncryptionEndToEndTest, RequireFrameEncryptionEnforced) {
- class DecryptedFrameObserver : public test::EndToEndTest,
- public rtc::VideoSinkInterface<VideoFrame> {
- public:
- DecryptedFrameObserver()
- : EndToEndTest(kDefaultTimeoutMs),
- encoder_factory_([]() { return VP8Encoder::Create(); }) {}
-
- private:
- void ModifyVideoConfigs(
- VideoSendStream::Config* send_config,
- std::vector<VideoReceiveStream::Config>* receive_configs,
- VideoEncoderConfig* encoder_config) override {
- // Use VP8 instead of FAKE.
- send_config->encoder_settings.encoder_factory = &encoder_factory_;
- send_config->rtp.payload_name = "VP8";
- send_config->rtp.payload_type = kVideoSendPayloadType;
- send_config->frame_encryptor = new FakeFrameEncryptor();
- send_config->crypto_options.sframe.require_frame_encryption = true;
- encoder_config->codec_type = kVideoCodecVP8;
- VideoReceiveStream::Decoder decoder =
- test::CreateMatchingDecoder(*send_config);
- decoder.decoder_factory = &decoder_factory_;
- for (auto& recv_config : *receive_configs) {
- recv_config.decoders.clear();
- recv_config.decoders.push_back(decoder);
- recv_config.renderer = this;
- recv_config.frame_decryptor = new FakeFrameDecryptor();
- recv_config.crypto_options.sframe.require_frame_encryption = true;
- }
+ void ModifyVideoConfigs(
+ VideoSendStream::Config* send_config,
+ std::vector<VideoReceiveStream::Config>* receive_configs,
+ VideoEncoderConfig* encoder_config) override {
+ // Use VP8 instead of FAKE.
+ send_config->encoder_settings.encoder_factory = &encoder_factory_;
+ send_config->rtp.payload_name = "VP8";
+ send_config->rtp.payload_type = test::CallTest::kVideoSendPayloadType;
+ send_config->frame_encryptor = new FakeFrameEncryptor();
+ send_config->crypto_options.sframe.require_frame_encryption = true;
+ encoder_config->codec_type = kVideoCodecVP8;
+ VideoReceiveStream::Decoder decoder =
+ test::CreateMatchingDecoder(*send_config);
+ decoder.decoder_factory = &decoder_factory_;
+ for (auto& recv_config : *receive_configs) {
+ recv_config.decoders.clear();
+ recv_config.decoders.push_back(decoder);
+ recv_config.renderer = this;
+ recv_config.frame_decryptor = new FakeFrameDecryptor();
+ recv_config.crypto_options.sframe.require_frame_encryption = true;
}
+ }
- // Validate that rotation is preserved.
- void OnFrame(const VideoFrame& video_frame) override {
- observation_complete_.Set();
- }
+ void OnFrame(const VideoFrame& video_frame) override {
+ observation_complete_.Set();
+ }
- void PerformTest() override {
- EXPECT_TRUE(Wait())
- << "Timed out waiting for decrypted frames to be rendered.";
- }
+ void PerformTest() override {
+ EXPECT_TRUE(Wait())
+ << "Timed out waiting for decrypted frames to be rendered.";
+ }
+
+ std::unique_ptr<VideoEncoder> encoder_;
+ test::FunctionVideoEncoderFactory encoder_factory_;
+ InternalDecoderFactory decoder_factory_;
+};
- std::unique_ptr<VideoEncoder> encoder_;
- test::FunctionVideoEncoderFactory encoder_factory_;
- InternalDecoderFactory decoder_factory_;
- } test;
+// Validates that payloads cannot be sent without a frame encryptor and frame
+// decryptor attached.
+TEST_F(FrameEncryptionEndToEndTest,
+ WithGenericFrameDescriptorRequireFrameEncryptionEnforced) {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kGenericFrameDescriptorUri00,
+ kGenericDescriptorExtensionId));
+ DecryptedFrameObserver test;
+ RunBaseTest(&test);
+}
+TEST_F(FrameEncryptionEndToEndTest,
+ WithDependencyDescriptorRequireFrameEncryptionEnforced) {
+ RegisterRtpExtension(RtpExtension(RtpExtension::kDependencyDescriptorUri,
+ kGenericDescriptorExtensionId));
+ DecryptedFrameObserver test;
RunBaseTest(&test);
}
+} // namespace
} // namespace webrtc
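The frame_encryption restructuring drops the WebRTC-GenericDescriptor field trial and instead registers a frame-description header extension per test, so the same DecryptedFrameObserver now covers both descriptors:

// Pattern shared by the two new tests; only the registered extension differs.
RegisterRtpExtension(RtpExtension(RtpExtension::kDependencyDescriptorUri,
                                  kGenericDescriptorExtensionId));
DecryptedFrameObserver test;
RunBaseTest(&test);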
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/histogram_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/histogram_tests.cc
index ef435b45120..dd6de2543d5 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/histogram_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/histogram_tests.cc
@@ -150,113 +150,146 @@ void HistogramTest::VerifyHistogramStats(bool use_rtx,
const std::string video_suffix = screenshare ? ".S0" : "";
// Verify that stats have been updated once.
- EXPECT_EQ(2, metrics::NumSamples("WebRTC.Call.LifetimeInSeconds"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Call.TimeReceivingVideoRtpPacketsInSeconds"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.VideoBitrateReceivedInKbps"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.RtcpBitrateReceivedInBps"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.BitrateReceivedInKbps"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.EstimatedSendBitrateInKbps"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.PacerBitrateInKbps"));
-
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SendStreamLifetimeInSeconds"));
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
-
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
- EXPECT_EQ(1,
- metrics::NumSamples(video_prefix + "NackPacketsReceivedPerMinute"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
- EXPECT_EQ(1,
- metrics::NumSamples(video_prefix + "FirPacketsReceivedPerMinute"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
- EXPECT_EQ(1,
- metrics::NumSamples(video_prefix + "PliPacketsReceivedPerMinute"));
-
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "KeyFramesSentInPermille"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
-
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SentPacketsLostInPercent"));
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
-
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InputWidthInPixels"));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InputHeightInPixels"));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SentWidthInPixels"));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SentHeightInPixels"));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "ReceivedWidthInPixels"));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "ReceivedHeightInPixels"));
-
- EXPECT_EQ(1, metrics::NumEvents(video_prefix + "InputWidthInPixels",
- kDefaultWidth));
- EXPECT_EQ(1, metrics::NumEvents(video_prefix + "InputHeightInPixels",
- kDefaultHeight));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(2, metrics::NumSamples("WebRTC.Call.LifetimeInSeconds"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Call.TimeReceivingVideoRtpPacketsInSeconds"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Call.VideoBitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Call.RtcpBitrateReceivedInBps"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Call.BitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Call.EstimatedSendBitrateInKbps"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Call.PacerBitrateInKbps"));
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.SendStreamLifetimeInSeconds"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "NackPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "FirPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "PliPacketsReceivedPerMinute"));
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "KeyFramesSentInPermille"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "SentPacketsLostInPercent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "InputWidthInPixels"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "InputHeightInPixels"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "SentWidthInPixels"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "SentHeightInPixels"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "ReceivedWidthInPixels"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "ReceivedHeightInPixels"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(video_prefix + "InputWidthInPixels",
+ kDefaultWidth));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(video_prefix + "InputHeightInPixels",
+ kDefaultHeight));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents(video_prefix + "SentWidthInPixels", kDefaultWidth));
- EXPECT_EQ(1, metrics::NumEvents(video_prefix + "SentHeightInPixels",
- kDefaultHeight));
- EXPECT_EQ(1, metrics::NumEvents(video_prefix + "ReceivedWidthInPixels",
- kDefaultWidth));
- EXPECT_EQ(1, metrics::NumEvents(video_prefix + "ReceivedHeightInPixels",
- kDefaultHeight));
-
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InputFramesPerSecond"));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SentFramesPerSecond"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.DecodedFramesPerSecond"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
-
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
-
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "EndToEndDelayInMs" +
- video_suffix));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "EndToEndDelayMaxInMs" +
- video_suffix));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InterframeDelayInMs" +
- video_suffix));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InterframeDelayMaxInMs" +
- video_suffix));
-
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
-
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "EncodeTimeInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.DecodeTimeInMs"));
-
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "NumberOfPauseEvents"));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "PausedTimeInPercent"));
-
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "BitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.BitrateReceivedInKbps"));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "MediaBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.MediaBitrateReceivedInKbps"));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "PaddingBitrateSentInKbps"));
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.PaddingBitrateReceivedInKbps"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(video_prefix + "SentHeightInPixels",
+ kDefaultHeight));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(video_prefix + "ReceivedWidthInPixels",
+ kDefaultWidth));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents(video_prefix + "ReceivedHeightInPixels",
+ kDefaultHeight));
+
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "InputFramesPerSecond"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "SentFramesPerSecond"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DecodedFramesPerSecond"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "EndToEndDelayInMs" +
+ video_suffix));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "EndToEndDelayMaxInMs" +
+ video_suffix));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "InterframeDelayInMs" +
+ video_suffix));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "InterframeDelayMaxInMs" +
+ video_suffix));
+
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "EncodeTimeInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.DecodeTimeInMs"));
+
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "PausedTimeInPercent"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "BitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.BitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "MediaBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.MediaBitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(video_prefix + "PaddingBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.PaddingBitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples(video_prefix + "RetransmittedBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.RetransmittedBitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.RetransmittedBitrateReceivedInKbps"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SendDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SendSideDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SendSideDelayMaxInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.SendDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "SendSideDelayInMs"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(video_prefix + "SendSideDelayMaxInMs"));
int num_rtx_samples = use_rtx ? 1 : 0;
- EXPECT_EQ(num_rtx_samples,
- metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
- EXPECT_EQ(num_rtx_samples,
- metrics::NumSamples("WebRTC.Video.RtxBitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(num_rtx_samples,
+ metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ num_rtx_samples,
+ metrics::NumSamples("WebRTC.Video.RtxBitrateReceivedInKbps"));
int num_red_samples = use_fec ? 1 : 0;
- EXPECT_EQ(num_red_samples,
- metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
- EXPECT_EQ(num_red_samples,
- metrics::NumSamples("WebRTC.Video.FecBitrateReceivedInKbps"));
- EXPECT_EQ(num_red_samples,
- metrics::NumSamples("WebRTC.Video.ReceivedFecPacketsInPercent"));
+ EXPECT_METRIC_EQ(num_red_samples,
+ metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ num_red_samples,
+ metrics::NumSamples("WebRTC.Video.FecBitrateReceivedInKbps"));
+ EXPECT_METRIC_EQ(
+ num_red_samples,
+ metrics::NumSamples("WebRTC.Video.ReceivedFecPacketsInPercent"));
}
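One note on the assertion shapes above: metric names are assembled from a content-type prefix plus, for screenshare, a per-simulcast-stream suffix, so a single verification routine covers both modes. A sketch, with the prefix value assumed from the surrounding test rather than shown in the hunk:

const std::string video_prefix =
    screenshare ? "WebRTC.Video.Screenshare." : "WebRTC.Video.";  // assumed
const std::string video_suffix = screenshare ? ".S0" : "";
EXPECT_METRIC_EQ(1, metrics::NumSamples(video_prefix + "EndToEndDelayInMs" +
                                        video_suffix));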
TEST_F(HistogramTest, VerifyStatsWithRtx) {
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/multi_codec_receive_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/multi_codec_receive_tests.cc
index 4aaf8439968..354ee44072d 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/multi_codec_receive_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/multi_codec_receive_tests.cc
@@ -15,6 +15,7 @@
#include "call/fake_network_pipe.h"
#include "call/simulated_network.h"
#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "modules/video_coding/codecs/h264/include/h264.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "modules/video_coding/codecs/vp9/include/vp9.h"
@@ -75,29 +76,28 @@ class FrameObserver : public test::RtpRtcpObserver,
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock(&crit_);
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
- EXPECT_EQ(header.ssrc, test::CallTest::kVideoSendSsrcs[0]);
- EXPECT_GE(length, header.headerLength + header.paddingLength);
- if ((length - header.headerLength) == header.paddingLength)
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ EXPECT_EQ(rtp_packet.Ssrc(), test::CallTest::kVideoSendSsrcs[0]);
+ if (rtp_packet.payload_size() == 0)
return SEND_PACKET; // Skip padding, may be sent after OnFrame is called.
if (expected_payload_type_ &&
- header.payloadType != expected_payload_type_.value()) {
+ rtp_packet.PayloadType() != expected_payload_type_.value()) {
return DROP_PACKET; // All frames sent.
}
- if (!last_timestamp_ || header.timestamp != *last_timestamp_) {
+ if (!last_timestamp_ || rtp_packet.Timestamp() != *last_timestamp_) {
// New frame.
// Sent enough frames?
if (num_sent_frames_ >= kFramesToObserve)
return DROP_PACKET;
++num_sent_frames_;
- sent_timestamps_.push_back(header.timestamp);
+ sent_timestamps_.push_back(rtp_packet.Timestamp());
}
- last_timestamp_ = header.timestamp;
+ last_timestamp_ = rtp_packet.Timestamp();
return SEND_PACKET;
}
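Besides the RtpPacket migration, this observer keeps its frame-boundary logic: all packets of one frame carry the same RTP timestamp, so a timestamp change marks a new frame. Condensed from the hunk:

if (rtp_packet.payload_size() == 0)
  return SEND_PACKET;  // Padding carries no frame data; let it through.
if (!last_timestamp_ || rtp_packet.Timestamp() != *last_timestamp_) {
  // New frame: count it and record its timestamp for the receive side.
  ++num_sent_frames_;
  sent_timestamps_.push_back(rtp_packet.Timestamp());
}
last_timestamp_ = rtp_packet.Timestamp();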
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/multi_stream_tester.cc b/chromium/third_party/webrtc/video/end_to_end_tests/multi_stream_tester.cc
index c8e63e1cbe3..c3d41c37fa1 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/multi_stream_tester.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/multi_stream_tester.cc
@@ -18,6 +18,7 @@
#include "api/rtc_event_log/rtc_event_log.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "api/task_queue/task_queue_base.h"
+#include "api/test/create_frame_generator.h"
#include "api/test/simulated_network.h"
#include "api/test/video/function_video_encoder_factory.h"
#include "api/video/builtin_video_bitrate_allocator_factory.h"
@@ -114,8 +115,8 @@ void MultiStreamTester::RunTest() {
auto* frame_generator = new test::FrameGeneratorCapturer(
Clock::GetRealTimeClock(),
- test::FrameGenerator::CreateSquareGenerator(
- width, height, absl::nullopt, absl::nullopt),
+ test::CreateSquareFrameGenerator(width, height, absl::nullopt,
+ absl::nullopt),
30, *task_queue_factory);
frame_generators[i] = frame_generator;
send_streams[i]->SetSource(frame_generator,
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/network_state_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/network_state_tests.cc
index eda6dae2755..a0977ac7732 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/network_state_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/network_state_tests.cc
@@ -14,6 +14,7 @@
#include "api/video_codecs/video_encoder.h"
#include "call/fake_network_pipe.h"
#include "call/simulated_network.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "rtc_base/task_queue_for_test.h"
#include "system_wrappers/include/sleep.h"
#include "test/call_test.h"
@@ -177,9 +178,9 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) {
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock(&test_crit_);
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
- if (length == header.headerLength + header.paddingLength)
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ if (rtp_packet.payload_size() == 0)
++sender_padding_;
++sender_rtp_;
packet_event_.Set();
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/retransmission_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/retransmission_tests.cc
index 7aae5775fcb..407aa5f2dc2 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/retransmission_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/retransmission_tests.cc
@@ -16,6 +16,7 @@
#include "api/test/video/function_video_encoder_factory.h"
#include "call/fake_network_pipe.h"
#include "call/simulated_network.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "rtc_base/task_queue_for_test.h"
#include "system_wrappers/include/sleep.h"
@@ -58,13 +59,13 @@ TEST_F(RetransmissionEndToEndTest, ReceivesAndRetransmitsNack) {
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock(&crit_);
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
// Never drop retransmitted packets.
- if (dropped_packets_.find(header.sequenceNumber) !=
+ if (dropped_packets_.find(rtp_packet.SequenceNumber()) !=
dropped_packets_.end()) {
- retransmitted_packets_.insert(header.sequenceNumber);
+ retransmitted_packets_.insert(rtp_packet.SequenceNumber());
return SEND_PACKET;
}
@@ -84,9 +85,9 @@ TEST_F(RetransmissionEndToEndTest, ReceivesAndRetransmitsNack) {
packets_left_to_drop_ = kLossBurstSize;
// Never drop padding packets as those won't be retransmitted.
- if (packets_left_to_drop_ > 0 && header.paddingLength == 0) {
+ if (packets_left_to_drop_ > 0 && rtp_packet.padding_size() == 0) {
--packets_left_to_drop_;
- dropped_packets_.insert(header.sequenceNumber);
+ dropped_packets_.insert(rtp_packet.SequenceNumber());
return DROP_PACKET;
}
@@ -152,14 +153,15 @@ TEST_F(RetransmissionEndToEndTest, ReceivesNackAndRetransmitsAudio) {
}
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
if (!sequence_number_to_retransmit_) {
- sequence_number_to_retransmit_ = header.sequenceNumber;
+ sequence_number_to_retransmit_ = rtp_packet.SequenceNumber();
// Don't ask for retransmission straight away, may be deduped in pacer.
- } else if (header.sequenceNumber == *sequence_number_to_retransmit_) {
+ } else if (rtp_packet.SequenceNumber() ==
+ *sequence_number_to_retransmit_) {
observation_complete_.Set();
} else {
// Send a NACK as often as necessary until retransmission is received.
@@ -258,15 +260,15 @@ void RetransmissionEndToEndTest::ReceivesPliAndRecovers(int rtp_history_ms) {
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock(&crit_);
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
// Drop all retransmitted packets to force a PLI.
- if (header.timestamp <= highest_dropped_timestamp_)
+ if (rtp_packet.Timestamp() <= highest_dropped_timestamp_)
return DROP_PACKET;
if (frames_to_drop_ > 0) {
- highest_dropped_timestamp_ = header.timestamp;
+ highest_dropped_timestamp_ = rtp_packet.Timestamp();
--frames_to_drop_;
return DROP_PACKET;
}
@@ -350,29 +352,29 @@ void RetransmissionEndToEndTest::DecodesRetransmittedFrame(bool enable_rtx,
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock(&crit_);
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
// Ignore padding-only packets over RTX.
- if (header.payloadType != payload_type_) {
- EXPECT_EQ(retransmission_ssrc_, header.ssrc);
- if (length == header.headerLength + header.paddingLength)
+ if (rtp_packet.PayloadType() != payload_type_) {
+ EXPECT_EQ(retransmission_ssrc_, rtp_packet.Ssrc());
+ if (rtp_packet.payload_size() == 0)
return SEND_PACKET;
}
- if (header.timestamp == retransmitted_timestamp_) {
- EXPECT_EQ(retransmission_ssrc_, header.ssrc);
- EXPECT_EQ(retransmission_payload_type_, header.payloadType);
+ if (rtp_packet.Timestamp() == retransmitted_timestamp_) {
+ EXPECT_EQ(retransmission_ssrc_, rtp_packet.Ssrc());
+ EXPECT_EQ(retransmission_payload_type_, rtp_packet.PayloadType());
return SEND_PACKET;
}
// Found the final packet of the frame to inflict loss to, drop this and
// expect a retransmission.
- if (header.payloadType == payload_type_ && header.markerBit &&
+ if (rtp_packet.PayloadType() == payload_type_ && rtp_packet.Marker() &&
++marker_bits_observed_ == kDroppedFrameNumber) {
// This should be the only dropped packet.
EXPECT_EQ(0u, retransmitted_timestamp_);
- retransmitted_timestamp_ = header.timestamp;
+ retransmitted_timestamp_ = rtp_packet.Timestamp();
if (absl::c_linear_search(rendered_timestamps_,
retransmitted_timestamp_)) {
// Frame was rendered before last packet was scheduled for sending.
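The retransmission hunks lean on three RtpPacket accessors worth distinguishing: payload_size() == 0 identifies padding-only packets (never dropped, since padding is not retransmitted), padding_size() reports the pad length itself, and Marker() exposes the RTP marker bit, which for video flags the final packet of a frame. A simplified sketch of the drop logic built on them:

if (rtp_packet.payload_size() == 0)
  return SEND_PACKET;  // Padding-only; it won't be retransmitted.
if (rtp_packet.Marker()) {
  // Last packet of the frame: dropping it forces a NACK and a
  // retransmission, which is what the test wants to observe.
  return DROP_PACKET;
}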
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/rtp_rtcp_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/rtp_rtcp_tests.cc
index 97777a164ae..71783febfe8 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/rtp_rtcp_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/rtp_rtcp_tests.cc
@@ -14,6 +14,7 @@
#include "call/fake_network_pipe.h"
#include "call/simulated_network.h"
#include "modules/include/module_common_types_public.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "rtc_base/task_queue_for_test.h"
#include "test/call_test.h"
@@ -204,14 +205,13 @@ void RtpRtcpEndToEndTest::TestRtpStatePreservation(
}
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
- const uint32_t ssrc = header.ssrc;
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ const uint32_t ssrc = rtp_packet.Ssrc();
const int64_t sequence_number =
- seq_numbers_unwrapper_.Unwrap(header.sequenceNumber);
- const uint32_t timestamp = header.timestamp;
- const bool only_padding =
- header.headerLength + header.paddingLength == length;
+ seq_numbers_unwrapper_.Unwrap(rtp_packet.SequenceNumber());
+ const uint32_t timestamp = rtp_packet.Timestamp();
+ const bool only_padding = rtp_packet.payload_size() == 0;
EXPECT_TRUE(ssrc_is_rtx_.find(ssrc) != ssrc_is_rtx_.end())
<< "Received SSRC that wasn't configured: " << ssrc;
@@ -422,11 +422,11 @@ TEST_F(RtpRtcpEndToEndTest, DISABLED_TestFlexfecRtpStatePreservation) {
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock(&crit_);
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
- const uint16_t sequence_number = header.sequenceNumber;
- const uint32_t timestamp = header.timestamp;
- const uint32_t ssrc = header.ssrc;
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ const uint16_t sequence_number = rtp_packet.SequenceNumber();
+ const uint32_t timestamp = rtp_packet.Timestamp();
+ const uint32_t ssrc = rtp_packet.Ssrc();
if (ssrc == kVideoSendSsrcs[0] || ssrc == kSendRtxSsrcs[0]) {
return SEND_PACKET;
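The state-preservation test unwraps 16-bit RTP sequence numbers into a monotonic 64-bit sequence before checking continuity across stream restarts. A sketch of the unwrapping behavior, assuming the SequenceNumberUnwrapper helper from modules/include/module_common_types_public.h, which the file includes:

SequenceNumberUnwrapper unwrapper;
int64_t a = unwrapper.Unwrap(65535);  // 65535
int64_t b = unwrapper.Unwrap(0);      // 65536: the wrap-around is absorbed,
                                      // so ordering checks stay simple.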
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/ssrc_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/ssrc_tests.cc
index 1251b4578be..3c7db803c0f 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/ssrc_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/ssrc_tests.cc
@@ -13,6 +13,7 @@
#include "api/test/simulated_network.h"
#include "call/fake_network_pipe.h"
#include "call/simulated_network.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "rtc_base/task_queue_for_test.h"
#include "test/call_test.h"
#include "test/gtest.h"
@@ -62,13 +63,12 @@ TEST_F(SsrcEndToEndTest, UnknownRtpPacketGivesUnknownSsrcReturnCode) {
if (RtpHeaderParser::IsRtcp(packet.cdata(), packet.size())) {
return receiver_->DeliverPacket(media_type, std::move(packet),
packet_time_us);
- } else {
- DeliveryStatus delivery_status = receiver_->DeliverPacket(
- media_type, std::move(packet), packet_time_us);
- EXPECT_EQ(DELIVERY_UNKNOWN_SSRC, delivery_status);
- delivered_packet_.Set();
- return delivery_status;
}
+ DeliveryStatus delivery_status = receiver_->DeliverPacket(
+ media_type, std::move(packet), packet_time_us);
+ EXPECT_EQ(DELIVERY_UNKNOWN_SSRC, delivery_status);
+ delivered_packet_.Set();
+ return delivery_status;
}
PacketReceiver* receiver_;
@@ -145,17 +145,17 @@ void SsrcEndToEndTest::TestSendsSetSsrcs(size_t num_ssrcs,
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
- EXPECT_TRUE(valid_ssrcs_[header.ssrc])
- << "Received unknown SSRC: " << header.ssrc;
+ EXPECT_TRUE(valid_ssrcs_[rtp_packet.Ssrc()])
+ << "Received unknown SSRC: " << rtp_packet.Ssrc();
- if (!valid_ssrcs_[header.ssrc])
+ if (!valid_ssrcs_[rtp_packet.Ssrc()])
observation_complete_.Set();
- if (!is_observed_[header.ssrc]) {
- is_observed_[header.ssrc] = true;
+ if (!is_observed_[rtp_packet.Ssrc()]) {
+ is_observed_[rtp_packet.Ssrc()] = true;
--ssrcs_to_observe_;
if (expect_single_ssrc_) {
expect_single_ssrc_ = false;
@@ -269,21 +269,19 @@ TEST_F(SsrcEndToEndTest, DISABLED_RedundantPayloadsTransmittedOnAllSsrcs) {
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
- if (!registered_rtx_ssrc_[header.ssrc])
+ if (!registered_rtx_ssrc_[rtp_packet.Ssrc()])
return SEND_PACKET;
- EXPECT_LE(header.headerLength + header.paddingLength, length);
- const bool packet_is_redundant_payload =
- header.headerLength + header.paddingLength < length;
+ const bool packet_is_redundant_payload = rtp_packet.payload_size() > 0;
if (!packet_is_redundant_payload)
return SEND_PACKET;
- if (!observed_redundant_retransmission_[header.ssrc]) {
- observed_redundant_retransmission_[header.ssrc] = true;
+ if (!observed_redundant_retransmission_[rtp_packet.Ssrc()]) {
+ observed_redundant_retransmission_[rtp_packet.Ssrc()] = true;
if (--ssrcs_to_observe_ == 0)
observation_complete_.Set();
}
@@ -293,39 +291,17 @@ TEST_F(SsrcEndToEndTest, DISABLED_RedundantPayloadsTransmittedOnAllSsrcs) {
size_t GetNumVideoStreams() const override { return kNumSimulcastStreams; }
- // This test use other VideoStream settings than the the default settings
- // implemented in DefaultVideoStreamFactory. Therefore this test implement
- // its own VideoEncoderConfig::VideoStreamFactoryInterface which is created
- // in ModifyVideoConfigs.
- class VideoStreamFactory
- : public VideoEncoderConfig::VideoStreamFactoryInterface {
- public:
- VideoStreamFactory() {}
-
- private:
- std::vector<VideoStream> CreateEncoderStreams(
- int width,
- int height,
- const VideoEncoderConfig& encoder_config) override {
- std::vector<VideoStream> streams =
- test::CreateVideoStreams(width, height, encoder_config);
- // Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
- for (size_t i = 0; i < encoder_config.number_of_streams; ++i) {
- streams[i].min_bitrate_bps = 10000;
- streams[i].target_bitrate_bps = 15000;
- streams[i].max_bitrate_bps = 20000;
- }
- return streams;
- }
- };
-
void ModifyVideoConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
VideoEncoderConfig* encoder_config) override {
// Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
- encoder_config->video_stream_factory =
- new rtc::RefCountedObject<VideoStreamFactory>();
+ encoder_config->max_bitrate_bps = 50000;
+ for (auto& layer : encoder_config->simulcast_layers) {
+ layer.min_bitrate_bps = 10000;
+ layer.target_bitrate_bps = 15000;
+ layer.max_bitrate_bps = 20000;
+ }
send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
for (size_t i = 0; i < kNumSimulcastStreams; ++i)
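The deleted VideoStreamFactory boilerplate here (and again in stats_tests.cc below) is replaced by per-layer overrides on the encoder config's simulcast_layers, which the default stream factory applies:

// Set low simulcast bitrates to avoid waiting for bandwidth ramp-up.
encoder_config->max_bitrate_bps = 50000;
for (VideoStream& layer : encoder_config->simulcast_layers) {
  layer.min_bitrate_bps = 10000;
  layer.target_bitrate_bps = 15000;
  layer.max_bitrate_bps = 20000;
}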
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/stats_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/stats_tests.cc
index 88e0ca76c40..b43f79df0a9 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/stats_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/stats_tests.cc
@@ -11,6 +11,7 @@
#include <memory>
#include "absl/algorithm/container.h"
+#include "absl/types/optional.h"
#include "api/task_queue/task_queue_base.h"
#include "api/test/simulated_network.h"
#include "api/test/video/function_video_encoder_factory.h"
@@ -167,44 +168,42 @@ TEST_F(StatsEndToEndTest, GetStats) {
stats.encoder_implementation_name ==
test::FakeEncoder::kImplementationName;
- for (std::map<uint32_t, VideoSendStream::StreamStats>::const_iterator it =
- stats.substreams.begin();
- it != stats.substreams.end(); ++it) {
- if (expected_send_ssrcs_.find(it->first) == expected_send_ssrcs_.end())
+ for (const auto& kv : stats.substreams) {
+ if (expected_send_ssrcs_.find(kv.first) == expected_send_ssrcs_.end())
continue; // Probably RTX.
- send_stats_filled_[CompoundKey("CapturedFrameRate", it->first)] |=
+ send_stats_filled_[CompoundKey("CapturedFrameRate", kv.first)] |=
stats.input_frame_rate != 0;
- const VideoSendStream::StreamStats& stream_stats = it->second;
+ const VideoSendStream::StreamStats& stream_stats = kv.second;
- send_stats_filled_[CompoundKey("StatisticsUpdated", it->first)] |=
+ send_stats_filled_[CompoundKey("StatisticsUpdated", kv.first)] |=
stream_stats.rtcp_stats.packets_lost != 0 ||
stream_stats.rtcp_stats.extended_highest_sequence_number != 0 ||
stream_stats.rtcp_stats.fraction_lost != 0;
- send_stats_filled_[CompoundKey("DataCountersUpdated", it->first)] |=
+ send_stats_filled_[CompoundKey("DataCountersUpdated", kv.first)] |=
stream_stats.rtp_stats.fec.packets != 0 ||
stream_stats.rtp_stats.transmitted.padding_bytes != 0 ||
stream_stats.rtp_stats.retransmitted.packets != 0 ||
stream_stats.rtp_stats.transmitted.packets != 0;
send_stats_filled_[CompoundKey("BitrateStatisticsObserver.Total",
- it->first)] |=
+ kv.first)] |=
stream_stats.total_bitrate_bps != 0;
send_stats_filled_[CompoundKey("BitrateStatisticsObserver.Retransmit",
- it->first)] |=
+ kv.first)] |=
stream_stats.retransmit_bitrate_bps != 0;
- send_stats_filled_[CompoundKey("FrameCountObserver", it->first)] |=
+ send_stats_filled_[CompoundKey("FrameCountObserver", kv.first)] |=
stream_stats.frame_counts.delta_frames != 0 ||
stream_stats.frame_counts.key_frames != 0;
- send_stats_filled_[CompoundKey("OutgoingRate", it->first)] |=
+ send_stats_filled_[CompoundKey("OutgoingRate", kv.first)] |=
stats.encode_frame_rate != 0;
- send_stats_filled_[CompoundKey("Delay", it->first)] |=
+ send_stats_filled_[CompoundKey("Delay", kv.first)] |=
stream_stats.avg_delay_ms != 0 || stream_stats.max_delay_ms != 0;
// TODO(pbos): Use CompoundKey when the test makes sure that all SSRCs
@@ -246,45 +245,25 @@ TEST_F(StatsEndToEndTest, GetStats) {
Clock::GetRealTimeClock(),
std::make_unique<SimulatedNetwork>(network_config)));
}
+
void ModifySenderBitrateConfig(
BitrateConstraints* bitrate_config) override {
bitrate_config->start_bitrate_bps = kStartBitrateBps;
}
- // This test use other VideoStream settings than the the default settings
- // implemented in DefaultVideoStreamFactory. Therefore this test implement
- // its own VideoEncoderConfig::VideoStreamFactoryInterface which is created
- // in ModifyVideoConfigs.
- class VideoStreamFactory
- : public VideoEncoderConfig::VideoStreamFactoryInterface {
- public: // NOLINT(whitespace/blank_line)
- VideoStreamFactory() {}
-
- private:
- std::vector<VideoStream> CreateEncoderStreams(
- int width,
- int height,
- const VideoEncoderConfig& encoder_config) override {
- std::vector<VideoStream> streams =
- test::CreateVideoStreams(width, height, encoder_config);
- // Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
- for (size_t i = 0; i < encoder_config.number_of_streams; ++i) {
- streams[i].min_bitrate_bps = 10000;
- streams[i].target_bitrate_bps = 15000;
- streams[i].max_bitrate_bps = 20000;
- }
- return streams;
- }
- };
-
void ModifyVideoConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
VideoEncoderConfig* encoder_config) override {
- encoder_config->video_stream_factory =
- new rtc::RefCountedObject<VideoStreamFactory>();
- expected_cname_ = send_config->rtp.c_name = "SomeCName";
+ // Set low simulcast bitrates to not have to wait for bandwidth ramp-up.
+ encoder_config->max_bitrate_bps = 50000;
+ for (auto& layer : encoder_config->simulcast_layers) {
+ layer.min_bitrate_bps = 10000;
+ layer.target_bitrate_bps = 15000;
+ layer.max_bitrate_bps = 20000;
+ }
+ send_config->rtp.c_name = "SomeCName";
send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
@@ -322,12 +301,12 @@ TEST_F(StatsEndToEndTest, GetStats) {
void PerformTest() override {
Clock* clock = Clock::GetRealTimeClock();
- int64_t now = clock->TimeInMilliseconds();
- int64_t stop_time = now + test::CallTest::kLongTimeoutMs;
+ int64_t now_ms = clock->TimeInMilliseconds();
+ int64_t stop_time_ms = now_ms + test::CallTest::kLongTimeoutMs;
bool receive_ok = false;
bool send_ok = false;
- while (now < stop_time) {
+ while (now_ms < stop_time_ms) {
if (!receive_ok)
receive_ok = CheckReceiveStats();
if (!send_ok)
@@ -336,26 +315,21 @@ TEST_F(StatsEndToEndTest, GetStats) {
if (receive_ok && send_ok)
return;
- int64_t time_until_timout_ = stop_time - now;
- if (time_until_timout_ > 0)
- check_stats_event_.Wait(time_until_timout_);
- now = clock->TimeInMilliseconds();
+ int64_t time_until_timeout_ms = stop_time_ms - now_ms;
+ if (time_until_timeout_ms > 0)
+ check_stats_event_.Wait(time_until_timeout_ms);
+ now_ms = clock->TimeInMilliseconds();
}
ADD_FAILURE() << "Timed out waiting for filled stats.";
- for (std::map<std::string, bool>::const_iterator it =
- receive_stats_filled_.begin();
- it != receive_stats_filled_.end(); ++it) {
- if (!it->second) {
- ADD_FAILURE() << "Missing receive stats: " << it->first;
+ for (const auto& kv : receive_stats_filled_) {
+ if (!kv.second) {
+ ADD_FAILURE() << "Missing receive stats: " << kv.first;
}
}
-
- for (std::map<std::string, bool>::const_iterator it =
- send_stats_filled_.begin();
- it != send_stats_filled_.end(); ++it) {
- if (!it->second) {
- ADD_FAILURE() << "Missing send stats: " << it->first;
+ for (const auto& kv : send_stats_filled_) {
+ if (!kv.second) {
+ ADD_FAILURE() << "Missing send stats: " << kv.first;
}
}
}
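Alongside fixing the misspelled time_until_timout_ and suffixing the millisecond variables with _ms, these hunks modernize the stats-map iteration: explicit const_iterator loops become range-for loops with key/value access. The resulting shape, from the first GetStats hunk:

for (const auto& kv : stats.substreams) {
  // kv.first is the substream SSRC; kv.second holds its per-stream counters.
  const VideoSendStream::StreamStats& stream_stats = kv.second;
  send_stats_filled_[CompoundKey("Delay", kv.first)] |=
      stream_stats.avg_delay_ms != 0 || stream_stats.max_delay_ms != 0;
}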
@@ -369,7 +343,6 @@ TEST_F(StatsEndToEndTest, GetStats) {
std::vector<uint32_t> expected_receive_ssrcs_;
std::set<uint32_t> expected_send_ssrcs_;
- std::string expected_cname_;
rtc::Event check_stats_event_;
ReceiveStreamRenderer receive_stream_renderer_;
@@ -393,9 +366,9 @@ TEST_F(StatsEndToEndTest, TimingFramesAreReported) {
send_config->rtp.extensions.clear();
send_config->rtp.extensions.push_back(
RtpExtension(RtpExtension::kVideoTimingUri, kExtensionId));
- for (size_t i = 0; i < receive_configs->size(); ++i) {
- (*receive_configs)[i].rtp.extensions.clear();
- (*receive_configs)[i].rtp.extensions.push_back(
+ for (auto& receive_config : *receive_configs) {
+ receive_config.rtp.extensions.clear();
+ receive_config.rtp.extensions.push_back(
RtpExtension(RtpExtension::kVideoTimingUri, kExtensionId));
}
}
@@ -408,14 +381,14 @@ TEST_F(StatsEndToEndTest, TimingFramesAreReported) {
void PerformTest() override {
// No frames reported initially.
- for (size_t i = 0; i < receive_streams_.size(); ++i) {
- EXPECT_FALSE(receive_streams_[i]->GetStats().timing_frame_info);
+ for (const auto& receive_stream : receive_streams_) {
+ EXPECT_FALSE(receive_stream->GetStats().timing_frame_info);
}
     // Wait for at least one timing frame to be sent, with a 100ms grace period.
SleepMs(kDefaultTimingFramesDelayMs + 100);
// Check that timing frames are reported for each stream.
- for (size_t i = 0; i < receive_streams_.size(); ++i) {
- EXPECT_TRUE(receive_streams_[i]->GetStats().timing_frame_info);
+ for (const auto& receive_stream : receive_streams_) {
+ EXPECT_TRUE(receive_stream->GetStats().timing_frame_info);
}
}
@@ -554,8 +527,7 @@ TEST_F(StatsEndToEndTest, MAYBE_ContentTypeSwitches) {
// Start with realtime video.
GetVideoEncoderConfig()->content_type =
VideoEncoderConfig::ContentType::kRealtimeVideo;
- // Second encoder config for the second part of the test uses
- // screenshare
+ // Encoder config for the second part of the test uses screenshare.
encoder_config_with_screenshare = GetVideoEncoderConfig()->Copy();
encoder_config_with_screenshare.content_type =
VideoEncoderConfig::ContentType::kScreen;
@@ -589,18 +561,19 @@ TEST_F(StatsEndToEndTest, MAYBE_ContentTypeSwitches) {
});
// Verify that stats have been updated for both screenshare and video.
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.EndToEndDelayInMs"));
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.Screenshare.EndToEndDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.EndToEndDelayMaxInMs"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.EndToEndDelayInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.EndToEndDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.EndToEndDelayMaxInMs"));
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples("WebRTC.Video.Screenshare.EndToEndDelayMaxInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
}
TEST_F(StatsEndToEndTest, VerifyNackStats) {
@@ -612,8 +585,7 @@ TEST_F(StatsEndToEndTest, VerifyNackStats) {
sent_rtp_packets_(0),
dropped_rtp_packet_(0),
dropped_rtp_packet_requested_(false),
- send_stream_(nullptr),
- start_runtime_ms_(-1) {}
+ send_stream_(nullptr) {}
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
@@ -647,15 +619,13 @@ TEST_F(StatsEndToEndTest, VerifyNackStats) {
int send_stream_nack_packets = 0;
int receive_stream_nack_packets = 0;
VideoSendStream::Stats stats = send_stream_->GetStats();
- for (std::map<uint32_t, VideoSendStream::StreamStats>::const_iterator it =
- stats.substreams.begin();
- it != stats.substreams.end(); ++it) {
- const VideoSendStream::StreamStats& stream_stats = it->second;
+ for (const auto& kv : stats.substreams) {
+ const VideoSendStream::StreamStats& stream_stats = kv.second;
send_stream_nack_packets +=
stream_stats.rtcp_packet_type_counts.nack_packets;
}
- for (size_t i = 0; i < receive_streams_.size(); ++i) {
- VideoReceiveStream::Stats stats = receive_streams_[i]->GetStats();
+ for (const auto& receive_stream : receive_streams_) {
+ VideoReceiveStream::Stats stats = receive_stream->GetStats();
receive_stream_nack_packets +=
stats.rtcp_packet_type_counts.nack_packets;
}
@@ -667,12 +637,11 @@ TEST_F(StatsEndToEndTest, VerifyNackStats) {
}
bool MinMetricRunTimePassed() {
- int64_t now = Clock::GetRealTimeClock()->TimeInMilliseconds();
- if (start_runtime_ms_ == -1) {
- start_runtime_ms_ = now;
- return false;
- }
- int64_t elapsed_sec = (now - start_runtime_ms_) / 1000;
+ int64_t now_ms = Clock::GetRealTimeClock()->TimeInMilliseconds();
+ if (!start_runtime_ms_)
+ start_runtime_ms_ = now_ms;
+
+ int64_t elapsed_sec = (now_ms - *start_runtime_ms_) / 1000;
return elapsed_sec > metrics::kMinRunTimeInSeconds;
}
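
The hunk above replaces a -1 sentinel with absl::optional, making the "not yet started" state explicit in the type. A minimal sketch of the pattern, assuming only the standard absl API (the class name RunTimeGuard is hypothetical):

  #include <cstdint>

  #include "absl/types/optional.h"

  class RunTimeGuard {
   public:
    // Records the start time on the first call; afterwards reports whether
    // more than `min_run_time_sec` seconds have elapsed since that call.
    bool MinRunTimePassed(int64_t now_ms, int64_t min_run_time_sec) {
      if (!start_ms_)
        start_ms_ = now_ms;
      return (now_ms - *start_ms_) / 1000 > min_run_time_sec;
    }

   private:
    absl::optional<int64_t> start_ms_;  // Unset until the first sample.
  };
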
@@ -703,17 +672,18 @@ TEST_F(StatsEndToEndTest, VerifyNackStats) {
bool dropped_rtp_packet_requested_ RTC_GUARDED_BY(&crit_);
std::vector<VideoReceiveStream*> receive_streams_;
VideoSendStream* send_stream_;
- int64_t start_runtime_ms_;
+ absl::optional<int64_t> start_runtime_ms_;
} test;
metrics::Reset();
RunBaseTest(&test);
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples("WebRTC.Video.UniqueNackRequestsSentInPercent"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.UniqueNackRequestsReceivedInPercent"));
- EXPECT_GT(metrics::MinSample("WebRTC.Video.NackPacketsSentPerMinute"), 0);
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.UniqueNackRequestsReceivedInPercent"));
+ EXPECT_METRIC_GT(metrics::MinSample("WebRTC.Video.NackPacketsSentPerMinute"),
+ 0);
}
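
The EXPECT_METRIC_* wrappers that replace the plain gtest macros here plausibly gate the assertion on metric collection being compiled in; their real definitions live with the metrics helpers and are not part of this diff, so the following is an assumption about their shape, not the actual source:

  #if RTC_METRICS_ENABLED
  #define EXPECT_METRIC_EQ(val1, val2) EXPECT_EQ(val1, val2)
  #define EXPECT_METRIC_GT(val1, val2) EXPECT_GT(val1, val2)
  #else
  // With metrics compiled out no samples are recorded, so the checks must
  // degrade to no-ops that still evaluate their arguments.
  #define EXPECT_METRIC_EQ(val1, val2) \
    do {                               \
      (void)(val1);                    \
      (void)(val2);                    \
    } while (0)
  #define EXPECT_METRIC_GT(val1, val2) \
    do {                               \
      (void)(val1);                    \
      (void)(val2);                    \
    } while (0)
  #endif
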
TEST_F(StatsEndToEndTest, CallReportsRttForSender) {
diff --git a/chromium/third_party/webrtc/video/end_to_end_tests/transport_feedback_tests.cc b/chromium/third_party/webrtc/video/end_to_end_tests/transport_feedback_tests.cc
index 55c8bc4245d..4291bc48126 100644
--- a/chromium/third_party/webrtc/video/end_to_end_tests/transport_feedback_tests.cc
+++ b/chromium/third_party/webrtc/video/end_to_end_tests/transport_feedback_tests.cc
@@ -16,6 +16,8 @@
#include "call/simulated_network.h"
#include "modules/include/module_common_types_public.h"
#include "modules/rtp_rtcp/source/byte_io.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "test/call_test.h"
#include "test/field_trial.h"
#include "test/gtest.h"
@@ -41,7 +43,6 @@ TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) {
RtpExtensionHeaderObserver(
TaskQueueBase* task_queue,
Call* sender_call,
- const uint32_t& first_media_ssrc,
const std::map<uint32_t, uint32_t>& ssrc_map,
const std::map<uint8_t, MediaType>& payload_type_map)
: DirectTransport(task_queue,
@@ -51,14 +52,12 @@ TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) {
BuiltInNetworkBehaviorConfig())),
sender_call,
payload_type_map),
- parser_(RtpHeaderParser::CreateForTest()),
- first_media_ssrc_(first_media_ssrc),
rtx_to_media_ssrcs_(ssrc_map),
rtx_padding_observed_(false),
retransmit_observed_(false),
started_(false) {
- parser_->RegisterRtpHeaderExtension(kRtpExtensionTransportSequenceNumber,
- kTransportSequenceNumberExtensionId);
+ extensions_.Register<TransportSequenceNumber>(
+ kTransportSequenceNumberExtensionId);
}
virtual ~RtpExtensionHeaderObserver() {}
@@ -72,13 +71,14 @@ TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) {
return false;
if (started_) {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(data, length, &header));
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(data, length));
bool drop_packet = false;
- EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
- EXPECT_EQ(options.packet_id,
- header.extension.transportSequenceNumber);
+ uint16_t transport_sequence_number = 0;
+ EXPECT_TRUE(rtp_packet.GetExtension<TransportSequenceNumber>(
+ &transport_sequence_number));
+ EXPECT_EQ(options.packet_id, transport_sequence_number);
if (!streams_observed_.empty()) {
// Unwrap packet id and verify uniqueness.
int64_t packet_id = unwrapper_.Unwrap(options.packet_id);
@@ -86,21 +86,22 @@ TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) {
}
// Drop (up to) every 17th packet, so we get retransmits.
- // Only drop media, and not on the first stream (otherwise it will be
- // hard to distinguish from padding, which is always sent on the first
- // stream).
- if (header.payloadType != kSendRtxPayloadType &&
- header.ssrc != first_media_ssrc_ &&
- header.extension.transportSequenceNumber % 17 == 0) {
- dropped_seq_[header.ssrc].insert(header.sequenceNumber);
+ // Only drop media, do not drop padding packets.
+ if (rtp_packet.PayloadType() != kSendRtxPayloadType &&
+ rtp_packet.payload_size() > 0 &&
+ transport_sequence_number % 17 == 0) {
+ dropped_seq_[rtp_packet.Ssrc()].insert(rtp_packet.SequenceNumber());
drop_packet = true;
}
- if (header.payloadType == kSendRtxPayloadType) {
+ if (rtp_packet.payload_size() == 0) {
+ // Ignore padding packets.
+ } else if (rtp_packet.PayloadType() == kSendRtxPayloadType) {
uint16_t original_sequence_number =
- ByteReader<uint16_t>::ReadBigEndian(&data[header.headerLength]);
+ ByteReader<uint16_t>::ReadBigEndian(
+ rtp_packet.payload().data());
uint32_t original_ssrc =
- rtx_to_media_ssrcs_.find(header.ssrc)->second;
+ rtx_to_media_ssrcs_.find(rtp_packet.Ssrc())->second;
std::set<uint16_t>* seq_no_map = &dropped_seq_[original_ssrc];
auto it = seq_no_map->find(original_sequence_number);
if (it != seq_no_map->end()) {
@@ -110,7 +111,7 @@ TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) {
rtx_padding_observed_ = true;
}
} else {
- streams_observed_.insert(header.ssrc);
+ streams_observed_.insert(rtp_packet.Ssrc());
}
if (IsDone())
@@ -149,12 +150,11 @@ TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) {
private:
rtc::CriticalSection lock_;
rtc::Event done_;
- std::unique_ptr<RtpHeaderParser> parser_;
+ RtpHeaderExtensionMap extensions_;
SequenceNumberUnwrapper unwrapper_;
std::set<int64_t> received_packed_ids_;
std::set<uint32_t> streams_observed_;
std::map<uint32_t, std::set<uint16_t>> dropped_seq_;
- const uint32_t& first_media_ssrc_;
const std::map<uint32_t, uint32_t>& rtx_to_media_ssrcs_;
bool rtx_padding_observed_;
bool retransmit_observed_;
@@ -163,8 +163,7 @@ TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) {
class TransportSequenceNumberTester : public MultiStreamTester {
public:
- TransportSequenceNumberTester()
- : first_media_ssrc_(0), observer_(nullptr) {}
+ TransportSequenceNumberTester() : observer_(nullptr) {}
~TransportSequenceNumberTester() override = default;
protected:
@@ -197,9 +196,6 @@ TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) {
send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
rtx_to_media_ssrcs_[kSendRtxSsrcs[stream_index]] =
send_config->rtp.ssrcs[0];
-
- if (stream_index == 0)
- first_media_ssrc_ = send_config->rtp.ssrcs[0];
}
void UpdateReceiveConfig(
@@ -222,15 +218,13 @@ TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) {
payload_type_map.end());
payload_type_map[kSendRtxPayloadType] = MediaType::VIDEO;
auto observer = std::make_unique<RtpExtensionHeaderObserver>(
- task_queue, sender_call, first_media_ssrc_, rtx_to_media_ssrcs_,
- payload_type_map);
+ task_queue, sender_call, rtx_to_media_ssrcs_, payload_type_map);
observer_ = observer.get();
return observer;
}
private:
test::FakeVideoRenderer fake_renderer_;
- uint32_t first_media_ssrc_;
std::map<uint32_t, uint32_t> rtx_to_media_ssrcs_;
RtpExtensionHeaderObserver* observer_;
} tester;
@@ -369,10 +363,9 @@ TEST_F(TransportFeedbackEndToEndTest,
protected:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
- const bool only_padding =
- header.headerLength + header.paddingLength == length;
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ const bool only_padding = rtp_packet.payload_size() == 0;
rtc::CritScope lock(&crit_);
      // Padding is expected in a congested state to probe for connectivity
      // when packets have been dropped.
@@ -449,8 +442,8 @@ TEST_F(TransportFeedbackEndToEndTest, TransportSeqNumOnAudioAndVideo) {
: EndToEndTest(kDefaultTimeoutMs),
video_observed_(false),
audio_observed_(false) {
- parser_->RegisterRtpHeaderExtension(kRtpExtensionTransportSequenceNumber,
- kTransportSequenceNumberExtensionId);
+ extensions_.Register<TransportSequenceNumber>(
+ kTransportSequenceNumberExtensionId);
}
size_t GetNumVideoStreams() const override { return 1; }
@@ -468,17 +461,18 @@ TEST_F(TransportFeedbackEndToEndTest, TransportSeqNumOnAudioAndVideo) {
}
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
- EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ uint16_t transport_sequence_number = 0;
+ EXPECT_TRUE(rtp_packet.GetExtension<TransportSequenceNumber>(
+ &transport_sequence_number));
// Unwrap packet id and verify uniqueness.
- int64_t packet_id =
- unwrapper_.Unwrap(header.extension.transportSequenceNumber);
+ int64_t packet_id = unwrapper_.Unwrap(transport_sequence_number);
EXPECT_TRUE(received_packet_ids_.insert(packet_id).second);
- if (header.ssrc == kVideoSendSsrcs[0])
+ if (rtp_packet.Ssrc() == kVideoSendSsrcs[0])
video_observed_ = true;
- if (header.ssrc == kAudioSendSsrc)
+ if (rtp_packet.Ssrc() == kAudioSendSsrc)
audio_observed_ = true;
if (audio_observed_ && video_observed_ &&
received_packet_ids_.size() >= kMinPacketsToWaitFor) {
@@ -506,6 +500,7 @@ TEST_F(TransportFeedbackEndToEndTest, TransportSeqNumOnAudioAndVideo) {
bool audio_observed_;
SequenceNumberUnwrapper unwrapper_;
std::set<int64_t> received_packet_ids_;
+ RtpHeaderExtensionMap extensions_;
} test;
RunBaseTest(&test);
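
Both tests in this file migrate from the legacy RtpHeaderParser/RTPHeader pair to RtpPacket driven by an RtpHeaderExtensionMap: the map is registered once with the negotiated extension id, and each packet is then parsed against it. A minimal sketch of the new read path, using only calls that appear in the hunks above (the function name and its wrapping are assumptions):

  #include <cstddef>
  #include <cstdint>

  #include "modules/rtp_rtcp/source/rtp_header_extensions.h"
  #include "modules/rtp_rtcp/source/rtp_packet.h"

  bool ReadTransportSequenceNumber(const uint8_t* data, size_t length,
                                   int extension_id, uint16_t* seq) {
    webrtc::RtpHeaderExtensionMap extensions;
    extensions.Register<webrtc::TransportSequenceNumber>(extension_id);
    // The map tells Parse() how to decode registered header extensions.
    webrtc::RtpPacket packet(&extensions);
    if (!packet.Parse(data, length))
      return false;
    // Note: packet.payload_size() == 0 identifies padding-only packets,
    // which the tests above skip instead of the old first-media-ssrc check.
    return packet.GetExtension<webrtc::TransportSequenceNumber>(seq);
  }
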
diff --git a/chromium/third_party/webrtc/video/full_stack_tests.cc b/chromium/third_party/webrtc/video/full_stack_tests.cc
index 823137b973a..7307b462b73 100644
--- a/chromium/third_party/webrtc/video/full_stack_tests.cc
+++ b/chromium/third_party/webrtc/video/full_stack_tests.cc
@@ -824,9 +824,6 @@ TEST(FullStackTest, ScreenshareSlidesVP8_2TL) {
#if !defined(WEBRTC_MAC) && !defined(WEBRTC_WIN)
 // TODO(bugs.webrtc.org/9840): Investigate why this test is flaky on Win/Mac.
-const char kScreenshareSimulcastVariableFramerateExperiment[] =
- "WebRTC-VP8VariableFramerateScreenshare/"
- "Enabled,min_fps:5.0,min_qp:15,undershoot:30/";
TEST(FullStackTest, ScreenshareSlidesVP8_2TL_Simulcast) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging screenshare;
@@ -855,64 +852,6 @@ TEST(FullStackTest, ScreenshareSlidesVP8_2TL_Simulcast) {
fixture->RunWithAnalyzer(screenshare);
}
-TEST(FullStackTest, ScreenshareSlidesVP8_2TL_Simulcast_Variable_Framerate) {
- test::ScopedFieldTrials field_trial(
- AppendFieldTrials(kScreenshareSimulcastVariableFramerateExperiment));
- auto fixture = CreateVideoQualityTestFixture();
- ParamsWithLogging screenshare;
- screenshare.call.send_side_bwe = true;
- screenshare.screenshare[0] = {true, false, 10};
- screenshare.video[0] = {true, 1850, 1110, 30, 800000, 2500000,
- 2500000, false, "VP8", 2, 1, 400000,
- false, false, false, ""};
- screenshare.analyzer = {"screenshare_slides_simulcast_variable_framerate",
- 0.0, 0.0, kFullStackTestDurationSecs};
- ParamsWithLogging screenshare_params_high;
- screenshare_params_high.video[0] = {
- true, 1850, 1110, 60, 600000, 1250000, 1250000, false,
- "VP8", 2, 0, 400000, false, false, false, ""};
- VideoQualityTest::Params screenshare_params_low;
- screenshare_params_low.video[0] = {true, 1850, 1110, 5, 30000, 200000,
- 1000000, false, "VP8", 2, 0, 400000,
- false, false, false, ""};
-
- std::vector<VideoStream> streams = {
- VideoQualityTest::DefaultVideoStream(screenshare_params_low, 0),
- VideoQualityTest::DefaultVideoStream(screenshare_params_high, 0)};
- screenshare.ss[0] = {
- streams, 1, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
- false};
- fixture->RunWithAnalyzer(screenshare);
-}
-
-TEST(FullStackTest, ScreenshareSlidesVP8_2TL_Simulcast_low) {
- auto fixture = CreateVideoQualityTestFixture();
- ParamsWithLogging screenshare;
- screenshare.call.send_side_bwe = true;
- screenshare.screenshare[0] = {true, false, 10};
- screenshare.video[0] = {true, 1850, 1110, 30, 800000, 2500000,
- 2500000, false, "VP8", 2, 1, 400000,
- false, false, false, ""};
- screenshare.analyzer = {"screenshare_slides_simulcast_low", 0.0, 0.0,
- kFullStackTestDurationSecs};
- VideoQualityTest::Params screenshare_params_high;
- screenshare_params_high.video[0] = {
- true, 1850, 1110, 60, 600000, 1250000, 1250000, false,
- "VP8", 2, 0, 400000, false, false, false, ""};
- VideoQualityTest::Params screenshare_params_low;
- screenshare_params_low.video[0] = {true, 1850, 1110, 5, 30000, 200000,
- 1000000, false, "VP8", 2, 0, 400000,
- false, false, false, ""};
-
- std::vector<VideoStream> streams = {
- VideoQualityTest::DefaultVideoStream(screenshare_params_low, 0),
- VideoQualityTest::DefaultVideoStream(screenshare_params_high, 0)};
- screenshare.ss[0] = {
- streams, 0, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
- false};
- fixture->RunWithAnalyzer(screenshare);
-}
-
#endif // !defined(WEBRTC_MAC) && !defined(WEBRTC_WIN)
TEST(FullStackTest, ScreenshareSlidesVP8_2TL_Scroll) {
@@ -1063,66 +1002,7 @@ TEST(FullStackTest, ScreenshareSlidesVP9_3SL_High_Fps) {
fixture->RunWithAnalyzer(screenshare);
}
-TEST(FullStackTest, ScreenshareSlidesVP9_3SL_Variable_Fps) {
- webrtc::test::ScopedFieldTrials override_trials(
- AppendFieldTrials("WebRTC-VP9VariableFramerateScreenshare/"
- "Enabled,min_qp:32,min_fps:5.0,undershoot:30,frames_"
- "before_steady_state:5/"));
- auto fixture = CreateVideoQualityTestFixture();
- ParamsWithLogging screenshare;
- screenshare.call.send_side_bwe = true;
- screenshare.video[0] = {true, 1850, 1110, 30, 50000, 200000,
- 2000000, false, "VP9", 1, 0, 400000,
- false, false, false, ""};
- screenshare.screenshare[0] = {true, false, 10};
- screenshare.analyzer = {"screenshare_slides_vp9_3sl_variable_fps", 0.0, 0.0,
- kFullStackTestDurationSecs};
- screenshare.ss[0] = {
- std::vector<VideoStream>(), 0, 3, 2, InterLayerPredMode::kOn,
- std::vector<SpatialLayer>(), true};
- fixture->RunWithAnalyzer(screenshare);
-}
-
-TEST(FullStackTest, VP9SVC_3SL_High) {
- auto fixture = CreateVideoQualityTestFixture();
- ParamsWithLogging simulcast;
- simulcast.call.send_side_bwe = true;
- simulcast.video[0] = SvcVp9Video();
- simulcast.analyzer = {"vp9svc_3sl_high", 0.0, 0.0,
- kFullStackTestDurationSecs};
-
- simulcast.ss[0] = {
- std::vector<VideoStream>(), 0, 3, 2, InterLayerPredMode::kOn,
- std::vector<SpatialLayer>(), false};
- fixture->RunWithAnalyzer(simulcast);
-}
-
-TEST(FullStackTest, VP9SVC_3SL_Medium) {
- auto fixture = CreateVideoQualityTestFixture();
- ParamsWithLogging simulcast;
- simulcast.call.send_side_bwe = true;
- simulcast.video[0] = SvcVp9Video();
- simulcast.analyzer = {"vp9svc_3sl_medium", 0.0, 0.0,
- kFullStackTestDurationSecs};
- simulcast.ss[0] = {
- std::vector<VideoStream>(), 0, 3, 1, InterLayerPredMode::kOn,
- std::vector<SpatialLayer>(), false};
- fixture->RunWithAnalyzer(simulcast);
-}
-
-TEST(FullStackTest, VP9SVC_3SL_Low) {
- auto fixture = CreateVideoQualityTestFixture();
- ParamsWithLogging simulcast;
- simulcast.call.send_side_bwe = true;
- simulcast.video[0] = SvcVp9Video();
- simulcast.analyzer = {"vp9svc_3sl_low", 0.0, 0.0, kFullStackTestDurationSecs};
- simulcast.ss[0] = {
- std::vector<VideoStream>(), 0, 3, 0, InterLayerPredMode::kOn,
- std::vector<SpatialLayer>(), false};
- fixture->RunWithAnalyzer(simulcast);
-}
-
-// bugs.webrtc.org/9506
+// TODO(http://bugs.webrtc.org/9506): investigate.
#if !defined(WEBRTC_MAC)
TEST(FullStackTest, VP9KSVC_3SL_High) {
@@ -1140,29 +1020,31 @@ TEST(FullStackTest, VP9KSVC_3SL_High) {
fixture->RunWithAnalyzer(simulcast);
}
-TEST(FullStackTest, VP9KSVC_3SL_Medium) {
+TEST(FullStackTest, VP9KSVC_3SL_Low) {
webrtc::test::ScopedFieldTrials override_trials(
AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"));
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging simulcast;
simulcast.call.send_side_bwe = true;
simulcast.video[0] = SvcVp9Video();
- simulcast.analyzer = {"vp9ksvc_3sl_medium", 0.0, 0.0,
+ simulcast.analyzer = {"vp9ksvc_3sl_low", 0.0, 0.0,
kFullStackTestDurationSecs};
simulcast.ss[0] = {
- std::vector<VideoStream>(), 0, 3, 1, InterLayerPredMode::kOnKeyPic,
+ std::vector<VideoStream>(), 0, 3, 0, InterLayerPredMode::kOnKeyPic,
std::vector<SpatialLayer>(), false};
fixture->RunWithAnalyzer(simulcast);
}
-TEST(FullStackTest, VP9KSVC_3SL_Low) {
+TEST(FullStackTest, VP9KSVC_3SL_Low_Bw_Limited) {
webrtc::test::ScopedFieldTrials override_trials(
- AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"));
+ AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"
+ "WebRTC-Vp9ExternalRefCtrl/Enabled/"));
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging simulcast;
+ simulcast.config->link_capacity_kbps = 500;
simulcast.call.send_side_bwe = true;
simulcast.video[0] = SvcVp9Video();
- simulcast.analyzer = {"vp9ksvc_3sl_low", 0.0, 0.0,
+ simulcast.analyzer = {"vp9ksvc_3sl_low_bw_limited", 0.0, 0.0,
kFullStackTestDurationSecs};
simulcast.ss[0] = {
std::vector<VideoStream>(), 0, 3, 0, InterLayerPredMode::kOnKeyPic,
@@ -1266,32 +1148,6 @@ TEST(FullStackTest, SimulcastVP8_3SL_High) {
fixture->RunWithAnalyzer(simulcast);
}
-TEST(FullStackTest, SimulcastVP8_3SL_Medium) {
- auto fixture = CreateVideoQualityTestFixture();
- ParamsWithLogging simulcast;
- simulcast.call.send_side_bwe = true;
- simulcast.video[0] = SimulcastVp8VideoHigh();
- simulcast.analyzer = {"simulcast_vp8_3sl_medium", 0.0, 0.0,
- kFullStackTestDurationSecs};
- simulcast.config->loss_percent = 0;
- simulcast.config->queue_delay_ms = 100;
- ParamsWithLogging video_params_high;
- video_params_high.video[0] = SimulcastVp8VideoHigh();
- ParamsWithLogging video_params_medium;
- video_params_medium.video[0] = SimulcastVp8VideoMedium();
- ParamsWithLogging video_params_low;
- video_params_low.video[0] = SimulcastVp8VideoLow();
-
- std::vector<VideoStream> streams = {
- VideoQualityTest::DefaultVideoStream(video_params_low, 0),
- VideoQualityTest::DefaultVideoStream(video_params_medium, 0),
- VideoQualityTest::DefaultVideoStream(video_params_high, 0)};
- simulcast.ss[0] = {
- streams, 1, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
- false};
- fixture->RunWithAnalyzer(simulcast);
-}
-
TEST(FullStackTest, SimulcastVP8_3SL_Low) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging simulcast;
@@ -1355,70 +1211,14 @@ TEST(FullStackTest, MAYBE_HighBitrateWithFakeCodec) {
fixture->RunWithAnalyzer(generator);
}
-TEST(FullStackTest, LargeRoomVP8_5thumb) {
- auto fixture = CreateVideoQualityTestFixture();
- ParamsWithLogging large_room;
- large_room.call.send_side_bwe = true;
- large_room.video[0] = SimulcastVp8VideoHigh();
- large_room.analyzer = {"largeroom_5thumb", 0.0, 0.0,
- kFullStackTestDurationSecs};
- large_room.config->loss_percent = 0;
- large_room.config->queue_delay_ms = 100;
- ParamsWithLogging video_params_high;
- video_params_high.video[0] = SimulcastVp8VideoHigh();
- ParamsWithLogging video_params_medium;
- video_params_medium.video[0] = SimulcastVp8VideoMedium();
- ParamsWithLogging video_params_low;
- video_params_low.video[0] = SimulcastVp8VideoLow();
-
- std::vector<VideoStream> streams = {
- VideoQualityTest::DefaultVideoStream(video_params_low, 0),
- VideoQualityTest::DefaultVideoStream(video_params_medium, 0),
- VideoQualityTest::DefaultVideoStream(video_params_high, 0)};
- large_room.call.num_thumbnails = 5;
- large_room.ss[0] = {
- streams, 2, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
- false};
- fixture->RunWithAnalyzer(large_room);
-}
-
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
// Fails on mobile devices:
// https://bugs.chromium.org/p/webrtc/issues/detail?id=7301
#define MAYBE_LargeRoomVP8_50thumb DISABLED_LargeRoomVP8_50thumb
-#define MAYBE_LargeRoomVP8_15thumb DISABLED_LargeRoomVP8_15thumb
#else
#define MAYBE_LargeRoomVP8_50thumb LargeRoomVP8_50thumb
-#define MAYBE_LargeRoomVP8_15thumb LargeRoomVP8_15thumb
#endif
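
The MAYBE_ aliases above rely on gtest's DISABLED_ convention: a test whose name starts with DISABLED_ still compiles but is skipped unless --gtest_also_run_disabled_tests is passed. A minimal sketch of the pattern (MAYBE_SomeTest is a hypothetical name):

  #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
  #define MAYBE_SomeTest DISABLED_SomeTest  // Skipped by default on mobile.
  #else
  #define MAYBE_SomeTest SomeTest
  #endif

  TEST(FullStackTest, MAYBE_SomeTest) {
    // Runs only on platforms where the alias is not DISABLED_.
  }
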
-TEST(FullStackTest, MAYBE_LargeRoomVP8_15thumb) {
- auto fixture = CreateVideoQualityTestFixture();
- ParamsWithLogging large_room;
- large_room.call.send_side_bwe = true;
- large_room.video[0] = SimulcastVp8VideoHigh();
- large_room.analyzer = {"largeroom_15thumb", 0.0, 0.0,
- kFullStackTestDurationSecs};
- large_room.config->loss_percent = 0;
- large_room.config->queue_delay_ms = 100;
- ParamsWithLogging video_params_high;
- video_params_high.video[0] = SimulcastVp8VideoHigh();
- ParamsWithLogging video_params_medium;
- video_params_medium.video[0] = SimulcastVp8VideoMedium();
- ParamsWithLogging video_params_low;
- video_params_low.video[0] = SimulcastVp8VideoLow();
-
- std::vector<VideoStream> streams = {
- VideoQualityTest::DefaultVideoStream(video_params_low, 0),
- VideoQualityTest::DefaultVideoStream(video_params_medium, 0),
- VideoQualityTest::DefaultVideoStream(video_params_high, 0)};
- large_room.call.num_thumbnails = 15;
- large_room.ss[0] = {
- streams, 2, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
- false};
- fixture->RunWithAnalyzer(large_room);
-}
-
TEST(FullStackTest, MAYBE_LargeRoomVP8_50thumb) {
auto fixture = CreateVideoQualityTestFixture();
ParamsWithLogging large_room;
@@ -1452,110 +1252,4 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values("WebRTC-GenericDescriptor/Disabled/",
"WebRTC-GenericDescriptor/Enabled/"));
-class DualStreamsTest : public ::testing::TestWithParam<int> {};
-
-// Disable dual video test on mobile devices because it's too heavy.
-// TODO(bugs.webrtc.org/9840): Investigate why this test is flaky on MAC.
-#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS) && !defined(WEBRTC_MAC)
-TEST_P(DualStreamsTest,
- ModeratelyRestricted_SlidesVp8_2TL_Simulcast_Video_Simulcast_High) {
- const int first_stream = GetParam();
- ParamsWithLogging dual_streams;
-
- // Screenshare Settings.
- dual_streams.screenshare[first_stream] = {true, false, 10};
- dual_streams.video[first_stream] = {true, 1850, 1110, 5, 800000, 2500000,
- 2500000, false, "VP8", 2, 1, 400000,
- false, false, false, ""};
-
- ParamsWithLogging screenshare_params_high;
- screenshare_params_high.video[0] = {
- true, 1850, 1110, 60, 600000, 1250000, 1250000, false,
- "VP8", 2, 0, 400000, false, false, false, ""};
- VideoQualityTest::Params screenshare_params_low;
- screenshare_params_low.video[0] = {true, 1850, 1110, 5, 30000, 200000,
- 1000000, false, "VP8", 2, 0, 400000,
- false, false, false, ""};
-  std::vector<VideoStream> screenshare_streams = {
- VideoQualityTest::DefaultVideoStream(screenshare_params_low, 0),
- VideoQualityTest::DefaultVideoStream(screenshare_params_high, 0)};
-
- dual_streams.ss[first_stream] = {
-      screenshare_streams, 1, 1, 0, InterLayerPredMode::kOn,
- std::vector<SpatialLayer>(), false};
-
- // Video settings.
- dual_streams.video[1 - first_stream] = SimulcastVp8VideoHigh();
-
- ParamsWithLogging video_params_high;
- video_params_high.video[0] = SimulcastVp8VideoHigh();
- ParamsWithLogging video_params_medium;
- video_params_medium.video[0] = SimulcastVp8VideoMedium();
- ParamsWithLogging video_params_low;
- video_params_low.video[0] = SimulcastVp8VideoLow();
- std::vector<VideoStream> streams = {
- VideoQualityTest::DefaultVideoStream(video_params_low, 0),
- VideoQualityTest::DefaultVideoStream(video_params_medium, 0),
- VideoQualityTest::DefaultVideoStream(video_params_high, 0)};
-
- dual_streams.ss[1 - first_stream] = {
- streams, 2, 1, 0, InterLayerPredMode::kOn, std::vector<SpatialLayer>(),
- false};
-
- // Call settings.
- dual_streams.call.send_side_bwe = true;
- dual_streams.call.dual_video = true;
- std::string test_label = "dualstreams_moderately_restricted_screenshare_" +
- std::to_string(first_stream);
- dual_streams.analyzer = {test_label, 0.0, 0.0, kFullStackTestDurationSecs};
- dual_streams.config->loss_percent = 1;
- dual_streams.config->link_capacity_kbps = 7500;
- dual_streams.config->queue_length_packets = 30;
- dual_streams.config->queue_delay_ms = 100;
-
- auto fixture = CreateVideoQualityTestFixture();
- fixture->RunWithAnalyzer(dual_streams);
-}
-#endif // !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS) &&
- // !defined(WEBRTC_MAC)
-
-TEST_P(DualStreamsTest, Conference_Restricted) {
- const int first_stream = GetParam();
- ParamsWithLogging dual_streams;
-
- // Screenshare Settings.
- dual_streams.screenshare[first_stream] = {true, false, 10};
- dual_streams.video[first_stream] = {true, 1850, 1110, 5, 800000, 2500000,
- 2500000, false, "VP8", 3, 2, 400000,
- false, false, false, ""};
- // Video settings.
- dual_streams.video[1 - first_stream] = {
- true, 1280,
- 720, 30,
- 150000, 500000,
- 700000, false,
- "VP8", 3,
- 2, 400000,
- false, false,
- false, ClipNameToClipPath("ConferenceMotion_1280_720_50")};
-
- // Call settings.
- dual_streams.call.send_side_bwe = true;
- dual_streams.call.dual_video = true;
- std::string test_label = "dualstreams_conference_restricted_screenshare_" +
- std::to_string(first_stream);
- dual_streams.analyzer = {test_label, 0.0, 0.0, kFullStackTestDurationSecs};
- dual_streams.config->loss_percent = 1;
- dual_streams.config->link_capacity_kbps = 5000;
- dual_streams.config->queue_length_packets = 30;
- dual_streams.config->queue_delay_ms = 100;
-
- auto fixture = CreateVideoQualityTestFixture();
- fixture->RunWithAnalyzer(dual_streams);
-}
-
-INSTANTIATE_TEST_SUITE_P(FullStackTest,
- DualStreamsTest,
- ::testing::Values(0, 1));
-
} // namespace webrtc
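
The field-trial overrides used throughout this file are single strings made of "TrialName/Value/" segments; concatenating segments enables several trials at once, as in the VP9KSVC_3SL_Low_Bw_Limited hunk above. A minimal sketch, with trial names taken verbatim from that hunk (the surrounding test body is elided):

  #include "test/field_trial.h"

  // Active for the lifetime of `override_trials`; adjacent string literals
  // concatenate into one "Trial/Value/Trial/Value/" configuration.
  webrtc::test::ScopedFieldTrials override_trials(
      AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/"
                        "WebRTC-Vp9ExternalRefCtrl/Enabled/"));
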
diff --git a/chromium/third_party/webrtc/video/pc_full_stack_tests.cc b/chromium/third_party/webrtc/video/pc_full_stack_tests.cc
index 751eebed728..a955071d8a9 100644
--- a/chromium/third_party/webrtc/video/pc_full_stack_tests.cc
+++ b/chromium/third_party/webrtc/video/pc_full_stack_tests.cc
@@ -41,6 +41,8 @@ using ScreenShareConfig =
webrtc_pc_e2e::PeerConnectionE2EQualityTestFixture::ScreenShareConfig;
using VideoSimulcastConfig =
webrtc_pc_e2e::PeerConnectionE2EQualityTestFixture::VideoSimulcastConfig;
+using VideoCodecConfig =
+ webrtc_pc_e2e::PeerConnectionE2EQualityTestFixture::VideoCodecConfig;
namespace {
@@ -136,10 +138,10 @@ TEST(PCFullStackTest, ForemanCifWithoutPacketLossVp9) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp9CodecName;
- run_params.video_codec_required_params = {
- {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}};
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}})};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -161,10 +163,10 @@ TEST_P(PCGenericDescriptorTest, ForemanCifPlr5Vp9) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp9CodecName;
- run_params.video_codec_required_params = {
- {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}};
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}})};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -194,10 +196,10 @@ TEST(PCFullStackTest, MAYBE_GeneratorWithoutPacketLossVp9Profile2) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp9CodecName;
- run_params.video_codec_required_params = {
- {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile2)}};
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile2)}})};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -249,8 +251,8 @@ TEST(PCFullStackTest, ParisQcifWithoutPacketLoss) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -270,8 +272,8 @@ TEST_P(PCGenericDescriptorTest, ForemanCifWithoutPacketLoss) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -297,8 +299,8 @@ TEST_P(PCGenericDescriptorTest, ForemanCif30kbpsWithoutPacketLoss) {
alice->SetBitrateParameters(bitrate_params);
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -329,8 +331,8 @@ TEST_P(PCGenericDescriptorTest,
alice->SetBitrateParameters(bitrate_params);
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -352,8 +354,8 @@ TEST(PCFullStackTest, ForemanCifLink150kbpsWithoutPacketLoss) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -376,8 +378,8 @@ TEST(PCFullStackTest, ForemanCifLink130kbps100msDelay1PercentPacketLossUlpfec) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = true;
fixture->Run(std::move(run_params));
@@ -400,8 +402,8 @@ TEST(PCFullStackTest, ForemanCifLink50kbps100msDelay1PercentPacketLossUlpfec) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = true;
fixture->Run(std::move(run_params));
@@ -425,8 +427,8 @@ TEST(PCFullStackTest, ForemanCifLink150kbpsBadRateController) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
run_params.video_encoder_bitrate_multiplier = 1.30;
@@ -455,8 +457,8 @@ TEST(PCFullStackTest, ForemanCifMediaCapacitySmallLossAndQueue) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
run_params.video_encoder_bitrate_multiplier = 1.30;
@@ -479,8 +481,8 @@ TEST_P(PCGenericDescriptorTest, ForemanCifPlr5) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -502,8 +504,8 @@ TEST_P(PCGenericDescriptorTest, ForemanCifPlr5Ulpfec) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = true;
fixture->Run(std::move(run_params));
@@ -525,8 +527,8 @@ TEST(PCFullStackTest, ForemanCifPlr5Flexfec) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = true;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -549,8 +551,8 @@ TEST(PCFullStackTest, ForemanCif500kbpsPlr3Flexfec) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = true;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -573,8 +575,8 @@ TEST(PCFullStackTest, ForemanCif500kbpsPlr3Ulpfec) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = true;
fixture->Run(std::move(run_params));
@@ -595,8 +597,8 @@ TEST(PCFullStackTest, ForemanCifWithoutPacketlossH264) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kH264CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kH264CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -622,8 +624,8 @@ TEST(PCFullStackTest, ForemanCif30kbpsWithoutPacketlossH264) {
alice->SetBitrateParameters(bitrate_params);
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kH264CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kH264CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -645,8 +647,8 @@ TEST_P(PCGenericDescriptorTest, ForemanCifPlr5H264) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kH264CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kH264CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -671,8 +673,8 @@ TEST(PCFullStackTest, ForemanCifPlr5H264SpsPpsIdrIsKeyframe) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kH264CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kH264CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -694,8 +696,8 @@ TEST(PCFullStackTest, ForemanCifPlr5H264Flexfec) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kH264CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kH264CodecName)};
run_params.use_flex_fec = true;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -719,8 +721,8 @@ TEST(PCFullStackTest, DISABLED_ForemanCifPlr5H264Ulpfec) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kH264CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kH264CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = true;
fixture->Run(std::move(run_params));
@@ -744,8 +746,8 @@ TEST(PCFullStackTest, ForemanCif500kbps) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -768,8 +770,8 @@ TEST(PCFullStackTest, ForemanCif500kbpsLimitedQueue) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -792,8 +794,8 @@ TEST(PCFullStackTest, ForemanCif500kbps100ms) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -816,8 +818,8 @@ TEST_P(PCGenericDescriptorTest, ForemanCif500kbps100msLimitedQueue) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -861,8 +863,8 @@ TEST(PCFullStackTest, ForemanCif1000kbps100msLimitedQueue) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -887,8 +889,8 @@ TEST(PCFullStackTest, ConferenceMotionHd2000kbps100msLimitedQueue) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -916,8 +918,8 @@ TEST(PCFullStackTest, ConferenceMotionHd1TLModerateLimitsWhitelistVp8) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -1068,10 +1070,10 @@ TEST(PCFullStackTest, ConferenceMotionHd2000kbps100msLimitedQueueVP9) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp9CodecName;
- run_params.video_codec_required_params = {
- {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}};
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}})};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -1087,13 +1089,13 @@ TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_NoConferenceMode) {
BuiltInNetworkBehaviorConfig()),
[](PeerConfigurer* alice) {
VideoConfig video(1850, 1110, 5);
- video.screen_share_config = ScreenShareConfig(TimeDelta::seconds(10));
+ video.screen_share_config = ScreenShareConfig(TimeDelta::Seconds(10));
video.stream_label = "alice-video";
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -1108,13 +1110,13 @@ TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL) {
BuiltInNetworkBehaviorConfig()),
[](PeerConfigurer* alice) {
VideoConfig video(1850, 1110, 5);
- video.screen_share_config = ScreenShareConfig(TimeDelta::seconds(10));
+ video.screen_share_config = ScreenShareConfig(TimeDelta::Seconds(10));
video.stream_label = "alice-video";
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
run_params.use_conference_mode = true;
@@ -1132,15 +1134,15 @@ TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Simulcast_NoConferenceMode) {
BuiltInNetworkBehaviorConfig()),
[](PeerConfigurer* alice) {
VideoConfig video(1850, 1110, 30);
- video.screen_share_config = ScreenShareConfig(TimeDelta::seconds(10));
+ video.screen_share_config = ScreenShareConfig(TimeDelta::Seconds(10));
video.simulcast_config = VideoSimulcastConfig(2, 1);
video.temporal_layers_count = 2;
video.stream_label = "alice-video";
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -1155,15 +1157,15 @@ TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Simulcast) {
BuiltInNetworkBehaviorConfig()),
[](PeerConfigurer* alice) {
VideoConfig video(1850, 1110, 30);
- video.screen_share_config = ScreenShareConfig(TimeDelta::seconds(10));
+ video.screen_share_config = ScreenShareConfig(TimeDelta::Seconds(10));
video.simulcast_config = VideoSimulcastConfig(2, 1);
video.temporal_layers_count = 2;
video.stream_label = "alice-video";
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
run_params.use_conference_mode = true;
@@ -1395,45 +1397,15 @@ TEST(PCFullStackTest, ScreenshareSlidesVP9_3SL_High_Fps) {
[](PeerConfigurer* alice) {
VideoConfig video(1850, 1110, 30);
video.stream_label = "alice-video";
- video.screen_share_config = ScreenShareConfig(TimeDelta::seconds(10));
+ video.screen_share_config = ScreenShareConfig(TimeDelta::Seconds(10));
video.simulcast_config = VideoSimulcastConfig(3, 2);
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp9CodecName;
- run_params.video_codec_required_params = {
- {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}};
- run_params.use_flex_fec = false;
- run_params.use_ulp_fec = false;
- fixture->Run(std::move(run_params));
-}
-
-TEST(PCFullStackTest, ScreenshareSlidesVP9_3SL_Variable_Fps) {
- webrtc::test::ScopedFieldTrials override_trials(
- AppendFieldTrials("WebRTC-VP9VariableFramerateScreenshare/"
- "Enabled,min_qp:32,min_fps:5.0,undershoot:30,frames_"
- "before_steady_state:5/"
- "WebRTC-Vp9InterLayerPred/"
- "Enabled,inter_layer_pred_mode:on/"));
- std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
- CreateNetworkEmulationManager();
- auto fixture = CreateTestFixture(
- "pc_screenshare_slides_vp9_3sl_variable_fps",
- CreateTwoNetworkLinks(network_emulation_manager.get(),
- BuiltInNetworkBehaviorConfig()),
- [](PeerConfigurer* alice) {
- VideoConfig video(1850, 1110, 30);
- video.stream_label = "alice-video";
- video.screen_share_config = ScreenShareConfig(TimeDelta::seconds(10));
- video.simulcast_config = VideoSimulcastConfig(3, 2);
- alice->AddVideoConfig(std::move(video));
- },
- [](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp9CodecName;
- run_params.video_codec_required_params = {
- {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}};
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}})};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -1459,39 +1431,10 @@ TEST(PCFullStackTest, VP9SVC_3SL_High) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp9CodecName;
- run_params.video_codec_required_params = {
- {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}};
- run_params.use_flex_fec = false;
- run_params.use_ulp_fec = false;
- fixture->Run(std::move(run_params));
-}
-
-TEST(PCFullStackTest, VP9SVC_3SL_Medium) {
- webrtc::test::ScopedFieldTrials override_trials(
- AppendFieldTrials("WebRTC-Vp9InterLayerPred/"
- "Enabled,inter_layer_pred_mode:on/"));
- std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
- CreateNetworkEmulationManager();
- auto fixture = CreateTestFixture(
- "pc_vp9svc_3sl_medium",
- CreateTwoNetworkLinks(network_emulation_manager.get(),
- BuiltInNetworkBehaviorConfig()),
- [](PeerConfigurer* alice) {
- VideoConfig video(1280, 720, 30);
- video.stream_label = "alice-video";
- video.input_file_name =
- ClipNameToClipPath("ConferenceMotion_1280_720_50");
- video.simulcast_config = VideoSimulcastConfig(3, 1);
- video.temporal_layers_count = 3;
- alice->AddVideoConfig(std::move(video));
- },
- [](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp9CodecName;
- run_params.video_codec_required_params = {
- {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}};
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}})};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -1517,10 +1460,10 @@ TEST(PCFullStackTest, VP9SVC_3SL_Low) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp9CodecName;
- run_params.video_codec_required_params = {
- {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}};
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(
+ /*name=*/cricket::kVp9CodecName, /*required_params=*/{
+ {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}})};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -1646,8 +1589,8 @@ TEST(PCFullStackTest, MAYBE_SimulcastFullHdOveruse) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -1671,33 +1614,8 @@ TEST(PCFullStackTest, SimulcastVP8_3SL_High) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
- run_params.use_flex_fec = false;
- run_params.use_ulp_fec = false;
- fixture->Run(std::move(run_params));
-}
-
-TEST(PCFullStackTest, SimulcastVP8_3SL_Medium) {
- std::unique_ptr<NetworkEmulationManager> network_emulation_manager =
- CreateNetworkEmulationManager();
- BuiltInNetworkBehaviorConfig config;
- config.loss_percent = 0;
- config.queue_delay_ms = 100;
- auto fixture = CreateTestFixture(
- "pc_simulcast_vp8_3sl_medium",
- CreateTwoNetworkLinks(network_emulation_manager.get(), config),
- [](PeerConfigurer* alice) {
- VideoConfig video(1280, 720, 30);
- video.input_file_name =
- ClipNameToClipPath("ConferenceMotion_1280_720_50");
- video.simulcast_config = VideoSimulcastConfig(3, 1);
- video.stream_label = "alice-video";
- alice->AddVideoConfig(std::move(video));
- },
- [](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
@@ -1721,8 +1639,8 @@ TEST(PCFullStackTest, SimulcastVP8_3SL_Low) {
alice->AddVideoConfig(std::move(video));
},
[](PeerConfigurer* bob) {});
- RunParams run_params(TimeDelta::seconds(kTestDurationSec));
- run_params.video_codec_name = cricket::kVp8CodecName;
+ RunParams run_params(TimeDelta::Seconds(kTestDurationSec));
+ run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)};
run_params.use_flex_fec = false;
run_params.use_ulp_fec = false;
fixture->Run(std::move(run_params));
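All of the pc_full_stack_tests hunks above apply the same two-part migration: the TimeDelta factory is renamed from seconds() to the capitalized Seconds() (the later ms() to Millis() rename in this commit follows the same scheme), and per-test codec selection collapses from two RunParams fields into a single ordered video_codecs list. A before/after sketch of the pattern, using only identifiers that appear in the diff, with fixture setup elided:

  // Before: the codec name and its required SDP format parameters were
  // separate RunParams fields.
  RunParams old_params(TimeDelta::seconds(kTestDurationSec));
  old_params.video_codec_name = cricket::kVp9CodecName;
  old_params.video_codec_required_params = {
      {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}};

  // After: one VideoCodecConfig entry carries both the name and the
  // required parameters.
  RunParams new_params(TimeDelta::Seconds(kTestDurationSec));
  new_params.video_codecs = {VideoCodecConfig(
      /*name=*/cricket::kVp9CodecName, /*required_params=*/{
          {kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}})};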
diff --git a/chromium/third_party/webrtc/video/picture_id_tests.cc b/chromium/third_party/webrtc/video/picture_id_tests.cc
index 267a8e063b4..19c1141b0a0 100644
--- a/chromium/third_party/webrtc/video/picture_id_tests.cc
+++ b/chromium/third_party/webrtc/video/picture_id_tests.cc
@@ -16,7 +16,8 @@
#include "call/simulated_network.h"
#include "media/engine/internal_encoder_factory.h"
#include "media/engine/simulcast_encoder_adapter.h"
-#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "modules/video_coding/codecs/vp9/include/vp9.h"
#include "rtc_base/numerics/safe_conversions.h"
@@ -42,7 +43,7 @@ class PictureIdObserver : public test::RtpRtcpObserver {
public:
explicit PictureIdObserver(VideoCodecType codec_type)
: test::RtpRtcpObserver(test::CallTest::kDefaultTimeoutMs),
- codec_type_(codec_type),
+ depacketizer_(CreateVideoRtpDepacketizer(codec_type)),
max_expected_picture_id_gap_(0),
max_expected_tl0_idx_gap_(0),
num_ssrcs_to_observe_(1) {}
@@ -80,51 +81,39 @@ class PictureIdObserver : public test::RtpRtcpObserver {
bool ParsePayload(const uint8_t* packet,
size_t length,
ParsedPacket* parsed) const {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
- EXPECT_TRUE(header.ssrc == test::CallTest::kVideoSendSsrcs[0] ||
- header.ssrc == test::CallTest::kVideoSendSsrcs[1] ||
- header.ssrc == test::CallTest::kVideoSendSsrcs[2])
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ EXPECT_TRUE(rtp_packet.Ssrc() == test::CallTest::kVideoSendSsrcs[0] ||
+ rtp_packet.Ssrc() == test::CallTest::kVideoSendSsrcs[1] ||
+ rtp_packet.Ssrc() == test::CallTest::kVideoSendSsrcs[2])
<< "Unknown SSRC sent.";
- EXPECT_GE(length, header.headerLength + header.paddingLength);
- size_t payload_length = length - header.headerLength - header.paddingLength;
- if (payload_length == 0) {
+ if (rtp_packet.payload_size() == 0) {
return false; // Padding packet.
}
- parsed->timestamp = header.timestamp;
- parsed->ssrc = header.ssrc;
-
- std::unique_ptr<RtpDepacketizer> depacketizer(
- RtpDepacketizer::Create(codec_type_));
- RtpDepacketizer::ParsedPayload parsed_payload;
- EXPECT_TRUE(depacketizer->Parse(
- &parsed_payload, &packet[header.headerLength], payload_length));
-
- switch (codec_type_) {
- case kVideoCodecVP8: {
- const auto& vp8_header = absl::get<RTPVideoHeaderVP8>(
- parsed_payload.video_header().video_type_header);
- parsed->picture_id = vp8_header.pictureId;
- parsed->tl0_pic_idx = vp8_header.tl0PicIdx;
- parsed->temporal_idx = vp8_header.temporalIdx;
- break;
- }
- case kVideoCodecVP9: {
- const auto& vp9_header = absl::get<RTPVideoHeaderVP9>(
- parsed_payload.video_header().video_type_header);
- parsed->picture_id = vp9_header.picture_id;
- parsed->tl0_pic_idx = vp9_header.tl0_pic_idx;
- parsed->temporal_idx = vp9_header.temporal_idx;
- break;
- }
- default:
- RTC_NOTREACHED();
- break;
+ parsed->timestamp = rtp_packet.Timestamp();
+ parsed->ssrc = rtp_packet.Ssrc();
+
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload =
+ depacketizer_->Parse(rtp_packet.PayloadBuffer());
+ EXPECT_TRUE(parsed_payload);
+
+ if (const auto* vp8_header = absl::get_if<RTPVideoHeaderVP8>(
+ &parsed_payload->video_header.video_type_header)) {
+ parsed->picture_id = vp8_header->pictureId;
+ parsed->tl0_pic_idx = vp8_header->tl0PicIdx;
+ parsed->temporal_idx = vp8_header->temporalIdx;
+ } else if (const auto* vp9_header = absl::get_if<RTPVideoHeaderVP9>(
+ &parsed_payload->video_header.video_type_header)) {
+ parsed->picture_id = vp9_header->picture_id;
+ parsed->tl0_pic_idx = vp9_header->tl0_pic_idx;
+ parsed->temporal_idx = vp9_header->temporal_idx;
+ } else {
+ RTC_NOTREACHED();
}
- parsed->frame_type = parsed_payload.video_header().frame_type;
+ parsed->frame_type = parsed_payload->video_header.frame_type;
return true;
}
@@ -208,7 +197,7 @@ class PictureIdObserver : public test::RtpRtcpObserver {
}
rtc::CriticalSection crit_;
- const VideoCodecType codec_type_;
+ const std::unique_ptr<VideoRtpDepacketizer> depacketizer_;
std::map<uint32_t, ParsedPacket> last_observed_packet_ RTC_GUARDED_BY(crit_);
std::map<uint32_t, size_t> num_packets_sent_ RTC_GUARDED_BY(crit_);
int max_expected_picture_id_gap_ RTC_GUARDED_BY(crit_);
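The ParsePayload() rewrite above replaces the legacy two-step flow (parse an RTPHeader, then build a throwaway RtpDepacketizer per packet) with an RtpPacket parse plus a single VideoRtpDepacketizer constructed once in the initializer list, and it swaps the switch on codec_type_ for absl::get_if on the video_type_header variant. A condensed sketch of the new path, relying on the headers this diff adds at the top of the file, with the VP9 branch and the gap bookkeeping elided:

  RtpPacket rtp_packet;
  if (!rtp_packet.Parse(packet, length))
    return false;  // Malformed packet.
  if (rtp_packet.payload_size() == 0)
    return false;  // Padding packet.

  // |depacketizer_| was created once via CreateVideoRtpDepacketizer().
  absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> payload =
      depacketizer_->Parse(rtp_packet.PayloadBuffer());
  if (!payload)
    return false;

  if (const auto* vp8 = absl::get_if<RTPVideoHeaderVP8>(
          &payload->video_header.video_type_header)) {
    // Codec-specific fields come straight off the variant alternative.
    parsed->picture_id = vp8->pictureId;
  }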
diff --git a/chromium/third_party/webrtc/video/receive_statistics_proxy_unittest.cc b/chromium/third_party/webrtc/video/receive_statistics_proxy_unittest.cc
index a7756246569..626542c810f 100644
--- a/chromium/third_party/webrtc/video/receive_statistics_proxy_unittest.cc
+++ b/chromium/third_party/webrtc/video/receive_statistics_proxy_unittest.cc
@@ -101,8 +101,10 @@ TEST_F(ReceiveStatisticsProxyTest, DecodedFpsIsReported) {
}
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.DecodedFramesPerSecond"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.DecodedFramesPerSecond", kFps));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DecodedFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DecodedFramesPerSecond", kFps));
}
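The remainder of this file is the same mechanical substitution: every EXPECT_EQ over metrics::NumSamples(), NumEvents() and MinSample() becomes EXPECT_METRIC_EQ, and TimeDelta::ms() becomes TimeDelta::Millis(). One plausible shape for the macro, shown purely as an assumption rather than the actual WebRTC definition, is a wrapper that only asserts when metric collection is compiled in, so these tests still pass in builds with histograms disabled:

// Hypothetical sketch; the real macro lives in WebRTC's metrics support
// headers and may differ.
#if RTC_METRICS_ENABLED
#define EXPECT_METRIC_EQ(expected, actual) EXPECT_EQ(expected, actual)
#else
// With metrics compiled out the counters never update, so the assertion
// degrades to evaluating its arguments and checking nothing.
#define EXPECT_METRIC_EQ(expected, actual) \
  do {                                     \
    (void)(expected);                      \
    (void)(actual);                        \
  } while (0)
#endif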
TEST_F(ReceiveStatisticsProxyTest, DecodedFpsIsNotReportedForTooFewSamples) {
@@ -116,7 +118,8 @@ TEST_F(ReceiveStatisticsProxyTest, DecodedFpsIsNotReportedForTooFewSamples) {
}
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.DecodedFramesPerSecond"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.DecodedFramesPerSecond"));
}
TEST_F(ReceiveStatisticsProxyTest,
@@ -186,9 +189,9 @@ TEST_F(ReceiveStatisticsProxyTest, ReportsContentType) {
TEST_F(ReceiveStatisticsProxyTest, ReportsMaxTotalInterFrameDelay) {
webrtc::VideoFrame frame = CreateFrame(kWidth, kHeight);
- const TimeDelta kInterFrameDelay1 = TimeDelta::ms(100);
- const TimeDelta kInterFrameDelay2 = TimeDelta::ms(200);
- const TimeDelta kInterFrameDelay3 = TimeDelta::ms(300);
+ const TimeDelta kInterFrameDelay1 = TimeDelta::Millis(100);
+ const TimeDelta kInterFrameDelay2 = TimeDelta::Millis(200);
+ const TimeDelta kInterFrameDelay3 = TimeDelta::Millis(300);
double expected_total_inter_frame_delay = 0;
double expected_total_squared_inter_frame_delay = 0;
EXPECT_EQ(expected_total_inter_frame_delay,
@@ -592,10 +595,11 @@ TEST_F(ReceiveStatisticsProxyTest, LifetimeHistogramIsUpdated) {
statistics_proxy_->OnCompleteFrame(true, 1000, VideoContentType::UNSPECIFIED);
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.ReceiveStreamLifetimeInSeconds",
- kTimeSec));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.ReceiveStreamLifetimeInSeconds",
+ kTimeSec));
}
TEST_F(ReceiveStatisticsProxyTest,
@@ -605,8 +609,8 @@ TEST_F(ReceiveStatisticsProxyTest,
// No frames received.
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(0,
- metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));
}
TEST_F(ReceiveStatisticsProxyTest, BadCallHistogramsAreUpdated) {
@@ -627,21 +631,23 @@ TEST_F(ReceiveStatisticsProxyTest, BadCallHistogramsAreUpdated) {
statistics_proxy_->OnRenderedFrame(frame);
}
statistics_proxy_->UpdateHistograms(absl::nullopt, counters, nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.BadCall.Any"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.BadCall.Any", 100));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.BadCall.Any"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents("WebRTC.Video.BadCall.Any", 100));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.BadCall.FrameRate"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.BadCall.FrameRate", 100));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.BadCall.FrameRate"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.BadCall.FrameRate", 100));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.BadCall.FrameRateVariance"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.BadCall.FrameRateVariance"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.BadCall.Qp"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.BadCall.Qp"));
}
TEST_F(ReceiveStatisticsProxyTest, PacketLossHistogramIsUpdated) {
statistics_proxy_->UpdateHistograms(10, StreamDataCounters(), nullptr);
- EXPECT_EQ(0,
- metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
// Restart
SetUp();
@@ -649,9 +655,9 @@ TEST_F(ReceiveStatisticsProxyTest, PacketLossHistogramIsUpdated) {
// Min run time has passed.
fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
statistics_proxy_->UpdateHistograms(10, StreamDataCounters(), nullptr);
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.ReceivedPacketsLostInPercent", 10));
}
@@ -691,9 +697,9 @@ TEST_F(ReceiveStatisticsProxyTest, AvSyncOffsetHistogramIsUpdated) {
kFreqKhz);
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AVSyncOffsetInMs"));
- EXPECT_EQ(1,
- metrics::NumEvents("WebRTC.Video.AVSyncOffsetInMs", kSyncOffsetMs));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.AVSyncOffsetInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AVSyncOffsetInMs", kSyncOffsetMs));
}
TEST_F(ReceiveStatisticsProxyTest, RtpToNtpFrequencyOffsetHistogramIsUpdated) {
@@ -717,8 +723,10 @@ TEST_F(ReceiveStatisticsProxyTest, RtpToNtpFrequencyOffsetHistogramIsUpdated) {
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
// Average reported: (2 + 4) / 2 = 3.
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RtpToNtpFreqOffsetInKhz"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.RtpToNtpFreqOffsetInKhz", 3));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.RtpToNtpFreqOffsetInKhz"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.RtpToNtpFreqOffsetInKhz", 3));
}
TEST_F(ReceiveStatisticsProxyTest, Vp8QpHistogramIsUpdated) {
@@ -729,8 +737,8 @@ TEST_F(ReceiveStatisticsProxyTest, Vp8QpHistogramIsUpdated) {
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Decoded.Vp8.Qp", kQp));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents("WebRTC.Video.Decoded.Vp8.Qp", kQp));
}
TEST_F(ReceiveStatisticsProxyTest, Vp8QpHistogramIsNotUpdatedForTooFewSamples) {
@@ -741,7 +749,7 @@ TEST_F(ReceiveStatisticsProxyTest, Vp8QpHistogramIsNotUpdatedForTooFewSamples) {
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
}
TEST_F(ReceiveStatisticsProxyTest, Vp8QpHistogramIsNotUpdatedIfNoQpValue) {
@@ -750,7 +758,7 @@ TEST_F(ReceiveStatisticsProxyTest, Vp8QpHistogramIsNotUpdatedIfNoQpValue) {
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.Decoded.Vp8.Qp"));
}
TEST_F(ReceiveStatisticsProxyTest,
@@ -768,7 +776,8 @@ TEST_F(ReceiveStatisticsProxyTest,
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
}
TEST_F(ReceiveStatisticsProxyTest,
@@ -786,9 +795,10 @@ TEST_F(ReceiveStatisticsProxyTest,
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
- EXPECT_EQ(1,
- metrics::NumEvents("WebRTC.Video.KeyFramesReceivedInPermille", 0));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.KeyFramesReceivedInPermille", 0));
}
TEST_F(ReceiveStatisticsProxyTest, KeyFrameHistogramIsUpdated) {
@@ -809,8 +819,9 @@ TEST_F(ReceiveStatisticsProxyTest, KeyFrameHistogramIsUpdated) {
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.KeyFramesReceivedInPermille", 500));
}
@@ -830,11 +841,12 @@ TEST_F(ReceiveStatisticsProxyTest, TimingHistogramsNotUpdatedForTooFewSamples) {
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.DecodeTimeInMs"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.DecodeTimeInMs"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
}
TEST_F(ReceiveStatisticsProxyTest, TimingHistogramsAreUpdated) {
@@ -853,19 +865,20 @@ TEST_F(ReceiveStatisticsProxyTest, TimingHistogramsAreUpdated) {
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
-
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.JitterBufferDelayInMs",
- kJitterBufferMs));
- EXPECT_EQ(1,
- metrics::NumEvents("WebRTC.Video.TargetDelayInMs", kTargetDelayMs));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.JitterBufferDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.TargetDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.CurrentDelayInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.OnewayDelayInMs"));
+
+ EXPECT_METRIC_EQ(1, metrics::NumEvents("WebRTC.Video.JitterBufferDelayInMs",
+ kJitterBufferMs));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.TargetDelayInMs", kTargetDelayMs));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.CurrentDelayInMs", kCurrentDelayMs));
- EXPECT_EQ(1,
- metrics::NumEvents("WebRTC.Video.OnewayDelayInMs", kTargetDelayMs));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.OnewayDelayInMs", kTargetDelayMs));
}
TEST_F(ReceiveStatisticsProxyTest, DoesNotReportStaleFramerates) {
@@ -910,10 +923,14 @@ TEST_F(ReceiveStatisticsProxyTest,
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.ReceivedWidthInPixels"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.ReceivedHeightInPixels"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.ReceivedWidthInPixels"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.ReceivedHeightInPixels"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
}
TEST_F(ReceiveStatisticsProxyTest, ReceivedFrameHistogramsAreUpdated) {
@@ -922,14 +939,18 @@ TEST_F(ReceiveStatisticsProxyTest, ReceivedFrameHistogramsAreUpdated) {
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.ReceivedWidthInPixels"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.ReceivedHeightInPixels"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
- EXPECT_EQ(1,
- metrics::NumEvents("WebRTC.Video.ReceivedWidthInPixels", kWidth));
- EXPECT_EQ(1,
- metrics::NumEvents("WebRTC.Video.ReceivedHeightInPixels", kHeight));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.ReceivedWidthInPixels"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.ReceivedHeightInPixels"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.RenderFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.RenderSqrtPixelsPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.ReceivedWidthInPixels", kWidth));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.ReceivedHeightInPixels", kHeight));
}
TEST_F(ReceiveStatisticsProxyTest, ZeroDelayReportedIfFrameNotDelayed) {
@@ -945,10 +966,12 @@ TEST_F(ReceiveStatisticsProxyTest, ZeroDelayReportedIfFrameNotDelayed) {
fake_clock_.AdvanceTimeMilliseconds((metrics::kMinRunTimeInSeconds * 1000));
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer", 0));
- EXPECT_EQ(0, metrics::NumSamples(
- "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer", 0));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
}
TEST_F(ReceiveStatisticsProxyTest,
@@ -966,9 +989,10 @@ TEST_F(ReceiveStatisticsProxyTest,
1);
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
- EXPECT_EQ(0, metrics::NumSamples(
- "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
}
TEST_F(ReceiveStatisticsProxyTest,
@@ -981,9 +1005,10 @@ TEST_F(ReceiveStatisticsProxyTest,
fake_clock_.AdvanceTimeMilliseconds((metrics::kMinRunTimeInSeconds * 1000));
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
- EXPECT_EQ(0, metrics::NumSamples(
- "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
}
TEST_F(ReceiveStatisticsProxyTest, DelayReportedIfFrameIsDelayed) {
@@ -999,12 +1024,15 @@ TEST_F(ReceiveStatisticsProxyTest, DelayReportedIfFrameIsDelayed) {
fake_clock_.AdvanceTimeMilliseconds((metrics::kMinRunTimeInSeconds * 1000));
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer", 100));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs", 1));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer", 100));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs",
+ 1));
}
TEST_F(ReceiveStatisticsProxyTest, AverageDelayOfDelayedFramesIsReported) {
@@ -1023,12 +1051,15 @@ TEST_F(ReceiveStatisticsProxyTest, AverageDelayOfDelayedFramesIsReported) {
fake_clock_.AdvanceTimeMilliseconds((metrics::kMinRunTimeInSeconds * 1000));
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer", 50));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs", 8));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.DelayedFramesToRenderer"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer", 50));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs",
+ 8));
}
TEST_F(ReceiveStatisticsProxyTest,
@@ -1043,9 +1074,12 @@ TEST_F(ReceiveStatisticsProxyTest,
statistics_proxy_->RtcpPacketTypesCounterUpdated(kRemoteSsrc, counter);
statistics_proxy_->UpdateHistograms(absl::nullopt, data_counters, nullptr);
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
}
TEST_F(ReceiveStatisticsProxyTest, RtcpHistogramsAreUpdated) {
@@ -1064,16 +1098,19 @@ TEST_F(ReceiveStatisticsProxyTest, RtcpHistogramsAreUpdated) {
statistics_proxy_->RtcpPacketTypesCounterUpdated(kRemoteSsrc, counter);
statistics_proxy_->UpdateHistograms(absl::nullopt, data_counters, nullptr);
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.FirPacketsSentPerMinute",
kFirPackets * 60 / metrics::kMinRunTimeInSeconds));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.PliPacketsSentPerMinute",
kPliPackets * 60 / metrics::kMinRunTimeInSeconds));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.NackPacketsSentPerMinute",
kNackPackets * 60 / metrics::kMinRunTimeInSeconds));
}
@@ -1162,17 +1199,17 @@ TEST_P(ReceiveStatisticsProxyTestWithContent, InterFrameDelaysAreReported) {
kInterFrameDelayMs * 2) /
kMinRequiredSamples;
if (videocontenttypehelpers::IsScreenshare(content_type_)) {
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
kExpectedInterFrame,
metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
kInterFrameDelayMs * 2,
metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
} else {
- EXPECT_EQ(kExpectedInterFrame,
- metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
- EXPECT_EQ(kInterFrameDelayMs * 2,
- metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(kExpectedInterFrame,
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(kInterFrameDelayMs * 2,
+ metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
}
}
@@ -1199,11 +1236,12 @@ TEST_P(ReceiveStatisticsProxyTestWithContent,
nullptr);
const int kExpectedInterFrame = kInterFrameDelayMs * 2;
if (videocontenttypehelpers::IsScreenshare(content_type_)) {
- EXPECT_EQ(kExpectedInterFrame,
- metrics::MinSample(
- "WebRTC.Video.Screenshare.InterframeDelay95PercentileInMs"));
+ EXPECT_METRIC_EQ(
+ kExpectedInterFrame,
+ metrics::MinSample(
+ "WebRTC.Video.Screenshare.InterframeDelay95PercentileInMs"));
} else {
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
kExpectedInterFrame,
metrics::MinSample("WebRTC.Video.InterframeDelay95PercentileInMs"));
}
@@ -1223,12 +1261,13 @@ TEST_P(ReceiveStatisticsProxyTestWithContent,
// means we're one frame short of having a valid data set.
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(
0, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"));
- EXPECT_EQ(0, metrics::NumSamples(
- "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
}
TEST_P(ReceiveStatisticsProxyTestWithContent, MaxInterFrameDelayOnlyWithPause) {
@@ -1254,23 +1293,25 @@ TEST_P(ReceiveStatisticsProxyTestWithContent, MaxInterFrameDelayOnlyWithPause) {
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
if (videocontenttypehelpers::IsScreenshare(content_type_)) {
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(
kInterFrameDelayMs,
metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
kInterFrameDelayMs,
metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
} else {
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
- EXPECT_EQ(kInterFrameDelayMs,
- metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
- EXPECT_EQ(kInterFrameDelayMs,
- metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(kInterFrameDelayMs,
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(kInterFrameDelayMs,
+ metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
}
}
@@ -1297,22 +1338,23 @@ TEST_P(ReceiveStatisticsProxyTestWithContent, FreezesAreReported) {
kInterFrameDelayMs * (kMinRequiredSamples - 1);
const int kExpectedNumberFreezesPerMinute = 60 * 1000 / kCallDurationMs;
if (videocontenttypehelpers::IsScreenshare(content_type_)) {
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
kFreezeDelayMs + kInterFrameDelayMs,
metrics::MinSample("WebRTC.Video.Screenshare.MeanFreezeDurationMs"));
- EXPECT_EQ(kExpectedTimeBetweenFreezes,
- metrics::MinSample(
- "WebRTC.Video.Screenshare.MeanTimeBetweenFreezesMs"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(kExpectedTimeBetweenFreezes,
+ metrics::MinSample(
+ "WebRTC.Video.Screenshare.MeanTimeBetweenFreezesMs"));
+ EXPECT_METRIC_EQ(
kExpectedNumberFreezesPerMinute,
metrics::MinSample("WebRTC.Video.Screenshare.NumberFreezesPerMinute"));
} else {
- EXPECT_EQ(kFreezeDelayMs + kInterFrameDelayMs,
- metrics::MinSample("WebRTC.Video.MeanFreezeDurationMs"));
- EXPECT_EQ(kExpectedTimeBetweenFreezes,
- metrics::MinSample("WebRTC.Video.MeanTimeBetweenFreezesMs"));
- EXPECT_EQ(kExpectedNumberFreezesPerMinute,
- metrics::MinSample("WebRTC.Video.NumberFreezesPerMinute"));
+ EXPECT_METRIC_EQ(kFreezeDelayMs + kInterFrameDelayMs,
+ metrics::MinSample("WebRTC.Video.MeanFreezeDurationMs"));
+ EXPECT_METRIC_EQ(
+ kExpectedTimeBetweenFreezes,
+ metrics::MinSample("WebRTC.Video.MeanTimeBetweenFreezesMs"));
+ EXPECT_METRIC_EQ(kExpectedNumberFreezesPerMinute,
+ metrics::MinSample("WebRTC.Video.NumberFreezesPerMinute"));
}
}
@@ -1354,11 +1396,12 @@ TEST_P(ReceiveStatisticsProxyTestWithContent, HarmonicFrameRateIsReported) {
const int kExpectedHarmonicFrameRateFps =
std::round(kCallDurationMs / (1000 * kSumSquaredFrameDurationSecs));
if (videocontenttypehelpers::IsScreenshare(content_type_)) {
- EXPECT_EQ(kExpectedHarmonicFrameRateFps,
- metrics::MinSample("WebRTC.Video.Screenshare.HarmonicFrameRate"));
+ EXPECT_METRIC_EQ(
+ kExpectedHarmonicFrameRateFps,
+ metrics::MinSample("WebRTC.Video.Screenshare.HarmonicFrameRate"));
} else {
- EXPECT_EQ(kExpectedHarmonicFrameRateFps,
- metrics::MinSample("WebRTC.Video.HarmonicFrameRate"));
+ EXPECT_METRIC_EQ(kExpectedHarmonicFrameRateFps,
+ metrics::MinSample("WebRTC.Video.HarmonicFrameRate"));
}
}
@@ -1389,15 +1432,17 @@ TEST_P(ReceiveStatisticsProxyTestWithContent, PausesAreIgnored) {
const int kExpectedTimeBetweenFreezes =
kInterFrameDelayMs * kMinRequiredSamples * 2;
if (videocontenttypehelpers::IsScreenshare(content_type_)) {
- EXPECT_EQ(-1, metrics::MinSample(
- "WebRTC.Video.Screenshare.MeanFreezeDurationMs"));
- EXPECT_EQ(kExpectedTimeBetweenFreezes,
- metrics::MinSample(
- "WebRTC.Video.Screenshare.MeanTimeBetweenFreezesMs"));
+ EXPECT_METRIC_EQ(-1, metrics::MinSample(
+ "WebRTC.Video.Screenshare.MeanFreezeDurationMs"));
+ EXPECT_METRIC_EQ(kExpectedTimeBetweenFreezes,
+ metrics::MinSample(
+ "WebRTC.Video.Screenshare.MeanTimeBetweenFreezesMs"));
} else {
- EXPECT_EQ(-1, metrics::MinSample("WebRTC.Video.MeanFreezeDurationMs"));
- EXPECT_EQ(kExpectedTimeBetweenFreezes,
- metrics::MinSample("WebRTC.Video.MeanTimeBetweenFreezesMs"));
+ EXPECT_METRIC_EQ(-1,
+ metrics::MinSample("WebRTC.Video.MeanFreezeDurationMs"));
+ EXPECT_METRIC_EQ(
+ kExpectedTimeBetweenFreezes,
+ metrics::MinSample("WebRTC.Video.MeanTimeBetweenFreezesMs"));
}
}
@@ -1421,10 +1466,11 @@ TEST_P(ReceiveStatisticsProxyTestWithContent, ManyPausesAtTheBeginning) {
nullptr);
// No freezes should be detected, as all long inter-frame delays were pauses.
if (videocontenttypehelpers::IsScreenshare(content_type_)) {
- EXPECT_EQ(-1, metrics::MinSample(
- "WebRTC.Video.Screenshare.MeanFreezeDurationMs"));
+ EXPECT_METRIC_EQ(-1, metrics::MinSample(
+ "WebRTC.Video.Screenshare.MeanFreezeDurationMs"));
} else {
- EXPECT_EQ(-1, metrics::MinSample("WebRTC.Video.MeanFreezeDurationMs"));
+ EXPECT_METRIC_EQ(-1,
+ metrics::MinSample("WebRTC.Video.MeanFreezeDurationMs"));
}
}
@@ -1454,12 +1500,12 @@ TEST_P(ReceiveStatisticsProxyTestWithContent, TimeInHdReported) {
nullptr);
const int kExpectedTimeInHdPercents = 33;
if (videocontenttypehelpers::IsScreenshare(content_type_)) {
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
kExpectedTimeInHdPercents,
metrics::MinSample("WebRTC.Video.Screenshare.TimeInHdPercentage"));
} else {
- EXPECT_EQ(kExpectedTimeInHdPercents,
- metrics::MinSample("WebRTC.Video.TimeInHdPercentage"));
+ EXPECT_METRIC_EQ(kExpectedTimeInHdPercents,
+ metrics::MinSample("WebRTC.Video.TimeInHdPercentage"));
}
}
@@ -1489,12 +1535,14 @@ TEST_P(ReceiveStatisticsProxyTestWithContent, TimeInBlockyVideoReported) {
nullptr);
const int kExpectedTimeInHdPercents = 66;
if (videocontenttypehelpers::IsScreenshare(content_type_)) {
- EXPECT_EQ(kExpectedTimeInHdPercents,
- metrics::MinSample(
- "WebRTC.Video.Screenshare.TimeInBlockyVideoPercentage"));
+ EXPECT_METRIC_EQ(
+ kExpectedTimeInHdPercents,
+ metrics::MinSample(
+ "WebRTC.Video.Screenshare.TimeInBlockyVideoPercentage"));
} else {
- EXPECT_EQ(kExpectedTimeInHdPercents,
- metrics::MinSample("WebRTC.Video.TimeInBlockyVideoPercentage"));
+ EXPECT_METRIC_EQ(
+ kExpectedTimeInHdPercents,
+ metrics::MinSample("WebRTC.Video.TimeInBlockyVideoPercentage"));
}
}
@@ -1523,14 +1571,14 @@ TEST_P(ReceiveStatisticsProxyTestWithContent, DownscalesReported) {
nullptr);
const int kExpectedDownscales = 30; // 2 per 4 seconds = 30 per minute.
if (videocontenttypehelpers::IsScreenshare(content_type_)) {
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
kExpectedDownscales,
metrics::MinSample(
"WebRTC.Video.Screenshare.NumberResolutionDownswitchesPerMinute"));
} else {
- EXPECT_EQ(kExpectedDownscales,
- metrics::MinSample(
- "WebRTC.Video.NumberResolutionDownswitchesPerMinute"));
+ EXPECT_METRIC_EQ(kExpectedDownscales,
+ metrics::MinSample(
+ "WebRTC.Video.NumberResolutionDownswitchesPerMinute"));
}
}
@@ -1547,7 +1595,8 @@ TEST_P(ReceiveStatisticsProxyTestWithContent, DecodeTimeReported) {
}
statistics_proxy_->UpdateHistograms(absl::nullopt, StreamDataCounters(),
nullptr);
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.DecodeTimeInMs", kDecodeMs));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.DecodeTimeInMs", kDecodeMs));
}
TEST_P(ReceiveStatisticsProxyTestWithContent,
@@ -1574,62 +1623,71 @@ TEST_P(ReceiveStatisticsProxyTestWithContent,
nullptr);
if (videocontenttypehelpers::IsScreenshare(content_type)) {
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.InterframeDelayInMs.S0"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.InterframeDelayMaxInMs.S0"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.InterframeDelayInMs.S1"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.InterframeDelayMaxInMs.S1"));
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"
- ".ExperimentGroup0"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayInMs.S0"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs.S0"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayInMs.S1"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.InterframeDelayMaxInMs.S1"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayInMs"
+ ".ExperimentGroup0"));
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"
".ExperimentGroup0"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
kInterFrameDelayMs1,
metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs.S0"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
kInterFrameDelayMs2,
metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs.S1"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
(kInterFrameDelayMs1 + kInterFrameDelayMs2) / 2,
metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayInMs"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
kInterFrameDelayMs2,
metrics::MinSample("WebRTC.Video.Screenshare.InterframeDelayMaxInMs"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
(kInterFrameDelayMs1 + kInterFrameDelayMs2) / 2,
metrics::MinSample(
"WebRTC.Video.Screenshare.InterframeDelayInMs.ExperimentGroup0"));
} else {
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs.S0"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs.S0"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs.S1"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs.S1"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"
- ".ExperimentGroup0"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"
- ".ExperimentGroup0"));
- EXPECT_EQ(kInterFrameDelayMs1,
- metrics::MinSample("WebRTC.Video.InterframeDelayInMs.S0"));
- EXPECT_EQ(kInterFrameDelayMs2,
- metrics::MinSample("WebRTC.Video.InterframeDelayInMs.S1"));
- EXPECT_EQ((kInterFrameDelayMs1 + kInterFrameDelayMs2) / 2,
- metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
- EXPECT_EQ(kInterFrameDelayMs2,
- metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
- EXPECT_EQ((kInterFrameDelayMs1 + kInterFrameDelayMs2) / 2,
- metrics::MinSample(
- "WebRTC.Video.InterframeDelayInMs.ExperimentGroup0"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs.S0"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs.S0"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs.S1"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs.S1"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InterframeDelayInMs"
+ ".ExperimentGroup0"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.InterframeDelayMaxInMs"
+ ".ExperimentGroup0"));
+ EXPECT_METRIC_EQ(kInterFrameDelayMs1,
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs.S0"));
+ EXPECT_METRIC_EQ(kInterFrameDelayMs2,
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs.S1"));
+ EXPECT_METRIC_EQ((kInterFrameDelayMs1 + kInterFrameDelayMs2) / 2,
+ metrics::MinSample("WebRTC.Video.InterframeDelayInMs"));
+ EXPECT_METRIC_EQ(kInterFrameDelayMs2,
+ metrics::MinSample("WebRTC.Video.InterframeDelayMaxInMs"));
+ EXPECT_METRIC_EQ((kInterFrameDelayMs1 + kInterFrameDelayMs2) / 2,
+ metrics::MinSample(
+ "WebRTC.Video.InterframeDelayInMs.ExperimentGroup0"));
}
}
@@ -1684,10 +1742,10 @@ TEST_P(ReceiveStatisticsProxyTestWithDecodeTimeHistograms,
fake_clock_.AdvanceTimeMilliseconds(kFrameDurationMs);
}
- EXPECT_EQ(expected_number_of_samples_,
- metrics::NumSamples(uma_histogram_name_));
- EXPECT_EQ(expected_number_of_samples_,
- metrics::NumEvents(uma_histogram_name_, kDecodeTimeMs));
+ EXPECT_METRIC_EQ(expected_number_of_samples_,
+ metrics::NumSamples(uma_histogram_name_));
+ EXPECT_METRIC_EQ(expected_number_of_samples_,
+ metrics::NumEvents(uma_histogram_name_, kDecodeTimeMs));
}
const auto kVp94kHw = std::make_tuple(/*killswitch=*/false,
diff --git a/chromium/third_party/webrtc/video/rtp_streams_synchronizer.cc b/chromium/third_party/webrtc/video/rtp_streams_synchronizer.cc
index 156ebbb41fb..3dedc43eaa9 100644
--- a/chromium/third_party/webrtc/video/rtp_streams_synchronizer.cc
+++ b/chromium/third_party/webrtc/video/rtp_streams_synchronizer.cc
@@ -13,12 +13,16 @@
#include "absl/types/optional.h"
#include "call/syncable.h"
#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/rtp_to_ntp_estimator.h"
namespace webrtc {
namespace {
+// Time interval for logging stats.
+constexpr int64_t kStatsLogIntervalMs = 10000;
+
bool UpdateMeasurements(StreamSynchronization::Measurements* stream,
const Syncable::Info& info) {
RTC_DCHECK(stream);
@@ -38,7 +42,8 @@ RtpStreamsSynchronizer::RtpStreamsSynchronizer(Syncable* syncable_video)
: syncable_video_(syncable_video),
syncable_audio_(nullptr),
sync_(),
- last_sync_time_(rtc::TimeNanos()) {
+ last_sync_time_(rtc::TimeNanos()),
+ last_stats_log_ms_(rtc::TimeMillis()) {
RTC_DCHECK(syncable_video);
process_thread_checker_.Detach();
}
@@ -77,6 +82,13 @@ void RtpStreamsSynchronizer::Process() {
}
RTC_DCHECK(sync_.get());
+ bool log_stats = false;
+ const int64_t now_ms = rtc::TimeMillis();
+ if (now_ms - last_stats_log_ms_ > kStatsLogIntervalMs) {
+ last_stats_log_ms_ = now_ms;
+ log_stats = true;
+ }
+
absl::optional<Syncable::Info> audio_info = syncable_audio_->GetInfo();
if (!audio_info || !UpdateMeasurements(&audio_measurement_, *audio_info)) {
return;
@@ -100,11 +112,21 @@ void RtpStreamsSynchronizer::Process() {
return;
}
+ if (log_stats) {
+ RTC_LOG(LS_INFO) << "Sync info stats: " << now_ms
+ << ", {ssrc: " << sync_->audio_stream_id() << ", "
+ << "cur_delay_ms: " << audio_info->current_delay_ms
+ << "} {ssrc: " << sync_->video_stream_id() << ", "
+ << "cur_delay_ms: " << video_info->current_delay_ms
+ << "} {relative_delay_ms: " << relative_delay_ms << "} ";
+ }
+
TRACE_COUNTER1("webrtc", "SyncCurrentVideoDelay",
video_info->current_delay_ms);
TRACE_COUNTER1("webrtc", "SyncCurrentAudioDelay",
audio_info->current_delay_ms);
TRACE_COUNTER1("webrtc", "SyncRelativeDelay", relative_delay_ms);
+
int target_audio_delay_ms = 0;
int target_video_delay_ms = video_info->current_delay_ms;
// Calculate the necessary extra audio delay and desired total video
@@ -114,6 +136,14 @@ void RtpStreamsSynchronizer::Process() {
return;
}
+ if (log_stats) {
+ RTC_LOG(LS_INFO) << "Sync delay stats: " << now_ms
+ << ", {ssrc: " << sync_->audio_stream_id() << ", "
+ << "target_delay_ms: " << target_audio_delay_ms
+ << "} {ssrc: " << sync_->video_stream_id() << ", "
+ << "target_delay_ms: " << target_video_delay_ms << "} ";
+ }
+
syncable_audio_->SetMinimumPlayoutDelay(target_audio_delay_ms);
syncable_video_->SetMinimumPlayoutDelay(target_video_delay_ms);
}
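The additions above are a textbook rate-limit pattern: store the last log timestamp as a member, and on each Process() pass emit at most one stats line per kStatsLogIntervalMs. Extracted into a standalone sketch (behavior taken from the diff; the logged payload abbreviated):

#include <cstdint>

#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"

constexpr int64_t kStatsLogIntervalMs = 10000;  // At most one line per 10 s.

void MaybeLogSyncStats(int64_t* last_stats_log_ms) {
  const int64_t now_ms = rtc::TimeMillis();
  if (now_ms - *last_stats_log_ms > kStatsLogIntervalMs) {
    *last_stats_log_ms = now_ms;  // Reset the window before logging.
    RTC_LOG(LS_INFO) << "Sync info stats: " << now_ms << ", ...";
  }
}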
diff --git a/chromium/third_party/webrtc/video/rtp_streams_synchronizer.h b/chromium/third_party/webrtc/video/rtp_streams_synchronizer.h
index b6e5e615753..60e2c8ee32e 100644
--- a/chromium/third_party/webrtc/video/rtp_streams_synchronizer.h
+++ b/chromium/third_party/webrtc/video/rtp_streams_synchronizer.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-// RtpStreamsSynchronizer is responsible for synchronization audio and video for
-// a given voice engine channel and video receive stream.
+// RtpStreamsSynchronizer is responsible for synchronizing audio and video for
+// a given audio receive stream and video receive stream.
#ifndef VIDEO_RTP_STREAMS_SYNCHRONIZER_H_
#define VIDEO_RTP_STREAMS_SYNCHRONIZER_H_
@@ -58,6 +58,7 @@ class RtpStreamsSynchronizer : public Module {
rtc::ThreadChecker process_thread_checker_;
int64_t last_sync_time_ RTC_GUARDED_BY(&process_thread_checker_);
+ int64_t last_stats_log_ms_ RTC_GUARDED_BY(&process_thread_checker_);
};
} // namespace webrtc
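The new last_stats_log_ms_ member gets the same RTC_GUARDED_BY(&process_thread_checker_) annotation as last_sync_time_, so clang's thread-safety analysis can flag any access that is not proven to run on the module's process thread. The general shape of the idiom, as a generic sketch rather than this exact class (the rtc_base headers providing ThreadChecker, RTC_GUARDED_BY and RTC_DCHECK_RUN_ON are elided):

class ThreadBoundCounter {
 public:
  void Increment() {
    RTC_DCHECK_RUN_ON(&thread_checker_);  // Proves we are on the right thread.
    ++value_;
  }

 private:
  rtc::ThreadChecker thread_checker_;
  int64_t value_ RTC_GUARDED_BY(&thread_checker_) = 0;
};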
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver.cc b/chromium/third_party/webrtc/video/rtp_video_stream_receiver.cc
index 33730249868..d67d7fc0515 100644
--- a/chromium/third_party/webrtc/video/rtp_video_stream_receiver.cc
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver.cc
@@ -12,11 +12,14 @@
#include <algorithm>
#include <limits>
+#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
#include "absl/memory/memory.h"
+#include "absl/types/optional.h"
#include "media/base/media_constants.h"
#include "modules/pacing/packet_router.h"
#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
@@ -24,11 +27,16 @@
#include "modules/rtp_rtcp/include/rtp_cvo.h"
#include "modules/rtp_rtcp/include/rtp_rtcp.h"
#include "modules/rtp_rtcp/include/ulpfec_receiver.h"
+#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h"
#include "modules/utility/include/process_thread.h"
#include "modules/video_coding/frame_object.h"
#include "modules/video_coding/h264_sprop_parameter_sets.h"
@@ -39,9 +47,9 @@
#include "rtc_base/location.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
-#include "rtc_base/system/fallthrough.h"
#include "system_wrappers/include/field_trial.h"
#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/ntp_time.h"
#include "video/receive_statistics_proxy.h"
namespace webrtc {
@@ -76,7 +84,7 @@ std::unique_ptr<RtpRtcp> CreateRtpRtcpModule(
ReceiveStatistics* receive_statistics,
Transport* outgoing_transport,
RtcpRttStats* rtt_stats,
- RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
+ ReceiveStatisticsProxy* rtcp_statistics_observer,
uint32_t local_ssrc) {
RtpRtcp::Configuration configuration;
configuration.clock = clock;
@@ -85,8 +93,8 @@ std::unique_ptr<RtpRtcp> CreateRtpRtcpModule(
configuration.receive_statistics = receive_statistics;
configuration.outgoing_transport = outgoing_transport;
configuration.rtt_stats = rtt_stats;
- configuration.rtcp_packet_type_counter_observer =
- rtcp_packet_type_counter_observer;
+ configuration.rtcp_packet_type_counter_observer = rtcp_statistics_observer;
+ configuration.rtcp_cname_callback = rtcp_statistics_observer;
configuration.local_media_ssrc = local_ssrc;
std::unique_ptr<RtpRtcp> rtp_rtcp = RtpRtcp::Create(configuration);
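Narrowing the parameter type from RtcpPacketTypeCounterObserver to ReceiveStatisticsProxy lets one object populate two configuration fields at construction, which is what makes the separate RegisterRtcpCnameCallback() call (deleted in a later hunk) redundant. The enabling property is simply multiple-interface inheritance; a minimal analogue with stand-in interfaces whose methods only approximate the real WebRTC signatures:

// Stand-in interfaces; the real ones live in modules/rtp_rtcp.
class PacketTypeCounterObserverLike {
 public:
  virtual ~PacketTypeCounterObserverLike() = default;
  virtual void OnPacketCounts() = 0;
};

class CnameCallbackLike {
 public:
  virtual ~CnameCallbackLike() = default;
  virtual void OnCname() = 0;
};

// One stats object implements both roles, so a single pointer can be
// assigned to both configuration fields.
class StatsProxyLike : public PacketTypeCounterObserverLike,
                       public CnameCallbackLike {
 public:
  void OnPacketCounts() override {}
  void OnCname() override {}
};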
@@ -188,7 +196,8 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver(
NackSender* nack_sender,
KeyFrameRequestSender* keyframe_request_sender,
video_coding::OnCompleteFrameCallback* complete_frame_callback,
- rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor)
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer)
: clock_(clock),
config_(*config),
packet_router_(packet_router),
@@ -247,9 +256,6 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver(
if (config_.rtp.rtcp_xr.receiver_reference_time_report)
rtp_rtcp_->SetRtcpXrRrtrStatus(true);
- // Stats callback for CNAME changes.
- rtp_rtcp_->RegisterRtcpCnameCallback(receive_stats_proxy);
-
process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE);
if (config_.rtp.lntf.enabled) {
@@ -275,6 +281,14 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver(
buffered_frame_decryptor_->SetFrameDecryptor(std::move(frame_decryptor));
}
}
+
+ if (frame_transformer) {
+ frame_transformer_delegate_ = new rtc::RefCountedObject<
+ RtpVideoStreamReceiverFrameTransformerDelegate>(
+ this, std::move(frame_transformer), rtc::Thread::Current(),
+ config_.rtp.remote_ssrc);
+ frame_transformer_delegate_->Init();
+ }
}
RtpVideoStreamReceiver::~RtpVideoStreamReceiver() {
@@ -289,17 +303,18 @@ RtpVideoStreamReceiver::~RtpVideoStreamReceiver() {
if (packet_router_)
packet_router_->RemoveReceiveRtpModule(rtp_rtcp_.get());
UpdateHistograms();
+ if (frame_transformer_delegate_)
+ frame_transformer_delegate_->Reset();
}
void RtpVideoStreamReceiver::AddReceiveCodec(
const VideoCodec& video_codec,
const std::map<std::string, std::string>& codec_params,
bool raw_payload) {
- absl::optional<VideoCodecType> video_type;
- if (!raw_payload) {
- video_type = video_codec.codecType;
- }
- payload_type_map_.emplace(video_codec.plType, video_type);
+ payload_type_map_.emplace(
+ video_codec.plType,
+ raw_payload ? std::make_unique<VideoRtpDepacketizerRaw>()
+ : CreateVideoRtpDepacketizer(video_codec.codecType));
pt_codec_params_.emplace(video_codec.plType, codec_params);
}
@@ -323,28 +338,146 @@ absl::optional<Syncable::Info> RtpVideoStreamReceiver::GetSyncInfo() const {
return info;
}
+RtpVideoStreamReceiver::ParseGenericDependenciesResult
+RtpVideoStreamReceiver::ParseGenericDependenciesExtension(
+ const RtpPacketReceived& rtp_packet,
+ RTPVideoHeader* video_header) {
+ if (rtp_packet.HasExtension<RtpDependencyDescriptorExtension>()) {
+ webrtc::DependencyDescriptor dependency_descriptor;
+ if (!rtp_packet.GetExtension<RtpDependencyDescriptorExtension>(
+ video_structure_.get(), &dependency_descriptor)) {
+ // Descriptor is there, but failed to parse. Either it is invalid,
+ // or too old packet (after relevant video_structure_ changed),
+ // or too new packet (before relevant video_structure_ arrived).
+ // Drop such packet to be on the safe side.
+ // TODO(bugs.webrtc.org/10342): Stash too new packet.
+ RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
+ << " Failed to parse dependency descriptor.";
+ return kDropPacket;
+ }
+ if (dependency_descriptor.attached_structure != nullptr &&
+ !dependency_descriptor.first_packet_in_frame) {
+      RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
+                          << " Invalid dependency descriptor: structure "
+                             "attached to a non-first packet of a frame.";
+ return kDropPacket;
+ }
+ video_header->is_first_packet_in_frame =
+ dependency_descriptor.first_packet_in_frame;
+ video_header->is_last_packet_in_frame =
+ dependency_descriptor.last_packet_in_frame;
+
+ int64_t frame_id =
+ frame_id_unwrapper_.Unwrap(dependency_descriptor.frame_number);
+ auto& generic_descriptor_info = video_header->generic.emplace();
+ generic_descriptor_info.frame_id = frame_id;
+ generic_descriptor_info.spatial_index =
+ dependency_descriptor.frame_dependencies.spatial_id;
+ generic_descriptor_info.temporal_index =
+ dependency_descriptor.frame_dependencies.temporal_id;
+ for (int fdiff : dependency_descriptor.frame_dependencies.frame_diffs) {
+ generic_descriptor_info.dependencies.push_back(frame_id - fdiff);
+ }
+ generic_descriptor_info.decode_target_indications =
+ dependency_descriptor.frame_dependencies.decode_target_indications;
+ generic_descriptor_info.discardable =
+ absl::c_linear_search(generic_descriptor_info.decode_target_indications,
+ DecodeTargetIndication::kDiscardable);
+ if (dependency_descriptor.resolution) {
+ video_header->width = dependency_descriptor.resolution->Width();
+ video_header->height = dependency_descriptor.resolution->Height();
+ }
+
+    // The FrameDependencyStructure is sent in the dependency descriptor of
+    // the first packet of a key frame and is required to parse the
+    // descriptor in all following packets until the next key frame.
+ // Save it if there is a (potentially) new structure.
+ if (dependency_descriptor.attached_structure) {
+ RTC_DCHECK(dependency_descriptor.first_packet_in_frame);
+ if (video_structure_frame_id_ > frame_id) {
+ RTC_LOG(LS_WARNING)
+ << "Arrived key frame with id " << frame_id << " and structure id "
+ << dependency_descriptor.attached_structure->structure_id
+ << " is older than the latest received key frame with id "
+ << *video_structure_frame_id_ << " and structure id "
+ << video_structure_->structure_id;
+ return kDropPacket;
+ }
+ video_structure_ = std::move(dependency_descriptor.attached_structure);
+ video_structure_frame_id_ = frame_id;
+ video_header->frame_type = VideoFrameType::kVideoFrameKey;
+ } else {
+ video_header->frame_type = VideoFrameType::kVideoFrameDelta;
+ }
+ return kHasGenericDescriptor;
+ }
+
+ if (rtp_packet.HasExtension<RtpGenericFrameDescriptorExtension00>() &&
+ rtp_packet.HasExtension<RtpGenericFrameDescriptorExtension01>()) {
+ RTC_LOG(LS_WARNING) << "RTP packet had two different GFD versions.";
+ return kDropPacket;
+ }
+
+ RtpGenericFrameDescriptor generic_frame_descriptor;
+ bool has_generic_descriptor =
+ rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension01>(
+ &generic_frame_descriptor) ||
+ rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension00>(
+ &generic_frame_descriptor);
+ if (!has_generic_descriptor) {
+ return kNoGenericDescriptor;
+ }
+
+ video_header->is_first_packet_in_frame =
+ generic_frame_descriptor.FirstPacketInSubFrame();
+ video_header->is_last_packet_in_frame =
+ generic_frame_descriptor.LastPacketInSubFrame();
+
+ if (generic_frame_descriptor.FirstPacketInSubFrame()) {
+ video_header->frame_type =
+ generic_frame_descriptor.FrameDependenciesDiffs().empty()
+ ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+
+ auto& generic_descriptor_info = video_header->generic.emplace();
+ int64_t frame_id =
+ frame_id_unwrapper_.Unwrap(generic_frame_descriptor.FrameId());
+ generic_descriptor_info.frame_id = frame_id;
+ generic_descriptor_info.spatial_index =
+ generic_frame_descriptor.SpatialLayer();
+ generic_descriptor_info.temporal_index =
+ generic_frame_descriptor.TemporalLayer();
+ generic_descriptor_info.discardable =
+ generic_frame_descriptor.Discardable().value_or(false);
+ for (uint16_t fdiff : generic_frame_descriptor.FrameDependenciesDiffs()) {
+ generic_descriptor_info.dependencies.push_back(frame_id - fdiff);
+ }
+ }
+ video_header->width = generic_frame_descriptor.Width();
+ video_header->height = generic_frame_descriptor.Height();
+ return kHasGenericDescriptor;
+}
+
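Both descriptor formats handled above encode dependencies as small diffs relative to the current frame number, which is first unwrapped into a monotonically increasing 64-bit id (frame_id_unwrapper_.Unwrap()). The arithmetic, isolated into a self-contained helper with a hypothetical name used only for illustration:

#include <cstdint>
#include <vector>

// Turn per-frame dependency diffs into absolute frame ids, mirroring the
// loops in both branches of ParseGenericDependenciesExtension().
std::vector<int64_t> ResolveDependencies(int64_t unwrapped_frame_id,
                                         const std::vector<int>& frame_diffs) {
  std::vector<int64_t> dependencies;
  dependencies.reserve(frame_diffs.size());
  for (int fdiff : frame_diffs) {
    // Each diff counts frames backwards from the current frame.
    dependencies.push_back(unwrapped_frame_id - fdiff);
  }
  return dependencies;
}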
void RtpVideoStreamReceiver::OnReceivedPayloadData(
- rtc::ArrayView<const uint8_t> codec_payload,
+ rtc::CopyOnWriteBuffer codec_payload,
const RtpPacketReceived& rtp_packet,
const RTPVideoHeader& video) {
RTC_DCHECK_RUN_ON(&worker_task_checker_);
- video_coding::PacketBuffer::Packet packet(
+ auto packet = std::make_unique<video_coding::PacketBuffer::Packet>(
rtp_packet, video, ntp_estimator_.Estimate(rtp_packet.Timestamp()),
clock_->TimeInMilliseconds());
// Try to extrapolate absolute capture time if it is missing.
- // TODO(bugs.webrtc.org/10739): Add support for estimated capture clock
- // offset.
- packet.packet_info.set_absolute_capture_time(
+ packet->packet_info.set_absolute_capture_time(
absolute_capture_time_receiver_.OnReceivePacket(
- AbsoluteCaptureTimeReceiver::GetSource(packet.packet_info.ssrc(),
- packet.packet_info.csrcs()),
- packet.packet_info.rtp_timestamp(),
+ AbsoluteCaptureTimeReceiver::GetSource(packet->packet_info.ssrc(),
+ packet->packet_info.csrcs()),
+ packet->packet_info.rtp_timestamp(),
              // Assume the frequency is the same for all video frames.
kVideoPayloadTypeFrequency,
- packet.packet_info.absolute_capture_time()));
+ packet->packet_info.absolute_capture_time()));
- RTPVideoHeader& video_header = packet.video_header;
+ RTPVideoHeader& video_header = packet->video_header;
video_header.rotation = kVideoRotation_0;
video_header.content_type = VideoContentType::UNSPECIFIED;
video_header.video_timing.flags = VideoSendTiming::kInvalid;
@@ -364,40 +497,10 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
rtp_packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
rtp_packet.GetExtension<FrameMarkingExtension>(&video_header.frame_marking);
- RtpGenericFrameDescriptor& generic_descriptor =
- packet.generic_descriptor.emplace();
- if (rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension01>(
- &generic_descriptor)) {
- if (rtp_packet.HasExtension<RtpGenericFrameDescriptorExtension00>()) {
- RTC_LOG(LS_WARNING) << "RTP packet had two different GFD versions.";
- return;
- }
- generic_descriptor.SetByteRepresentation(
- rtp_packet.GetRawExtension<RtpGenericFrameDescriptorExtension01>());
- } else if ((rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension00>(
- &generic_descriptor))) {
- generic_descriptor.SetByteRepresentation(
- rtp_packet.GetRawExtension<RtpGenericFrameDescriptorExtension00>());
- } else {
- packet.generic_descriptor = absl::nullopt;
- }
- if (packet.generic_descriptor != absl::nullopt) {
- video_header.is_first_packet_in_frame =
- packet.generic_descriptor->FirstPacketInSubFrame();
- video_header.is_last_packet_in_frame =
- rtp_packet.Marker() ||
- packet.generic_descriptor->LastPacketInSubFrame();
-
- if (packet.generic_descriptor->FirstPacketInSubFrame()) {
- video_header.frame_type =
- packet.generic_descriptor->FrameDependenciesDiffs().empty()
- ? VideoFrameType::kVideoFrameKey
- : VideoFrameType::kVideoFrameDelta;
- }
-
- video_header.width = packet.generic_descriptor->Width();
- video_header.height = packet.generic_descriptor->Height();
- }
+ ParseGenericDependenciesResult generic_descriptor_state =
+ ParseGenericDependenciesExtension(rtp_packet, &video_header);
+ if (generic_descriptor_state == kDropPacket)
+ return;
// Color space should only be transmitted in the last packet of a frame,
// therefore, neglect it otherwise so that last_color_space_ is not reset by
@@ -420,12 +523,23 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
// TODO(bugs.webrtc.org/10336): Implement support for reordering.
RTC_LOG(LS_INFO)
<< "LossNotificationController does not support reordering.";
- } else if (!packet.generic_descriptor) {
+ } else if (generic_descriptor_state == kNoGenericDescriptor) {
RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
"frame descriptor, but it is missing.";
} else {
- loss_notification_controller_->OnReceivedPacket(
- rtp_packet.SequenceNumber(), *packet.generic_descriptor);
+ if (video_header.is_first_packet_in_frame) {
+ RTC_DCHECK(video_header.generic);
+ LossNotificationController::FrameDetails frame;
+ frame.is_keyframe =
+ video_header.frame_type == VideoFrameType::kVideoFrameKey;
+ frame.frame_id = video_header.generic->frame_id;
+ frame.frame_dependencies = video_header.generic->dependencies;
+ loss_notification_controller_->OnReceivedPacket(
+ rtp_packet.SequenceNumber(), &frame);
+ } else {
+ loss_notification_controller_->OnReceivedPacket(
+ rtp_packet.SequenceNumber(), nullptr);
+ }
}
}
@@ -434,49 +548,51 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
video_header.is_first_packet_in_frame &&
video_header.frame_type == VideoFrameType::kVideoFrameKey;
- packet.times_nacked = nack_module_->OnReceivedPacket(
+ packet->times_nacked = nack_module_->OnReceivedPacket(
rtp_packet.SequenceNumber(), is_keyframe, rtp_packet.recovered());
} else {
- packet.times_nacked = -1;
+ packet->times_nacked = -1;
}
- if (codec_payload.empty()) {
- NotifyReceiverOfEmptyPacket(packet.seq_num);
+ if (codec_payload.size() == 0) {
+ NotifyReceiverOfEmptyPacket(packet->seq_num);
rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
return;
}
- if (packet.codec() == kVideoCodecH264) {
+ if (packet->codec() == kVideoCodecH264) {
    // Only when we start to receive packets do we know what payload type
    // will be used. Once the payload type is known, insert the correct
    // sps/pps into the tracker.
- if (packet.payload_type != last_payload_type_) {
- last_payload_type_ = packet.payload_type;
- InsertSpsPpsIntoTracker(packet.payload_type);
+ if (packet->payload_type != last_payload_type_) {
+ last_payload_type_ = packet->payload_type;
+ InsertSpsPpsIntoTracker(packet->payload_type);
}
video_coding::H264SpsPpsTracker::FixedBitstream fixed =
- tracker_.CopyAndFixBitstream(codec_payload, &packet.video_header);
+ tracker_.CopyAndFixBitstream(
+ rtc::MakeArrayView(codec_payload.cdata(), codec_payload.size()),
+ &packet->video_header);
switch (fixed.action) {
case video_coding::H264SpsPpsTracker::kRequestKeyframe:
rtcp_feedback_buffer_.RequestKeyFrame();
rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
- RTC_FALLTHROUGH();
+ ABSL_FALLTHROUGH_INTENDED;
case video_coding::H264SpsPpsTracker::kDrop:
return;
case video_coding::H264SpsPpsTracker::kInsert:
- packet.video_payload = std::move(fixed.bitstream);
+ packet->video_payload = std::move(fixed.bitstream);
break;
}
} else {
- packet.video_payload.SetData(codec_payload.data(), codec_payload.size());
+ packet->video_payload = std::move(codec_payload);
}
rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
- frame_counter_.Add(packet.timestamp);
- OnInsertedPacket(packet_buffer_.InsertPacket(&packet));
+ frame_counter_.Add(packet->timestamp);
+ OnInsertedPacket(packet_buffer_.InsertPacket(std::move(packet)));
}
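// Sketch only, not part of the commit: why OnReceivedPayloadData() now takes
// rtc::CopyOnWriteBuffer instead of rtc::ArrayView. The buffer reference-
// counts its storage, so `packet->video_payload = std::move(codec_payload);`
// above hands the payload over in O(1), where the old SetData() path copied
// every byte.
#include <cstdint>
#include <utility>

#include "api/array_view.h"
#include "rtc_base/copy_on_write_buffer.h"

void PayloadHandoffSketch() {
  rtc::CopyOnWriteBuffer payload({1, 2, 3, 4});
  // Read-only view for in-place parsing, as CopyAndFixBitstream() receives:
  rtc::ArrayView<const uint8_t> view(payload.cdata(), payload.size());
  (void)view;
  // Zero-copy handoff into longer-lived storage:
  rtc::CopyOnWriteBuffer stored = std::move(payload);
  (void)stored;
}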
void RtpVideoStreamReceiver::OnRecoveredPacket(const uint8_t* rtp_packet,
@@ -592,9 +708,69 @@ bool RtpVideoStreamReceiver::IsDecryptable() const {
void RtpVideoStreamReceiver::OnInsertedPacket(
video_coding::PacketBuffer::InsertResult result) {
- for (std::unique_ptr<video_coding::RtpFrameObject>& frame : result.frames) {
- OnAssembledFrame(std::move(frame));
+ video_coding::PacketBuffer::Packet* first_packet = nullptr;
+ int max_nack_count;
+ int64_t min_recv_time;
+ int64_t max_recv_time;
+ std::vector<rtc::ArrayView<const uint8_t>> payloads;
+ RtpPacketInfos::vector_type packet_infos;
+
+ bool frame_boundary = true;
+ for (auto& packet : result.packets) {
+ // PacketBuffer promises frame boundaries are correctly set on each
+ // packet. Document that assumption with the DCHECKs.
+ RTC_DCHECK_EQ(frame_boundary, packet->is_first_packet_in_frame());
+ if (packet->is_first_packet_in_frame()) {
+ first_packet = packet.get();
+ max_nack_count = packet->times_nacked;
+ min_recv_time = packet->packet_info.receive_time_ms();
+ max_recv_time = packet->packet_info.receive_time_ms();
+ payloads.clear();
+ packet_infos.clear();
+ } else {
+ max_nack_count = std::max(max_nack_count, packet->times_nacked);
+ min_recv_time =
+ std::min(min_recv_time, packet->packet_info.receive_time_ms());
+ max_recv_time =
+ std::max(max_recv_time, packet->packet_info.receive_time_ms());
+ }
+ payloads.emplace_back(packet->video_payload);
+ packet_infos.push_back(packet->packet_info);
+
+ frame_boundary = packet->is_last_packet_in_frame();
+ if (packet->is_last_packet_in_frame()) {
+ auto depacketizer_it = payload_type_map_.find(first_packet->payload_type);
+ RTC_CHECK(depacketizer_it != payload_type_map_.end());
+
+ rtc::scoped_refptr<EncodedImageBuffer> bitstream =
+ depacketizer_it->second->AssembleFrame(payloads);
+ if (!bitstream) {
+ // Failed to assemble a frame. Discard and continue.
+ continue;
+ }
+
+ const video_coding::PacketBuffer::Packet& last_packet = *packet;
+ OnAssembledFrame(std::make_unique<video_coding::RtpFrameObject>(
+ first_packet->seq_num, //
+ last_packet.seq_num, //
+ last_packet.marker_bit, //
+ max_nack_count, //
+ min_recv_time, //
+ max_recv_time, //
+ first_packet->timestamp, //
+ first_packet->ntp_time_ms, //
+ last_packet.video_header.video_timing, //
+ first_packet->payload_type, //
+ first_packet->codec(), //
+ last_packet.video_header.rotation, //
+ last_packet.video_header.content_type, //
+ first_packet->video_header, //
+ last_packet.video_header.color_space, //
+ RtpPacketInfos(std::move(packet_infos)), //
+ std::move(bitstream)));
+ }
}
+ RTC_DCHECK(frame_boundary);
if (result.buffer_cleared) {
RequestKeyFrame();
}
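// Illustrative sketch, not part of the commit: a simplified stand-in for the
// assembly loop in OnInsertedPacket() above. Packets arrive in decode order
// with first/last flags; each [first..last] run becomes one frame. The real
// code collects ArrayViews and lets the depacketizer's AssembleFrame() glue
// them; the types below are stand-ins.
#include <cstdint>
#include <vector>

struct SketchPacket {
  bool first_in_frame;
  bool last_in_frame;
  std::vector<uint8_t> payload;
};

std::vector<std::vector<uint8_t>> AssembleFrames(
    const std::vector<SketchPacket>& packets) {
  std::vector<std::vector<uint8_t>> frames;
  std::vector<uint8_t> current;
  for (const SketchPacket& p : packets) {
    if (p.first_in_frame)
      current.clear();  // a new frame starts: reset the accumulator
    current.insert(current.end(), p.payload.begin(), p.payload.end());
    if (p.last_in_frame)
      frames.push_back(current);  // frame complete: emit it
  }
  return frames;
}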
@@ -605,14 +781,13 @@ void RtpVideoStreamReceiver::OnAssembledFrame(
RTC_DCHECK_RUN_ON(&network_tc_);
RTC_DCHECK(frame);
- absl::optional<RtpGenericFrameDescriptor> descriptor =
- frame->GetGenericFrameDescriptor();
+ const absl::optional<RTPVideoHeader::GenericDescriptorInfo>& descriptor =
+ frame->GetRtpVideoHeader().generic;
if (loss_notification_controller_ && descriptor) {
loss_notification_controller_->OnAssembledFrame(
- frame->first_seq_num(), descriptor->FrameId(),
- descriptor->Discardable().value_or(false),
- descriptor->FrameDependenciesDiffs());
+ frame->first_seq_num(), descriptor->frame_id, descriptor->discardable,
+ descriptor->dependencies);
}
// If frames arrive before a key frame, they would not be decodable.
@@ -660,10 +835,12 @@ void RtpVideoStreamReceiver::OnAssembledFrame(
last_assembled_frame_rtp_timestamp_ = frame->Timestamp();
}
- if (buffered_frame_decryptor_ == nullptr) {
- reference_finder_->ManageFrame(std::move(frame));
- } else {
+ if (buffered_frame_decryptor_ != nullptr) {
buffered_frame_decryptor_->ManageEncryptedFrame(std::move(frame));
+ } else if (frame_transformer_delegate_) {
+ frame_transformer_delegate_->TransformFrame(std::move(frame));
+ } else {
+ reference_finder_->ManageFrame(std::move(frame));
}
}
@@ -704,6 +881,16 @@ void RtpVideoStreamReceiver::SetFrameDecryptor(
buffered_frame_decryptor_->SetFrameDecryptor(std::move(frame_decryptor));
}
+void RtpVideoStreamReceiver::SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) {
+ RTC_DCHECK_RUN_ON(&network_tc_);
+ frame_transformer_delegate_ =
+ new rtc::RefCountedObject<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ this, std::move(frame_transformer), rtc::Thread::Current(),
+ config_.rtp.remote_ssrc);
+ frame_transformer_delegate_->Init();
+}
+
void RtpVideoStreamReceiver::UpdateRtt(int64_t max_rtt_ms) {
if (nack_module_)
nack_module_->UpdateRtt(max_rtt_ms);
@@ -738,6 +925,12 @@ void RtpVideoStreamReceiver::RemoveSecondarySink(
secondary_sinks_.erase(it);
}
+void RtpVideoStreamReceiver::ManageFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame) {
+ rtc::CritScope lock(&reference_finder_lock_);
+ reference_finder_->ManageFrame(std::move(frame));
+}
+
void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
if (packet.payload_size() == 0) {
// Padding or keep-alive packet.
@@ -755,23 +948,15 @@ void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
if (type_it == payload_type_map_.end()) {
return;
}
- auto depacketizer =
- absl::WrapUnique(RtpDepacketizer::Create(type_it->second));
-
- if (!depacketizer) {
- RTC_LOG(LS_ERROR) << "Failed to create depacketizer.";
- return;
- }
- RtpDepacketizer::ParsedPayload parsed_payload;
- if (!depacketizer->Parse(&parsed_payload, packet.payload().data(),
- packet.payload().size())) {
+ absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload =
+ type_it->second->Parse(packet.PayloadBuffer());
+ if (parsed_payload == absl::nullopt) {
RTC_LOG(LS_WARNING) << "Failed parsing payload.";
return;
}
- OnReceivedPayloadData(
- rtc::MakeArrayView(parsed_payload.payload, parsed_payload.payload_length),
- packet, parsed_payload.video);
+ OnReceivedPayloadData(std::move(parsed_payload->video_payload), packet,
+ parsed_payload->video_header);
}
void RtpVideoStreamReceiver::ParseAndHandleEncapsulatingHeader(
@@ -844,6 +1029,12 @@ bool RtpVideoStreamReceiver::DeliverRtcp(const uint8_t* rtcp_packet,
// Don't use old SRs to estimate time.
if (time_since_recieved <= 1) {
ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp);
+ absl::optional<int64_t> remote_to_local_clock_offset_ms =
+ ntp_estimator_.EstimateRemoteToLocalClockOffsetMs();
+ if (remote_to_local_clock_offset_ms.has_value()) {
+ absolute_capture_time_receiver_.SetRemoteToLocalClockOffset(
+ Int64MsToQ32x32(*remote_to_local_clock_offset_ms));
+ }
}
return true;
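// Illustrative sketch, not part of the commit: the Q32.32 conversion assumed
// by Int64MsToQ32x32() above. Q32.32 stores seconds with 32 fractional bits,
// so a millisecond offset scales by 2^32 / 1000 (the real helper may round
// where this sketch truncates).
#include <cstdint>

int64_t MsToQ32x32Sketch(int64_t ms) {
  return ms * (int64_t{1} << 32) / 1000;  // overflows for |ms| > ~2^31
}
// MsToQ32x32Sketch(500) == 0x80000000, i.e. exactly half a second.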
@@ -930,7 +1121,8 @@ void RtpVideoStreamReceiver::InsertSpsPpsIntoTracker(uint8_t payload_type) {
return;
RTC_LOG(LS_INFO) << "Found out of band supplied codec parameters for"
- << " payload type: " << static_cast<int>(payload_type);
+ " payload type: "
+ << static_cast<int>(payload_type);
H264SpropParameterSets sprop_decoder;
auto sprop_base64_it =
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver.h b/chromium/third_party/webrtc/video/rtp_video_stream_receiver.h
index 5bd5061de83..ba617fd02be 100644
--- a/chromium/third_party/webrtc/video/rtp_video_stream_receiver.h
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver.h
@@ -32,8 +32,10 @@
#include "modules/rtp_rtcp/include/rtp_rtcp.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/rtp_rtcp/source/absolute_capture_time_receiver.h"
+#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_video_header.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
#include "modules/video_coding/h264_sps_pps_tracker.h"
#include "modules/video_coding/loss_notification_controller.h"
#include "modules/video_coding/packet_buffer.h"
@@ -46,6 +48,7 @@
#include "rtc_base/thread_annotations.h"
#include "rtc_base/thread_checker.h"
#include "video/buffered_frame_decryptor.h"
+#include "video/rtp_video_stream_receiver_frame_transformer_delegate.h"
namespace webrtc {
@@ -84,7 +87,8 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
// requests are sent via the internal RtpRtcp module.
KeyFrameRequestSender* keyframe_request_sender,
video_coding::OnCompleteFrameCallback* complete_frame_callback,
- rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor);
+ rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer);
~RtpVideoStreamReceiver() override;
void AddReceiveCodec(const VideoCodec& video_codec,
@@ -116,7 +120,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
// TODO(philipel): Stop using VCMPacket in the new jitter buffer and then
// remove this function. Public only for tests.
- void OnReceivedPayloadData(rtc::ArrayView<const uint8_t> codec_payload,
+ void OnReceivedPayloadData(rtc::CopyOnWriteBuffer codec_payload,
const RtpPacketReceived& rtp_packet,
const RTPVideoHeader& video);
@@ -160,6 +164,11 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
void SetFrameDecryptor(
rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor);
+ // Sets a frame transformer after a stream has started, if no transformer
+ // has previously been set. Does not reset the decoder state.
+ void SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer);
+
// Called by VideoReceiveStream when stats are updated.
void UpdateRtt(int64_t max_rtt_ms);
@@ -173,6 +182,8 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
void AddSecondarySink(RtpPacketSinkInterface* sink);
void RemoveSecondarySink(const RtpPacketSinkInterface* sink);
+ virtual void ManageFrame(std::unique_ptr<video_coding::RtpFrameObject> frame);
+
private:
// Used for buffering RTCP feedback messages and sending them all together.
// Note:
@@ -235,6 +246,11 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
};
absl::optional<LossNotificationState> lntf_state_ RTC_GUARDED_BY(cs_);
};
+ enum ParseGenericDependenciesResult {
+ kDropPacket,
+ kHasGenericDescriptor,
+ kNoGenericDescriptor
+ };
// Entry point doing non-stats work for a received packet. Called
// for the same packet both before and after RED decapsulation.
@@ -247,6 +263,9 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
bool IsRedEnabled() const;
void InsertSpsPpsIntoTracker(uint8_t payload_type);
void OnInsertedPacket(video_coding::PacketBuffer::InsertResult result);
+ ParseGenericDependenciesResult ParseGenericDependenciesExtension(
+ const RtpPacketReceived& rtp_packet,
+ RTPVideoHeader* video_header) RTC_RUN_ON(worker_task_checker_);
void OnAssembledFrame(std::unique_ptr<video_coding::RtpFrameObject> frame);
Clock* const clock_;
@@ -276,6 +295,18 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
video_coding::PacketBuffer packet_buffer_;
UniqueTimestampCounter frame_counter_ RTC_GUARDED_BY(worker_task_checker_);
+ SeqNumUnwrapper<uint16_t> frame_id_unwrapper_
+ RTC_GUARDED_BY(worker_task_checker_);
+
+ // Video structure provided in the dependency descriptor of the first packet
+ // of a key frame. It is required to parse the dependency descriptor in
+ // subsequent delta packets.
+ std::unique_ptr<FrameDependencyStructure> video_structure_
+ RTC_GUARDED_BY(worker_task_checker_);
+ // Frame id of the last frame with the attached video structure.
+ // absl::nullopt when `video_structure_ == nullptr`.
+ absl::optional<int64_t> video_structure_frame_id_
+ RTC_GUARDED_BY(worker_task_checker_);
rtc::CriticalSection reference_finder_lock_;
std::unique_ptr<video_coding::RtpFrameReferenceFinder> reference_finder_
@@ -288,8 +319,8 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
RTC_GUARDED_BY(last_seq_num_cs_);
video_coding::H264SpsPpsTracker tracker_;
- // Maps payload type to codec type, for packetization.
- std::map<uint8_t, absl::optional<VideoCodecType>> payload_type_map_;
+ // Maps payload id to the depacketizer.
+ std::map<uint8_t, std::unique_ptr<VideoRtpDepacketizer>> payload_type_map_;
// TODO(johan): Remove pt_codec_params_ once
// https://bugs.chromium.org/p/webrtc/issues/detail?id=6883 is resolved.
@@ -324,6 +355,9 @@ class RtpVideoStreamReceiver : public LossNotificationSender,
RTC_GUARDED_BY(worker_task_checker_);
int64_t last_completed_picture_id_ = 0;
+
+ rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate>
+ frame_transformer_delegate_;
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc
new file mode 100644
index 00000000000..c2fb8feb429
--- /dev/null
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.cc
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/rtp_video_stream_receiver_frame_transformer_delegate.h"
+
+#include <utility>
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "rtc_base/task_utils/to_queued_task.h"
+#include "rtc_base/thread.h"
+#include "video/rtp_video_stream_receiver.h"
+
+namespace webrtc {
+
+namespace {
+class TransformableVideoReceiverFrame
+ : public TransformableVideoFrameInterface {
+ public:
+ TransformableVideoReceiverFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame,
+ uint32_t ssrc)
+ : frame_(std::move(frame)), ssrc_(ssrc) {}
+ ~TransformableVideoReceiverFrame() override = default;
+
+ // Implements TransformableVideoFrameInterface.
+ rtc::ArrayView<const uint8_t> GetData() const override {
+ return *frame_->GetEncodedData();
+ }
+
+ void SetData(rtc::ArrayView<const uint8_t> data) override {
+ frame_->SetEncodedData(
+ EncodedImageBuffer::Create(data.data(), data.size()));
+ }
+
+ uint32_t GetTimestamp() const override { return frame_->Timestamp(); }
+ uint32_t GetSsrc() const override { return ssrc_; }
+
+ bool IsKeyFrame() const override {
+ return frame_->FrameType() == VideoFrameType::kVideoFrameKey;
+ }
+
+ std::vector<uint8_t> GetAdditionalData() const override {
+ return RtpDescriptorAuthentication(frame_->GetRtpVideoHeader());
+ }
+
+ std::unique_ptr<video_coding::RtpFrameObject> ExtractFrame() && {
+ return std::move(frame_);
+ }
+
+ private:
+ std::unique_ptr<video_coding::RtpFrameObject> frame_;
+ const uint32_t ssrc_;
+};
+} // namespace
+
+RtpVideoStreamReceiverFrameTransformerDelegate::
+ RtpVideoStreamReceiverFrameTransformerDelegate(
+ RtpVideoStreamReceiver* receiver,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ rtc::Thread* network_thread,
+ uint32_t ssrc)
+ : receiver_(receiver),
+ frame_transformer_(std::move(frame_transformer)),
+ network_thread_(network_thread),
+ ssrc_(ssrc) {}
+
+void RtpVideoStreamReceiverFrameTransformerDelegate::Init() {
+ RTC_DCHECK_RUN_ON(&network_sequence_checker_);
+ frame_transformer_->RegisterTransformedFrameSinkCallback(
+ rtc::scoped_refptr<TransformedFrameCallback>(this), ssrc_);
+}
+
+void RtpVideoStreamReceiverFrameTransformerDelegate::Reset() {
+ RTC_DCHECK_RUN_ON(&network_sequence_checker_);
+ frame_transformer_->UnregisterTransformedFrameSinkCallback(ssrc_);
+ frame_transformer_ = nullptr;
+ receiver_ = nullptr;
+}
+
+void RtpVideoStreamReceiverFrameTransformerDelegate::TransformFrame(
+ std::unique_ptr<video_coding::RtpFrameObject> frame) {
+ RTC_DCHECK_RUN_ON(&network_sequence_checker_);
+ // TODO(bugs.webrtc.org/11380) remove once this version of TransformFrame is
+ // deprecated.
+ auto additional_data =
+ RtpDescriptorAuthentication(frame->GetRtpVideoHeader());
+ auto frame_copy =
+ std::make_unique<video_coding::RtpFrameObject>(*frame.get());
+ frame_transformer_->TransformFrame(std::move(frame_copy),
+ std::move(additional_data), ssrc_);
+
+ frame_transformer_->Transform(
+ std::make_unique<TransformableVideoReceiverFrame>(std::move(frame),
+ ssrc_));
+}
+
+void RtpVideoStreamReceiverFrameTransformerDelegate::OnTransformedFrame(
+ std::unique_ptr<video_coding::EncodedFrame> frame) {
+ rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate> delegate =
+ this;
+ network_thread_->PostTask(ToQueuedTask(
+ [delegate = std::move(delegate), frame = std::move(frame)]() mutable {
+ delegate->ManageFrame(std::move(frame));
+ }));
+}
+
+void RtpVideoStreamReceiverFrameTransformerDelegate::OnTransformedFrame(
+ std::unique_ptr<TransformableFrameInterface> frame) {
+ rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate> delegate =
+ this;
+ network_thread_->PostTask(ToQueuedTask(
+ [delegate = std::move(delegate), frame = std::move(frame)]() mutable {
+ delegate->ManageFrame(std::move(frame));
+ }));
+}
+
+void RtpVideoStreamReceiverFrameTransformerDelegate::ManageFrame(
+ std::unique_ptr<video_coding::EncodedFrame> frame) {
+ RTC_DCHECK_RUN_ON(&network_sequence_checker_);
+ if (!receiver_)
+ return;
+ auto transformed_frame = absl::WrapUnique(
+ static_cast<video_coding::RtpFrameObject*>(frame.release()));
+ receiver_->ManageFrame(std::move(transformed_frame));
+}
+
+void RtpVideoStreamReceiverFrameTransformerDelegate::ManageFrame(
+ std::unique_ptr<TransformableFrameInterface> frame) {
+ RTC_DCHECK_RUN_ON(&network_sequence_checker_);
+ if (!receiver_)
+ return;
+ auto transformed_frame = absl::WrapUnique(
+ static_cast<TransformableVideoReceiverFrame*>(frame.release()));
+ receiver_->ManageFrame(std::move(*transformed_frame).ExtractFrame());
+}
+
+} // namespace webrtc
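// Illustrative sketch, not part of the commit: how a transform implementation
// might edit a payload through the TransformableVideoFrameInterface wrapper
// defined above. The XOR is a hypothetical stand-in transform, not real
// encryption; SetData() copies the bytes into a fresh EncodedImageBuffer.
#include <cstdint>
#include <vector>

#include "api/array_view.h"
#include "api/frame_transformer_interface.h"

void XorPayloadSketch(webrtc::TransformableVideoFrameInterface& frame,
                      uint8_t key) {
  rtc::ArrayView<const uint8_t> in = frame.GetData();
  std::vector<uint8_t> out(in.begin(), in.end());
  for (uint8_t& b : out)
    b ^= key;  // stand-in for a real payload transform
  frame.SetData(out);
}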
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h
new file mode 100644
index 00000000000..eb3c2625f01
--- /dev/null
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_RTP_VIDEO_STREAM_RECEIVER_FRAME_TRANSFORMER_DELEGATE_H_
+#define VIDEO_RTP_VIDEO_STREAM_RECEIVER_FRAME_TRANSFORMER_DELEGATE_H_
+
+#include <memory>
+
+#include "api/frame_transformer_interface.h"
+#include "modules/video_coding/frame_object.h"
+#include "rtc_base/synchronization/sequence_checker.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+class RtpVideoStreamReceiver;
+
+// Delegates calls to FrameTransformerInterface to transform frames, and to
+// RtpVideoStreamReceiver to manage transformed frames on the |network_thread_|.
+class RtpVideoStreamReceiverFrameTransformerDelegate
+ : public TransformedFrameCallback {
+ public:
+ RtpVideoStreamReceiverFrameTransformerDelegate(
+ RtpVideoStreamReceiver* receiver,
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer,
+ rtc::Thread* network_thread,
+ uint32_t ssrc);
+
+ void Init();
+ void Reset();
+
+ // Delegates the call to FrameTransformerInterface::TransformFrame.
+ void TransformFrame(std::unique_ptr<video_coding::RtpFrameObject> frame);
+
+ // Implements TransformedFrameCallback. Can be called on any thread. Posts
+ // the transformed frame to be managed on the |network_thread_|.
+ void OnTransformedFrame(
+ std::unique_ptr<video_coding::EncodedFrame> frame) override;
+ void OnTransformedFrame(
+ std::unique_ptr<TransformableFrameInterface> frame) override;
+
+ // Delegates the call to RtpVideoReceiver::ManageFrame on the
+ // |network_thread_|.
+ void ManageFrame(std::unique_ptr<video_coding::EncodedFrame> frame);
+ void ManageFrame(std::unique_ptr<TransformableFrameInterface> frame);
+
+ protected:
+ ~RtpVideoStreamReceiverFrameTransformerDelegate() override = default;
+
+ private:
+ SequenceChecker network_sequence_checker_;
+ RtpVideoStreamReceiver* receiver_ RTC_GUARDED_BY(network_sequence_checker_);
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer_
+ RTC_GUARDED_BY(network_sequence_checker_);
+ rtc::Thread* const network_thread_;
+ const uint32_t ssrc_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_RTP_VIDEO_STREAM_RECEIVER_FRAME_TRANSFORMER_DELEGATE_H_
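// Illustrative sketch, not part of the commit: a minimal pass-through
// FrameTransformerInterface that could be handed to
// SetDepacketizerToDecoderFrameTransformer(). It assumes only the interface
// members visible in this diff (the interface at this revision may have more
// hooks), keeps a single sink rather than a per-ssrc map, and does no
// locking, so it is a single-stream, single-thread sketch only.
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

#include "api/frame_transformer_interface.h"
#include "modules/video_coding/frame_object.h"

namespace webrtc {

class PassThroughFrameTransformer : public FrameTransformerInterface {
 public:
  void TransformFrame(std::unique_ptr<video_coding::EncodedFrame> frame,
                      std::vector<uint8_t> additional_data,
                      uint32_t ssrc) override {
    if (sink_)
      sink_->OnTransformedFrame(std::move(frame));  // forward unmodified
  }
  void Transform(std::unique_ptr<TransformableFrameInterface> frame) override {
    if (sink_)
      sink_->OnTransformedFrame(std::move(frame));  // forward unmodified
  }
  void RegisterTransformedFrameSinkCallback(
      rtc::scoped_refptr<TransformedFrameCallback> callback,
      uint32_t /*ssrc*/) override {
    sink_ = std::move(callback);
  }
  void UnregisterTransformedFrameSinkCallback(uint32_t /*ssrc*/) override {
    sink_ = nullptr;
  }

 private:
  rtc::scoped_refptr<TransformedFrameCallback> sink_;
};

}  // namespace webrtc

// Usage, mirroring the tests that follow:
//   rtc::scoped_refptr<webrtc::FrameTransformerInterface> transformer(
//       new rtc::RefCountedObject<webrtc::PassThroughFrameTransformer>());
//   receiver->SetDepacketizerToDecoderFrameTransformer(std::move(transformer));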
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc
new file mode 100644
index 00000000000..5626d83d392
--- /dev/null
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/rtp_video_stream_receiver_frame_transformer_delegate.h"
+
+#include <cstdio>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "api/call/transport.h"
+#include "call/video_receive_stream.h"
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "modules/utility/include/process_thread.h"
+#include "rtc_base/event.h"
+#include "rtc_base/task_utils/to_queued_task.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "video/rtp_video_stream_receiver.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::_;
+
+std::unique_ptr<video_coding::RtpFrameObject> CreateRtpFrameObject() {
+ return std::make_unique<video_coding::RtpFrameObject>(
+ 0, 0, true, 0, 0, 0, 0, 0, VideoSendTiming(), 0, kVideoCodecGeneric,
+ kVideoRotation_0, VideoContentType::UNSPECIFIED, RTPVideoHeader(),
+ absl::nullopt, RtpPacketInfos(), EncodedImageBuffer::Create(0));
+}
+
+class FakeTransport : public Transport {
+ public:
+ bool SendRtp(const uint8_t* packet,
+ size_t length,
+ const PacketOptions& options) {
+ return true;
+ }
+ bool SendRtcp(const uint8_t* packet, size_t length) { return true; }
+};
+
+class FakeNackSender : public NackSender {
+ public:
+ void SendNack(const std::vector<uint16_t>& sequence_numbers) {}
+ void SendNack(const std::vector<uint16_t>& sequence_numbers,
+ bool buffering_allowed) {}
+};
+
+class FakeOnCompleteFrameCallback
+ : public video_coding::OnCompleteFrameCallback {
+ public:
+ void OnCompleteFrame(
+ std::unique_ptr<video_coding::EncodedFrame> frame) override {}
+};
+
+class TestRtpVideoStreamReceiverInitializer {
+ public:
+ TestRtpVideoStreamReceiverInitializer()
+ : test_config_(nullptr),
+ test_process_thread_(ProcessThread::Create("TestThread")) {
+ test_config_.rtp.remote_ssrc = 1111;
+ test_config_.rtp.local_ssrc = 2222;
+ test_rtp_receive_statistics_ =
+ ReceiveStatistics::Create(Clock::GetRealTimeClock());
+ }
+
+ protected:
+ VideoReceiveStream::Config test_config_;
+ FakeTransport fake_transport_;
+ FakeNackSender fake_nack_sender_;
+ FakeOnCompleteFrameCallback fake_on_complete_frame_callback_;
+ std::unique_ptr<ProcessThread> test_process_thread_;
+ std::unique_ptr<ReceiveStatistics> test_rtp_receive_statistics_;
+};
+
+class TestRtpVideoStreamReceiver : public TestRtpVideoStreamReceiverInitializer,
+ public RtpVideoStreamReceiver {
+ public:
+ TestRtpVideoStreamReceiver()
+ : TestRtpVideoStreamReceiverInitializer(),
+ RtpVideoStreamReceiver(Clock::GetRealTimeClock(),
+ &fake_transport_,
+ nullptr,
+ nullptr,
+ &test_config_,
+ test_rtp_receive_statistics_.get(),
+ nullptr,
+ test_process_thread_.get(),
+ &fake_nack_sender_,
+ nullptr,
+ &fake_on_complete_frame_callback_,
+ nullptr,
+ nullptr) {}
+ ~TestRtpVideoStreamReceiver() override = default;
+
+ MOCK_METHOD(void,
+ ManageFrame,
+ (std::unique_ptr<video_coding::RtpFrameObject> frame),
+ (override));
+};
+
+class MockFrameTransformer : public FrameTransformerInterface {
+ public:
+ ~MockFrameTransformer() override = default;
+ MOCK_METHOD(void,
+ TransformFrame,
+ (std::unique_ptr<video_coding::EncodedFrame>,
+ std::vector<uint8_t>,
+ uint32_t),
+ (override));
+ MOCK_METHOD(void,
+ RegisterTransformedFrameSinkCallback,
+ (rtc::scoped_refptr<TransformedFrameCallback>, uint32_t),
+ (override));
+ MOCK_METHOD(void,
+ UnregisterTransformedFrameSinkCallback,
+ (uint32_t),
+ (override));
+};
+
+TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
+ RegisterTransformedFrameCallbackSinkOnInit) {
+ TestRtpVideoStreamReceiver receiver;
+ rtc::scoped_refptr<MockFrameTransformer> frame_transformer(
+ new rtc::RefCountedObject<MockFrameTransformer>());
+ rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate> delegate(
+ new rtc::RefCountedObject<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ &receiver, frame_transformer, rtc::Thread::Current(),
+ /*remote_ssrc*/ 1111));
+ EXPECT_CALL(*frame_transformer,
+ RegisterTransformedFrameSinkCallback(testing::_, 1111));
+ delegate->Init();
+}
+
+TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
+ UnregisterTransformedFrameSinkCallbackOnReset) {
+ TestRtpVideoStreamReceiver receiver;
+ rtc::scoped_refptr<MockFrameTransformer> frame_transformer(
+ new rtc::RefCountedObject<MockFrameTransformer>());
+ rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate> delegate(
+ new rtc::RefCountedObject<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ &receiver, frame_transformer, rtc::Thread::Current(),
+ /*remote_ssrc*/ 1111));
+ EXPECT_CALL(*frame_transformer, UnregisterTransformedFrameSinkCallback(1111));
+ delegate->Reset();
+}
+
+TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, TransformFrame) {
+ TestRtpVideoStreamReceiver receiver;
+ rtc::scoped_refptr<MockFrameTransformer> frame_transformer(
+ new rtc::RefCountedObject<MockFrameTransformer>());
+ rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate> delegate(
+ new rtc::RefCountedObject<RtpVideoStreamReceiverFrameTransformerDelegate>(
+ &receiver, frame_transformer, rtc::Thread::Current(),
+ /*remote_ssrc*/ 1111));
+ auto frame = CreateRtpFrameObject();
+ EXPECT_CALL(*frame_transformer,
+ TransformFrame(_, RtpDescriptorAuthentication(RTPVideoHeader()),
+ /*remote_ssrc*/ 1111));
+ delegate->TransformFrame(std::move(frame));
+}
+
+TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest,
+ ManageFrameOnTransformedFrame) {
+ auto main_thread = rtc::Thread::Create();
+ main_thread->Start();
+ auto network_thread = rtc::Thread::Create();
+ network_thread->Start();
+
+ TestRtpVideoStreamReceiver receiver;
+ rtc::scoped_refptr<MockFrameTransformer> frame_transformer(
+ new rtc::RefCountedObject<MockFrameTransformer>());
+ auto delegate = network_thread->Invoke<
+ rtc::scoped_refptr<RtpVideoStreamReceiverFrameTransformerDelegate>>(
+ RTC_FROM_HERE, [&]() mutable {
+ return new rtc::RefCountedObject<
+ RtpVideoStreamReceiverFrameTransformerDelegate>(
+ &receiver, frame_transformer, network_thread.get(),
+ /*remote_ssrc*/ 1111);
+ });
+
+ auto frame = CreateRtpFrameObject();
+
+ EXPECT_CALL(receiver, ManageFrame)
+ .WillOnce([&network_thread](
+ std::unique_ptr<video_coding::RtpFrameObject> frame) {
+ EXPECT_TRUE(network_thread->IsCurrent());
+ });
+ main_thread->Invoke<void>(RTC_FROM_HERE, [&]() mutable {
+ delegate->OnTransformedFrame(std::move(frame));
+ });
+ rtc::ThreadManager::ProcessAllMessageQueuesForTesting();
+
+ main_thread->Stop();
+ network_thread->Stop();
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_unittest.cc b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_unittest.cc
index f7e6269c064..088465c301d 100644
--- a/chromium/third_party/webrtc/video/rtp_video_stream_receiver_unittest.cc
+++ b/chromium/third_party/webrtc/video/rtp_video_stream_receiver_unittest.cc
@@ -11,11 +11,13 @@
#include "video/rtp_video_stream_receiver.h"
#include <memory>
+#include <utility>
#include "api/video/video_codec_type.h"
#include "api/video/video_frame_type.h"
#include "common_video/h264/h264_common.h"
#include "media/base/media_constants.h"
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
#include "modules/rtp_rtcp/source/rtp_format.h"
#include "modules/rtp_rtcp/source/rtp_format_vp9.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
@@ -123,14 +125,27 @@ class MockRtpPacketSink : public RtpPacketSinkInterface {
MOCK_METHOD1(OnRtpPacket, void(const RtpPacketReceived&));
};
+class MockFrameTransformer : public FrameTransformerInterface {
+ public:
+ MOCK_METHOD3(TransformFrame,
+ void(std::unique_ptr<video_coding::EncodedFrame> frame,
+ std::vector<uint8_t> additional_data,
+ uint32_t ssrc));
+ MOCK_METHOD2(RegisterTransformedFrameSinkCallback,
+ void(rtc::scoped_refptr<TransformedFrameCallback>, uint32_t));
+ MOCK_METHOD1(UnregisterTransformedFrameSinkCallback, void(uint32_t));
+};
+
constexpr uint32_t kSsrc = 111;
constexpr uint16_t kSequenceNumber = 222;
-std::unique_ptr<RtpPacketReceived> CreateRtpPacketReceived(
- uint32_t ssrc = kSsrc,
- uint16_t sequence_number = kSequenceNumber) {
+constexpr int kPayloadType = 100;
+constexpr int kRedPayloadType = 125;
+
+std::unique_ptr<RtpPacketReceived> CreateRtpPacketReceived() {
auto packet = std::make_unique<RtpPacketReceived>();
- packet->SetSsrc(ssrc);
- packet->SetSequenceNumber(sequence_number);
+ packet->SetSsrc(kSsrc);
+ packet->SetSequenceNumber(kSequenceNumber);
+ packet->SetPayloadType(kPayloadType);
return packet;
}
@@ -147,16 +162,19 @@ class RtpVideoStreamReceiverTest : public ::testing::Test {
explicit RtpVideoStreamReceiverTest(std::string field_trials)
: override_field_trials_(field_trials),
config_(CreateConfig()),
- process_thread_(ProcessThread::Create("TestThread")) {}
-
- void SetUp() {
+ process_thread_(ProcessThread::Create("TestThread")) {
rtp_receive_statistics_ =
ReceiveStatistics::Create(Clock::GetRealTimeClock());
rtp_video_stream_receiver_ = std::make_unique<RtpVideoStreamReceiver>(
Clock::GetRealTimeClock(), &mock_transport_, nullptr, nullptr, &config_,
rtp_receive_statistics_.get(), nullptr, process_thread_.get(),
&mock_nack_sender_, &mock_key_frame_request_sender_,
- &mock_on_complete_frame_callback_, nullptr);
+ &mock_on_complete_frame_callback_, nullptr, nullptr);
+ VideoCodec codec;
+ codec.plType = kPayloadType;
+ codec.codecType = kVideoCodecGeneric;
+ rtp_video_stream_receiver_->AddReceiveCodec(codec, {},
+ /*raw_payload=*/false);
}
RTPVideoHeader GetDefaultH264VideoHeader() {
@@ -170,13 +188,12 @@ class RtpVideoStreamReceiverTest : public ::testing::Test {
// code.
void AddSps(RTPVideoHeader* video_header,
uint8_t sps_id,
- std::vector<uint8_t>* data) {
+ rtc::CopyOnWriteBuffer* data) {
NaluInfo info;
info.type = H264::NaluType::kSps;
info.sps_id = sps_id;
info.pps_id = -1;
- data->push_back(H264::NaluType::kSps);
- data->push_back(sps_id);
+ data->AppendData({H264::NaluType::kSps, sps_id});
auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
h264.nalus[h264.nalus_length++] = info;
}
@@ -184,13 +201,12 @@ class RtpVideoStreamReceiverTest : public ::testing::Test {
void AddPps(RTPVideoHeader* video_header,
uint8_t sps_id,
uint8_t pps_id,
- std::vector<uint8_t>* data) {
+ rtc::CopyOnWriteBuffer* data) {
NaluInfo info;
info.type = H264::NaluType::kPps;
info.sps_id = sps_id;
info.pps_id = pps_id;
- data->push_back(H264::NaluType::kPps);
- data->push_back(pps_id);
+ data->AppendData({H264::NaluType::kPps, pps_id});
auto& h264 = absl::get<RTPVideoHeaderH264>(video_header->video_type_header);
h264.nalus[h264.nalus_length++] = info;
}
@@ -209,6 +225,7 @@ class RtpVideoStreamReceiverTest : public ::testing::Test {
VideoReceiveStream::Config config(nullptr);
config.rtp.remote_ssrc = 1111;
config.rtp.local_ssrc = 2222;
+ config.rtp.red_payload_type = kRedPayloadType;
return config;
}
@@ -226,7 +243,7 @@ class RtpVideoStreamReceiverTest : public ::testing::Test {
TEST_F(RtpVideoStreamReceiverTest, CacheColorSpaceFromLastPacketOfKeyframe) {
// Test that color space is cached from the last packet of a key frame and
// that it's not reset by padding packets without color space.
- constexpr int kPayloadType = 99;
+ constexpr int kVp9PayloadType = 99;
const ColorSpace kColorSpace(
ColorSpace::PrimaryID::kFILM, ColorSpace::TransferID::kBT2020_12,
ColorSpace::MatrixID::kBT2020_NCL, ColorSpace::RangeID::kFull);
@@ -263,7 +280,7 @@ TEST_F(RtpVideoStreamReceiverTest, CacheColorSpaceFromLastPacketOfKeyframe) {
RtpPacketToSend packet_to_send(&extension_map);
packet_to_send.SetSequenceNumber(sequence_number_++);
packet_to_send.SetSsrc(kSsrc);
- packet_to_send.SetPayloadType(kPayloadType);
+ packet_to_send.SetPayloadType(kVp9PayloadType);
bool include_color_space =
(rtp_packetizer_->NumPackets() == 1u &&
video_frame_type_ == VideoFrameType::kVideoFrameKey);
@@ -288,7 +305,7 @@ TEST_F(RtpVideoStreamReceiverTest, CacheColorSpaceFromLastPacketOfKeyframe) {
// Prepare the receiver for VP9.
VideoCodec codec;
- codec.plType = kPayloadType;
+ codec.plType = kVp9PayloadType;
codec.codecType = kVideoCodecVP9;
std::map<std::string, std::string> codec_params;
rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params,
@@ -342,7 +359,8 @@ TEST_F(RtpVideoStreamReceiverTest, CacheColorSpaceFromLastPacketOfKeyframe) {
TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrame) {
RtpPacketReceived rtp_packet;
RTPVideoHeader video_header;
- const std::vector<uint8_t> data({1, 2, 3, 4});
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
+ rtp_packet.SetPayloadType(kPayloadType);
rtp_packet.SetSequenceNumber(1);
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
@@ -362,8 +380,9 @@ TEST_F(RtpVideoStreamReceiverTest, PacketInfoIsPropagatedIntoVideoFrames) {
RtpHeaderExtensionMap extension_map;
extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
RtpPacketReceived rtp_packet(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
RTPVideoHeader video_header;
- const std::vector<uint8_t> data({1, 2, 3, 4});
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
rtp_packet.SetSequenceNumber(1);
rtp_packet.SetTimestamp(1);
rtp_packet.SetSsrc(kSsrc);
@@ -395,9 +414,10 @@ TEST_F(RtpVideoStreamReceiverTest,
RtpHeaderExtensionMap extension_map;
extension_map.Register<AbsoluteCaptureTimeExtension>(kId0);
RtpPacketReceived rtp_packet(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
RTPVideoHeader video_header;
- const std::vector<uint8_t> data({1, 2, 3, 4});
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
uint16_t sequence_number = 1;
uint32_t rtp_timestamp = 1;
rtp_packet.SetSequenceNumber(sequence_number);
@@ -419,6 +439,7 @@ TEST_F(RtpVideoStreamReceiverTest,
// Rtp packet without absolute capture time.
rtp_packet = RtpPacketReceived(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
rtp_packet.SetSequenceNumber(++sequence_number);
rtp_packet.SetTimestamp(++rtp_timestamp);
rtp_packet.SetSsrc(kSsrc);
@@ -435,10 +456,6 @@ TEST_F(RtpVideoStreamReceiverTest,
}
TEST_F(RtpVideoStreamReceiverTest, NoInfiniteRecursionOnEncapsulatedRedPacket) {
- const uint8_t kRedPayloadType = 125;
- VideoCodec codec;
- codec.plType = kRedPayloadType;
- rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/false);
const std::vector<uint8_t> data({
0x80, // RTP version.
kRedPayloadType, // Payload type.
@@ -478,8 +495,9 @@ TEST_F(RtpVideoStreamReceiverTest,
TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrameBitstreamError) {
RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
RTPVideoHeader video_header;
- const std::vector<uint8_t> data({1, 2, 3, 4});
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
rtp_packet.SetSequenceNumber(1);
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
@@ -505,12 +523,19 @@ INSTANTIATE_TEST_SUITE_P(SpsPpsIdrIsKeyframe,
RtpVideoStreamReceiverTestH264,
Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
-TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
- std::vector<uint8_t> sps_data;
+// Fails on MSAN: https://bugs.chromium.org/p/webrtc/issues/detail?id=11376.
+#if defined(MEMORY_SANITIZER)
+#define MAYBE_InBandSpsPps DISABLED_InBandSpsPps
+#else
+#define MAYBE_InBandSpsPps InBandSpsPps
+#endif
+TEST_P(RtpVideoStreamReceiverTestH264, MAYBE_InBandSpsPps) {
+ rtc::CopyOnWriteBuffer sps_data;
RtpPacketReceived rtp_packet;
RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader();
AddSps(&sps_video_header, 0, &sps_data);
rtp_packet.SetSequenceNumber(0);
+ rtp_packet.SetPayloadType(kPayloadType);
sps_video_header.is_first_packet_in_frame = true;
sps_video_header.frame_type = VideoFrameType::kEmptyFrame;
mock_on_complete_frame_callback_.AppendExpectedBitstream(
@@ -520,7 +545,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet,
sps_video_header);
- std::vector<uint8_t> pps_data;
+ rtc::CopyOnWriteBuffer pps_data;
RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader();
AddPps(&pps_video_header, 0, 1, &pps_data);
rtp_packet.SetSequenceNumber(1);
@@ -533,14 +558,15 @@ TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet,
pps_video_header);
- std::vector<uint8_t> idr_data;
+ rtc::CopyOnWriteBuffer idr_data;
RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader();
AddIdr(&idr_video_header, 1);
rtp_packet.SetSequenceNumber(2);
idr_video_header.is_first_packet_in_frame = true;
idr_video_header.is_last_packet_in_frame = true;
idr_video_header.frame_type = VideoFrameType::kVideoFrameKey;
- idr_data.insert(idr_data.end(), {0x65, 1, 2, 3});
+ const uint8_t idr[] = {0x65, 1, 2, 3};
+ idr_data.AppendData(idr);
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(),
@@ -573,7 +599,6 @@ TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
mock_on_complete_frame_callback_.AppendExpectedBitstream(binary_pps,
sizeof(binary_pps));
- std::vector<uint8_t> data;
RtpPacketReceived rtp_packet;
RTPVideoHeader video_header = GetDefaultH264VideoHeader();
AddIdr(&video_header, 0);
@@ -583,7 +608,7 @@ TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
video_header.is_last_packet_in_frame = true;
video_header.codec = kVideoCodecH264;
video_header.frame_type = VideoFrameType::kVideoFrameKey;
- data.insert(data.end(), {1, 2, 3});
+ rtc::CopyOnWriteBuffer data({1, 2, 3});
mock_on_complete_frame_callback_.AppendExpectedBitstream(
kH264StartCode, sizeof(kH264StartCode));
mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
@@ -596,9 +621,8 @@ TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) {
RtpPacketReceived rtp_packet;
RTPVideoHeader video_header = GetDefaultH264VideoHeader();
- std::vector<uint8_t> data;
- data.insert(data.end(), {1, 2, 3});
- rtp_packet.SetPayloadType(99);
+ rtc::CopyOnWriteBuffer data({1, 2, 3});
+ rtp_packet.SetPayloadType(kPayloadType);
rtp_packet.SetSequenceNumber(2);
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
@@ -633,8 +657,9 @@ TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) {
TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeIfFirstFrameIsDelta) {
RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
RTPVideoHeader video_header;
- const std::vector<uint8_t> data({1, 2, 3, 4});
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
rtp_packet.SetSequenceNumber(1);
video_header.is_first_packet_in_frame = true;
video_header.is_last_packet_in_frame = true;
@@ -649,8 +674,9 @@ TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeWhenPacketBufferGetsFull) {
constexpr int kPacketBufferMaxSize = 2048;
RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
RTPVideoHeader video_header;
- const std::vector<uint8_t> data({1, 2, 3, 4});
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
video_header.is_first_packet_in_frame = true;
// Incomplete frames so that the packet buffer is filling up.
video_header.is_last_packet_in_frame = false;
@@ -794,17 +820,14 @@ TEST_P(RtpVideoStreamReceiverGenericDescriptorTest,
const int version = GetParam();
const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
- const int kPayloadType = 123;
const int kSpatialIndex = 1;
- VideoCodec codec;
- codec.plType = kPayloadType;
- rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/false);
rtp_video_stream_receiver_->StartReceive();
RtpHeaderExtensionMap extension_map;
RegisterRtpGenericFrameDescriptorExtension(&extension_map, version);
RtpPacketReceived rtp_packet(&extension_map);
+ rtp_packet.SetPayloadType(kPayloadType);
RtpGenericFrameDescriptor generic_descriptor;
generic_descriptor.SetFirstPacketInSubFrame(true);
@@ -843,12 +866,8 @@ TEST_P(RtpVideoStreamReceiverGenericDescriptorTest,
const int version = GetParam();
const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
- const int kPayloadType = 123;
const int kSpatialIndex = 1;
- VideoCodec codec;
- codec.plType = kPayloadType;
- rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/false);
rtp_video_stream_receiver_->StartReceive();
RtpHeaderExtensionMap extension_map;
@@ -906,11 +925,7 @@ TEST_P(RtpVideoStreamReceiverGenericDescriptorTest,
TEST_F(RtpVideoStreamReceiverGenericDescriptorTest,
DropPacketsWithMultipleVersionsOfExtension) {
const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
- const int kPayloadType = 123;
- VideoCodec codec;
- codec.plType = kPayloadType;
- rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/false);
rtp_video_stream_receiver_->StartReceive();
RtpHeaderExtensionMap extension_map;
@@ -947,10 +962,10 @@ TEST_P(RtpVideoStreamReceiverGenericDescriptorTest,
const int version = GetParam();
const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
- const int kPayloadType = 123;
+ const int kRawPayloadType = 123;
VideoCodec codec;
- codec.plType = kPayloadType;
+ codec.plType = kRawPayloadType;
rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true);
rtp_video_stream_receiver_->StartReceive();
@@ -970,13 +985,226 @@ TEST_P(RtpVideoStreamReceiverGenericDescriptorTest,
data.size());
rtp_packet.SetMarker(true);
- rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetPayloadType(kRawPayloadType);
rtp_packet.SetSequenceNumber(1);
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
}
+TEST_P(RtpVideoStreamReceiverGenericDescriptorTest, UnwrapsFrameId) {
+ const int version = GetParam();
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ const int kPayloadType = 123;
+
+ VideoCodec codec;
+ codec.plType = kPayloadType;
+ rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true);
+ rtp_video_stream_receiver_->StartReceive();
+ RtpHeaderExtensionMap extension_map;
+ RegisterRtpGenericFrameDescriptorExtension(&extension_map, version);
+
+ uint16_t rtp_sequence_number = 1;
+ auto inject_packet = [&](uint16_t wrapped_frame_id) {
+ RtpPacketReceived rtp_packet(&extension_map);
+
+ RtpGenericFrameDescriptor generic_descriptor;
+ generic_descriptor.SetFirstPacketInSubFrame(true);
+ generic_descriptor.SetLastPacketInSubFrame(true);
+ generic_descriptor.SetFrameId(wrapped_frame_id);
+ ASSERT_TRUE(SetExtensionRtpGenericFrameDescriptorExtension(
+ generic_descriptor, &rtp_packet, version));
+
+ uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
+ ASSERT_TRUE(payload);
+ memcpy(payload, data.data(), data.size());
+ mock_on_complete_frame_callback_.ClearExpectedBitstream();
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ rtp_packet.SetMarker(true);
+ rtp_packet.SetPayloadType(kPayloadType);
+ rtp_packet.SetSequenceNumber(++rtp_sequence_number);
+ rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
+ };
+
+ int64_t first_picture_id;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ first_picture_id = frame->id.picture_id;
+ });
+ inject_packet(/*wrapped_frame_id=*/0xffff);
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ EXPECT_EQ(frame->id.picture_id - first_picture_id, 3);
+ });
+ inject_packet(/*wrapped_frame_id=*/0x0002);
+}
+
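// Illustrative sketch, not part of the commit: a simplified stand-in for the
// SeqNumUnwrapper<uint16_t> the receiver uses, showing what the test above
// checks: 0xffff followed by 0x0002 is a forward step of 3 once the 16-bit
// wrap is accounted for, not a jump backwards.
#include <cstdint>

int64_t UnwrapSketch(uint16_t value, int64_t last_unwrapped) {
  uint16_t last = static_cast<uint16_t>(last_unwrapped);
  // Two's-complement narrowing turns the modular distance into a signed step.
  int16_t step = static_cast<int16_t>(static_cast<uint16_t>(value - last));
  return last_unwrapped + step;
}
// UnwrapSketch(0x0002, /*last_unwrapped=*/0xffff) == 0x10002, i.e. +3.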
+class RtpVideoStreamReceiverDependencyDescriptorTest
+ : public RtpVideoStreamReceiverTest {
+ public:
+ RtpVideoStreamReceiverDependencyDescriptorTest() {
+ VideoCodec codec;
+ codec.plType = payload_type_;
+ rtp_video_stream_receiver_->AddReceiveCodec(codec, {},
+ /*raw_payload=*/true);
+ extension_map_.Register<RtpDependencyDescriptorExtension>(7);
+ rtp_video_stream_receiver_->StartReceive();
+ }
+
+ // Returns some valid structure for the DependencyDescriptors.
+ // The first template of that structure always fits a key frame.
+ static FrameDependencyStructure CreateStreamStructure() {
+ FrameDependencyStructure stream_structure;
+ stream_structure.num_decode_targets = 1;
+ stream_structure.templates = {
+ GenericFrameInfo::Builder().Dtis("S").Build(),
+ GenericFrameInfo::Builder().Dtis("S").Fdiffs({1}).Build(),
+ };
+ return stream_structure;
+ }
+
+ void InjectPacketWith(const FrameDependencyStructure& stream_structure,
+ const DependencyDescriptor& dependency_descriptor) {
+ const std::vector<uint8_t> data = {0, 1, 2, 3, 4};
+ RtpPacketReceived rtp_packet(&extension_map_);
+ ASSERT_TRUE(rtp_packet.SetExtension<RtpDependencyDescriptorExtension>(
+ stream_structure, dependency_descriptor));
+ uint8_t* payload = rtp_packet.SetPayloadSize(data.size());
+ ASSERT_TRUE(payload);
+ memcpy(payload, data.data(), data.size());
+ mock_on_complete_frame_callback_.ClearExpectedBitstream();
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ rtp_packet.SetMarker(true);
+ rtp_packet.SetPayloadType(payload_type_);
+ rtp_packet.SetSequenceNumber(++rtp_sequence_number_);
+ rtp_video_stream_receiver_->OnRtpPacket(rtp_packet);
+ }
+
+ private:
+ const int payload_type_ = 123;
+ RtpHeaderExtensionMap extension_map_;
+ uint16_t rtp_sequence_number_ = 321;
+};
+
+TEST_F(RtpVideoStreamReceiverDependencyDescriptorTest, UnwrapsFrameId) {
+ FrameDependencyStructure stream_structure = CreateStreamStructure();
+
+ DependencyDescriptor keyframe_descriptor;
+ keyframe_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure);
+ keyframe_descriptor.frame_dependencies = stream_structure.templates[0];
+ keyframe_descriptor.frame_number = 0xfff0;
+ // DependencyDescriptor doesn't support reordering a delta frame before a
+ // keyframe. Thus feed a key frame first, then test reordered delta frames.
+ int64_t first_picture_id;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ first_picture_id = frame->id.picture_id;
+ });
+ InjectPacketWith(stream_structure, keyframe_descriptor);
+
+ DependencyDescriptor deltaframe1_descriptor;
+ deltaframe1_descriptor.frame_dependencies = stream_structure.templates[1];
+ deltaframe1_descriptor.frame_number = 0xfffe;
+
+ DependencyDescriptor deltaframe2_descriptor;
+ deltaframe1_descriptor.frame_dependencies = stream_structure.templates[1];
+ deltaframe2_descriptor.frame_number = 0x0002;
+
+ // Parser should unwrap frame ids correctly even if packets were reordered by
+ // the network.
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ // 0x0002 - 0xfff0
+ EXPECT_EQ(frame->id.picture_id - first_picture_id, 18);
+ })
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ // 0xfffe - 0xfff0
+ EXPECT_EQ(frame->id.picture_id - first_picture_id, 14);
+ });
+ InjectPacketWith(stream_structure, deltaframe2_descriptor);
+ InjectPacketWith(stream_structure, deltaframe1_descriptor);
+}
+
+TEST_F(RtpVideoStreamReceiverDependencyDescriptorTest,
+ DropsLateDeltaFramePacketWithDependencyDescriptorExtension) {
+ FrameDependencyStructure stream_structure1 = CreateStreamStructure();
+ FrameDependencyStructure stream_structure2 = CreateStreamStructure();
+ // Make sure template ids for these two structures do not collide:
+ // adjust structure_id (which is also used as the template id offset).
+ stream_structure1.structure_id = 13;
+ stream_structure2.structure_id =
+ stream_structure1.structure_id + stream_structure1.templates.size();
+
+ DependencyDescriptor keyframe1_descriptor;
+ keyframe1_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure1);
+ keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
+ keyframe1_descriptor.frame_number = 1;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
+ InjectPacketWith(stream_structure1, keyframe1_descriptor);
+
+ // Pass in 2nd key frame with different structure.
+ DependencyDescriptor keyframe2_descriptor;
+ keyframe2_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure2);
+ keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
+ keyframe2_descriptor.frame_number = 3;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame);
+ InjectPacketWith(stream_structure2, keyframe2_descriptor);
+
+ // Pass in late delta frame that uses structure of the 1st key frame.
+ DependencyDescriptor deltaframe_descriptor;
+ deltaframe_descriptor.frame_dependencies = stream_structure1.templates[0];
+ deltaframe_descriptor.frame_number = 2;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame).Times(0);
+ InjectPacketWith(stream_structure1, deltaframe_descriptor);
+}
+
+TEST_F(RtpVideoStreamReceiverDependencyDescriptorTest,
+ DropsLateKeyFramePacketWithDependencyDescriptorExtension) {
+ FrameDependencyStructure stream_structure1 = CreateStreamStructure();
+ FrameDependencyStructure stream_structure2 = CreateStreamStructure();
+ // Make sure template ids for these two structures do not collide:
+ // adjust structure_id (which is also used as the template id offset).
+ stream_structure1.structure_id = 13;
+ stream_structure2.structure_id =
+ stream_structure1.structure_id + stream_structure1.templates.size();
+
+ DependencyDescriptor keyframe1_descriptor;
+ keyframe1_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure1);
+ keyframe1_descriptor.frame_dependencies = stream_structure1.templates[0];
+ keyframe1_descriptor.frame_number = 1;
+
+ DependencyDescriptor keyframe2_descriptor;
+ keyframe2_descriptor.attached_structure =
+ std::make_unique<FrameDependencyStructure>(stream_structure2);
+ keyframe2_descriptor.frame_dependencies = stream_structure2.templates[0];
+ keyframe2_descriptor.frame_number = 3;
+
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ EXPECT_EQ(frame->id.picture_id & 0xFFFF, 3);
+ });
+ InjectPacketWith(stream_structure2, keyframe2_descriptor);
+ InjectPacketWith(stream_structure1, keyframe1_descriptor);
+
+ // Pass in delta frame that uses structure of the 2nd key frame. Late key
+ // frame shouldn't block it.
+ DependencyDescriptor deltaframe_descriptor;
+ deltaframe_descriptor.frame_dependencies = stream_structure2.templates[0];
+ deltaframe_descriptor.frame_number = 4;
+ EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
+ .WillOnce([&](video_coding::EncodedFrame* frame) {
+ EXPECT_EQ(frame->id.picture_id & 0xFFFF, 4);
+ });
+ InjectPacketWith(stream_structure2, deltaframe_descriptor);
+}
+
#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
TEST_F(RtpVideoStreamReceiverTest, RepeatedSecondarySinkDisallowed) {
MockRtpPacketSink secondary_sink;
@@ -990,4 +1218,40 @@ TEST_F(RtpVideoStreamReceiverTest, RepeatedSecondarySinkDisallowed) {
}
#endif
+TEST_F(RtpVideoStreamReceiverTest, TransformFrame) {
+ rtc::scoped_refptr<MockFrameTransformer> mock_frame_transformer =
+ new rtc::RefCountedObject<MockFrameTransformer>();
+ EXPECT_CALL(*mock_frame_transformer,
+ RegisterTransformedFrameSinkCallback(_, config_.rtp.remote_ssrc));
+ auto receiver = std::make_unique<RtpVideoStreamReceiver>(
+ Clock::GetRealTimeClock(), &mock_transport_, nullptr, nullptr, &config_,
+ rtp_receive_statistics_.get(), nullptr, process_thread_.get(),
+ &mock_nack_sender_, nullptr, &mock_on_complete_frame_callback_, nullptr,
+ mock_frame_transformer);
+ VideoCodec video_codec;
+ video_codec.plType = kPayloadType;
+ video_codec.codecType = kVideoCodecGeneric;
+ receiver->AddReceiveCodec(video_codec, {}, /*raw_payload=*/false);
+
+ RtpPacketReceived rtp_packet;
+ rtp_packet.SetPayloadType(kPayloadType);
+ RTPVideoHeader video_header;
+ rtc::CopyOnWriteBuffer data({1, 2, 3, 4});
+ rtp_packet.SetSequenceNumber(1);
+ video_header.is_first_packet_in_frame = true;
+ video_header.is_last_packet_in_frame = true;
+ video_header.codec = kVideoCodecGeneric;
+ video_header.frame_type = VideoFrameType::kVideoFrameKey;
+ mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(),
+ data.size());
+ EXPECT_CALL(*mock_frame_transformer,
+ TransformFrame(_, RtpDescriptorAuthentication(video_header),
+ config_.rtp.remote_ssrc));
+ receiver->OnReceivedPayloadData(data, rtp_packet, video_header);
+
+ EXPECT_CALL(*mock_frame_transformer,
+ UnregisterTransformedFrameSinkCallback(config_.rtp.remote_ssrc));
+ receiver = nullptr;
+}
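
For reference, a plausible shape of the mock exercised above; the method list mirrors the three expectations in the test, but the exact signatures are an assumption here, not taken from the patch:

class MockFrameTransformer : public FrameTransformerInterface {
 public:
  MOCK_METHOD(void,
              TransformFrame,
              (std::unique_ptr<video_coding::EncodedFrame> frame,
               std::vector<uint8_t> additional_data,
               uint32_t ssrc),
              (override));
  MOCK_METHOD(void,
              RegisterTransformedFrameSinkCallback,
              (rtc::scoped_refptr<TransformedFrameCallback> callback,
               uint32_t ssrc),
              (override));
  MOCK_METHOD(void,
              UnregisterTransformedFrameSinkCallback,
              (uint32_t ssrc),
              (override));
};
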
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/send_delay_stats_unittest.cc b/chromium/third_party/webrtc/video/send_delay_stats_unittest.cc
index 37969e4489e..e7481f929eb 100644
--- a/chromium/third_party/webrtc/video/send_delay_stats_unittest.cc
+++ b/chromium/third_party/webrtc/video/send_delay_stats_unittest.cc
@@ -123,9 +123,11 @@ TEST_F(SendDelayStatsTest, HistogramsAreUpdated) {
EXPECT_TRUE(OnSentPacket(id));
}
stats_.reset();
- EXPECT_EQ(2, metrics::NumSamples("WebRTC.Video.SendDelayInMs"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SendDelayInMs", kDelayMs1));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SendDelayInMs", kDelayMs2));
+ EXPECT_METRIC_EQ(2, metrics::NumSamples("WebRTC.Video.SendDelayInMs"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.SendDelayInMs", kDelayMs1));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.SendDelayInMs", kDelayMs2));
}
} // namespace webrtc
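
The EXPECT_EQ to EXPECT_METRIC_EQ conversion here (and throughout send_statistics_proxy_unittest.cc below) routes histogram assertions through a macro that can be disabled in builds that compile out metrics collection. Conceptually it is a thin guard around the gtest macro; a sketch under that assumption, with a placeholder guard name:

#ifdef HYPOTHETICAL_METRICS_ENABLED  // placeholder, not the real build flag
#define EXPECT_METRIC_EQ(val1, val2) EXPECT_EQ(val1, val2)
#else
#define EXPECT_METRIC_EQ(val1, val2) \
  do {                               \
    (void)(val1);                    \
    (void)(val2);                    \
  } while (0)
#endif
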
diff --git a/chromium/third_party/webrtc/video/send_statistics_proxy.cc b/chromium/third_party/webrtc/video/send_statistics_proxy.cc
index a4f17547bdb..e75b955b20a 100644
--- a/chromium/third_party/webrtc/video/send_statistics_proxy.cc
+++ b/chromium/third_party/webrtc/video/send_statistics_proxy.cc
@@ -16,7 +16,7 @@
#include <limits>
#include <utility>
-#include "absl/algorithm/container.h"
+#include "absl/strings/match.h"
#include "api/video/video_codec_constants.h"
#include "api/video/video_codec_type.h"
#include "api/video_codecs/video_codec.h"
@@ -113,15 +113,17 @@ absl::optional<int> GetFallbackMaxPixels(const std::string& group) {
absl::optional<int> GetFallbackMaxPixelsIfFieldTrialEnabled() {
std::string group =
webrtc::field_trial::FindFullName(kVp8ForcedFallbackEncoderFieldTrial);
- return (group.find("Enabled") == 0) ? GetFallbackMaxPixels(group.substr(7))
- : absl::optional<int>();
+ return (absl::StartsWith(group, "Enabled"))
+ ? GetFallbackMaxPixels(group.substr(7))
+ : absl::optional<int>();
}
absl::optional<int> GetFallbackMaxPixelsIfFieldTrialDisabled() {
std::string group =
webrtc::field_trial::FindFullName(kVp8ForcedFallbackEncoderFieldTrial);
- return (group.find("Disabled") == 0) ? GetFallbackMaxPixels(group.substr(8))
- : absl::optional<int>();
+ return (absl::StartsWith(group, "Disabled"))
+ ? GetFallbackMaxPixels(group.substr(8))
+ : absl::optional<int>();
}
} // namespace
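
The two changes above are behavior-preserving: group.find(prefix) == 0 and absl::StartsWith(group, prefix) both test for a prefix, but the latter states the intent directly and cannot be misread as a substring search. For example:

#include <string>
#include "absl/strings/match.h"

bool IsEnabledOld(const std::string& group) {
  return group.find("Enabled") == 0;  // prefix check, easy to misread
}
bool IsEnabledNew(const std::string& group) {
  return absl::StartsWith(group, "Enabled");
}
// Both return true for "Enabled-100,200" and false for "Disabled"; the
// subsequent substr(7) strips the 7-character "Enabled" prefix.
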
@@ -148,6 +150,7 @@ SendStatisticsProxy::SendStatisticsProxy(
last_num_simulcast_streams_(0),
last_spatial_layer_use_{},
bw_limited_layers_(false),
+ internal_encoder_scaler_(false),
uma_container_(
new UmaSamplesContainer(GetUmaPrefix(content_type_), stats_, clock)) {
}
@@ -205,12 +208,17 @@ void SendStatisticsProxy::UmaSamplesContainer::InitializeBitrateCounters(
retransmit_byte_counter_.SetLast(
it.second.rtp_stats.retransmitted.TotalBytes(), ssrc);
fec_byte_counter_.SetLast(it.second.rtp_stats.fec.TotalBytes(), ssrc);
- if (it.second.is_rtx) {
- rtx_byte_counter_.SetLast(it.second.rtp_stats.transmitted.TotalBytes(),
- ssrc);
- } else {
- media_byte_counter_.SetLast(it.second.rtp_stats.MediaPayloadBytes(),
+ switch (it.second.type) {
+ case VideoSendStream::StreamStats::StreamType::kMedia:
+ media_byte_counter_.SetLast(it.second.rtp_stats.MediaPayloadBytes(),
+ ssrc);
+ break;
+ case VideoSendStream::StreamStats::StreamType::kRtx:
+ rtx_byte_counter_.SetLast(it.second.rtp_stats.transmitted.TotalBytes(),
ssrc);
+ break;
+ case VideoSendStream::StreamStats::StreamType::kFlexfec:
+ break;
}
}
}
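
This hunk, and the GetStatsEntry and DataCountersUpdated changes below, replace the is_rtx/is_flexfec booleans with a three-way StreamType. A sketch of the assumed shape of VideoSendStream::StreamStats after this change (the real definition lives in call/video_send_stream.h; only the fields this diff touches are shown):

struct StreamStats {
  enum class StreamType { kMedia, kRtx, kFlexfec };
  StreamType type = StreamType::kMedia;
  // For kRtx and kFlexfec streams: the media SSRC they protect.
  absl::optional<uint32_t> referenced_media_ssrc;
  // Deprecated mirrors of |type|, kept while downstream code migrates.
  bool is_rtx = false;
  bool is_flexfec = false;
  // ... rtp_stats, frame counts and bitrate fields unchanged.
};
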
@@ -652,9 +660,11 @@ void SendStatisticsProxy::UmaSamplesContainer::UpdateHistograms(
RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "DroppedFrames.Encoder",
current_stats.frames_dropped_by_encoder);
log_stream << uma_prefix_ << "DroppedFrames.Ratelimiter "
- << current_stats.frames_dropped_by_rate_limiter;
+ << current_stats.frames_dropped_by_rate_limiter << "\n";
RTC_HISTOGRAMS_COUNTS_1000(kIndex, uma_prefix_ + "DroppedFrames.Ratelimiter",
current_stats.frames_dropped_by_rate_limiter);
+ log_stream << uma_prefix_ << "DroppedFrames.CongestionWindow "
+ << current_stats.frames_dropped_by_congestion_window;
RTC_LOG(LS_INFO) << log_stream.str();
}
@@ -758,17 +768,42 @@ VideoSendStream::StreamStats* SendStatisticsProxy::GetStatsEntry(
if (it != stats_.substreams.end())
return &it->second;
- bool is_media = absl::c_linear_search(rtp_config_.ssrcs, ssrc);
+ bool is_media = rtp_config_.IsMediaSsrc(ssrc);
bool is_flexfec = rtp_config_.flexfec.payload_type != -1 &&
ssrc == rtp_config_.flexfec.ssrc;
- bool is_rtx = absl::c_linear_search(rtp_config_.rtx.ssrcs, ssrc);
+ bool is_rtx = rtp_config_.IsRtxSsrc(ssrc);
if (!is_media && !is_flexfec && !is_rtx)
return nullptr;
// Insert new entry and return ptr.
VideoSendStream::StreamStats* entry = &stats_.substreams[ssrc];
- entry->is_rtx = is_rtx;
- entry->is_flexfec = is_flexfec;
+ if (is_media) {
+ entry->type = VideoSendStream::StreamStats::StreamType::kMedia;
+ } else if (is_rtx) {
+ entry->type = VideoSendStream::StreamStats::StreamType::kRtx;
+ } else if (is_flexfec) {
+ entry->type = VideoSendStream::StreamStats::StreamType::kFlexfec;
+ } else {
+ RTC_NOTREACHED();
+ }
+ switch (entry->type) {
+ case VideoSendStream::StreamStats::StreamType::kMedia:
+ break;
+ case VideoSendStream::StreamStats::StreamType::kRtx:
+ entry->referenced_media_ssrc =
+ rtp_config_.GetMediaSsrcAssociatedWithRtxSsrc(ssrc);
+ entry->is_rtx = true;
+ break;
+ case VideoSendStream::StreamStats::StreamType::kFlexfec:
+ entry->referenced_media_ssrc =
+ rtp_config_.GetMediaSsrcAssociatedWithFlexfecSsrc(ssrc);
+ entry->is_flexfec = true;
+ break;
+ }
+  // TODO(hbos): Remove these booleans once downstream projects stop depending
+ // on them, reading the value of |type| instead.
+ RTC_DCHECK_EQ(entry->is_rtx, is_rtx);
+ RTC_DCHECK_EQ(entry->is_flexfec, is_flexfec);
return entry;
}
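
The RtpConfig helpers used above resolve an RTX or FlexFEC SSRC back to the media SSRC it protects. A sketch of the RTX lookup, assuming index-based pairing (the same pairing the updated test fixture below encodes when it sets referenced_media_ssrc):

// Illustrative only: rtx.ssrcs[i] is paired with ssrcs[i].
absl::optional<uint32_t> RtxToMediaSsrc(const RtpConfig& config,
                                        uint32_t rtx_ssrc) {
  for (size_t i = 0; i < config.rtx.ssrcs.size(); ++i) {
    if (config.rtx.ssrcs[i] == rtx_ssrc)
      return config.ssrcs[i];
  }
  return absl::nullopt;
}
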
@@ -1042,6 +1077,9 @@ void SendStatisticsProxy::OnFrameDropped(DropReason reason) {
case DropReason::kMediaOptimization:
++stats_.frames_dropped_by_rate_limiter;
break;
+ case DropReason::kCongestionWindow:
+ ++stats_.frames_dropped_by_congestion_window;
+ break;
}
}
@@ -1078,7 +1116,7 @@ void SendStatisticsProxy::UpdateAdaptationStats() {
cpu_counts_.num_framerate_reductions > 0;
bool is_bandwidth_limited = quality_counts_.num_resolution_reductions > 0 ||
quality_counts_.num_framerate_reductions > 0 ||
- bw_limited_layers_;
+ bw_limited_layers_ || internal_encoder_scaler_;
if (is_bandwidth_limited) {
// We may be both CPU limited and bandwidth limited at the same time but
// there is no way to express this in standardized stats. Heuristically,
@@ -1112,6 +1150,10 @@ void SendStatisticsProxy::UpdateAdaptationStats() {
}
}
}
+ if (internal_encoder_scaler_) {
+ stats_.bw_limited_resolution = true;
+ }
+
stats_.quality_limitation_reason =
quality_limitation_reason_tracker_.current_reason();
@@ -1159,6 +1201,15 @@ void SendStatisticsProxy::OnBitrateAllocationUpdated(
last_num_simulcast_streams_ = num_simulcast_streams;
}
+// Informs the observer if an internal encoder scaler has reduced video
+// resolution. |is_scaled| indicates whether the video is scaled down.
+void SendStatisticsProxy::OnEncoderInternalScalerUpdate(bool is_scaled) {
+ rtc::CritScope lock(&crit_);
+ internal_encoder_scaler_ = is_scaled;
+ UpdateAdaptationStats();
+}
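
A hypothetical call site, to show where this hook would fire: an encoder wrapper that notices the encoder shrank the output below the input resolution reports it here, which in turn flips bw_limited_resolution in GetStats() (verified by the new assertions in GetStatsReportsBandwidthLimitedResolution below):

// Illustrative only; not part of this patch.
void ReportInternalScaling(const EncodedImage& image,
                           int input_width,
                           SendStatisticsProxy* stats_proxy) {
  const bool is_scaled =
      image._encodedWidth < static_cast<uint32_t>(input_width);
  stats_proxy->OnEncoderInternalScalerUpdate(is_scaled);
}
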
+
// TODO(asapersson): Include fps changes.
void SendStatisticsProxy::OnInitialQualityResolutionAdaptDown() {
rtc::CritScope lock(&crit_);
@@ -1233,7 +1284,7 @@ void SendStatisticsProxy::DataCountersUpdated(
VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
RTC_DCHECK(stats) << "DataCountersUpdated reported for unknown ssrc " << ssrc;
- if (stats->is_flexfec) {
+ if (stats->type == VideoSendStream::StreamStats::StreamType::kFlexfec) {
// The same counters are reported for both the media ssrc and flexfec ssrc.
// Bitrate stats are summed for all SSRCs. Use fec stats from media update.
return;
@@ -1254,11 +1305,17 @@ void SendStatisticsProxy::DataCountersUpdated(
uma_container_->retransmit_byte_counter_.Set(
counters.retransmitted.TotalBytes(), ssrc);
uma_container_->fec_byte_counter_.Set(counters.fec.TotalBytes(), ssrc);
- if (stats->is_rtx) {
- uma_container_->rtx_byte_counter_.Set(counters.transmitted.TotalBytes(),
- ssrc);
- } else {
- uma_container_->media_byte_counter_.Set(counters.MediaPayloadBytes(), ssrc);
+ switch (stats->type) {
+ case VideoSendStream::StreamStats::StreamType::kMedia:
+ uma_container_->media_byte_counter_.Set(counters.MediaPayloadBytes(),
+ ssrc);
+ break;
+ case VideoSendStream::StreamStats::StreamType::kRtx:
+ uma_container_->rtx_byte_counter_.Set(counters.transmitted.TotalBytes(),
+ ssrc);
+ break;
+ case VideoSendStream::StreamStats::StreamType::kFlexfec:
+ break;
}
}
diff --git a/chromium/third_party/webrtc/video/send_statistics_proxy.h b/chromium/third_party/webrtc/video/send_statistics_proxy.h
index a67725e17ab..abe39992cd9 100644
--- a/chromium/third_party/webrtc/video/send_statistics_proxy.h
+++ b/chromium/third_party/webrtc/video/send_statistics_proxy.h
@@ -78,6 +78,8 @@ class SendStatisticsProxy : public VideoStreamEncoderObserver,
const VideoCodec& codec,
const VideoBitrateAllocation& allocation) override;
+ void OnEncoderInternalScalerUpdate(bool is_scaled) override;
+
void OnMinPixelLimitReached() override;
void OnInitialQualityResolutionAdaptDown() override;
@@ -264,6 +266,8 @@ class SendStatisticsProxy : public VideoStreamEncoderObserver,
// Indicates if the latest bitrate allocation had layers disabled by low
// available bandwidth.
bool bw_limited_layers_ RTC_GUARDED_BY(crit_);
+  // Indicates if the encoder internally downscales the input image.
+ bool internal_encoder_scaler_ RTC_GUARDED_BY(crit_);
AdaptationSteps cpu_counts_ RTC_GUARDED_BY(crit_);
AdaptationSteps quality_counts_ RTC_GUARDED_BY(crit_);
diff --git a/chromium/third_party/webrtc/video/send_statistics_proxy_unittest.cc b/chromium/third_party/webrtc/video/send_statistics_proxy_unittest.cc
index 3b2b3ad3587..8b49a268b63 100644
--- a/chromium/third_party/webrtc/video/send_statistics_proxy_unittest.cc
+++ b/chromium/third_party/webrtc/video/send_statistics_proxy_unittest.cc
@@ -65,10 +65,16 @@ class SendStatisticsProxyTest : public ::testing::Test {
&fake_clock_, GetTestConfig(),
VideoEncoderConfig::ContentType::kRealtimeVideo));
expected_ = VideoSendStream::Stats();
- for (const auto& ssrc : config_.rtp.ssrcs)
- expected_.substreams[ssrc].is_rtx = false;
- for (const auto& ssrc : config_.rtp.rtx.ssrcs)
- expected_.substreams[ssrc].is_rtx = true;
+ for (const auto& ssrc : config_.rtp.ssrcs) {
+ expected_.substreams[ssrc].type =
+ VideoSendStream::StreamStats::StreamType::kMedia;
+ }
+ for (size_t i = 0; i < config_.rtp.rtx.ssrcs.size(); ++i) {
+ uint32_t ssrc = config_.rtp.rtx.ssrcs[i];
+ expected_.substreams[ssrc].type =
+ VideoSendStream::StreamStats::StreamType::kRtx;
+ expected_.substreams[ssrc].referenced_media_ssrc = config_.rtp.ssrcs[i];
+ }
}
VideoSendStream::Config GetTestConfig() {
@@ -89,6 +95,7 @@ class SendStatisticsProxyTest : public ::testing::Test {
config.rtp.rtx.ssrcs.push_back(kSecondRtxSsrc);
config.rtp.flexfec.payload_type = 50;
config.rtp.flexfec.ssrc = kFlexFecSsrc;
+ config.rtp.flexfec.protected_media_ssrcs = {kFirstSsrc};
return config;
}
@@ -123,7 +130,7 @@ class SendStatisticsProxyTest : public ::testing::Test {
const VideoSendStream::StreamStats& a = it->second;
const VideoSendStream::StreamStats& b = corresponding_it->second;
- EXPECT_EQ(a.is_rtx, b.is_rtx);
+ EXPECT_EQ(a.type, b.type);
EXPECT_EQ(a.frame_counts.key_frames, b.frame_counts.key_frames);
EXPECT_EQ(a.frame_counts.delta_frames, b.frame_counts.delta_frames);
EXPECT_EQ(a.total_bitrate_bps, b.total_bitrate_bps);
@@ -397,7 +404,8 @@ TEST_F(SendStatisticsProxyTest,
// TODO(https://crbug.com/webrtc/10640): When the RateTracker uses a Clock
// this test can stop relying on rtc::ScopedFakeClock.
rtc::ScopedFakeClock fake_global_clock;
- fake_global_clock.SetTime(Timestamp::ms(fake_clock_.TimeInMilliseconds()));
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
statistics_proxy_->OnSetEncoderTargetRate(kTargetBytesPerSecond * 8);
EncodedImage encoded_image;
@@ -408,7 +416,8 @@ TEST_F(SendStatisticsProxyTest,
statistics_proxy_->GetStats().total_encoded_bytes_target;
// Second frame
fake_clock_.AdvanceTimeMilliseconds(kInterframeDelayMs);
- fake_global_clock.SetTime(Timestamp::ms(fake_clock_.TimeInMilliseconds()));
+ fake_global_clock.SetTime(
+ Timestamp::Millis(fake_clock_.TimeInMilliseconds()));
encoded_image.SetTimestamp(encoded_image.Timestamp() +
90 * kInterframeDelayMs);
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
@@ -544,9 +553,10 @@ TEST_F(SendStatisticsProxyTest, AdaptChangesNotReported_AdaptationNotEnabled) {
// Min runtime has passed.
fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
- EXPECT_EQ(0,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
}
TEST_F(SendStatisticsProxyTest, AdaptChangesNotReported_MinRuntimeNotPassed) {
@@ -561,9 +571,10 @@ TEST_F(SendStatisticsProxyTest, AdaptChangesNotReported_MinRuntimeNotPassed) {
// Min runtime has not passed.
fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000 - 1);
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
- EXPECT_EQ(0,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
}
TEST_F(SendStatisticsProxyTest, ZeroAdaptChangesReported) {
@@ -578,11 +589,13 @@ TEST_F(SendStatisticsProxyTest, ZeroAdaptChangesReported) {
// Min runtime has passed.
fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 0));
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 0));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 0));
}
@@ -601,8 +614,10 @@ TEST_F(SendStatisticsProxyTest, CpuAdaptChangesReported) {
quality_counts);
fake_clock_.AdvanceTimeMilliseconds(10000);
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 6));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 6));
}
TEST_F(SendStatisticsProxyTest, ExcludesInitialQualityAdaptDownChange) {
@@ -621,9 +636,9 @@ TEST_F(SendStatisticsProxyTest, ExcludesInitialQualityAdaptDownChange) {
statistics_proxy_->OnInitialQualityResolutionAdaptDown();
fake_clock_.AdvanceTimeMilliseconds(10000);
statistics_proxy_.reset();
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 0));
}
@@ -653,9 +668,9 @@ TEST_F(SendStatisticsProxyTest, ExcludesInitialQualityAdaptDownChanges) {
quality_counts);
fake_clock_.AdvanceTimeMilliseconds(10000);
statistics_proxy_.reset();
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
}
@@ -676,9 +691,9 @@ TEST_F(SendStatisticsProxyTest, InitialQualityAdaptChangesNotExcludedOnError) {
statistics_proxy_->OnInitialQualityResolutionAdaptDown();
fake_clock_.AdvanceTimeMilliseconds(10000);
statistics_proxy_.reset();
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
}
@@ -729,9 +744,9 @@ TEST_F(SendStatisticsProxyTest, ExcludesInitialQualityAdaptDownAndUpChanges) {
fake_clock_.AdvanceTimeMilliseconds(10000);
statistics_proxy_.reset();
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 24));
}
@@ -799,9 +814,9 @@ TEST_F(SendStatisticsProxyTest, AdaptChangesStatsExcludesDisabledTime) {
// Adapt changes: 3, elapsed time: 30 sec => 6 per minute.
statistics_proxy_.reset();
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
}
@@ -818,9 +833,10 @@ TEST_F(SendStatisticsProxyTest,
// Min runtime has passed but scaling not enabled.
fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
- EXPECT_EQ(0,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
}
TEST_F(SendStatisticsProxyTest, QualityAdaptChangesStatsExcludesSuspendedTime) {
@@ -855,9 +871,9 @@ TEST_F(SendStatisticsProxyTest, QualityAdaptChangesStatsExcludesSuspendedTime) {
// Adapt changes: 3, elapsed time: 30 sec => 6 per minute.
statistics_proxy_.reset();
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
}
@@ -913,8 +929,10 @@ TEST_F(SendStatisticsProxyTest, CpuAdaptChangesStatsExcludesSuspendedTime) {
// Adapt changes: 2, elapsed time: 30 sec => 4 per minute.
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 4));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 4));
}
TEST_F(SendStatisticsProxyTest, AdaptChangesStatsNotStartedIfVideoSuspended) {
@@ -942,8 +960,10 @@ TEST_F(SendStatisticsProxyTest, AdaptChangesStatsNotStartedIfVideoSuspended) {
// Adapt changes: 1, elapsed time: 10 sec => 6 per minute.
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 6));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 6));
}
TEST_F(SendStatisticsProxyTest, AdaptChangesStatsRestartsOnFirstSentPacket) {
@@ -966,9 +986,9 @@ TEST_F(SendStatisticsProxyTest, AdaptChangesStatsRestartsOnFirstSentPacket) {
// Adapt changes: 1, elapsed time: 10 sec => 6 per minute.
statistics_proxy_.reset();
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Quality", 6));
}
@@ -1007,8 +1027,10 @@ TEST_F(SendStatisticsProxyTest, AdaptChangesStatsStartedAfterFirstSentPacket) {
// Adapt changes: 1, elapsed time: 20 sec => 3 per minute.
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 3));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 3));
}
TEST_F(SendStatisticsProxyTest, AdaptChangesReportedAfterContentSwitch) {
@@ -1036,10 +1058,12 @@ TEST_F(SendStatisticsProxyTest, AdaptChangesReportedAfterContentSwitch) {
VideoEncoderConfig config;
config.content_type = VideoEncoderConfig::ContentType::kScreen;
statistics_proxy_->OnEncoderReconfigured(config, {});
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 8));
- EXPECT_EQ(0,
- metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.AdaptChangesPerMinute.Cpu", 8));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.AdaptChangesPerMinute.Quality"));
// First RTP packet sent, scaling enabled.
UpdateDataCounters(kFirstSsrc);
@@ -1063,12 +1087,15 @@ TEST_F(SendStatisticsProxyTest, AdaptChangesReportedAfterContentSwitch) {
fake_clock_.AdvanceTimeMilliseconds(120000);
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Cpu"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Cpu", 2));
- EXPECT_EQ(0, metrics::NumSamples(
- "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Quality"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Cpu"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Cpu", 2));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.AdaptChangesPerMinute.Quality"));
}
TEST_F(SendStatisticsProxyTest,
@@ -1452,12 +1479,12 @@ TEST_F(SendStatisticsProxyTest, SwitchContentTypeUpdatesHistograms) {
VideoEncoderConfig config;
config.content_type = VideoEncoderConfig::ContentType::kRealtimeVideo;
statistics_proxy_->OnEncoderReconfigured(config, {});
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
// Switch to screenshare, real-time stats should be updated.
config.content_type = VideoEncoderConfig::ContentType::kScreen;
statistics_proxy_->OnEncoderReconfigured(config, {});
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
}
TEST_F(SendStatisticsProxyTest, InputResolutionHistogramsAreUpdated) {
@@ -1465,10 +1492,12 @@ TEST_F(SendStatisticsProxyTest, InputResolutionHistogramsAreUpdated) {
statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.InputWidthInPixels", kWidth));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InputHeightInPixels"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.InputHeightInPixels", kHeight));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InputWidthInPixels"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.InputWidthInPixels", kWidth));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InputHeightInPixels"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.InputHeightInPixels", kHeight));
}
TEST_F(SendStatisticsProxyTest, SentResolutionHistogramsAreUpdated) {
@@ -1486,8 +1515,8 @@ TEST_F(SendStatisticsProxyTest, SentResolutionHistogramsAreUpdated) {
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
}
SetUp(); // Reset stats proxy also causes histograms to be reported.
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.SentWidthInPixels"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.SentHeightInPixels"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.SentWidthInPixels"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.SentHeightInPixels"));
// Enough samples, max resolution per frame should be reported.
encoded_image.SetTimestamp(0xffff0000); // Will wrap.
@@ -1503,10 +1532,12 @@ TEST_F(SendStatisticsProxyTest, SentResolutionHistogramsAreUpdated) {
}
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SentWidthInPixels"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SentWidthInPixels", kWidth));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SentHeightInPixels"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SentHeightInPixels", kHeight));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.SentWidthInPixels"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.SentWidthInPixels", kWidth));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.SentHeightInPixels"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.SentHeightInPixels", kHeight));
}
TEST_F(SendStatisticsProxyTest, InputFpsHistogramIsUpdated) {
@@ -1518,8 +1549,9 @@ TEST_F(SendStatisticsProxyTest, InputFpsHistogramIsUpdated) {
statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
}
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InputFramesPerSecond"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.InputFramesPerSecond", kFps));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InputFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.InputFramesPerSecond", kFps));
}
TEST_F(SendStatisticsProxyTest, SentFpsHistogramIsUpdated) {
@@ -1535,8 +1567,9 @@ TEST_F(SendStatisticsProxyTest, SentFpsHistogramIsUpdated) {
statistics_proxy_->OnSendEncodedImage(encoded_image, nullptr);
}
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SentFramesPerSecond"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SentFramesPerSecond", kFps));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.SentFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.SentFramesPerSecond", kFps));
}
TEST_F(SendStatisticsProxyTest, InputFpsHistogramExcludesSuspendedTime) {
@@ -1558,8 +1591,9 @@ TEST_F(SendStatisticsProxyTest, InputFpsHistogramExcludesSuspendedTime) {
}
// Suspended time interval should not affect the framerate.
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.InputFramesPerSecond"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.InputFramesPerSecond", kFps));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.InputFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.InputFramesPerSecond", kFps));
}
TEST_F(SendStatisticsProxyTest, SentFpsHistogramExcludesSuspendedTime) {
@@ -1584,8 +1618,9 @@ TEST_F(SendStatisticsProxyTest, SentFpsHistogramExcludesSuspendedTime) {
}
// Suspended time interval should not affect the framerate.
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SentFramesPerSecond"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SentFramesPerSecond", kFps));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.SentFramesPerSecond"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.SentFramesPerSecond", kFps));
}
TEST_F(SendStatisticsProxyTest, CpuLimitedHistogramNotUpdatedWhenDisabled) {
@@ -1600,8 +1635,8 @@ TEST_F(SendStatisticsProxyTest, CpuLimitedHistogramNotUpdatedWhenDisabled) {
statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
statistics_proxy_.reset();
- EXPECT_EQ(0,
- metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
}
TEST_F(SendStatisticsProxyTest, CpuLimitedHistogramUpdated) {
@@ -1624,9 +1659,9 @@ TEST_F(SendStatisticsProxyTest, CpuLimitedHistogramUpdated) {
statistics_proxy_->OnIncomingFrame(kWidth, kHeight);
statistics_proxy_.reset();
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.CpuLimitedResolutionInPercent", 50));
}
@@ -1634,15 +1669,17 @@ TEST_F(SendStatisticsProxyTest, LifetimeHistogramIsUpdated) {
const int64_t kTimeSec = 3;
fake_clock_.AdvanceTimeMilliseconds(kTimeSec * 1000);
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SendStreamLifetimeInSeconds"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.SendStreamLifetimeInSeconds",
- kTimeSec));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.SendStreamLifetimeInSeconds"));
+ EXPECT_METRIC_EQ(
+ 1,
+ metrics::NumEvents("WebRTC.Video.SendStreamLifetimeInSeconds", kTimeSec));
}
TEST_F(SendStatisticsProxyTest, CodecTypeHistogramIsUpdated) {
fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoder.CodecType"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoder.CodecType"));
}
TEST_F(SendStatisticsProxyTest, PauseEventHistogramIsUpdated) {
@@ -1652,8 +1689,9 @@ TEST_F(SendStatisticsProxyTest, PauseEventHistogramIsUpdated) {
// Min runtime has passed.
fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 0));
}
TEST_F(SendStatisticsProxyTest,
@@ -1664,8 +1702,8 @@ TEST_F(SendStatisticsProxyTest,
// Min runtime has not passed.
fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000 - 1);
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
}
TEST_F(SendStatisticsProxyTest,
@@ -1673,7 +1711,7 @@ TEST_F(SendStatisticsProxyTest,
// First RTP packet not sent.
fake_clock_.AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
}
TEST_F(SendStatisticsProxyTest, NoPauseEvent) {
@@ -1686,10 +1724,12 @@ TEST_F(SendStatisticsProxyTest, NoPauseEvent) {
statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 0));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 0));
}
TEST_F(SendStatisticsProxyTest, OnePauseEvent) {
@@ -1704,10 +1744,12 @@ TEST_F(SendStatisticsProxyTest, OnePauseEvent) {
statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 1));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 30));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 1));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 30));
}
TEST_F(SendStatisticsProxyTest, TwoPauseEvents) {
@@ -1732,10 +1774,12 @@ TEST_F(SendStatisticsProxyTest, TwoPauseEvents) {
statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 2));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 5));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.NumberOfPauseEvents"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.NumberOfPauseEvents", 2));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.PausedTimeInPercent", 5));
}
TEST_F(SendStatisticsProxyTest,
@@ -1750,7 +1794,7 @@ TEST_F(SendStatisticsProxyTest,
statistics_proxy_->OnSetEncoderTargetRate(0); // VideoSendStream::Stop
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.PausedTimeInPercent"));
}
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8) {
@@ -1767,10 +1811,12 @@ TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8) {
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8.S0"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8.S0", kQpIdx0));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8.S1"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8.S1", kQpIdx1));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8.S0"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8.S0", kQpIdx0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8.S1"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8.S1", kQpIdx1));
}
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8OneSsrc) {
@@ -1789,8 +1835,9 @@ TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8OneSsrc) {
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8", kQpIdx0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp8"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp8", kQpIdx0));
}
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9) {
@@ -1808,10 +1855,12 @@ TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9) {
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9.S0"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9.S0", kQpIdx0));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9.S1"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9.S1", kQpIdx1));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9.S0"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9.S0", kQpIdx0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9.S1"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9.S1", kQpIdx1));
}
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9OneSpatialLayer) {
@@ -1830,8 +1879,9 @@ TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9OneSpatialLayer) {
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9", kQpIdx0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.Vp9"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.Encoded.Qp.Vp9", kQpIdx0));
}
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_H264) {
@@ -1848,10 +1898,12 @@ TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_H264) {
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.H264.S0"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.H264.S0", kQpIdx0));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.H264.S1"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.H264.S1", kQpIdx1));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.H264.S0"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.H264.S0", kQpIdx0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.Encoded.Qp.H264.S1"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Encoded.Qp.H264.S1", kQpIdx1));
}
TEST_F(SendStatisticsProxyTest,
@@ -1883,10 +1935,10 @@ TEST_F(SendStatisticsProxyTest,
// Histograms are updated when the statistics_proxy_ is deleted.
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples(
- "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
- EXPECT_EQ(0, metrics::NumSamples(
- "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
}
TEST_F(SendStatisticsProxyTest,
@@ -1924,13 +1976,14 @@ TEST_F(SendStatisticsProxyTest,
// Histograms are updated when the statistics_proxy_ is deleted.
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.BandwidthLimitedResolutionInPercent", 0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.BandwidthLimitedResolutionInPercent",
+ 0));
// No resolution disabled.
- EXPECT_EQ(0, metrics::NumSamples(
- "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
}
TEST_F(SendStatisticsProxyTest,
@@ -1965,15 +2018,17 @@ TEST_F(SendStatisticsProxyTest,
// Histograms are updated when the statistics_proxy_ is deleted.
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.BandwidthLimitedResolutionInPercent", 100));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.BandwidthLimitedResolutionInPercent",
+ 100));
// One resolution disabled.
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.BandwidthLimitedResolutionsDisabled", 1));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.BandwidthLimitedResolutionsDisabled"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.BandwidthLimitedResolutionsDisabled",
+ 1));
}
TEST_F(SendStatisticsProxyTest,
@@ -1991,10 +2046,10 @@ TEST_F(SendStatisticsProxyTest,
// Histograms are updated when the statistics_proxy_ is deleted.
statistics_proxy_.reset();
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
0, metrics::NumSamples("WebRTC.Video.QualityLimitedResolutionInPercent"));
- EXPECT_EQ(0, metrics::NumSamples(
- "WebRTC.Video.QualityLimitedResolutionDownscales"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.QualityLimitedResolutionDownscales"));
}
TEST_F(SendStatisticsProxyTest,
@@ -2012,13 +2067,13 @@ TEST_F(SendStatisticsProxyTest,
// Histograms are updated when the statistics_proxy_ is deleted.
statistics_proxy_.reset();
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples("WebRTC.Video.QualityLimitedResolutionInPercent"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.QualityLimitedResolutionInPercent", 0));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.QualityLimitedResolutionInPercent", 0));
// No resolution downscale.
- EXPECT_EQ(0, metrics::NumSamples(
- "WebRTC.Video.QualityLimitedResolutionDownscales"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples(
+ "WebRTC.Video.QualityLimitedResolutionDownscales"));
}
TEST_F(SendStatisticsProxyTest,
@@ -2036,14 +2091,15 @@ TEST_F(SendStatisticsProxyTest,
statistics_proxy_->OnSendEncodedImage(encoded_image, &kDefaultCodecInfo);
// Histograms are updated when the statistics_proxy_ is deleted.
statistics_proxy_.reset();
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples("WebRTC.Video.QualityLimitedResolutionInPercent"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.QualityLimitedResolutionInPercent", 100));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.QualityLimitedResolutionInPercent",
+ 100));
// Resolution downscales.
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.QualityLimitedResolutionDownscales"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.QualityLimitedResolutionDownscales"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.QualityLimitedResolutionDownscales",
kDownscales));
}
@@ -2098,6 +2154,15 @@ TEST_F(SendStatisticsProxyTest, GetStatsReportsBandwidthLimitedResolution) {
allocation.set_bw_limited(true);
statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
+
+ // Revert for the next test.
+ allocation.set_bw_limited(false);
+ statistics_proxy_->OnBitrateAllocationUpdated(codec, allocation);
+ EXPECT_FALSE(statistics_proxy_->GetStats().bw_limited_resolution);
+
+ // Internal encoder scaler reduced resolution.
+  statistics_proxy_->OnEncoderInternalScalerUpdate(/*is_scaled=*/true);
+ EXPECT_TRUE(statistics_proxy_->GetStats().bw_limited_resolution);
}
TEST_F(SendStatisticsProxyTest, GetStatsReportsTargetMediaBitrate) {
@@ -2249,22 +2314,27 @@ TEST_F(SendStatisticsProxyTest, ResetsRtcpCountersOnContentChange) {
config.content_type = VideoEncoderConfig::ContentType::kScreen;
statistics_proxy_->OnEncoderReconfigured(config, {});
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.NackPacketsReceivedPerMinute"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FirPacketsReceivedPerMinute"));
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PliPacketsReceivedPerMinute"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.UniqueNackRequestsReceivedInPercent"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.NackPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.FirPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.PliPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.UniqueNackRequestsReceivedInPercent"));
const int kRate = 60 * 2; // Packets per minute with two streams.
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.NackPacketsReceivedPerMinute",
- 1 * kRate));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.FirPacketsReceivedPerMinute",
- 2 * kRate));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PliPacketsReceivedPerMinute",
- 3 * kRate));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.NackPacketsReceivedPerMinute",
+ 1 * kRate));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.FirPacketsReceivedPerMinute",
+ 2 * kRate));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.PliPacketsReceivedPerMinute",
+ 3 * kRate));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.UniqueNackRequestsReceivedInPercent",
4 * 100 / 5));
@@ -2285,29 +2355,50 @@ TEST_F(SendStatisticsProxyTest, ResetsRtcpCountersOnContentChange) {
SetUp(); // Reset stats proxy also causes histograms to be reported.
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.NackPacketsReceivedPerMinute"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.FirPacketsReceivedPerMinute"));
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.PliPacketsReceivedPerMinute"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.NackPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.FirPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(
+ "WebRTC.Video.Screenshare.PliPacketsReceivedPerMinute"));
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples(
"WebRTC.Video.Screenshare.UniqueNackRequestsReceivedInPercent"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.Screenshare.NackPacketsReceivedPerMinute",
- 1 * kRate));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.Screenshare.FirPacketsReceivedPerMinute",
- 2 * kRate));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.Screenshare.PliPacketsReceivedPerMinute",
- 3 * kRate));
- EXPECT_EQ(1,
- metrics::NumEvents(
- "WebRTC.Video.Screenshare.UniqueNackRequestsReceivedInPercent",
- 4 * 100 / 5));
+ EXPECT_METRIC_EQ(
+ 1,
+ metrics::NumEvents(
+ "WebRTC.Video.Screenshare.NackPacketsReceivedPerMinute", 1 * kRate));
+ EXPECT_METRIC_EQ(
+ 1,
+ metrics::NumEvents("WebRTC.Video.Screenshare.FirPacketsReceivedPerMinute",
+ 2 * kRate));
+ EXPECT_METRIC_EQ(
+ 1,
+ metrics::NumEvents("WebRTC.Video.Screenshare.PliPacketsReceivedPerMinute",
+ 3 * kRate));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.UniqueNackRequestsReceivedInPercent",
+ 4 * 100 / 5));
+}
+
+TEST_F(SendStatisticsProxyTest, GetStatsReportsIsRtx) {
+ StreamDataCountersCallback* proxy =
+ static_cast<StreamDataCountersCallback*>(statistics_proxy_.get());
+ StreamDataCounters counters;
+ proxy->DataCountersUpdated(counters, kFirstSsrc);
+ proxy->DataCountersUpdated(counters, kFirstRtxSsrc);
+
+ EXPECT_NE(GetStreamStats(kFirstSsrc).type,
+ VideoSendStream::StreamStats::StreamType::kRtx);
+ EXPECT_EQ(GetStreamStats(kFirstSsrc).referenced_media_ssrc, absl::nullopt);
+ EXPECT_EQ(GetStreamStats(kFirstRtxSsrc).type,
+ VideoSendStream::StreamStats::StreamType::kRtx);
+ EXPECT_EQ(GetStreamStats(kFirstRtxSsrc).referenced_media_ssrc, kFirstSsrc);
}
TEST_F(SendStatisticsProxyTest, GetStatsReportsIsFlexFec) {
@@ -2321,8 +2412,12 @@ TEST_F(SendStatisticsProxyTest, GetStatsReportsIsFlexFec) {
proxy->DataCountersUpdated(counters, kFirstSsrc);
proxy->DataCountersUpdated(counters, kFlexFecSsrc);
- EXPECT_FALSE(GetStreamStats(kFirstSsrc).is_flexfec);
- EXPECT_TRUE(GetStreamStats(kFlexFecSsrc).is_flexfec);
+ EXPECT_NE(GetStreamStats(kFirstSsrc).type,
+ VideoSendStream::StreamStats::StreamType::kFlexfec);
+ EXPECT_EQ(GetStreamStats(kFirstSsrc).referenced_media_ssrc, absl::nullopt);
+ EXPECT_EQ(GetStreamStats(kFlexFecSsrc).type,
+ VideoSendStream::StreamStats::StreamType::kFlexfec);
+ EXPECT_EQ(GetStreamStats(kFlexFecSsrc).referenced_media_ssrc, kFirstSsrc);
}
TEST_F(SendStatisticsProxyTest, SendBitratesAreReportedWithFlexFecEnabled) {
@@ -2359,24 +2454,30 @@ TEST_F(SendStatisticsProxyTest, SendBitratesAreReportedWithFlexFecEnabled) {
statistics_proxy_.reset();
// Interval: 3500 bytes * 4 / 2 sec = 7000 bytes / sec = 56 kbps
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.BitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.BitrateSentInKbps", 56));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.BitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents("WebRTC.Video.BitrateSentInKbps", 56));
// Interval: 3500 bytes * 2 / 2 sec = 3500 bytes / sec = 28 kbps
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 28));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 28));
// Interval: (2000 - 2 * 250) bytes / 2 sec = 1500 bytes / sec = 12 kbps
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.MediaBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.MediaBitrateSentInKbps", 12));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.MediaBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.MediaBitrateSentInKbps", 12));
// Interval: 1000 bytes * 4 / 2 sec = 2000 bytes / sec = 16 kbps
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PaddingBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PaddingBitrateSentInKbps", 16));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.PaddingBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.PaddingBitrateSentInKbps", 16));
// Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 3));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 3));
// Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.RetransmittedBitrateSentInKbps"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.RetransmittedBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.RetransmittedBitrateSentInKbps", 3));
}
@@ -2414,24 +2515,30 @@ TEST_F(SendStatisticsProxyTest, ResetsRtpCountersOnContentChange) {
statistics_proxy_->OnEncoderReconfigured(config, {});
// Interval: 3500 bytes * 4 / 2 sec = 7000 bytes / sec = 56 kbps
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.BitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.BitrateSentInKbps", 56));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.BitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents("WebRTC.Video.BitrateSentInKbps", 56));
// Interval: 3500 bytes * 2 / 2 sec = 3500 bytes / sec = 28 kbps
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 28));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 28));
// Interval: (2000 - 2 * 250) bytes / 2 sec = 1500 bytes / sec = 12 kbps
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.MediaBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.MediaBitrateSentInKbps", 12));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples("WebRTC.Video.MediaBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.MediaBitrateSentInKbps", 12));
// Interval: 1000 bytes * 4 / 2 sec = 2000 bytes / sec = 16 kbps
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PaddingBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.PaddingBitrateSentInKbps", 16));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.PaddingBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.PaddingBitrateSentInKbps", 16));
// Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 3));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 3));
// Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.RetransmittedBitrateSentInKbps"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.RetransmittedBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.RetransmittedBitrateSentInKbps", 3));
// New metric counters but same data counters.
@@ -2460,36 +2567,39 @@ TEST_F(SendStatisticsProxyTest, ResetsRtpCountersOnContentChange) {
statistics_proxy_.reset();
// Interval: 3500 bytes * 4 / 2 sec = 7000 bytes / sec = 56 kbps
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.Screenshare.BitrateSentInKbps"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.Screenshare.BitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.Screenshare.BitrateSentInKbps", 56));
// Interval: 3500 bytes * 2 / 2 sec = 3500 bytes / sec = 28 kbps
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples("WebRTC.Video.Screenshare.RtxBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.Screenshare.RtxBitrateSentInKbps", 28));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.RtxBitrateSentInKbps", 28));
// Interval: (2000 - 2 * 250) bytes / 2 sec = 1500 bytes / sec = 12 kbps
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.MediaBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.Screenshare.MediaBitrateSentInKbps", 12));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.MediaBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.MediaBitrateSentInKbps",
+ 12));
// Interval: 1000 bytes * 4 / 2 sec = 2000 bytes / sec = 16 kbps
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.PaddingBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.Screenshare.PaddingBitrateSentInKbps", 16));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.PaddingBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents("WebRTC.Video.Screenshare.PaddingBitrateSentInKbps",
+ 16));
// Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
1, metrics::NumSamples("WebRTC.Video.Screenshare.FecBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents(
- "WebRTC.Video.Screenshare.FecBitrateSentInKbps", 3));
+ EXPECT_METRIC_EQ(1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.FecBitrateSentInKbps", 3));
// Interval: 375 bytes * 2 / 2 sec = 375 bytes / sec = 3 kbps
- EXPECT_EQ(1, metrics::NumSamples(
- "WebRTC.Video.Screenshare.RetransmittedBitrateSentInKbps"));
- EXPECT_EQ(1,
- metrics::NumEvents(
- "WebRTC.Video.Screenshare.RetransmittedBitrateSentInKbps", 3));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(
+ "WebRTC.Video.Screenshare.RetransmittedBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(
+ "WebRTC.Video.Screenshare.RetransmittedBitrateSentInKbps", 3));
}
TEST_F(SendStatisticsProxyTest, RtxBitrateIsZeroWhenEnabledAndNoRtxDataIsSent) {
@@ -2512,8 +2622,9 @@ TEST_F(SendStatisticsProxyTest, RtxBitrateIsZeroWhenEnabledAndNoRtxDataIsSent) {
// RTX enabled. No data sent over RTX.
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.RtxBitrateSentInKbps", 0));
}
TEST_F(SendStatisticsProxyTest, RtxBitrateNotReportedWhenNotEnabled) {
@@ -2540,7 +2651,7 @@ TEST_F(SendStatisticsProxyTest, RtxBitrateNotReportedWhenNotEnabled) {
// RTX not enabled.
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.RtxBitrateSentInKbps"));
}
TEST_F(SendStatisticsProxyTest, FecBitrateIsZeroWhenEnabledAndNoFecDataIsSent) {
@@ -2562,8 +2673,9 @@ TEST_F(SendStatisticsProxyTest, FecBitrateIsZeroWhenEnabledAndNoFecDataIsSent) {
// FEC enabled. No FEC data sent.
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
- EXPECT_EQ(1, metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 0));
+ EXPECT_METRIC_EQ(1, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumEvents("WebRTC.Video.FecBitrateSentInKbps", 0));
}
TEST_F(SendStatisticsProxyTest, FecBitrateNotReportedWhenNotEnabled) {
@@ -2590,7 +2702,7 @@ TEST_F(SendStatisticsProxyTest, FecBitrateNotReportedWhenNotEnabled) {
// FEC not enabled.
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
+ EXPECT_METRIC_EQ(0, metrics::NumSamples("WebRTC.Video.FecBitrateSentInKbps"));
}
TEST_F(SendStatisticsProxyTest, GetStatsReportsEncoderImplementationName) {
@@ -2690,49 +2802,63 @@ class ForcedFallbackEnabled : public ForcedFallbackTest {
TEST_F(ForcedFallbackEnabled, StatsNotUpdatedIfMinRunTimeHasNotPassed) {
InsertEncodedFrames(kMinFrames, kFrameIntervalMs - 1);
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
}
TEST_F(ForcedFallbackEnabled, StatsUpdated) {
InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
- EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 0));
- EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
- EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 0));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 0));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 0));
}
TEST_F(ForcedFallbackEnabled, StatsNotUpdatedIfNotVp8) {
codec_info_.codecType = kVideoCodecVP9;
InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
}
TEST_F(ForcedFallbackEnabled, StatsNotUpdatedForTemporalLayers) {
codec_info_.codecSpecific.VP8.temporalIdx = 1;
InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
}
TEST_F(ForcedFallbackEnabled, StatsNotUpdatedForSimulcast) {
encoded_image_.SetSpatialIndex(1);
InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
}
TEST_F(ForcedFallbackDisabled, StatsNotUpdatedIfNoFieldTrial) {
InsertEncodedFrames(kMinFrames, kFrameIntervalMs);
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
}
TEST_F(ForcedFallbackDisabled, EnteredLowResolutionSetIfAtMaxPixels) {
@@ -2781,10 +2907,14 @@ TEST_F(ForcedFallbackEnabled, OneFallbackEvent) {
EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
- EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 25));
- EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
- EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 3));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 25));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 3));
}
TEST_F(ForcedFallbackEnabled, ThreeFallbackEvents) {
@@ -2810,10 +2940,14 @@ TEST_F(ForcedFallbackEnabled, ThreeFallbackEvents) {
EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
- EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 25));
- EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
- EXPECT_EQ(1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 3));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackTimeInPercent.Vp8", 25));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumEvents(kPrefix + "FallbackChangesPerMinute.Vp8", 3));
}
TEST_F(ForcedFallbackEnabled, NoFallbackIfAboveMaxPixels) {
@@ -2823,8 +2957,10 @@ TEST_F(ForcedFallbackEnabled, NoFallbackIfAboveMaxPixels) {
EXPECT_FALSE(statistics_proxy_->GetStats().has_entered_low_resolution);
statistics_proxy_.reset();
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
- EXPECT_EQ(0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(0,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 0, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
}
TEST_F(ForcedFallbackEnabled, FallbackIfAtMaxPixels) {
@@ -2834,8 +2970,10 @@ TEST_F(ForcedFallbackEnabled, FallbackIfAtMaxPixels) {
EXPECT_TRUE(statistics_proxy_->GetStats().has_entered_low_resolution);
statistics_proxy_.reset();
- EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
- EXPECT_EQ(1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
+ EXPECT_METRIC_EQ(1,
+ metrics::NumSamples(kPrefix + "FallbackTimeInPercent.Vp8"));
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples(kPrefix + "FallbackChangesPerMinute.Vp8"));
}
} // namespace webrtc
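Each "Interval:" comment in the histogram checks above applies the same bytes-over-window conversion. A minimal standalone sketch of that arithmetic (the helper name is illustrative, not part of the test fixture):

#include <cstdint>

// bytes * 8 bits / interval in ms == kilobits per second.
// E.g. the FEC case: 375 bytes * 2 packets over 2 s -> 750 * 8 / 2000 = 3 kbps.
int64_t BitrateKbps(int64_t total_bytes, int64_t interval_ms) {
  return total_bytes * 8 / interval_ms;
}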
diff --git a/chromium/third_party/webrtc/video/stream_synchronization.cc b/chromium/third_party/webrtc/video/stream_synchronization.cc
index 493ed33b1db..8c808f13c6b 100644
--- a/chromium/third_party/webrtc/video/stream_synchronization.cc
+++ b/chromium/third_party/webrtc/video/stream_synchronization.cc
@@ -10,7 +10,6 @@
#include "video/stream_synchronization.h"
-#include <assert.h>
#include <stdlib.h>
#include <algorithm>
@@ -25,8 +24,8 @@ static const int kFilterLength = 4;
// Minimum difference between audio and video to warrant a change.
static const int kMinDeltaMs = 30;
-StreamSynchronization::StreamSynchronization(int video_stream_id,
- int audio_stream_id)
+StreamSynchronization::StreamSynchronization(uint32_t video_stream_id,
+ uint32_t audio_stream_id)
: video_stream_id_(video_stream_id),
audio_stream_id_(audio_stream_id),
base_target_delay_ms_(0),
@@ -36,7 +35,6 @@ bool StreamSynchronization::ComputeRelativeDelay(
const Measurements& audio_measurement,
const Measurements& video_measurement,
int* relative_delay_ms) {
- assert(relative_delay_ms);
int64_t audio_last_capture_time_ms;
if (!audio_measurement.rtp_to_ntp.Estimate(audio_measurement.latest_timestamp,
&audio_last_capture_time_ms)) {
@@ -55,6 +53,7 @@ bool StreamSynchronization::ComputeRelativeDelay(
video_measurement.latest_receive_time_ms -
audio_measurement.latest_receive_time_ms -
(video_last_capture_time_ms - audio_last_capture_time_ms);
+
if (*relative_delay_ms > kMaxDeltaDelayMs ||
*relative_delay_ms < -kMaxDeltaDelayMs) {
return false;
@@ -66,14 +65,14 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
int current_audio_delay_ms,
int* total_audio_delay_target_ms,
int* total_video_delay_target_ms) {
- assert(total_audio_delay_target_ms && total_video_delay_target_ms);
-
int current_video_delay_ms = *total_video_delay_target_ms;
+
RTC_LOG(LS_VERBOSE) << "Audio delay: " << current_audio_delay_ms
<< " current diff: " << relative_delay_ms
<< " for stream " << audio_stream_id_;
- // Calculate the difference between the lowest possible video delay and
- // the current audio delay.
+
+ // Calculate the difference between the lowest possible video delay and the
+ // current audio delay.
int current_diff_ms =
current_video_delay_ms - current_audio_delay_ms + relative_delay_ms;
@@ -95,82 +94,77 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
if (diff_ms > 0) {
// The minimum video delay is longer than the current audio delay.
// We need to decrease extra video delay, or add extra audio delay.
- if (channel_delay_.extra_video_delay_ms > base_target_delay_ms_) {
+ if (video_delay_.extra_ms > base_target_delay_ms_) {
// We have extra delay added to ViE. Reduce this delay before adding
// extra delay to VoE.
- channel_delay_.extra_video_delay_ms -= diff_ms;
- channel_delay_.extra_audio_delay_ms = base_target_delay_ms_;
- } else { // channel_delay_.extra_video_delay_ms > 0
+ video_delay_.extra_ms -= diff_ms;
+ audio_delay_.extra_ms = base_target_delay_ms_;
+ } else { // video_delay_.extra_ms > 0
// We have no extra video delay to remove, increase the audio delay.
- channel_delay_.extra_audio_delay_ms += diff_ms;
- channel_delay_.extra_video_delay_ms = base_target_delay_ms_;
+ audio_delay_.extra_ms += diff_ms;
+ video_delay_.extra_ms = base_target_delay_ms_;
}
} else { // if (diff_ms > 0)
// The video delay is lower than the current audio delay.
// We need to decrease extra audio delay, or add extra video delay.
- if (channel_delay_.extra_audio_delay_ms > base_target_delay_ms_) {
+ if (audio_delay_.extra_ms > base_target_delay_ms_) {
// We have extra delay in VoiceEngine.
// Start with decreasing the voice delay.
// Note: diff_ms is negative; add the negative difference.
- channel_delay_.extra_audio_delay_ms += diff_ms;
- channel_delay_.extra_video_delay_ms = base_target_delay_ms_;
- } else { // channel_delay_.extra_audio_delay_ms > base_target_delay_ms_
+ audio_delay_.extra_ms += diff_ms;
+ video_delay_.extra_ms = base_target_delay_ms_;
+ } else { // audio_delay_.extra_ms > base_target_delay_ms_
// We have no extra delay in VoiceEngine, increase the video delay.
// Note: diff_ms is negative; subtract the negative difference.
- channel_delay_.extra_video_delay_ms -= diff_ms; // X - (-Y) = X + Y.
- channel_delay_.extra_audio_delay_ms = base_target_delay_ms_;
+ video_delay_.extra_ms -= diff_ms; // X - (-Y) = X + Y.
+ audio_delay_.extra_ms = base_target_delay_ms_;
}
}
// Make sure that video is never below our target.
- channel_delay_.extra_video_delay_ms =
- std::max(channel_delay_.extra_video_delay_ms, base_target_delay_ms_);
+ video_delay_.extra_ms =
+ std::max(video_delay_.extra_ms, base_target_delay_ms_);
int new_video_delay_ms;
- if (channel_delay_.extra_video_delay_ms > base_target_delay_ms_) {
- new_video_delay_ms = channel_delay_.extra_video_delay_ms;
+ if (video_delay_.extra_ms > base_target_delay_ms_) {
+ new_video_delay_ms = video_delay_.extra_ms;
} else {
// No change to the extra video delay. We are changing audio and we only
// allow to change one at the time.
- new_video_delay_ms = channel_delay_.last_video_delay_ms;
+ new_video_delay_ms = video_delay_.last_ms;
}
// Make sure that we don't go below the extra video delay.
- new_video_delay_ms =
- std::max(new_video_delay_ms, channel_delay_.extra_video_delay_ms);
+ new_video_delay_ms = std::max(new_video_delay_ms, video_delay_.extra_ms);
// Verify we don't go above the maximum allowed video delay.
new_video_delay_ms =
std::min(new_video_delay_ms, base_target_delay_ms_ + kMaxDeltaDelayMs);
int new_audio_delay_ms;
- if (channel_delay_.extra_audio_delay_ms > base_target_delay_ms_) {
- new_audio_delay_ms = channel_delay_.extra_audio_delay_ms;
+ if (audio_delay_.extra_ms > base_target_delay_ms_) {
+ new_audio_delay_ms = audio_delay_.extra_ms;
} else {
- // No change to the audio delay. We are changing video and we only
- // allow to change one at the time.
- new_audio_delay_ms = channel_delay_.last_audio_delay_ms;
+    // No change to the audio delay. We are changing video and we only allow
+    // changing one at a time.
+ new_audio_delay_ms = audio_delay_.last_ms;
}
// Make sure that we don't go below the extra audio delay.
- new_audio_delay_ms =
- std::max(new_audio_delay_ms, channel_delay_.extra_audio_delay_ms);
+ new_audio_delay_ms = std::max(new_audio_delay_ms, audio_delay_.extra_ms);
// Verify we don't go above the maximum allowed audio delay.
new_audio_delay_ms =
std::min(new_audio_delay_ms, base_target_delay_ms_ + kMaxDeltaDelayMs);
- // Remember our last audio and video delays.
- channel_delay_.last_video_delay_ms = new_video_delay_ms;
- channel_delay_.last_audio_delay_ms = new_audio_delay_ms;
+ video_delay_.last_ms = new_video_delay_ms;
+ audio_delay_.last_ms = new_audio_delay_ms;
RTC_LOG(LS_VERBOSE) << "Sync video delay " << new_video_delay_ms
<< " for video stream " << video_stream_id_
- << " and audio delay "
- << channel_delay_.extra_audio_delay_ms
+ << " and audio delay " << audio_delay_.extra_ms
<< " for audio stream " << audio_stream_id_;
- // Return values.
*total_video_delay_target_ms = new_video_delay_ms;
*total_audio_delay_target_ms = new_audio_delay_ms;
return true;
@@ -178,16 +172,13 @@ bool StreamSynchronization::ComputeDelays(int relative_delay_ms,
void StreamSynchronization::SetTargetBufferingDelay(int target_delay_ms) {
// Initial extra delay for audio (accounting for existing extra delay).
- channel_delay_.extra_audio_delay_ms +=
- target_delay_ms - base_target_delay_ms_;
- channel_delay_.last_audio_delay_ms += target_delay_ms - base_target_delay_ms_;
+ audio_delay_.extra_ms += target_delay_ms - base_target_delay_ms_;
+ audio_delay_.last_ms += target_delay_ms - base_target_delay_ms_;
// The video delay is compared to the last value (and how much we can update
// is limited by that as well).
- channel_delay_.last_video_delay_ms += target_delay_ms - base_target_delay_ms_;
-
- channel_delay_.extra_video_delay_ms +=
- target_delay_ms - base_target_delay_ms_;
+ video_delay_.last_ms += target_delay_ms - base_target_delay_ms_;
+ video_delay_.extra_ms += target_delay_ms - base_target_delay_ms_;
// Video is already delayed by the desired amount.
base_target_delay_ms_ = target_delay_ms;
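Taken together, the renamed fields make the call flow of this class easier to follow. A hedged usage sketch under assumed inputs (the Measurements plumbing and the current receiver delays are stand-ins, not code from this patch):

// Hedged usage sketch; RTCP/NTP measurement plumbing is omitted.
webrtc::StreamSynchronization sync(/*video_stream_id=*/1, /*audio_stream_id=*/2);
sync.SetTargetBufferingDelay(0);

webrtc::StreamSynchronization::Measurements audio_measurement;  // filled from RTCP
webrtc::StreamSynchronization::Measurements video_measurement;  // filled from RTCP
int current_audio_delay_ms = 0;  // stand-in: delay NetEQ currently reports

int relative_delay_ms = 0;
if (webrtc::StreamSynchronization::ComputeRelativeDelay(
        audio_measurement, video_measurement, &relative_delay_ms)) {
  int total_audio_delay_ms = 0;  // out: new audio delay target
  int total_video_delay_ms = 0;  // in/out: current VCM delay goes in
  if (sync.ComputeDelays(relative_delay_ms, current_audio_delay_ms,
                         &total_audio_delay_ms, &total_video_delay_ms)) {
    // Apply the two targets to the audio and video receive paths.
  }
}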
diff --git a/chromium/third_party/webrtc/video/stream_synchronization.h b/chromium/third_party/webrtc/video/stream_synchronization.h
index 9050d22acc2..1aba62d1e72 100644
--- a/chromium/third_party/webrtc/video/stream_synchronization.h
+++ b/chromium/third_party/webrtc/video/stream_synchronization.h
@@ -26,34 +26,37 @@ class StreamSynchronization {
uint32_t latest_timestamp;
};
- StreamSynchronization(int video_stream_id, int audio_stream_id);
+ StreamSynchronization(uint32_t video_stream_id, uint32_t audio_stream_id);
bool ComputeDelays(int relative_delay_ms,
int current_audio_delay_ms,
- int* extra_audio_delay_ms,
+ int* total_audio_delay_target_ms,
int* total_video_delay_target_ms);
- // On success |relative_delay| contains the number of milliseconds later video
- // is rendered relative audio. If audio is played back later than video a
- // |relative_delay| will be negative.
+  // On success, |relative_delay_ms| contains the number of milliseconds video
+  // is rendered later relative to audio. If audio is played back later than
+  // video, |relative_delay_ms| will be negative.
static bool ComputeRelativeDelay(const Measurements& audio_measurement,
const Measurements& video_measurement,
int* relative_delay_ms);
- // Set target buffering delay - All audio and video will be delayed by at
- // least target_delay_ms.
+
+ // Set target buffering delay. Audio and video will be delayed by at least
+ // |target_delay_ms|.
void SetTargetBufferingDelay(int target_delay_ms);
+ uint32_t audio_stream_id() const { return audio_stream_id_; }
+ uint32_t video_stream_id() const { return video_stream_id_; }
+
private:
struct SynchronizationDelays {
- int extra_video_delay_ms = 0;
- int last_video_delay_ms = 0;
- int extra_audio_delay_ms = 0;
- int last_audio_delay_ms = 0;
+ int extra_ms = 0;
+ int last_ms = 0;
};
- SynchronizationDelays channel_delay_;
- const int video_stream_id_;
- const int audio_stream_id_;
+ const uint32_t video_stream_id_;
+ const uint32_t audio_stream_id_;
+ SynchronizationDelays audio_delay_;
+ SynchronizationDelays video_delay_;
int base_target_delay_ms_;
int avg_diff_ms_;
};
diff --git a/chromium/third_party/webrtc/video/stream_synchronization_unittest.cc b/chromium/third_party/webrtc/video/stream_synchronization_unittest.cc
index f9b885d490a..04a43c21f91 100644
--- a/chromium/third_party/webrtc/video/stream_synchronization_unittest.cc
+++ b/chromium/third_party/webrtc/video/stream_synchronization_unittest.cc
@@ -18,7 +18,7 @@
namespace webrtc {
namespace {
-constexpr int kMaxAudioDiffMs = 80; // From stream_synchronization.cc
+constexpr int kMaxChangeMs = 80; // From stream_synchronization.cc
constexpr int kDefaultAudioFrequency = 8000;
constexpr int kDefaultVideoFrequency = 90000;
constexpr int kSmoothingFilter = 4 * 2;
@@ -33,13 +33,13 @@ class StreamSynchronizationTest : public ::testing::Test {
// Generates the necessary RTCP measurements and RTP timestamps and computes
// the audio and video delays needed to get the two streams in sync.
// |audio_delay_ms| and |video_delay_ms| are the number of milliseconds after
- // capture which the frames are rendered.
+  // capture at which the frames are received.
// |current_audio_delay_ms| is the number of milliseconds which audio is
// currently being delayed by the receiver.
bool DelayedStreams(int audio_delay_ms,
int video_delay_ms,
int current_audio_delay_ms,
- int* extra_audio_delay_ms,
+ int* total_audio_delay_ms,
int* total_video_delay_ms) {
int audio_frequency =
static_cast<int>(kDefaultAudioFrequency * audio_clock_drift_ + 0.5);
@@ -95,186 +95,175 @@ class StreamSynchronizationTest : public ::testing::Test {
clock_receiver_.AdvanceTimeMilliseconds(video_delay_ms - audio_delay_ms);
video.latest_receive_time_ms = clock_receiver_.CurrentTime().ms();
}
+
int relative_delay_ms;
- StreamSynchronization::ComputeRelativeDelay(audio, video,
- &relative_delay_ms);
+ EXPECT_TRUE(StreamSynchronization::ComputeRelativeDelay(
+ audio, video, &relative_delay_ms));
EXPECT_EQ(video_delay_ms - audio_delay_ms, relative_delay_ms);
+
return sync_.ComputeDelays(relative_delay_ms, current_audio_delay_ms,
- extra_audio_delay_ms, total_video_delay_ms);
+ total_audio_delay_ms, total_video_delay_ms);
}
// Simulate audio playback 300 ms after capture and video rendering 100 ms
// after capture. Verify that the correct extra delays are calculated for
// audio and video, and that they change correctly when we simulate that
// NetEQ or the VCM adds more delay to the streams.
- // TODO(holmer): This is currently wrong! We should simply change
- // audio_delay_ms or video_delay_ms since those now include VCM and NetEQ
- // delays.
- void BothDelayedAudioLaterTest(int base_target_delay) {
- int current_audio_delay_ms = base_target_delay;
- int audio_delay_ms = base_target_delay + 300;
- int video_delay_ms = base_target_delay + 100;
- int extra_audio_delay_ms = 0;
- int total_video_delay_ms = base_target_delay;
- int filtered_move = (audio_delay_ms - video_delay_ms) / kSmoothingFilter;
- const int kNeteqDelayIncrease = 50;
- const int kNeteqDelayDecrease = 10;
-
- EXPECT_TRUE(DelayedStreams(audio_delay_ms, video_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
+ void BothDelayedAudioLaterTest(int base_target_delay_ms) {
+ const int kAudioDelayMs = base_target_delay_ms + 300;
+ const int kVideoDelayMs = base_target_delay_ms + 100;
+ int current_audio_delay_ms = base_target_delay_ms;
+ int total_audio_delay_ms = 0;
+ int total_video_delay_ms = base_target_delay_ms;
+ int filtered_move = (kAudioDelayMs - kVideoDelayMs) / kSmoothingFilter;
+
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
&total_video_delay_ms));
- EXPECT_EQ(base_target_delay + filtered_move, total_video_delay_ms);
- EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
- current_audio_delay_ms = extra_audio_delay_ms;
+ EXPECT_EQ(base_target_delay_ms + filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);
+ // Set new current delay.
+ current_audio_delay_ms = total_audio_delay_ms;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(
- 1000 - std::max(audio_delay_ms, video_delay_ms));
- // Simulate base_target_delay minimum delay in the VCM.
- total_video_delay_ms = base_target_delay;
- EXPECT_TRUE(DelayedStreams(audio_delay_ms, video_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
+ 1000 - std::max(kAudioDelayMs, kVideoDelayMs));
+ // Simulate base_target_delay_ms minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay_ms;
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
&total_video_delay_ms));
- EXPECT_EQ(base_target_delay + 2 * filtered_move, total_video_delay_ms);
- EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
- current_audio_delay_ms = extra_audio_delay_ms;
+ EXPECT_EQ(base_target_delay_ms + 2 * filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);
+ // Set new current delay.
+ current_audio_delay_ms = total_audio_delay_ms;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(
- 1000 - std::max(audio_delay_ms, video_delay_ms));
- // Simulate base_target_delay minimum delay in the VCM.
- total_video_delay_ms = base_target_delay;
- EXPECT_TRUE(DelayedStreams(audio_delay_ms, video_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
+ 1000 - std::max(kAudioDelayMs, kVideoDelayMs));
+ // Simulate base_target_delay_ms minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay_ms;
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
&total_video_delay_ms));
- EXPECT_EQ(base_target_delay + 3 * filtered_move, total_video_delay_ms);
- EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
+ EXPECT_EQ(base_target_delay_ms + 3 * filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);
// Simulate that NetEQ introduces some audio delay.
- current_audio_delay_ms = base_target_delay + kNeteqDelayIncrease;
+ const int kNeteqDelayIncrease = 50;
+ current_audio_delay_ms = base_target_delay_ms + kNeteqDelayIncrease;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(
- 1000 - std::max(audio_delay_ms, video_delay_ms));
- // Simulate base_target_delay minimum delay in the VCM.
- total_video_delay_ms = base_target_delay;
- EXPECT_TRUE(DelayedStreams(audio_delay_ms, video_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
+ 1000 - std::max(kAudioDelayMs, kVideoDelayMs));
+ // Simulate base_target_delay_ms minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay_ms;
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
&total_video_delay_ms));
filtered_move = 3 * filtered_move +
- (kNeteqDelayIncrease + audio_delay_ms - video_delay_ms) /
+ (kNeteqDelayIncrease + kAudioDelayMs - kVideoDelayMs) /
kSmoothingFilter;
- EXPECT_EQ(base_target_delay + filtered_move, total_video_delay_ms);
- EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
+ EXPECT_EQ(base_target_delay_ms + filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);
// Simulate that NetEQ reduces its delay.
- current_audio_delay_ms = base_target_delay + kNeteqDelayDecrease;
+ const int kNeteqDelayDecrease = 10;
+ current_audio_delay_ms = base_target_delay_ms + kNeteqDelayDecrease;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(
- 1000 - std::max(audio_delay_ms, video_delay_ms));
- // Simulate base_target_delay minimum delay in the VCM.
- total_video_delay_ms = base_target_delay;
- EXPECT_TRUE(DelayedStreams(audio_delay_ms, video_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
+ 1000 - std::max(kAudioDelayMs, kVideoDelayMs));
+ // Simulate base_target_delay_ms minimum delay in the VCM.
+ total_video_delay_ms = base_target_delay_ms;
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
&total_video_delay_ms));
-
- filtered_move = filtered_move +
- (kNeteqDelayDecrease + audio_delay_ms - video_delay_ms) /
- kSmoothingFilter;
-
- EXPECT_EQ(base_target_delay + filtered_move, total_video_delay_ms);
- EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
+ filtered_move =
+ filtered_move + (kNeteqDelayDecrease + kAudioDelayMs - kVideoDelayMs) /
+ kSmoothingFilter;
+ EXPECT_EQ(base_target_delay_ms + filtered_move, total_video_delay_ms);
+ EXPECT_EQ(base_target_delay_ms, total_audio_delay_ms);
}
- void BothDelayedVideoLaterTest(int base_target_delay) {
- int current_audio_delay_ms = base_target_delay;
- int audio_delay_ms = base_target_delay + 100;
- int video_delay_ms = base_target_delay + 300;
- int extra_audio_delay_ms = 0;
- int total_video_delay_ms = base_target_delay;
+ void BothDelayedVideoLaterTest(int base_target_delay_ms) {
+ const int kAudioDelayMs = base_target_delay_ms + 100;
+ const int kVideoDelayMs = base_target_delay_ms + 300;
+ int current_audio_delay_ms = base_target_delay_ms;
+ int total_audio_delay_ms = 0;
+ int total_video_delay_ms = base_target_delay_ms;
- EXPECT_TRUE(DelayedStreams(audio_delay_ms, video_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
&total_video_delay_ms));
- EXPECT_EQ(base_target_delay, total_video_delay_ms);
- // The audio delay is not allowed to change more than this in 1 second.
- EXPECT_GE(base_target_delay + kMaxAudioDiffMs, extra_audio_delay_ms);
- current_audio_delay_ms = extra_audio_delay_ms;
- int current_extra_delay_ms = extra_audio_delay_ms;
+ EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
+ // The audio delay is not allowed to change more than this.
+ EXPECT_GE(base_target_delay_ms + kMaxChangeMs, total_audio_delay_ms);
+ int last_total_audio_delay_ms = total_audio_delay_ms;
+ // Set new current audio delay.
+ current_audio_delay_ms = total_audio_delay_ms;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(800);
- EXPECT_TRUE(DelayedStreams(audio_delay_ms, video_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
&total_video_delay_ms));
- EXPECT_EQ(base_target_delay, total_video_delay_ms);
- // The audio delay is not allowed to change more than the half of the
- // required change in delay.
- EXPECT_EQ(current_extra_delay_ms +
- MaxAudioDelayIncrease(
+ EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(
current_audio_delay_ms,
- base_target_delay + video_delay_ms - audio_delay_ms),
- extra_audio_delay_ms);
- current_audio_delay_ms = extra_audio_delay_ms;
- current_extra_delay_ms = extra_audio_delay_ms;
+ base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
+ // Set new current audio delay.
+ current_audio_delay_ms = total_audio_delay_ms;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(800);
- EXPECT_TRUE(DelayedStreams(audio_delay_ms, video_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
&total_video_delay_ms));
- EXPECT_EQ(base_target_delay, total_video_delay_ms);
- // The audio delay is not allowed to change more than the half of the
- // required change in delay.
- EXPECT_EQ(current_extra_delay_ms +
- MaxAudioDelayIncrease(
+ EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(
current_audio_delay_ms,
- base_target_delay + video_delay_ms - audio_delay_ms),
- extra_audio_delay_ms);
- current_extra_delay_ms = extra_audio_delay_ms;
+ base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
// Simulate that NetEQ for some reason reduced the delay.
- current_audio_delay_ms = base_target_delay + 10;
+ current_audio_delay_ms = base_target_delay_ms + 10;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(800);
- EXPECT_TRUE(DelayedStreams(audio_delay_ms, video_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
&total_video_delay_ms));
- EXPECT_EQ(base_target_delay, total_video_delay_ms);
- // Since we only can ask NetEQ for a certain amount of extra delay, and
- // we only measure the total NetEQ delay, we will ask for additional delay
- // here to try to stay in sync.
- EXPECT_EQ(current_extra_delay_ms +
- MaxAudioDelayIncrease(
+ EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(
current_audio_delay_ms,
- base_target_delay + video_delay_ms - audio_delay_ms),
- extra_audio_delay_ms);
- current_extra_delay_ms = extra_audio_delay_ms;
+ base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
// Simulate that NetEQ for some reason significantly increased the delay.
- current_audio_delay_ms = base_target_delay + 350;
+ current_audio_delay_ms = base_target_delay_ms + 350;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(800);
- EXPECT_TRUE(DelayedStreams(audio_delay_ms, video_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
&total_video_delay_ms));
- EXPECT_EQ(base_target_delay, total_video_delay_ms);
- // The audio delay is not allowed to change more than the half of the
- // required change in delay.
- EXPECT_EQ(current_extra_delay_ms +
- MaxAudioDelayIncrease(
+ EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(
current_audio_delay_ms,
- base_target_delay + video_delay_ms - audio_delay_ms),
- extra_audio_delay_ms);
- }
-
- int MaxAudioDelayIncrease(int current_audio_delay_ms, int delay_ms) {
- return std::min((delay_ms - current_audio_delay_ms) / kSmoothingFilter,
- kMaxAudioDiffMs);
+ base_target_delay_ms + kVideoDelayMs - kAudioDelayMs),
+ total_audio_delay_ms);
}
- int MaxAudioDelayDecrease(int current_audio_delay_ms, int delay_ms) {
- return std::max((delay_ms - current_audio_delay_ms) / kSmoothingFilter,
- -kMaxAudioDiffMs);
+ int MaxAudioDelayChangeMs(int current_audio_delay_ms, int delay_ms) const {
+ int diff_ms = (delay_ms - current_audio_delay_ms) / kSmoothingFilter;
+ diff_ms = std::min(diff_ms, kMaxChangeMs);
+ diff_ms = std::max(diff_ms, -kMaxChangeMs);
+ return diff_ms;
}
StreamSynchronization sync_;
@@ -285,114 +274,113 @@ class StreamSynchronizationTest : public ::testing::Test {
};
TEST_F(StreamSynchronizationTest, NoDelay) {
- uint32_t current_audio_delay_ms = 0;
- int extra_audio_delay_ms = 0;
+ int total_audio_delay_ms = 0;
int total_video_delay_ms = 0;
- EXPECT_FALSE(DelayedStreams(0, 0, current_audio_delay_ms,
- &extra_audio_delay_ms, &total_video_delay_ms));
- EXPECT_EQ(0, extra_audio_delay_ms);
+ EXPECT_FALSE(DelayedStreams(/*audio_delay_ms=*/0, /*video_delay_ms=*/0,
+ /*current_audio_delay_ms=*/0,
+ &total_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_audio_delay_ms);
EXPECT_EQ(0, total_video_delay_ms);
}
-TEST_F(StreamSynchronizationTest, VideoDelay) {
- uint32_t current_audio_delay_ms = 0;
- int delay_ms = 200;
- int extra_audio_delay_ms = 0;
+TEST_F(StreamSynchronizationTest, VideoDelayed) {
+ const int kAudioDelayMs = 200;
+ int total_audio_delay_ms = 0;
int total_video_delay_ms = 0;
- EXPECT_TRUE(DelayedStreams(delay_ms, 0, current_audio_delay_ms,
- &extra_audio_delay_ms, &total_video_delay_ms));
- EXPECT_EQ(0, extra_audio_delay_ms);
- // The video delay is not allowed to change more than this in 1 second.
- EXPECT_EQ(delay_ms / kSmoothingFilter, total_video_delay_ms);
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, /*video_delay_ms=*/0,
+ /*current_audio_delay_ms=*/0,
+ &total_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_audio_delay_ms);
+ // The delay is not allowed to change more than this.
+ EXPECT_EQ(kAudioDelayMs / kSmoothingFilter, total_video_delay_ms);
- clock_sender_.AdvanceTimeMilliseconds(1000);
- clock_receiver_.AdvanceTimeMilliseconds(800);
// Simulate 0 minimum delay in the VCM.
total_video_delay_ms = 0;
- EXPECT_TRUE(DelayedStreams(delay_ms, 0, current_audio_delay_ms,
- &extra_audio_delay_ms, &total_video_delay_ms));
- EXPECT_EQ(0, extra_audio_delay_ms);
- // The video delay is not allowed to change more than this in 1 second.
- EXPECT_EQ(2 * delay_ms / kSmoothingFilter, total_video_delay_ms);
-
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, /*video_delay_ms=*/0,
+ /*current_audio_delay_ms=*/0,
+ &total_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_audio_delay_ms);
+ EXPECT_EQ(2 * kAudioDelayMs / kSmoothingFilter, total_video_delay_ms);
+
// Simulate 0 minimum delay in the VCM.
total_video_delay_ms = 0;
- EXPECT_TRUE(DelayedStreams(delay_ms, 0, current_audio_delay_ms,
- &extra_audio_delay_ms, &total_video_delay_ms));
- EXPECT_EQ(0, extra_audio_delay_ms);
- EXPECT_EQ(3 * delay_ms / kSmoothingFilter, total_video_delay_ms);
+ clock_sender_.AdvanceTimeMilliseconds(1000);
+ clock_receiver_.AdvanceTimeMilliseconds(800);
+ EXPECT_TRUE(DelayedStreams(kAudioDelayMs, /*video_delay_ms=*/0,
+ /*current_audio_delay_ms=*/0,
+ &total_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_EQ(0, total_audio_delay_ms);
+ EXPECT_EQ(3 * kAudioDelayMs / kSmoothingFilter, total_video_delay_ms);
}
-TEST_F(StreamSynchronizationTest, AudioDelay) {
+TEST_F(StreamSynchronizationTest, AudioDelayed) {
+ const int kVideoDelayMs = 200;
int current_audio_delay_ms = 0;
- int delay_ms = 200;
- int extra_audio_delay_ms = 0;
+ int total_audio_delay_ms = 0;
int total_video_delay_ms = 0;
- EXPECT_TRUE(DelayedStreams(0, delay_ms, current_audio_delay_ms,
- &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
EXPECT_EQ(0, total_video_delay_ms);
- // The audio delay is not allowed to change more than this in 1 second.
- EXPECT_EQ(delay_ms / kSmoothingFilter, extra_audio_delay_ms);
- current_audio_delay_ms = extra_audio_delay_ms;
- int current_extra_delay_ms = extra_audio_delay_ms;
+ // The delay is not allowed to change more than this.
+ EXPECT_EQ(kVideoDelayMs / kSmoothingFilter, total_audio_delay_ms);
+ int last_total_audio_delay_ms = total_audio_delay_ms;
+ // Set new current audio delay.
+ current_audio_delay_ms = total_audio_delay_ms;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(800);
- EXPECT_TRUE(DelayedStreams(0, delay_ms, current_audio_delay_ms,
- &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
EXPECT_EQ(0, total_video_delay_ms);
- // The audio delay is not allowed to change more than the half of the required
- // change in delay.
- EXPECT_EQ(current_extra_delay_ms +
- MaxAudioDelayIncrease(current_audio_delay_ms, delay_ms),
- extra_audio_delay_ms);
- current_audio_delay_ms = extra_audio_delay_ms;
- current_extra_delay_ms = extra_audio_delay_ms;
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
+ // Set new current audio delay.
+ current_audio_delay_ms = total_audio_delay_ms;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(800);
- EXPECT_TRUE(DelayedStreams(0, delay_ms, current_audio_delay_ms,
- &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
EXPECT_EQ(0, total_video_delay_ms);
- // The audio delay is not allowed to change more than the half of the required
- // change in delay.
- EXPECT_EQ(current_extra_delay_ms +
- MaxAudioDelayIncrease(current_audio_delay_ms, delay_ms),
- extra_audio_delay_ms);
- current_extra_delay_ms = extra_audio_delay_ms;
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
// Simulate that NetEQ for some reason reduced the delay.
current_audio_delay_ms = 10;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(800);
- EXPECT_TRUE(DelayedStreams(0, delay_ms, current_audio_delay_ms,
- &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
EXPECT_EQ(0, total_video_delay_ms);
- // Since we only can ask NetEQ for a certain amount of extra delay, and
- // we only measure the total NetEQ delay, we will ask for additional delay
- // here to try to
- EXPECT_EQ(current_extra_delay_ms +
- MaxAudioDelayIncrease(current_audio_delay_ms, delay_ms),
- extra_audio_delay_ms);
- current_extra_delay_ms = extra_audio_delay_ms;
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
+ total_audio_delay_ms);
+ last_total_audio_delay_ms = total_audio_delay_ms;
// Simulate that NetEQ for some reason significantly increased the delay.
current_audio_delay_ms = 350;
clock_sender_.AdvanceTimeMilliseconds(1000);
clock_receiver_.AdvanceTimeMilliseconds(800);
- EXPECT_TRUE(DelayedStreams(0, delay_ms, current_audio_delay_ms,
- &extra_audio_delay_ms, &total_video_delay_ms));
+ EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs,
+ current_audio_delay_ms, &total_audio_delay_ms,
+ &total_video_delay_ms));
EXPECT_EQ(0, total_video_delay_ms);
- // The audio delay is not allowed to change more than the half of the required
- // change in delay.
- EXPECT_EQ(current_extra_delay_ms +
- MaxAudioDelayDecrease(current_audio_delay_ms, delay_ms),
- extra_audio_delay_ms);
+ EXPECT_EQ(last_total_audio_delay_ms +
+ MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs),
+ total_audio_delay_ms);
}
TEST_F(StreamSynchronizationTest, BothDelayedVideoLater) {
@@ -423,77 +411,66 @@ TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDrift) {
BothDelayedAudioLaterTest(0);
}
-TEST_F(StreamSynchronizationTest, BaseDelay) {
- int base_target_delay_ms = 2000;
- int current_audio_delay_ms = 2000;
- int extra_audio_delay_ms = 0;
- int total_video_delay_ms = base_target_delay_ms;
- sync_.SetTargetBufferingDelay(base_target_delay_ms);
- // We are in sync don't change.
- EXPECT_FALSE(DelayedStreams(base_target_delay_ms, base_target_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
- &total_video_delay_ms));
- // Triggering another call with the same values. Delay should not be modified.
- base_target_delay_ms = 2000;
- current_audio_delay_ms = base_target_delay_ms;
- total_video_delay_ms = base_target_delay_ms;
- sync_.SetTargetBufferingDelay(base_target_delay_ms);
- // We are in sync don't change.
- EXPECT_FALSE(DelayedStreams(base_target_delay_ms, base_target_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
- &total_video_delay_ms));
- // Changing delay value - intended to test this module only. In practice it
- // would take VoE time to adapt.
- base_target_delay_ms = 5000;
- current_audio_delay_ms = base_target_delay_ms;
- total_video_delay_ms = base_target_delay_ms;
- sync_.SetTargetBufferingDelay(base_target_delay_ms);
- // We are in sync don't change.
- EXPECT_FALSE(DelayedStreams(base_target_delay_ms, base_target_delay_ms,
- current_audio_delay_ms, &extra_audio_delay_ms,
- &total_video_delay_ms));
+TEST_F(StreamSynchronizationTest, BothEquallyDelayed) {
+ const int kDelayMs = 2000;
+ int current_audio_delay_ms = kDelayMs;
+ int total_audio_delay_ms = 0;
+ int total_video_delay_ms = kDelayMs;
+ // In sync, expect no change.
+ EXPECT_FALSE(DelayedStreams(kDelayMs, kDelayMs, current_audio_delay_ms,
+ &total_audio_delay_ms, &total_video_delay_ms));
+  // Trigger another call with the same values; delay should not be modified.
+ total_video_delay_ms = kDelayMs;
+ EXPECT_FALSE(DelayedStreams(kDelayMs, kDelayMs, current_audio_delay_ms,
+ &total_audio_delay_ms, &total_video_delay_ms));
+  // Change the delay value; delay should still not be modified.
+ const int kDelayMs2 = 5000;
+ current_audio_delay_ms = kDelayMs2;
+ total_video_delay_ms = kDelayMs2;
+ EXPECT_FALSE(DelayedStreams(kDelayMs2, kDelayMs2, current_audio_delay_ms,
+ &total_audio_delay_ms, &total_video_delay_ms));
}
TEST_F(StreamSynchronizationTest, BothDelayedAudioLaterWithBaseDelay) {
- int base_target_delay_ms = 3000;
- sync_.SetTargetBufferingDelay(base_target_delay_ms);
- BothDelayedAudioLaterTest(base_target_delay_ms);
+ const int kBaseTargetDelayMs = 3000;
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
+ BothDelayedAudioLaterTest(kBaseTargetDelayMs);
}
TEST_F(StreamSynchronizationTest, BothDelayedAudioClockDriftWithBaseDelay) {
- int base_target_delay_ms = 3000;
- sync_.SetTargetBufferingDelay(base_target_delay_ms);
+ const int kBaseTargetDelayMs = 3000;
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
audio_clock_drift_ = 1.05;
- BothDelayedAudioLaterTest(base_target_delay_ms);
+ BothDelayedAudioLaterTest(kBaseTargetDelayMs);
}
TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDriftWithBaseDelay) {
- int base_target_delay_ms = 3000;
- sync_.SetTargetBufferingDelay(base_target_delay_ms);
+ const int kBaseTargetDelayMs = 3000;
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
video_clock_drift_ = 1.05;
- BothDelayedAudioLaterTest(base_target_delay_ms);
+ BothDelayedAudioLaterTest(kBaseTargetDelayMs);
}
TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterWithBaseDelay) {
- int base_target_delay_ms = 2000;
- sync_.SetTargetBufferingDelay(base_target_delay_ms);
- BothDelayedVideoLaterTest(base_target_delay_ms);
+ const int kBaseTargetDelayMs = 2000;
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
+ BothDelayedVideoLaterTest(kBaseTargetDelayMs);
}
TEST_F(StreamSynchronizationTest,
BothDelayedVideoLaterAudioClockDriftWithBaseDelay) {
- int base_target_delay_ms = 2000;
+ const int kBaseTargetDelayMs = 2000;
audio_clock_drift_ = 1.05;
- sync_.SetTargetBufferingDelay(base_target_delay_ms);
- BothDelayedVideoLaterTest(base_target_delay_ms);
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
+ BothDelayedVideoLaterTest(kBaseTargetDelayMs);
}
TEST_F(StreamSynchronizationTest,
BothDelayedVideoLaterVideoClockDriftWithBaseDelay) {
- int base_target_delay_ms = 2000;
+ const int kBaseTargetDelayMs = 2000;
video_clock_drift_ = 1.05;
- sync_.SetTargetBufferingDelay(base_target_delay_ms);
- BothDelayedVideoLaterTest(base_target_delay_ms);
+ sync_.SetTargetBufferingDelay(kBaseTargetDelayMs);
+ BothDelayedVideoLaterTest(kBaseTargetDelayMs);
}
} // namespace webrtc
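The expectations above all reduce to one clamp: each call may move the audio delay by at most 1/kSmoothingFilter of the remaining error, capped at kMaxChangeMs. A self-contained restatement of that helper with a worked value (constants copied from the test):

#include <algorithm>

constexpr int kSmoothingFilter = 4 * 2;
constexpr int kMaxChangeMs = 80;  // From stream_synchronization.cc

int MaxAudioDelayChangeMs(int current_audio_delay_ms, int delay_ms) {
  int diff_ms = (delay_ms - current_audio_delay_ms) / kSmoothingFilter;
  return std::max(std::min(diff_ms, kMaxChangeMs), -kMaxChangeMs);
}
// E.g. AudioDelayed's first step: (200 - 0) / 8 = 25 ms, well under the cap.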
diff --git a/chromium/third_party/webrtc/video/test/mock_video_stream_encoder.h b/chromium/third_party/webrtc/video/test/mock_video_stream_encoder.h
index 494419dffd5..8e429681b8d 100644
--- a/chromium/third_party/webrtc/video/test/mock_video_stream_encoder.h
+++ b/chromium/third_party/webrtc/video/test/mock_video_stream_encoder.h
@@ -24,8 +24,8 @@ class MockVideoStreamEncoder : public VideoStreamEncoderInterface {
MOCK_METHOD1(SetStartBitrate, void(int));
MOCK_METHOD0(SendKeyFrame, void());
MOCK_METHOD1(OnLossNotification, void(const VideoEncoder::LossNotification&));
- MOCK_METHOD5(OnBitrateUpdated,
- void(DataRate, DataRate, DataRate, uint8_t, int64_t));
+ MOCK_METHOD6(OnBitrateUpdated,
+ void(DataRate, DataRate, DataRate, uint8_t, int64_t, double));
MOCK_METHOD1(OnFrame, void(const VideoFrame&));
MOCK_METHOD1(SetBitrateAllocationObserver,
void(VideoBitrateAllocationObserver*));
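With the arity bump from MOCK_METHOD5 to MOCK_METHOD6, existing expectations need a sixth matcher for the new double argument. A hedged gmock sketch (wildcard matchers only; the argument's meaning is defined by the production interface, not shown here):

using ::testing::_;

MockVideoStreamEncoder encoder;
EXPECT_CALL(encoder, OnBitrateUpdated(_, _, _, _, _, _));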
diff --git a/chromium/third_party/webrtc/video/video_analyzer.cc b/chromium/third_party/webrtc/video/video_analyzer.cc
index 018ec8b4585..f4a1c96d746 100644
--- a/chromium/third_party/webrtc/video/video_analyzer.cc
+++ b/chromium/third_party/webrtc/video/video_analyzer.cc
@@ -16,8 +16,8 @@
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
-#include "modules/rtp_rtcp/source/rtp_format.h"
-#include "modules/rtp_rtcp/source/rtp_utility.h"
+#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "rtc_base/cpu_time.h"
#include "rtc_base/format_macros.h"
#include "rtc_base/memory_usage.h"
@@ -38,7 +38,7 @@ ABSL_FLAG(bool,
namespace webrtc {
namespace {
-constexpr TimeDelta kSendStatsPollingInterval = TimeDelta::Seconds<1>();
+constexpr TimeDelta kSendStatsPollingInterval = TimeDelta::Seconds(1);
constexpr size_t kMaxComparisons = 10;
// How often is keep alive message printed.
constexpr int kKeepAliveIntervalSeconds = 30;
@@ -57,6 +57,7 @@ VideoAnalyzer::VideoAnalyzer(test::LayerFilteringTransport* transport,
double avg_psnr_threshold,
double avg_ssim_threshold,
int duration_frames,
+ TimeDelta test_duration,
FILE* graph_data_output_file,
const std::string& graph_title,
uint32_t ssrc_to_analyze,
@@ -74,7 +75,7 @@ VideoAnalyzer::VideoAnalyzer(test::LayerFilteringTransport* transport,
send_stream_(nullptr),
receive_stream_(nullptr),
audio_receive_stream_(nullptr),
- captured_frame_forwarder_(this, clock, duration_frames),
+ captured_frame_forwarder_(this, clock, duration_frames, test_duration),
test_label_(test_label),
graph_data_output_file_(graph_data_output_file),
graph_title_(graph_title),
@@ -92,6 +93,7 @@ VideoAnalyzer::VideoAnalyzer(test::LayerFilteringTransport* transport,
render_frame_rate_(0),
last_fec_bytes_(0),
frames_to_process_(duration_frames),
+ test_end_(clock->CurrentTime() + test_duration),
frames_recorded_(0),
frames_processed_(0),
captured_frames_(0),
@@ -109,6 +111,8 @@ VideoAnalyzer::VideoAnalyzer(test::LayerFilteringTransport* transport,
is_quick_test_enabled_(is_quick_test_enabled),
quit_(false),
done_(true, false),
+ vp8_depacketizer_(CreateVideoRtpDepacketizer(kVideoCodecVP8)),
+ vp9_depacketizer_(CreateVideoRtpDepacketizer(kVideoCodecVP9)),
clock_(clock),
start_ms_(clock->TimeInMilliseconds()),
task_queue_(task_queue) {
@@ -221,18 +225,18 @@ PacketReceiver::DeliveryStatus VideoAnalyzer::DeliverPacket(
rtp_file_writer_->WritePacket(&p);
}
- RtpUtility::RtpHeaderParser parser(packet.cdata(), packet.size());
- RTPHeader header;
- parser.Parse(&header);
- if (!IsFlexfec(header.payloadType) && (header.ssrc == ssrc_to_analyze_ ||
- header.ssrc == rtx_ssrc_to_analyze_)) {
+ RtpPacket rtp_packet;
+ rtp_packet.Parse(packet);
+ if (!IsFlexfec(rtp_packet.PayloadType()) &&
+ (rtp_packet.Ssrc() == ssrc_to_analyze_ ||
+ rtp_packet.Ssrc() == rtx_ssrc_to_analyze_)) {
// Ignore FlexFEC timestamps, to avoid collisions with media timestamps.
// (FlexFEC and media are sent on different SSRCs, which have different
// timestamps spaces.)
// Also ignore packets from wrong SSRC, but include retransmits.
rtc::CritScope lock(&crit_);
int64_t timestamp =
- wrap_handler_.Unwrap(header.timestamp - rtp_timestamp_delta_);
+ wrap_handler_.Unwrap(rtp_packet.Timestamp() - rtp_timestamp_delta_);
recv_times_[timestamp] = clock_->CurrentNtpInMilliseconds();
}
@@ -262,32 +266,31 @@ void VideoAnalyzer::PostEncodeOnFrame(size_t stream_id, uint32_t timestamp) {
bool VideoAnalyzer::SendRtp(const uint8_t* packet,
size_t length,
const PacketOptions& options) {
- RtpUtility::RtpHeaderParser parser(packet, length);
- RTPHeader header;
- parser.Parse(&header);
+ RtpPacket rtp_packet;
+ rtp_packet.Parse(packet, length);
int64_t current_time = clock_->CurrentNtpInMilliseconds();
bool result = transport_->SendRtp(packet, length, options);
{
rtc::CritScope lock(&crit_);
- if (rtp_timestamp_delta_ == 0 && header.ssrc == ssrc_to_analyze_) {
+ if (rtp_timestamp_delta_ == 0 && rtp_packet.Ssrc() == ssrc_to_analyze_) {
RTC_CHECK(static_cast<bool>(first_sent_timestamp_));
- rtp_timestamp_delta_ = header.timestamp - *first_sent_timestamp_;
+ rtp_timestamp_delta_ = rtp_packet.Timestamp() - *first_sent_timestamp_;
}
- if (!IsFlexfec(header.payloadType) && header.ssrc == ssrc_to_analyze_) {
+ if (!IsFlexfec(rtp_packet.PayloadType()) &&
+ rtp_packet.Ssrc() == ssrc_to_analyze_) {
// Ignore FlexFEC timestamps, to avoid collisions with media timestamps.
// (FlexFEC and media are sent on different SSRCs, which have different
// timestamps spaces.)
// Also ignore packets from wrong SSRC and retransmits.
int64_t timestamp =
- wrap_handler_.Unwrap(header.timestamp - rtp_timestamp_delta_);
+ wrap_handler_.Unwrap(rtp_packet.Timestamp() - rtp_timestamp_delta_);
send_times_[timestamp] = current_time;
- if (IsInSelectedSpatialAndTemporalLayer(packet, length, header)) {
- encoded_frame_sizes_[timestamp] +=
- length - (header.headerLength + header.paddingLength);
+ if (IsInSelectedSpatialAndTemporalLayer(rtp_packet)) {
+ encoded_frame_sizes_[timestamp] += rtp_packet.payload_size();
}
}
}
@@ -378,11 +381,8 @@ void VideoAnalyzer::Wait() {
continue;
}
if (frames_processed == last_frames_processed &&
- last_frames_captured == frames_captured) {
- if (frames_captured < frames_to_process_) {
- EXPECT_GT(frames_processed, last_frames_processed)
- << "Analyzer stalled while waiting for test to finish.";
- }
+ last_frames_captured == frames_captured &&
+ clock_->CurrentTime() > test_end_) {
done_.Set();
break;
}
@@ -428,44 +428,31 @@ double VideoAnalyzer::GetCpuUsagePercent() {
}
bool VideoAnalyzer::IsInSelectedSpatialAndTemporalLayer(
- const uint8_t* packet,
- size_t length,
- const RTPHeader& header) {
- if (header.payloadType != test::CallTest::kPayloadTypeVP9 &&
- header.payloadType != test::CallTest::kPayloadTypeVP8) {
- return true;
- } else {
- // Get VP8 and VP9 specific header to check layers indexes.
- const uint8_t* payload = packet + header.headerLength;
- const size_t payload_length = length - header.headerLength;
- const size_t payload_data_length = payload_length - header.paddingLength;
- const bool is_vp8 = header.payloadType == test::CallTest::kPayloadTypeVP8;
- std::unique_ptr<RtpDepacketizer> depacketizer(
- RtpDepacketizer::Create(is_vp8 ? kVideoCodecVP8 : kVideoCodecVP9));
- RtpDepacketizer::ParsedPayload parsed_payload;
- bool result =
- depacketizer->Parse(&parsed_payload, payload, payload_data_length);
- RTC_DCHECK(result);
-
- int temporal_idx;
- int spatial_idx;
- if (is_vp8) {
- temporal_idx = absl::get<RTPVideoHeaderVP8>(
- parsed_payload.video_header().video_type_header)
- .temporalIdx;
- spatial_idx = kNoTemporalIdx;
- } else {
- const auto& vp9_header = absl::get<RTPVideoHeaderVP9>(
- parsed_payload.video_header().video_type_header);
- temporal_idx = vp9_header.temporal_idx;
- spatial_idx = vp9_header.spatial_idx;
- }
+ const RtpPacket& rtp_packet) {
+ if (rtp_packet.PayloadType() == test::CallTest::kPayloadTypeVP8) {
+ auto parsed_payload = vp8_depacketizer_->Parse(rtp_packet.PayloadBuffer());
+ RTC_DCHECK(parsed_payload);
+ const auto& vp8_header = absl::get<RTPVideoHeaderVP8>(
+ parsed_payload->video_header.video_type_header);
+ int temporal_idx = vp8_header.temporalIdx;
+ return selected_tl_ < 0 || temporal_idx == kNoTemporalIdx ||
+ temporal_idx <= selected_tl_;
+ }
+ if (rtp_packet.PayloadType() == test::CallTest::kPayloadTypeVP9) {
+ auto parsed_payload = vp9_depacketizer_->Parse(rtp_packet.PayloadBuffer());
+ RTC_DCHECK(parsed_payload);
+ const auto& vp9_header = absl::get<RTPVideoHeaderVP9>(
+ parsed_payload->video_header.video_type_header);
+ int temporal_idx = vp9_header.temporal_idx;
+ int spatial_idx = vp9_header.spatial_idx;
return (selected_tl_ < 0 || temporal_idx == kNoTemporalIdx ||
temporal_idx <= selected_tl_) &&
(selected_sl_ < 0 || spatial_idx == kNoSpatialIdx ||
spatial_idx <= selected_sl_);
}
+
+ return true;
}
void VideoAnalyzer::PollStats() {
@@ -601,14 +588,16 @@ void VideoAnalyzer::FrameRecorded() {
bool VideoAnalyzer::AllFramesRecorded() {
rtc::CritScope crit(&comparison_lock_);
RTC_DCHECK(frames_recorded_ <= frames_to_process_);
- return frames_recorded_ == frames_to_process_ || quit_;
+ return frames_recorded_ == frames_to_process_ ||
+ (clock_->CurrentTime() > test_end_ && comparisons_.empty()) || quit_;
}
bool VideoAnalyzer::FrameProcessed() {
rtc::CritScope crit(&comparison_lock_);
++frames_processed_;
assert(frames_processed_ <= frames_to_process_);
- return frames_processed_ == frames_to_process_;
+ return frames_processed_ == frames_to_process_ ||
+ (clock_->CurrentTime() > test_end_ && comparisons_.empty());
}
void VideoAnalyzer::PrintResults() {
@@ -985,13 +974,15 @@ VideoAnalyzer::Sample::Sample(int dropped,
VideoAnalyzer::CapturedFrameForwarder::CapturedFrameForwarder(
VideoAnalyzer* analyzer,
Clock* clock,
- int frames_to_process)
+ int frames_to_capture,
+ TimeDelta test_duration)
: analyzer_(analyzer),
send_stream_input_(nullptr),
video_source_(nullptr),
clock_(clock),
captured_frames_(0),
- frames_to_process_(frames_to_process) {}
+ frames_to_capture_(frames_to_capture),
+ test_end_(clock->CurrentTime() + test_duration) {}
void VideoAnalyzer::CapturedFrameForwarder::SetSource(
VideoSourceInterface<VideoFrame>* video_source) {
@@ -1010,8 +1001,10 @@ void VideoAnalyzer::CapturedFrameForwarder::OnFrame(
analyzer_->AddCapturedFrameForComparison(copy);
rtc::CritScope lock(&crit_);
++captured_frames_;
- if (send_stream_input_ && captured_frames_ <= frames_to_process_)
+ if (send_stream_input_ && clock_->CurrentTime() <= test_end_ &&
+ captured_frames_ <= frames_to_capture_) {
send_stream_input_->OnFrame(copy);
+ }
}
void VideoAnalyzer::CapturedFrameForwarder::AddOrUpdateSink(
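The analyzer now parses with RtpPacket and routes the payload to a per-codec VideoRtpDepacketizer instead of the removed RtpUtility/RtpDepacketizer pair. A condensed sketch of that flow as used above (the payload-type constant and error handling are assumptions):

RtpPacket rtp_packet;
if (rtp_packet.Parse(packet, length)) {
  if (rtp_packet.PayloadType() == kVp8PayloadType) {  // assumed constant
    auto parsed = vp8_depacketizer_->Parse(rtp_packet.PayloadBuffer());
    // On success, parsed->video_header.video_type_header carries the
    // RTPVideoHeaderVP8 with temporalIdx for layer filtering.
  }
}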
diff --git a/chromium/third_party/webrtc/video/video_analyzer.h b/chromium/third_party/webrtc/video/video_analyzer.h
index 1069abf1ce5..14f77ac53c9 100644
--- a/chromium/third_party/webrtc/video/video_analyzer.h
+++ b/chromium/third_party/webrtc/video/video_analyzer.h
@@ -18,6 +18,8 @@
#include "api/task_queue/task_queue_base.h"
#include "api/video/video_source_interface.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
#include "rtc_base/event.h"
#include "rtc_base/numerics/running_statistics.h"
#include "rtc_base/platform_thread.h"
@@ -39,6 +41,7 @@ class VideoAnalyzer : public PacketReceiver,
double avg_psnr_threshold,
double avg_ssim_threshold,
int duration_frames,
+ TimeDelta test_duration,
FILE* graph_data_output_file,
const std::string& graph_title,
uint32_t ssrc_to_analyze,
@@ -145,7 +148,8 @@ class VideoAnalyzer : public PacketReceiver,
public:
CapturedFrameForwarder(VideoAnalyzer* analyzer,
Clock* clock,
- int frames_to_process);
+ int frames_to_capture,
+ TimeDelta test_duration);
void SetSource(rtc::VideoSourceInterface<VideoFrame>* video_source);
private:
@@ -165,7 +169,8 @@ class VideoAnalyzer : public PacketReceiver,
VideoSourceInterface<VideoFrame>* video_source_;
Clock* clock_;
int captured_frames_ RTC_GUARDED_BY(crit_);
- const int frames_to_process_;
+ const int frames_to_capture_;
+ const Timestamp test_end_;
};
struct FrameWithPsnr {
@@ -173,9 +178,7 @@ class VideoAnalyzer : public PacketReceiver,
VideoFrame frame;
};
- bool IsInSelectedSpatialAndTemporalLayer(const uint8_t* packet,
- size_t length,
- const RTPHeader& header);
+ bool IsInSelectedSpatialAndTemporalLayer(const RtpPacket& rtp_packet);
void AddFrameComparison(const VideoFrame& reference,
const VideoFrame& render,
@@ -263,6 +266,7 @@ class VideoAnalyzer : public PacketReceiver,
rtc::CriticalSection crit_;
const int frames_to_process_;
+ const Timestamp test_end_;
int frames_recorded_ RTC_GUARDED_BY(comparison_lock_);
int frames_processed_ RTC_GUARDED_BY(comparison_lock_);
int captured_frames_ RTC_GUARDED_BY(comparison_lock_);
@@ -296,6 +300,8 @@ class VideoAnalyzer : public PacketReceiver,
bool quit_ RTC_GUARDED_BY(comparison_lock_);
rtc::Event done_;
+ std::unique_ptr<VideoRtpDepacketizer> vp8_depacketizer_;
+ std::unique_ptr<VideoRtpDepacketizer> vp9_depacketizer_;
std::unique_ptr<test::RtpFileWriter> rtp_file_writer_;
Clock* const clock_;
const int64_t start_ms_;
diff --git a/chromium/third_party/webrtc/video/video_quality_test.cc b/chromium/third_party/webrtc/video/video_quality_test.cc
index ad8c8080888..42d502a6883 100644
--- a/chromium/third_party/webrtc/video/video_quality_test.cc
+++ b/chromium/third_party/webrtc/video/video_quality_test.cc
@@ -22,6 +22,7 @@
#include "api/rtc_event_log_output_file.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "api/task_queue/task_queue_base.h"
+#include "api/test/create_frame_generator.h"
#include "api/video/builtin_video_bitrate_allocator_factory.h"
#include "api/video_codecs/video_encoder.h"
#include "call/fake_network_pipe.h"
@@ -525,10 +526,14 @@ void VideoQualityTest::CheckParamsAndInjectionComponents() {
RTC_CHECK_GE(params_.video[video_idx].target_bitrate_bps,
params_.video[video_idx].min_bitrate_bps);
int selected_stream = params_.ss[video_idx].selected_stream;
- int stream_tl = params_.ss[video_idx]
- .streams[selected_stream]
- .num_temporal_layers.value_or(1);
- RTC_CHECK_LT(params_.video[video_idx].selected_tl, stream_tl);
+ if (params_.video[video_idx].selected_tl > -1) {
+ RTC_CHECK_LT(selected_stream, params_.ss[video_idx].streams.size())
+ << "Can not use --selected_tl when --selected_stream is all streams";
+ int stream_tl = params_.ss[video_idx]
+ .streams[selected_stream]
+ .num_temporal_layers.value_or(1);
+ RTC_CHECK_LT(params_.video[video_idx].selected_tl, stream_tl);
+ }
RTC_CHECK_LE(params_.ss[video_idx].selected_stream,
params_.ss[video_idx].streams.size());
for (const VideoStream& stream : params_.ss[video_idx].streams) {
@@ -865,6 +870,7 @@ void VideoQualityTest::SetupVideo(Transport* send_transport,
VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
vp9_settings.denoisingOn = false;
vp9_settings.frameDroppingOn = false;
+ vp9_settings.automaticResizeOn = false;
vp9_settings.numberOfTemporalLayers = static_cast<unsigned char>(
params_.video[video_idx].num_temporal_layers);
vp9_settings.numberOfSpatialLayers = static_cast<unsigned char>(
@@ -887,6 +893,7 @@ void VideoQualityTest::SetupVideo(Transport* send_transport,
vp9_settings.numberOfSpatialLayers =
static_cast<unsigned char>(params_.ss[video_idx].num_spatial_layers);
vp9_settings.interLayerPred = params_.ss[video_idx].inter_layer_pred;
+ vp9_settings.automaticResizeOn = false;
video_encoder_configs_[video_idx].encoder_specific_settings =
new rtc::RefCountedObject<
VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
@@ -899,7 +906,9 @@ void VideoQualityTest::SetupVideo(Transport* send_transport,
VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
} else if (params_.video[video_idx].codec == "VP9") {
VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
- vp9_settings.automaticResizeOn = true;
+ // Only enable the quality scaler for a single spatial layer.
+ vp9_settings.automaticResizeOn =
+ params_.ss[video_idx].num_spatial_layers == 1;
video_encoder_configs_[video_idx].encoder_specific_settings =
new rtc::RefCountedObject<
VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
@@ -1049,24 +1058,23 @@ void VideoQualityTest::SetupThumbnailCapturers(size_t num_thumbnail_streams) {
auto frame_generator_capturer =
std::make_unique<test::FrameGeneratorCapturer>(
clock_,
- test::FrameGenerator::CreateSquareGenerator(
- static_cast<int>(thumbnail.width),
- static_cast<int>(thumbnail.height), absl::nullopt,
- absl::nullopt),
+ test::CreateSquareFrameGenerator(static_cast<int>(thumbnail.width),
+ static_cast<int>(thumbnail.height),
+ absl::nullopt, absl::nullopt),
thumbnail.max_framerate, *task_queue_factory_);
EXPECT_TRUE(frame_generator_capturer->Init());
thumbnail_capturers_.push_back(std::move(frame_generator_capturer));
}
}
-std::unique_ptr<test::FrameGenerator> VideoQualityTest::CreateFrameGenerator(
- size_t video_idx) {
+std::unique_ptr<test::FrameGeneratorInterface>
+VideoQualityTest::CreateFrameGenerator(size_t video_idx) {
// Setup frame generator.
const size_t kWidth = 1850;
const size_t kHeight = 1110;
- std::unique_ptr<test::FrameGenerator> frame_generator;
+ std::unique_ptr<test::FrameGeneratorInterface> frame_generator;
if (params_.screenshare[video_idx].generate_slides) {
- frame_generator = test::FrameGenerator::CreateSlideGenerator(
+ frame_generator = test::CreateSlideFrameGenerator(
kWidth, kHeight,
params_.screenshare[video_idx].slide_change_interval *
params_.video[video_idx].fps);
@@ -1080,7 +1088,7 @@ std::unique_ptr<test::FrameGenerator> VideoQualityTest::CreateFrameGenerator(
}
if (params_.screenshare[video_idx].scroll_duration == 0) {
// Cycle image every slide_change_interval seconds.
- frame_generator = test::FrameGenerator::CreateFromYuvFile(
+ frame_generator = test::CreateFromYuvFileFrameGenerator(
slides, kWidth, kHeight,
params_.screenshare[video_idx].slide_change_interval *
params_.video[video_idx].fps);
@@ -1095,7 +1103,7 @@ std::unique_ptr<test::FrameGenerator> VideoQualityTest::CreateFrameGenerator(
RTC_CHECK_LE(params_.screenshare[video_idx].scroll_duration,
params_.screenshare[video_idx].slide_change_interval);
- frame_generator = test::FrameGenerator::CreateScrollingInputFromYuvFiles(
+ frame_generator = test::CreateScrollingInputFromYuvFilesFrameGenerator(
clock_, slides, kWidth, kHeight, params_.video[video_idx].width,
params_.video[video_idx].height,
params_.screenshare[video_idx].scroll_duration * 1000,
@@ -1109,24 +1117,24 @@ void VideoQualityTest::CreateCapturers() {
RTC_DCHECK(video_sources_.empty());
video_sources_.resize(num_video_streams_);
for (size_t video_idx = 0; video_idx < num_video_streams_; ++video_idx) {
- std::unique_ptr<test::FrameGenerator> frame_generator;
+ std::unique_ptr<test::FrameGeneratorInterface> frame_generator;
if (params_.screenshare[video_idx].enabled) {
frame_generator = CreateFrameGenerator(video_idx);
} else if (params_.video[video_idx].clip_path == "Generator") {
- frame_generator = test::FrameGenerator::CreateSquareGenerator(
+ frame_generator = test::CreateSquareFrameGenerator(
static_cast<int>(params_.video[video_idx].width),
static_cast<int>(params_.video[video_idx].height), absl::nullopt,
absl::nullopt);
} else if (params_.video[video_idx].clip_path == "GeneratorI420A") {
- frame_generator = test::FrameGenerator::CreateSquareGenerator(
+ frame_generator = test::CreateSquareFrameGenerator(
static_cast<int>(params_.video[video_idx].width),
static_cast<int>(params_.video[video_idx].height),
- test::FrameGenerator::OutputType::kI420A, absl::nullopt);
+ test::FrameGeneratorInterface::OutputType::kI420A, absl::nullopt);
} else if (params_.video[video_idx].clip_path == "GeneratorI010") {
- frame_generator = test::FrameGenerator::CreateSquareGenerator(
+ frame_generator = test::CreateSquareFrameGenerator(
static_cast<int>(params_.video[video_idx].width),
static_cast<int>(params_.video[video_idx].height),
- test::FrameGenerator::OutputType::kI010, absl::nullopt);
+ test::FrameGeneratorInterface::OutputType::kI010, absl::nullopt);
} else if (params_.video[video_idx].clip_path.empty()) {
video_sources_[video_idx] = test::CreateVideoCapturer(
params_.video[video_idx].width, params_.video[video_idx].height,
@@ -1136,13 +1144,13 @@ void VideoQualityTest::CreateCapturers() {
continue;
} else {
// Failed to get actual camera, use chroma generator as backup.
- frame_generator = test::FrameGenerator::CreateSquareGenerator(
+ frame_generator = test::CreateSquareFrameGenerator(
static_cast<int>(params_.video[video_idx].width),
static_cast<int>(params_.video[video_idx].height), absl::nullopt,
absl::nullopt);
}
} else {
- frame_generator = test::FrameGenerator::CreateFromYuvFile(
+ frame_generator = test::CreateFromYuvFileFrameGenerator(
{params_.video[video_idx].clip_path}, params_.video[video_idx].width,
params_.video[video_idx].height, 1);
ASSERT_TRUE(frame_generator) << "Could not create capturer for "
@@ -1279,6 +1287,9 @@ void VideoQualityTest::RunWithAnalyzer(const Params& params) {
is_quick_test_enabled
? kFramesSentInQuickTest
: params_.analyzer.test_durations_secs * params_.video[0].fps,
+ is_quick_test_enabled
+ ? TimeDelta::Millis(1)
+ : TimeDelta::Seconds(params_.analyzer.test_durations_secs),
graph_data_output_file, graph_title,
kVideoSendSsrcs[params_.ss[0].selected_stream],
kSendRtxSsrcs[params_.ss[0].selected_stream],
diff --git a/chromium/third_party/webrtc/video/video_quality_test.h b/chromium/third_party/webrtc/video/video_quality_test.h
index c28769267d3..21778307940 100644
--- a/chromium/third_party/webrtc/video/video_quality_test.h
+++ b/chromium/third_party/webrtc/video/video_quality_test.h
@@ -19,13 +19,13 @@
#include "api/rtc_event_log/rtc_event_log_factory.h"
#include "api/task_queue/task_queue_base.h"
#include "api/task_queue/task_queue_factory.h"
+#include "api/test/frame_generator_interface.h"
#include "api/test/video_quality_test_fixture.h"
#include "api/video/video_bitrate_allocator_factory.h"
#include "call/fake_network_pipe.h"
#include "media/engine/internal_decoder_factory.h"
#include "media/engine/internal_encoder_factory.h"
#include "test/call_test.h"
-#include "test/frame_generator.h"
#include "test/layer_filtering_transport.h"
#include "video/video_analyzer.h"
#ifdef WEBRTC_WIN
@@ -76,7 +76,8 @@ class VideoQualityTest : public test::CallTest,
// Helper methods for setting up the call.
void CreateCapturers();
- std::unique_ptr<test::FrameGenerator> CreateFrameGenerator(size_t video_idx);
+ std::unique_ptr<test::FrameGeneratorInterface> CreateFrameGenerator(
+ size_t video_idx);
void SetupThumbnailCapturers(size_t num_thumbnail_streams);
std::unique_ptr<VideoDecoder> CreateVideoDecoder(
const SdpVideoFormat& format);
diff --git a/chromium/third_party/webrtc/video/video_receive_stream.cc b/chromium/third_party/webrtc/video/video_receive_stream.cc
index 7f68f76d2ea..b2b96db9bf8 100644
--- a/chromium/third_party/webrtc/video/video_receive_stream.cc
+++ b/chromium/third_party/webrtc/video/video_receive_stream.cc
@@ -101,7 +101,7 @@ class WebRtcRecordableEncodedFrame : public RecordableEncodedFrame {
EncodedResolution resolution() const override { return resolution_; }
Timestamp render_time() const override {
- return Timestamp::ms(render_time_ms_);
+ return Timestamp::Millis(render_time_ms_);
}
private:
@@ -215,7 +215,8 @@ VideoReceiveStream::VideoReceiveStream(
this, // NackSender
nullptr, // Use default KeyFrameRequestSender
this, // OnCompleteFrameCallback
- config_.frame_decryptor),
+ config_.frame_decryptor,
+ config_.frame_transformer),
rtp_stream_sync_(this),
max_wait_for_keyframe_ms_(KeyframeIntervalSettings::ParseFromFieldTrials()
.MaxWaitForKeyframeMs()
@@ -534,6 +535,12 @@ void VideoReceiveStream::SetFrameDecryptor(
rtp_video_stream_receiver_.SetFrameDecryptor(std::move(frame_decryptor));
}
+void VideoReceiveStream::SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) {
+ rtp_video_stream_receiver_.SetDepacketizerToDecoderFrameTransformer(
+ std::move(frame_transformer));
+}
+
void VideoReceiveStream::SendNack(const std::vector<uint16_t>& sequence_numbers,
bool buffering_allowed) {
RTC_DCHECK(buffering_allowed);
@@ -580,7 +587,7 @@ void VideoReceiveStream::OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) {
rtp_video_stream_receiver_.UpdateRtt(max_rtt_ms);
}
-int VideoReceiveStream::id() const {
+uint32_t VideoReceiveStream::id() const {
RTC_DCHECK_RUN_ON(&worker_sequence_checker_);
return config_.rtp.remote_ssrc;
}
diff --git a/chromium/third_party/webrtc/video/video_receive_stream.h b/chromium/third_party/webrtc/video/video_receive_stream.h
index f0977106306..c1ebf2b600e 100644
--- a/chromium/third_party/webrtc/video/video_receive_stream.h
+++ b/chromium/third_party/webrtc/video/video_receive_stream.h
@@ -99,6 +99,8 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream,
void SetFrameDecryptor(
rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor) override;
+ void SetDepacketizerToDecoderFrameTransformer(
+ rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) override;
// Implements rtc::VideoSinkInterface<VideoFrame>.
void OnFrame(const VideoFrame& video_frame) override;
@@ -117,7 +119,7 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream,
void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
// Implements Syncable.
- int id() const override;
+ uint32_t id() const override;
absl::optional<Syncable::Info> GetInfo() const override;
bool GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp,
int64_t* time_ms) const override;
diff --git a/chromium/third_party/webrtc/video/video_receive_stream_unittest.cc b/chromium/third_party/webrtc/video/video_receive_stream_unittest.cc
index 503660eca6b..54896e89d8a 100644
--- a/chromium/third_party/webrtc/video/video_receive_stream_unittest.cc
+++ b/chromium/third_party/webrtc/video/video_receive_stream_unittest.cc
@@ -475,7 +475,7 @@ class VideoReceiveStreamTestWithSimulatedClock : public ::testing::Test {
}
VideoReceiveStreamTestWithSimulatedClock()
- : time_controller_(Timestamp::ms(4711)),
+ : time_controller_(Timestamp::Millis(4711)),
fake_decoder_factory_([this] {
return std::make_unique<FakeDecoder2>([this] { OnFrameDecoded(); });
}),
@@ -523,8 +523,8 @@ class VideoReceiveStreamTestWithSimulatedClock : public ::testing::Test {
TEST_F(VideoReceiveStreamTestWithSimulatedClock,
RequestsKeyFramesUntilKeyFrameReceived) {
- auto tick =
- TimeDelta::ms(internal::VideoReceiveStream::kMaxWaitForKeyFrameMs / 2);
+ auto tick = TimeDelta::Millis(
+ internal::VideoReceiveStream::kMaxWaitForKeyFrameMs / 2);
EXPECT_CALL(mock_transport_, SendRtcp).Times(1);
video_receive_stream_.GenerateKeyFrame();
PassEncodedFrameAndWait(MakeFrame(VideoFrameType::kVideoFrameDelta, 0));
diff --git a/chromium/third_party/webrtc/video/video_replay.cc b/chromium/third_party/webrtc/video/video_replay.cc
deleted file mode 100644
index 6562f423f07..00000000000
--- a/chromium/third_party/webrtc/video/video_replay.cc
+++ /dev/null
@@ -1,524 +0,0 @@
-/*
- * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdio.h>
-
-#include <fstream>
-#include <map>
-#include <memory>
-
-#include "absl/flags/flag.h"
-#include "absl/flags/parse.h"
-#include "api/rtc_event_log/rtc_event_log.h"
-#include "api/task_queue/default_task_queue_factory.h"
-#include "api/test/video/function_video_decoder_factory.h"
-#include "api/video_codecs/video_decoder.h"
-#include "call/call.h"
-#include "common_video/libyuv/include/webrtc_libyuv.h"
-#include "media/engine/internal_decoder_factory.h"
-#include "rtc_base/checks.h"
-#include "rtc_base/string_to_number.h"
-#include "rtc_base/strings/json.h"
-#include "rtc_base/time_utils.h"
-#include "system_wrappers/include/clock.h"
-#include "system_wrappers/include/sleep.h"
-#include "test/call_config_utils.h"
-#include "test/call_test.h"
-#include "test/encoder_settings.h"
-#include "test/fake_decoder.h"
-#include "test/gtest.h"
-#include "test/null_transport.h"
-#include "test/rtp_file_reader.h"
-#include "test/rtp_header_parser.h"
-#include "test/run_loop.h"
-#include "test/run_test.h"
-#include "test/test_video_capturer.h"
-#include "test/testsupport/frame_writer.h"
-#include "test/video_renderer.h"
-
-// Flag for payload type.
-ABSL_FLAG(int,
- media_payload_type,
- webrtc::test::CallTest::kPayloadTypeVP8,
- "Media payload type");
-
-// Flag for RED payload type.
-ABSL_FLAG(int,
- red_payload_type,
- webrtc::test::CallTest::kRedPayloadType,
- "RED payload type");
-
-// Flag for ULPFEC payload type.
-ABSL_FLAG(int,
- ulpfec_payload_type,
- webrtc::test::CallTest::kUlpfecPayloadType,
- "ULPFEC payload type");
-
-ABSL_FLAG(int,
- media_payload_type_rtx,
- webrtc::test::CallTest::kSendRtxPayloadType,
- "Media over RTX payload type");
-
-ABSL_FLAG(int,
- red_payload_type_rtx,
- webrtc::test::CallTest::kRtxRedPayloadType,
- "RED over RTX payload type");
-
-// Flag for SSRC.
-const std::string& DefaultSsrc() {
- static const std::string ssrc =
- std::to_string(webrtc::test::CallTest::kVideoSendSsrcs[0]);
- return ssrc;
-}
-ABSL_FLAG(std::string, ssrc, DefaultSsrc().c_str(), "Incoming SSRC");
-
-const std::string& DefaultSsrcRtx() {
- static const std::string ssrc_rtx =
- std::to_string(webrtc::test::CallTest::kSendRtxSsrcs[0]);
- return ssrc_rtx;
-}
-ABSL_FLAG(std::string, ssrc_rtx, DefaultSsrcRtx().c_str(), "Incoming RTX SSRC");
-
-// Flag for abs-send-time id.
-ABSL_FLAG(int, abs_send_time_id, -1, "RTP extension ID for abs-send-time");
-
-// Flag for transmission-offset id.
-ABSL_FLAG(int,
- transmission_offset_id,
- -1,
- "RTP extension ID for transmission-offset");
-
-// Flag for rtpdump input file.
-ABSL_FLAG(std::string, input_file, "", "input file");
-
-ABSL_FLAG(std::string, config_file, "", "config file");
-
-// Flag for raw output files.
-ABSL_FLAG(std::string,
- out_base,
- "",
- "Basename (excluding .jpg) for raw output");
-
-ABSL_FLAG(std::string,
- decoder_bitstream_filename,
- "",
- "Decoder bitstream output file");
-
-// Flag for video codec.
-ABSL_FLAG(std::string, codec, "VP8", "Video codec");
-
-namespace {
-
-static bool ValidatePayloadType(int32_t payload_type) {
- return payload_type > 0 && payload_type <= 127;
-}
-
-static bool ValidateSsrc(const char* ssrc_string) {
- return rtc::StringToNumber<uint32_t>(ssrc_string).has_value();
-}
-
-static bool ValidateOptionalPayloadType(int32_t payload_type) {
- return payload_type == -1 || ValidatePayloadType(payload_type);
-}
-
-static bool ValidateRtpHeaderExtensionId(int32_t extension_id) {
- return extension_id >= -1 && extension_id < 15;
-}
-
-bool ValidateInputFilenameNotEmpty(const std::string& string) {
- return !string.empty();
-}
-
-static int MediaPayloadType() {
- return absl::GetFlag(FLAGS_media_payload_type);
-}
-
-static int RedPayloadType() {
- return absl::GetFlag(FLAGS_red_payload_type);
-}
-
-static int UlpfecPayloadType() {
- return absl::GetFlag(FLAGS_ulpfec_payload_type);
-}
-
-static int MediaPayloadTypeRtx() {
- return absl::GetFlag(FLAGS_media_payload_type_rtx);
-}
-
-static int RedPayloadTypeRtx() {
- return absl::GetFlag(FLAGS_red_payload_type_rtx);
-}
-
-static uint32_t Ssrc() {
- return rtc::StringToNumber<uint32_t>(absl::GetFlag(FLAGS_ssrc)).value();
-}
-
-static uint32_t SsrcRtx() {
- return rtc::StringToNumber<uint32_t>(absl::GetFlag(FLAGS_ssrc_rtx)).value();
-}
-
-static int AbsSendTimeId() {
- return absl::GetFlag(FLAGS_abs_send_time_id);
-}
-
-static int TransmissionOffsetId() {
- return absl::GetFlag(FLAGS_transmission_offset_id);
-}
-
-static std::string InputFile() {
- return absl::GetFlag(FLAGS_input_file);
-}
-
-static std::string ConfigFile() {
- return absl::GetFlag(FLAGS_config_file);
-}
-
-static std::string OutBase() {
- return absl::GetFlag(FLAGS_out_base);
-}
-
-static std::string DecoderBitstreamFilename() {
- return absl::GetFlag(FLAGS_decoder_bitstream_filename);
-}
-
-static std::string Codec() {
- return absl::GetFlag(FLAGS_codec);
-}
-
-} // namespace
-
-namespace webrtc {
-
-static const uint32_t kReceiverLocalSsrc = 0x123456;
-
-class FileRenderPassthrough : public rtc::VideoSinkInterface<VideoFrame> {
- public:
- FileRenderPassthrough(const std::string& basename,
- rtc::VideoSinkInterface<VideoFrame>* renderer)
- : basename_(basename), renderer_(renderer), file_(nullptr), count_(0) {}
-
- ~FileRenderPassthrough() override {
- if (file_)
- fclose(file_);
- }
-
- private:
- void OnFrame(const VideoFrame& video_frame) override {
- if (renderer_)
- renderer_->OnFrame(video_frame);
-
- if (basename_.empty())
- return;
-
- std::stringstream filename;
- filename << basename_ << count_++ << "_" << video_frame.timestamp()
- << ".jpg";
-
- test::JpegFrameWriter frame_writer(filename.str());
- RTC_CHECK(frame_writer.WriteFrame(video_frame, 100));
- }
-
- const std::string basename_;
- rtc::VideoSinkInterface<VideoFrame>* const renderer_;
- FILE* file_;
- size_t count_;
-};
-
-class DecoderBitstreamFileWriter : public test::FakeDecoder {
- public:
- explicit DecoderBitstreamFileWriter(const char* filename)
- : file_(fopen(filename, "wb")) {
- RTC_DCHECK(file_);
- }
- ~DecoderBitstreamFileWriter() override { fclose(file_); }
-
- int32_t Decode(const EncodedImage& encoded_frame,
- bool /* missing_frames */,
- int64_t /* render_time_ms */) override {
- if (fwrite(encoded_frame.data(), 1, encoded_frame.size(), file_) <
- encoded_frame.size()) {
- RTC_LOG_ERR(LS_ERROR) << "fwrite of encoded frame failed.";
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- return WEBRTC_VIDEO_CODEC_OK;
- }
-
- private:
- FILE* file_;
-};
-
-// The RtpReplayer is responsible for parsing the configuration provided by the
-// user, setting up the windows, receive streams and decoders, and then replaying
-// the provided RTP dump.
-class RtpReplayer final {
- public:
- // Replay a rtp dump with an optional json configuration.
- static void Replay(const std::string& replay_config_path,
- const std::string& rtp_dump_path) {
- std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
- webrtc::CreateDefaultTaskQueueFactory();
- webrtc::RtcEventLogNull event_log;
- Call::Config call_config(&event_log);
- call_config.task_queue_factory = task_queue_factory.get();
- std::unique_ptr<Call> call(Call::Create(call_config));
- std::unique_ptr<StreamState> stream_state;
- // Attempt to load the configuration
- if (replay_config_path.empty()) {
- stream_state = ConfigureFromFlags(rtp_dump_path, call.get());
- } else {
- stream_state = ConfigureFromFile(replay_config_path, call.get());
- }
- if (stream_state == nullptr) {
- return;
- }
- // Attempt to create an RtpReader from the input file.
- std::unique_ptr<test::RtpFileReader> rtp_reader =
- CreateRtpReader(rtp_dump_path);
- if (rtp_reader == nullptr) {
- return;
- }
- // Start replaying the provided stream now that it has been configured.
- for (const auto& receive_stream : stream_state->receive_streams) {
- receive_stream->Start();
- }
- ReplayPackets(call.get(), rtp_reader.get());
- for (const auto& receive_stream : stream_state->receive_streams) {
- call->DestroyVideoReceiveStream(receive_stream);
- }
- }
-
- private:
- // Holds all the shared memory structures required for a receive stream. This
- // structure is used to prevent members being deallocated before the replay
- // has been finished.
- struct StreamState {
- test::NullTransport transport;
- std::vector<std::unique_ptr<rtc::VideoSinkInterface<VideoFrame>>> sinks;
- std::vector<VideoReceiveStream*> receive_streams;
- std::unique_ptr<VideoDecoderFactory> decoder_factory;
- };
-
- // Loads multiple configurations from the provided configuration file.
- static std::unique_ptr<StreamState> ConfigureFromFile(
- const std::string& config_path,
- Call* call) {
- auto stream_state = std::make_unique<StreamState>();
- // Parse the configuration file.
- std::ifstream config_file(config_path);
- std::stringstream raw_json_buffer;
- raw_json_buffer << config_file.rdbuf();
- std::string raw_json = raw_json_buffer.str();
- Json::Reader json_reader;
- Json::Value json_configs;
- if (!json_reader.parse(raw_json, json_configs)) {
- fprintf(stderr, "Error parsing JSON config\n");
- fprintf(stderr, "%s\n", json_reader.getFormatedErrorMessages().c_str());
- return nullptr;
- }
-
- stream_state->decoder_factory = std::make_unique<InternalDecoderFactory>();
- size_t config_count = 0;
- for (const auto& json : json_configs) {
- // Create the configuration and parse the JSON into the config.
- auto receive_config =
- ParseVideoReceiveStreamJsonConfig(&(stream_state->transport), json);
- // Instantiate the underlying decoder.
- for (auto& decoder : receive_config.decoders) {
- decoder = test::CreateMatchingDecoder(decoder.payload_type,
- decoder.video_format.name);
- decoder.decoder_factory = stream_state->decoder_factory.get();
- }
- // Create a window for this config.
- std::stringstream window_title;
- window_title << "Playback Video (" << config_count++ << ")";
- stream_state->sinks.emplace_back(
- test::VideoRenderer::Create(window_title.str().c_str(), 640, 480));
- // Create a receive stream for this config.
- receive_config.renderer = stream_state->sinks.back().get();
- stream_state->receive_streams.emplace_back(
- call->CreateVideoReceiveStream(std::move(receive_config)));
- }
- return stream_state;
- }
-
- // Loads the base configuration from flags passed in on the commandline.
- static std::unique_ptr<StreamState> ConfigureFromFlags(
- const std::string& rtp_dump_path,
- Call* call) {
- auto stream_state = std::make_unique<StreamState>();
- // Create the video renderers. We must add both to the stream state to keep
- // them from deallocating.
- std::stringstream window_title;
- window_title << "Playback Video (" << rtp_dump_path << ")";
- std::unique_ptr<test::VideoRenderer> playback_video(
- test::VideoRenderer::Create(window_title.str().c_str(), 640, 480));
- auto file_passthrough = std::make_unique<FileRenderPassthrough>(
- OutBase(), playback_video.get());
- stream_state->sinks.push_back(std::move(playback_video));
- stream_state->sinks.push_back(std::move(file_passthrough));
- // Setup the configuration from the flags.
- VideoReceiveStream::Config receive_config(&(stream_state->transport));
- receive_config.rtp.remote_ssrc = Ssrc();
- receive_config.rtp.local_ssrc = kReceiverLocalSsrc;
- receive_config.rtp.rtx_ssrc = SsrcRtx();
- receive_config.rtp.rtx_associated_payload_types[MediaPayloadTypeRtx()] =
- MediaPayloadType();
- receive_config.rtp.rtx_associated_payload_types[RedPayloadTypeRtx()] =
- RedPayloadType();
- receive_config.rtp.ulpfec_payload_type = UlpfecPayloadType();
- receive_config.rtp.red_payload_type = RedPayloadType();
- receive_config.rtp.nack.rtp_history_ms = 1000;
- if (TransmissionOffsetId() != -1) {
- receive_config.rtp.extensions.push_back(RtpExtension(
- RtpExtension::kTimestampOffsetUri, TransmissionOffsetId()));
- }
- if (AbsSendTimeId() != -1) {
- receive_config.rtp.extensions.push_back(
- RtpExtension(RtpExtension::kAbsSendTimeUri, AbsSendTimeId()));
- }
- receive_config.renderer = stream_state->sinks.back().get();
-
- // Setup the receiving stream
- VideoReceiveStream::Decoder decoder;
- decoder = test::CreateMatchingDecoder(MediaPayloadType(), Codec());
- if (DecoderBitstreamFilename().empty()) {
- stream_state->decoder_factory =
- std::make_unique<InternalDecoderFactory>();
- } else {
- // Replace decoder with file writer if we're writing the bitstream to a
- // file instead.
- stream_state->decoder_factory =
- std::make_unique<test::FunctionVideoDecoderFactory>([]() {
- return std::make_unique<DecoderBitstreamFileWriter>(
- DecoderBitstreamFilename().c_str());
- });
- }
- decoder.decoder_factory = stream_state->decoder_factory.get();
- receive_config.decoders.push_back(decoder);
-
- stream_state->receive_streams.emplace_back(
- call->CreateVideoReceiveStream(std::move(receive_config)));
- return stream_state;
- }
-
- static std::unique_ptr<test::RtpFileReader> CreateRtpReader(
- const std::string& rtp_dump_path) {
- std::unique_ptr<test::RtpFileReader> rtp_reader(test::RtpFileReader::Create(
- test::RtpFileReader::kRtpDump, rtp_dump_path));
- if (!rtp_reader) {
- rtp_reader.reset(test::RtpFileReader::Create(test::RtpFileReader::kPcap,
- rtp_dump_path));
- if (!rtp_reader) {
- fprintf(
- stderr,
- "Couldn't open input file as either a rtpdump or .pcap. Note "
- "that .pcapng is not supported.\nTrying to interpret the file as "
- "length/packet interleaved.\n");
- rtp_reader.reset(test::RtpFileReader::Create(
- test::RtpFileReader::kLengthPacketInterleaved, rtp_dump_path));
- if (!rtp_reader) {
- fprintf(stderr,
- "Unable to open input file with any supported format\n");
- return nullptr;
- }
- }
- }
- return rtp_reader;
- }
-
- static void ReplayPackets(Call* call, test::RtpFileReader* rtp_reader) {
- int64_t replay_start_ms = -1;
- int num_packets = 0;
- std::map<uint32_t, int> unknown_packets;
- while (true) {
- int64_t now_ms = rtc::TimeMillis();
- if (replay_start_ms == -1) {
- replay_start_ms = now_ms;
- }
-
- test::RtpPacket packet;
- if (!rtp_reader->NextPacket(&packet)) {
- break;
- }
-
- int64_t deliver_in_ms = replay_start_ms + packet.time_ms - now_ms;
- if (deliver_in_ms > 0) {
- SleepMs(deliver_in_ms);
- }
-
- ++num_packets;
- switch (call->Receiver()->DeliverPacket(
- webrtc::MediaType::VIDEO,
- rtc::CopyOnWriteBuffer(packet.data, packet.length),
- /* packet_time_us */ -1)) {
- case PacketReceiver::DELIVERY_OK:
- break;
- case PacketReceiver::DELIVERY_UNKNOWN_SSRC: {
- RTPHeader header;
- std::unique_ptr<RtpHeaderParser> parser(
- RtpHeaderParser::CreateForTest());
- parser->Parse(packet.data, packet.length, &header);
- if (unknown_packets[header.ssrc] == 0)
- fprintf(stderr, "Unknown SSRC: %u!\n", header.ssrc);
- ++unknown_packets[header.ssrc];
- break;
- }
- case PacketReceiver::DELIVERY_PACKET_ERROR: {
- fprintf(stderr,
- "Packet error, corrupt packets or incorrect setup?\n");
- RTPHeader header;
- std::unique_ptr<RtpHeaderParser> parser(
- RtpHeaderParser::CreateForTest());
- parser->Parse(packet.data, packet.length, &header);
- fprintf(stderr, "Packet len=%zu pt=%u seq=%u ts=%u ssrc=0x%8x\n",
- packet.length, header.payloadType, header.sequenceNumber,
- header.timestamp, header.ssrc);
- break;
- }
- }
- }
- fprintf(stderr, "num_packets: %d\n", num_packets);
-
- for (std::map<uint32_t, int>::const_iterator it = unknown_packets.begin();
- it != unknown_packets.end(); ++it) {
- fprintf(stderr, "Packets for unknown ssrc '%u': %d\n", it->first,
- it->second);
- }
- }
-}; // class RtpReplayer
-
-void RtpReplay() {
- RtpReplayer::Replay(ConfigFile(), InputFile());
-}
-
-} // namespace webrtc
-
-int main(int argc, char* argv[]) {
- ::testing::InitGoogleTest(&argc, argv);
- absl::ParseCommandLine(argc, argv);
-
- RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_media_payload_type)));
- RTC_CHECK(ValidatePayloadType(absl::GetFlag(FLAGS_media_payload_type_rtx)));
- RTC_CHECK(ValidateOptionalPayloadType(absl::GetFlag(FLAGS_red_payload_type)));
- RTC_CHECK(
- ValidateOptionalPayloadType(absl::GetFlag(FLAGS_red_payload_type_rtx)));
- RTC_CHECK(
- ValidateOptionalPayloadType(absl::GetFlag(FLAGS_ulpfec_payload_type)));
- RTC_CHECK(ValidateSsrc(absl::GetFlag(FLAGS_ssrc).c_str()));
- RTC_CHECK(ValidateSsrc(absl::GetFlag(FLAGS_ssrc_rtx).c_str()));
- RTC_CHECK(
- ValidateRtpHeaderExtensionId(absl::GetFlag(FLAGS_abs_send_time_id)));
- RTC_CHECK(ValidateRtpHeaderExtensionId(
- absl::GetFlag(FLAGS_transmission_offset_id)));
- RTC_CHECK(ValidateInputFilenameNotEmpty(absl::GetFlag(FLAGS_input_file)));
-
- webrtc::test::RunTest(webrtc::RtpReplay);
- return 0;
-}
diff --git a/chromium/third_party/webrtc/video/video_send_stream.cc b/chromium/third_party/webrtc/video/video_send_stream.cc
index 8fae407bc19..497db28c0ff 100644
--- a/chromium/third_party/webrtc/video/video_send_stream.cc
+++ b/chromium/third_party/webrtc/video/video_send_stream.cc
@@ -19,6 +19,7 @@
#include "modules/rtp_rtcp/source/rtp_sender.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
#include "rtc_base/task_utils/to_queued_task.h"
#include "system_wrappers/include/clock.h"
#include "system_wrappers/include/field_trial.h"
@@ -130,7 +131,23 @@ VideoSendStream::~VideoSendStream() {
void VideoSendStream::UpdateActiveSimulcastLayers(
const std::vector<bool> active_layers) {
RTC_DCHECK_RUN_ON(&thread_checker_);
- RTC_LOG(LS_INFO) << "VideoSendStream::UpdateActiveSimulcastLayers";
+
+ rtc::StringBuilder active_layers_string;
+ active_layers_string << "{";
+ for (size_t i = 0; i < active_layers.size(); ++i) {
+ if (active_layers[i]) {
+ active_layers_string << "1";
+ } else {
+ active_layers_string << "0";
+ }
+ if (i < active_layers.size() - 1) {
+ active_layers_string << ", ";
+ }
+ }
+ active_layers_string << "}";
+ RTC_LOG(LS_INFO) << "UpdateActiveSimulcastLayers: "
+ << active_layers_string.str();
+
VideoSendStreamImpl* send_stream = send_stream_.get();
worker_queue_->PostTask([this, send_stream, active_layers] {
send_stream->UpdateActiveSimulcastLayers(active_layers);
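
For three simulcast layers with the middle one disabled, the new log line reads "UpdateActiveSimulcastLayers: {1, 0, 1}". A standalone re-implementation of the formatting loop for illustration (the real code uses rtc::StringBuilder):

#include <cstdio>
#include <string>
#include <vector>

std::string FormatActiveLayers(const std::vector<bool>& active_layers) {
  std::string s = "{";
  for (size_t i = 0; i < active_layers.size(); ++i) {
    s += active_layers[i] ? "1" : "0";
    if (i < active_layers.size() - 1)
      s += ", ";
  }
  return s + "}";
}

int main() {
  std::printf("%s\n", FormatActiveLayers({true, false, true}).c_str());
  // prints: {1, 0, 1}
}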
diff --git a/chromium/third_party/webrtc/video/video_send_stream_impl.cc b/chromium/third_party/webrtc/video/video_send_stream_impl.cc
index 97f3bb7f4cc..539eb353f7b 100644
--- a/chromium/third_party/webrtc/video/video_send_stream_impl.cc
+++ b/chromium/third_party/webrtc/video/video_send_stream_impl.cc
@@ -47,7 +47,7 @@ static constexpr int kMaxVbaSizeDifferencePercent = 10;
// Max time we will throttle similar video bitrate allocations.
static constexpr int64_t kMaxVbaThrottleTimeMs = 500;
-constexpr TimeDelta kEncoderTimeOut = TimeDelta::Seconds<2>();
+constexpr TimeDelta kEncoderTimeOut = TimeDelta::Seconds(2);
bool TransportSeqNumExtensionConfigured(const VideoSendStream::Config& config) {
const std::vector<RtpExtension>& extensions = config.rtp.extensions;
@@ -58,12 +58,16 @@ bool TransportSeqNumExtensionConfigured(const VideoSendStream::Config& config) {
// Calculate max padding bitrate for a multi layer codec.
int CalculateMaxPadBitrateBps(const std::vector<VideoStream>& streams,
+ bool is_svc,
VideoEncoderConfig::ContentType content_type,
int min_transmit_bitrate_bps,
bool pad_to_min_bitrate,
bool alr_probing) {
int pad_up_to_bitrate_bps = 0;
+ RTC_DCHECK(!is_svc || streams.size() <= 1) << "Only one stream is allowed in "
+ "SVC mode.";
+
// Keep only the active streams.
std::vector<VideoStream> active_streams;
for (const VideoStream& stream : streams) {
@@ -71,7 +75,13 @@ int CalculateMaxPadBitrateBps(const std::vector<VideoStream>& streams,
active_streams.emplace_back(stream);
}
- if (active_streams.size() > 1) {
+ if (active_streams.size() > 1 || (!active_streams.empty() && is_svc)) {
+ // Simulcast or SVC is used.
+ // If SVC is used, the stream bitrates should already encode the SVC
+ // layer bitrates:
+ // min_bitrate = min bitrate of the lowest SVC layer.
+ // target_bitrate = sum of target bitrates of lower layers + min bitrate
+ // of the last one (as used in the calculations below).
+ // max_bitrate = sum of all active layers' max_bitrate.
if (alr_probing) {
// With alr probing, just pad to the min bitrate of the lowest stream,
// probing will handle the rest of the rampup.
@@ -82,17 +92,26 @@ int CalculateMaxPadBitrateBps(const std::vector<VideoStream>& streams,
const double hysteresis_factor =
RateControlSettings::ParseFromFieldTrials()
.GetSimulcastHysteresisFactor(content_type);
- const size_t top_active_stream_idx = active_streams.size() - 1;
- pad_up_to_bitrate_bps = std::min(
- static_cast<int>(
- hysteresis_factor *
- active_streams[top_active_stream_idx].min_bitrate_bps +
- 0.5),
- active_streams[top_active_stream_idx].target_bitrate_bps);
-
- // Add target_bitrate_bps of the lower active streams.
- for (size_t i = 0; i < top_active_stream_idx; ++i) {
- pad_up_to_bitrate_bps += active_streams[i].target_bitrate_bps;
+ if (is_svc) {
+ // For SVC, since there is only one "stream", the padding bitrate
+ // needed to enable the top spatial layer is stored in the
+ // |target_bitrate_bps| field.
+ // TODO(sprang): This behavior needs to die.
+ pad_up_to_bitrate_bps = static_cast<int>(
+ hysteresis_factor * active_streams[0].target_bitrate_bps + 0.5);
+ } else {
+ const size_t top_active_stream_idx = active_streams.size() - 1;
+ pad_up_to_bitrate_bps = std::min(
+ static_cast<int>(
+ hysteresis_factor *
+ active_streams[top_active_stream_idx].min_bitrate_bps +
+ 0.5),
+ active_streams[top_active_stream_idx].target_bitrate_bps);
+
+ // Add target_bitrate_bps of the lower active streams.
+ for (size_t i = 0; i < top_active_stream_idx; ++i) {
+ pad_up_to_bitrate_bps += active_streams[i].target_bitrate_bps;
+ }
}
}
} else if (!active_streams.empty() && pad_to_min_bitrate) {
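
A condensed model of the padding rule above with worked numbers. It omits the ALR-probing branch, the Stream struct and function name are local to this sketch, and the hysteresis factor of 1.1 is an illustrative value rather than the field-trial default:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Stream {
  int min_bps;
  int target_bps;
};

// For SVC the single stream's target already encodes the rate needed to
// enable the top spatial layer; for simulcast, pad up to the hysteresis-
// scaled min bitrate of the top stream (capped by its target) plus the
// targets of all lower streams.
int MaxPadBitrateBps(const std::vector<Stream>& active, bool is_svc,
                     double hysteresis) {
  if (active.empty())
    return 0;
  if (is_svc)
    return static_cast<int>(hysteresis * active[0].target_bps + 0.5);
  int pad = std::min(
      static_cast<int>(hysteresis * active.back().min_bps + 0.5),
      active.back().target_bps);
  for (size_t i = 0; i + 1 < active.size(); ++i)
    pad += active[i].target_bps;
  return pad;
}

int main() {
  // Two-layer simulcast: min(1.1 * 600000, 800000) + 300000 = 960000.
  std::vector<Stream> simulcast = {{150000, 300000}, {600000, 800000}};
  std::printf("%d\n", MaxPadBitrateBps(simulcast, false, 1.1));
  // Single SVC "stream": 1.1 * 2500000 = 2750000.
  std::vector<Stream> svc = {{60000, 2500000}};
  std::printf("%d\n", MaxPadBitrateBps(svc, true, 1.1));
}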
@@ -158,7 +177,7 @@ bool SameStreamsEnabled(const VideoBitrateAllocation& lhs,
PacingConfig::PacingConfig()
: pacing_factor("factor", PacedSender::kDefaultPaceMultiplier),
max_pacing_delay("max_delay",
- TimeDelta::ms(PacedSender::kMaxQueueLengthMs)) {
+ TimeDelta::Millis(PacedSender::kMaxQueueLengthMs)) {
ParseFieldTrial({&pacing_factor, &max_pacing_delay},
field_trial::FindFullName("WebRTC-Video-Pacing"));
}
@@ -202,19 +221,20 @@ VideoSendStreamImpl::VideoSendStreamImpl(
video_stream_encoder_(video_stream_encoder),
encoder_feedback_(clock, config_->rtp.ssrcs, video_stream_encoder),
bandwidth_observer_(transport->GetBandwidthObserver()),
- rtp_video_sender_(transport_->CreateRtpVideoSender(
- suspended_ssrcs,
- suspended_payload_states,
- config_->rtp,
- config_->rtcp_report_interval_ms,
- config_->send_transport,
- CreateObservers(call_stats,
- &encoder_feedback_,
- stats_proxy_,
- send_delay_stats),
- event_log,
- std::move(fec_controller),
- CreateFrameEncryptionConfig(config_))),
+ rtp_video_sender_(
+ transport_->CreateRtpVideoSender(suspended_ssrcs,
+ suspended_payload_states,
+ config_->rtp,
+ config_->rtcp_report_interval_ms,
+ config_->send_transport,
+ CreateObservers(call_stats,
+ &encoder_feedback_,
+ stats_proxy_,
+ send_delay_stats),
+ event_log,
+ std::move(fec_controller),
+ CreateFrameEncryptionConfig(config_),
+ config->frame_transformer)),
weak_ptr_factory_(this) {
video_stream_encoder->SetFecControllerOverride(rtp_video_sender_);
RTC_DCHECK_RUN_ON(worker_queue_);
@@ -321,7 +341,6 @@ void VideoSendStreamImpl::DeliverRtcp(const uint8_t* packet, size_t length) {
void VideoSendStreamImpl::UpdateActiveSimulcastLayers(
const std::vector<bool> active_layers) {
RTC_DCHECK_RUN_ON(worker_queue_);
- RTC_LOG(LS_INFO) << "VideoSendStream::UpdateActiveSimulcastLayers";
bool previously_active = rtp_video_sender_->IsActive();
rtp_video_sender_->SetActiveModules(active_layers);
if (!rtp_video_sender_->IsActive() && previously_active) {
@@ -387,7 +406,7 @@ void VideoSendStreamImpl::StopVideoSendStream() {
bitrate_allocator_->RemoveObserver(this);
check_encoder_activity_task_.Stop();
video_stream_encoder_->OnBitrateUpdated(DataRate::Zero(), DataRate::Zero(),
- DataRate::Zero(), 0, 0);
+ DataRate::Zero(), 0, 0, 0);
stats_proxy_->OnSetEncoderTargetRate(0);
}
@@ -471,22 +490,23 @@ MediaStreamAllocationConfig VideoSendStreamImpl::GetAllocationConfig() const {
void VideoSendStreamImpl::OnEncoderConfigurationChanged(
std::vector<VideoStream> streams,
+ bool is_svc,
VideoEncoderConfig::ContentType content_type,
int min_transmit_bitrate_bps) {
if (!worker_queue_->IsCurrent()) {
rtc::WeakPtr<VideoSendStreamImpl> send_stream = weak_ptr_;
- worker_queue_->PostTask([send_stream, streams, content_type,
+ worker_queue_->PostTask([send_stream, streams, is_svc, content_type,
min_transmit_bitrate_bps]() mutable {
if (send_stream) {
send_stream->OnEncoderConfigurationChanged(
- std::move(streams), content_type, min_transmit_bitrate_bps);
+ std::move(streams), is_svc, content_type, min_transmit_bitrate_bps);
}
});
return;
}
+
RTC_DCHECK_GE(config_->rtp.ssrcs.size(), streams.size());
TRACE_EVENT0("webrtc", "VideoSendStream::OnEncoderConfigurationChanged");
- RTC_DCHECK_GE(config_->rtp.ssrcs.size(), streams.size());
RTC_DCHECK_RUN_ON(worker_queue_);
const VideoCodecType codec_type =
@@ -516,14 +536,9 @@ void VideoSendStreamImpl::OnEncoderConfigurationChanged(
encoder_max_bitrate_bps_);
// TODO(bugs.webrtc.org/10266): Query the VideoBitrateAllocator instead.
- if (codec_type == kVideoCodecVP9) {
- max_padding_bitrate_ = has_alr_probing_ ? streams[0].min_bitrate_bps
- : streams[0].target_bitrate_bps;
- } else {
- max_padding_bitrate_ = CalculateMaxPadBitrateBps(
- streams, content_type, min_transmit_bitrate_bps,
- config_->suspend_below_min_bitrate, has_alr_probing_);
- }
+ max_padding_bitrate_ = CalculateMaxPadBitrateBps(
+ streams, is_svc, content_type, min_transmit_bitrate_bps,
+ config_->suspend_below_min_bitrate, has_alr_probing_);
// Clear stats for disabled layers.
for (size_t i = streams.size(); i < config_->rtp.ssrcs.size(); ++i) {
@@ -624,29 +639,30 @@ uint32_t VideoSendStreamImpl::OnBitrateUpdated(BitrateAllocationUpdate update) {
DataRate link_allocation = DataRate::Zero();
if (encoder_target_rate_bps_ > protection_bitrate_bps) {
link_allocation =
- DataRate::bps(encoder_target_rate_bps_ - protection_bitrate_bps);
+ DataRate::BitsPerSec(encoder_target_rate_bps_ - protection_bitrate_bps);
}
DataRate overhead =
- update.target_bitrate - DataRate::bps(encoder_target_rate_bps_);
+ update.target_bitrate - DataRate::BitsPerSec(encoder_target_rate_bps_);
DataRate encoder_stable_target_rate = update.stable_target_bitrate;
if (encoder_stable_target_rate > overhead) {
encoder_stable_target_rate = encoder_stable_target_rate - overhead;
} else {
- encoder_stable_target_rate = DataRate::bps(encoder_target_rate_bps_);
+ encoder_stable_target_rate = DataRate::BitsPerSec(encoder_target_rate_bps_);
}
encoder_target_rate_bps_ =
std::min(encoder_max_bitrate_bps_, encoder_target_rate_bps_);
- encoder_stable_target_rate = std::min(DataRate::bps(encoder_max_bitrate_bps_),
- encoder_stable_target_rate);
+ encoder_stable_target_rate =
+ std::min(DataRate::BitsPerSec(encoder_max_bitrate_bps_),
+ encoder_stable_target_rate);
- DataRate encoder_target_rate = DataRate::bps(encoder_target_rate_bps_);
+ DataRate encoder_target_rate = DataRate::BitsPerSec(encoder_target_rate_bps_);
link_allocation = std::max(encoder_target_rate, link_allocation);
video_stream_encoder_->OnBitrateUpdated(
encoder_target_rate, encoder_stable_target_rate, link_allocation,
rtc::dchecked_cast<uint8_t>(update.packet_loss_ratio * 256),
- update.round_trip_time.ms());
+ update.round_trip_time.ms(), update.cwnd_reduce_ratio);
stats_proxy_->OnSetEncoderTargetRate(encoder_target_rate_bps_);
return protection_bitrate_bps;
}
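
The overhead bookkeeping in OnBitrateUpdated() is easier to follow with concrete numbers; a short walk-through with illustrative rates only:

#include <cstdint>
#include <cstdio>

int main() {
  // Suppose the allocator reports 1'100'000 bps while the encoder (payload)
  // target is 1'000'000 bps, so 100'000 bps is transport overhead.
  int64_t encoder_target_bps = 1'000'000;
  int64_t allocator_target_bps = 1'100'000;
  int64_t overhead_bps = allocator_target_bps - encoder_target_bps;
  // A stable target of 1'050'000 bps then maps to 950'000 bps for the
  // encoder; had it been <= the overhead, the code would fall back to the
  // encoder target instead.
  int64_t stable_target_bps = 1'050'000;
  int64_t encoder_stable_bps = stable_target_bps > overhead_bps
                                   ? stable_target_bps - overhead_bps
                                   : encoder_target_bps;
  std::printf("overhead=%lld encoder_stable=%lld\n",
              static_cast<long long>(overhead_bps),
              static_cast<long long>(encoder_stable_bps));
}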
diff --git a/chromium/third_party/webrtc/video/video_send_stream_impl.h b/chromium/third_party/webrtc/video/video_send_stream_impl.h
index 4195efcf824..d3f87e3bf31 100644
--- a/chromium/third_party/webrtc/video/video_send_stream_impl.h
+++ b/chromium/third_party/webrtc/video/video_send_stream_impl.h
@@ -116,6 +116,7 @@ class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver,
void OnEncoderConfigurationChanged(
std::vector<VideoStream> streams,
+ bool is_svc,
VideoEncoderConfig::ContentType content_type,
int min_transmit_bitrate_bps) override;
diff --git a/chromium/third_party/webrtc/video/video_send_stream_impl_unittest.cc b/chromium/third_party/webrtc/video/video_send_stream_impl_unittest.cc
index 1c44cc8dd49..cad5de73ac2 100644
--- a/chromium/third_party/webrtc/video/video_send_stream_impl_unittest.cc
+++ b/chromium/third_party/webrtc/video/video_send_stream_impl_unittest.cc
@@ -10,6 +10,7 @@
#include "video/video_send_stream_impl.h"
+#include <algorithm>
#include <memory>
#include <string>
@@ -42,6 +43,8 @@ bool operator==(const BitrateAllocationUpdate& a,
namespace internal {
namespace {
using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Field;
using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::Return;
@@ -88,7 +91,7 @@ class MockRtpVideoSender : public RtpVideoSenderInterface {
BitrateAllocationUpdate CreateAllocation(int bitrate_bps) {
BitrateAllocationUpdate update;
- update.target_bitrate = DataRate::bps(bitrate_bps);
+ update.target_bitrate = DataRate::BitsPerSec(bitrate_bps);
update.packet_loss_ratio = 0;
update.round_trip_time = TimeDelta::Zero();
return update;
@@ -241,7 +244,7 @@ TEST_F(VideoSendStreamImplTest, UpdatesObserverOnConfigurationChange) {
static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
->OnEncoderConfigurationChanged(
- std::vector<VideoStream>{qvga_stream, vga_stream},
+ std::vector<VideoStream>{qvga_stream, vga_stream}, false,
VideoEncoderConfig::ContentType::kRealtimeVideo,
min_transmit_bitrate_bps);
vss_impl->Stop();
@@ -309,7 +312,7 @@ TEST_F(VideoSendStreamImplTest, UpdatesObserverOnConfigurationChangeWithAlr) {
static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
->OnEncoderConfigurationChanged(
- std::vector<VideoStream>{low_stream, high_stream},
+ std::vector<VideoStream>{low_stream, high_stream}, false,
VideoEncoderConfig::ContentType::kScreen,
min_transmit_bitrate_bps);
vss_impl->Stop();
@@ -371,7 +374,7 @@ TEST_F(VideoSendStreamImplTest,
static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
->OnEncoderConfigurationChanged(
- std::vector<VideoStream>{low_stream, high_stream},
+ std::vector<VideoStream>{low_stream, high_stream}, false,
VideoEncoderConfig::ContentType::kRealtimeVideo,
/*min_transmit_bitrate_bps=*/0);
vss_impl->Stop();
@@ -690,31 +693,31 @@ TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) {
static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
->OnEncoderConfigurationChanged(
- std::vector<VideoStream>{qvga_stream},
+ std::vector<VideoStream>{qvga_stream}, false,
VideoEncoderConfig::ContentType::kRealtimeVideo,
min_transmit_bitrate_bps);
const DataRate network_constrained_rate =
- DataRate::bps(qvga_stream.target_bitrate_bps);
+ DataRate::BitsPerSec(qvga_stream.target_bitrate_bps);
BitrateAllocationUpdate update;
update.target_bitrate = network_constrained_rate;
update.stable_target_bitrate = network_constrained_rate;
- update.round_trip_time = TimeDelta::ms(1);
+ update.round_trip_time = TimeDelta::Millis(1);
EXPECT_CALL(rtp_video_sender_, OnBitrateUpdated(update, _));
EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
.WillOnce(Return(network_constrained_rate.bps()));
EXPECT_CALL(
video_stream_encoder_,
OnBitrateUpdated(network_constrained_rate, network_constrained_rate,
- network_constrained_rate, 0, _));
+ network_constrained_rate, 0, _, 0));
static_cast<BitrateAllocatorObserver*>(vss_impl.get())
->OnBitrateUpdated(update);
// Test allocation where the link allocation is larger than the target,
// meaning we have some headroom on the link.
const DataRate qvga_max_bitrate =
- DataRate::bps(qvga_stream.max_bitrate_bps);
- const DataRate headroom = DataRate::bps(50000);
+ DataRate::BitsPerSec(qvga_stream.max_bitrate_bps);
+ const DataRate headroom = DataRate::BitsPerSec(50000);
const DataRate rate_with_headroom = qvga_max_bitrate + headroom;
update.target_bitrate = rate_with_headroom;
update.stable_target_bitrate = rate_with_headroom;
@@ -723,7 +726,7 @@ TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) {
.WillOnce(Return(rate_with_headroom.bps()));
EXPECT_CALL(video_stream_encoder_,
OnBitrateUpdated(qvga_max_bitrate, qvga_max_bitrate,
- rate_with_headroom, 0, _));
+ rate_with_headroom, 0, _, 0));
static_cast<BitrateAllocatorObserver*>(vss_impl.get())
->OnBitrateUpdated(update);
@@ -737,10 +740,10 @@ TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) {
EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps())
.WillOnce(Return(rate_with_headroom.bps()));
const DataRate headroom_minus_protection =
- rate_with_headroom - DataRate::bps(protection_bitrate_bps);
+ rate_with_headroom - DataRate::BitsPerSec(protection_bitrate_bps);
EXPECT_CALL(video_stream_encoder_,
OnBitrateUpdated(qvga_max_bitrate, qvga_max_bitrate,
- headroom_minus_protection, 0, _));
+ headroom_minus_protection, 0, _, 0));
static_cast<BitrateAllocatorObserver*>(vss_impl.get())
->OnBitrateUpdated(update);
@@ -753,14 +756,14 @@ TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) {
.WillOnce(Return(rate_with_headroom.bps()));
EXPECT_CALL(video_stream_encoder_,
OnBitrateUpdated(qvga_max_bitrate, qvga_max_bitrate,
- qvga_max_bitrate, 0, _));
+ qvga_max_bitrate, 0, _, 0));
static_cast<BitrateAllocatorObserver*>(vss_impl.get())
->OnBitrateUpdated(update);
// Set rates to zero on stop.
EXPECT_CALL(video_stream_encoder_,
OnBitrateUpdated(DataRate::Zero(), DataRate::Zero(),
- DataRate::Zero(), 0, 0));
+ DataRate::Zero(), 0, 0, 0));
vss_impl->Stop();
},
RTC_FROM_HERE);
@@ -816,7 +819,7 @@ TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) {
// Reconfigure e.g. due to a fake frame.
static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
->OnEncoderConfigurationChanged(
- std::vector<VideoStream>{qvga_stream},
+ std::vector<VideoStream>{qvga_stream}, false,
VideoEncoderConfig::ContentType::kRealtimeVideo,
min_transmit_bitrate_bps);
// Still no padding because no actual frames were passed, only
@@ -893,5 +896,114 @@ TEST_F(VideoSendStreamImplTest, KeepAliveOnDroppedFrame) {
ASSERT_TRUE(done.Wait(5000));
}
+TEST_F(VideoSendStreamImplTest, ConfiguresBitratesForSvc) {
+ struct TestConfig {
+ bool screenshare = false;
+ bool alr = false;
+ int min_padding_bitrate_bps = 0;
+ };
+
+ std::vector<TestConfig> test_variants;
+ for (bool screenshare : {false, true}) {
+ for (bool alr : {false, true}) {
+ for (int min_padding : {0, 400000}) {
+ test_variants.push_back({screenshare, alr, min_padding});
+ }
+ }
+ }
+
+ for (const TestConfig& test_config : test_variants) {
+ test_queue_.SendTask(
+ [this, test_config] {
+ const bool kSuspend = false;
+ config_.suspend_below_min_bitrate = kSuspend;
+ config_.rtp.extensions.emplace_back(
+ RtpExtension::kTransportSequenceNumberUri, 1);
+ config_.periodic_alr_bandwidth_probing = test_config.alr;
+ auto vss_impl = CreateVideoSendStreamImpl(
+ kDefaultInitialBitrateBps, kDefaultBitratePriority,
+ test_config.screenshare
+ ? VideoEncoderConfig::ContentType::kScreen
+ : VideoEncoderConfig::ContentType::kRealtimeVideo);
+ vss_impl->Start();
+
+ // SVC: one stream carrying all spatial layers.
+ VideoStream stream;
+ stream.width = 1920;
+ stream.height = 1080;
+ stream.max_framerate = 30;
+ stream.min_bitrate_bps = 60000;
+ stream.target_bitrate_bps = 6000000;
+ stream.max_bitrate_bps = 1250000;
+ stream.num_temporal_layers = 2;
+ stream.max_qp = 56;
+ stream.bitrate_priority = 1;
+
+ config_.rtp.ssrcs.emplace_back(1);
+ config_.rtp.ssrcs.emplace_back(2);
+
+ EXPECT_CALL(
+ bitrate_allocator_,
+ AddObserver(
+ vss_impl.get(),
+ AllOf(Field(&MediaStreamAllocationConfig::min_bitrate_bps,
+ static_cast<uint32_t>(stream.min_bitrate_bps)),
+ Field(&MediaStreamAllocationConfig::max_bitrate_bps,
+ static_cast<uint32_t>(stream.max_bitrate_bps)),
+ // Stream not yet active - no padding.
+ Field(&MediaStreamAllocationConfig::pad_up_bitrate_bps,
+ 0u),
+ Field(&MediaStreamAllocationConfig::enforce_min_bitrate,
+ !kSuspend))));
+
+ static_cast<VideoStreamEncoderInterface::EncoderSink*>(vss_impl.get())
+ ->OnEncoderConfigurationChanged(
+ std::vector<VideoStream>{stream}, true,
+ test_config.screenshare
+ ? VideoEncoderConfig::ContentType::kScreen
+ : VideoEncoderConfig::ContentType::kRealtimeVideo,
+ test_config.min_padding_bitrate_bps);
+ ::testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_);
+
+ // Simulate an encoded image, this will turn the stream active and
+ // enable padding.
+ EncodedImage encoded_image;
+ CodecSpecificInfo codec_specific;
+ EXPECT_CALL(rtp_video_sender_, OnEncodedImage)
+ .WillRepeatedly(Return(EncodedImageCallback::Result(
+ EncodedImageCallback::Result::OK)));
+
+ // Screensharing implicitly forces ALR.
+ const bool using_alr = test_config.alr || test_config.screenshare;
+ // If ALR is used, pad only up to the min bitrate; probing handles the
+ // rest of the ramp-up. Otherwise target_bitrate holds the padding target.
+ int expected_padding =
+ using_alr ? stream.min_bitrate_bps : stream.target_bitrate_bps;
+ // Min padding bitrate may override padding target.
+ expected_padding =
+ std::max(expected_padding, test_config.min_padding_bitrate_bps);
+ EXPECT_CALL(
+ bitrate_allocator_,
+ AddObserver(
+ vss_impl.get(),
+ AllOf(Field(&MediaStreamAllocationConfig::min_bitrate_bps,
+ static_cast<uint32_t>(stream.min_bitrate_bps)),
+ Field(&MediaStreamAllocationConfig::max_bitrate_bps,
+ static_cast<uint32_t>(stream.max_bitrate_bps)),
+ // Stream now active - min bitrate used as padding target
+ // when ALR is active.
+ Field(&MediaStreamAllocationConfig::pad_up_bitrate_bps,
+ expected_padding),
+ Field(&MediaStreamAllocationConfig::enforce_min_bitrate,
+ !kSuspend))));
+ static_cast<EncodedImageCallback*>(vss_impl.get())
+ ->OnEncodedImage(encoded_image, &codec_specific, nullptr);
+ ::testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_);
+
+ vss_impl->Stop();
+ },
+ RTC_FROM_HERE);
+ }
+}
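
The expected_padding values the new test asserts can be tabulated directly. A standalone sketch reproducing the test's expectation for each variant (constants copied from the VideoStream configured above):

#include <algorithm>
#include <cstdio>
#include <initializer_list>

int main() {
  const int kMinBps = 60'000;        // stream.min_bitrate_bps in the test
  const int kTargetBps = 6'000'000;  // stream.target_bitrate_bps in the test
  for (bool using_alr : {false, true}) {
    for (int min_padding : {0, 400'000}) {
      // With ALR, pad only to the min bitrate; otherwise to the target.
      // The configured min padding bitrate may override either.
      int expected = std::max(using_alr ? kMinBps : kTargetBps, min_padding);
      std::printf("alr=%d min_padding=%d -> expected_padding=%d\n",
                  using_alr, min_padding, expected);
    }
  }
  // Only the (alr=1, min_padding=400000) case is decided by the override.
}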
} // namespace internal
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/video_send_stream_tests.cc b/chromium/third_party/webrtc/video/video_send_stream_tests.cc
index 0d51cbd0070..e38653831b8 100644
--- a/chromium/third_party/webrtc/video/video_send_stream_tests.cc
+++ b/chromium/third_party/webrtc/video/video_send_stream_tests.cc
@@ -24,9 +24,12 @@
#include "call/rtp_transport_controller_send.h"
#include "call/simulated_network.h"
#include "call/video_send_stream.h"
+#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "modules/rtp_rtcp/include/rtp_rtcp.h"
#include "modules/rtp_rtcp/source/rtcp_sender.h"
-#include "modules/rtp_rtcp/source/rtp_format_vp9.h"
+#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
+#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "modules/video_coding/codecs/vp9/include/vp9.h"
#include "rtc_base/checks.h"
@@ -47,7 +50,7 @@
#include "test/fake_encoder.h"
#include "test/fake_texture_frame.h"
#include "test/field_trial.h"
-#include "test/frame_generator.h"
+#include "test/frame_forwarder.h"
#include "test/frame_generator_capturer.h"
#include "test/frame_utils.h"
#include "test/gmock.h"
@@ -93,9 +96,15 @@ enum VideoFormat {
kGeneric,
kVP8,
};
-} // namespace
-VideoFrame CreateVideoFrame(int width, int height, uint8_t data);
+VideoFrame CreateVideoFrame(int width, int height, int64_t timestamp_ms) {
+ return webrtc::VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(width, height))
+ .set_rotation(webrtc::kVideoRotation_0)
+ .set_timestamp_ms(timestamp_ms)
+ .build();
+}
+} // namespace
class VideoSendStreamTest : public test::CallTest {
public:
@@ -182,18 +191,17 @@ TEST_F(VideoSendStreamTest, SupportsAbsoluteSendTime) {
class AbsoluteSendTimeObserver : public test::SendTest {
public:
AbsoluteSendTimeObserver() : SendTest(kDefaultTimeoutMs) {
- EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
- kRtpExtensionAbsoluteSendTime, kAbsSendTimeExtensionId));
+ extensions_.Register<AbsoluteSendTime>(kAbsSendTimeExtensionId);
}
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
- EXPECT_FALSE(header.extension.hasTransmissionTimeOffset);
- EXPECT_TRUE(header.extension.hasAbsoluteSendTime);
- EXPECT_EQ(header.extension.transmissionTimeOffset, 0);
- if (header.extension.absoluteSendTime != 0) {
+ uint32_t abs_send_time = 0;
+ EXPECT_FALSE(rtp_packet.HasExtension<TransmissionOffset>());
+ EXPECT_TRUE(rtp_packet.GetExtension<AbsoluteSendTime>(&abs_send_time));
+ if (abs_send_time != 0) {
// Wait for at least one packet with a non-zero send time. The send time
// is a 24-bit value derived from the system clock, and it is valid
// for a packet to have a zero send time. To tell that from an
@@ -220,6 +228,9 @@ TEST_F(VideoSendStreamTest, SupportsAbsoluteSendTime) {
void PerformTest() override {
EXPECT_TRUE(Wait()) << "Timed out while waiting for single RTP packet.";
}
+
+ private:
+ RtpHeaderExtensionMap extensions_;
} test;
RunBaseTest(&test);
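
The same parsing migration recurs throughout this file: RTPHeader plus RtpHeaderParser is replaced by RtpPacket backed by an RtpHeaderExtensionMap. Condensed to its essentials, based on the hunks in this diff (the wrapper function and extension ID are illustrative):

    #include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
    #include "modules/rtp_rtcp/source/rtp_header_extensions.h"
    #include "modules/rtp_rtcp/source/rtp_packet.h"

    void InspectPacket(const uint8_t* packet, size_t length) {
      // Register the expected extensions once, typically in the constructor.
      webrtc::RtpHeaderExtensionMap extensions;
      extensions.Register<webrtc::AbsoluteSendTime>(/*id=*/7);
      // Parse each outgoing packet against that map, then query typed values.
      webrtc::RtpPacket rtp_packet(&extensions);
      if (!rtp_packet.Parse(packet, length))
        return;
      uint32_t abs_send_time = 0;
      if (rtp_packet.GetExtension<webrtc::AbsoluteSendTime>(&abs_send_time)) {
        // abs_send_time carries the 24-bit 6.18 fixed-point send time.
      }
    }
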
@@ -234,19 +245,18 @@ TEST_F(VideoSendStreamTest, SupportsTransmissionTimeOffset) {
return std::make_unique<test::DelayedEncoder>(
Clock::GetRealTimeClock(), kEncodeDelayMs);
}) {
- EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
- kRtpExtensionTransmissionTimeOffset, kTimestampOffsetExtensionId));
+ extensions_.Register<TransmissionOffset>(kTimestampOffsetExtensionId);
}
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
- EXPECT_TRUE(header.extension.hasTransmissionTimeOffset);
- EXPECT_FALSE(header.extension.hasAbsoluteSendTime);
- EXPECT_GT(header.extension.transmissionTimeOffset, 0);
- EXPECT_EQ(header.extension.absoluteSendTime, 0u);
+ int32_t toffset = 0;
+ EXPECT_TRUE(rtp_packet.GetExtension<TransmissionOffset>(&toffset));
+ EXPECT_FALSE(rtp_packet.HasExtension<AbsoluteSendTime>());
+ EXPECT_GT(toffset, 0);
observation_complete_.Set();
return SEND_PACKET;
@@ -267,6 +277,7 @@ TEST_F(VideoSendStreamTest, SupportsTransmissionTimeOffset) {
}
test::FunctionVideoEncoderFactory encoder_factory_;
+ RtpHeaderExtensionMap extensions_;
} test;
RunBaseTest(&test);
@@ -281,18 +292,17 @@ TEST_F(VideoSendStreamTest, SupportsTransportWideSequenceNumbers) {
return std::make_unique<test::FakeEncoder>(
Clock::GetRealTimeClock());
}) {
- EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
- kRtpExtensionTransportSequenceNumber, kExtensionId));
+ extensions_.Register<TransportSequenceNumber>(kExtensionId);
}
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
- EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
- EXPECT_FALSE(header.extension.hasTransmissionTimeOffset);
- EXPECT_FALSE(header.extension.hasAbsoluteSendTime);
+ EXPECT_TRUE(rtp_packet.HasExtension<TransportSequenceNumber>());
+ EXPECT_FALSE(rtp_packet.HasExtension<TransmissionOffset>());
+ EXPECT_FALSE(rtp_packet.HasExtension<AbsoluteSendTime>());
observation_complete_.Set();
@@ -311,6 +321,7 @@ TEST_F(VideoSendStreamTest, SupportsTransportWideSequenceNumbers) {
}
test::FunctionVideoEncoderFactory encoder_factory_;
+ RtpHeaderExtensionMap extensions_;
} test;
RunBaseTest(&test);
@@ -320,18 +331,16 @@ TEST_F(VideoSendStreamTest, SupportsVideoRotation) {
class VideoRotationObserver : public test::SendTest {
public:
VideoRotationObserver() : SendTest(kDefaultTimeoutMs) {
- EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
- kRtpExtensionVideoRotation, kVideoRotationExtensionId));
+ extensions_.Register<VideoOrientation>(kVideoRotationExtensionId);
}
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
// Only the last packet of the frame is required to have the extension.
- if (!header.markerBit)
+ if (!rtp_packet.Marker())
return SEND_PACKET;
- EXPECT_TRUE(header.extension.hasVideoRotation);
- EXPECT_EQ(kVideoRotation_90, header.extension.videoRotation);
+ EXPECT_EQ(rtp_packet.GetExtension<VideoOrientation>(), kVideoRotation_90);
observation_complete_.Set();
return SEND_PACKET;
}
@@ -353,6 +362,9 @@ TEST_F(VideoSendStreamTest, SupportsVideoRotation) {
void PerformTest() override {
EXPECT_TRUE(Wait()) << "Timed out while waiting for single RTP packet.";
}
+
+ private:
+ RtpHeaderExtensionMap extensions_;
} test;
RunBaseTest(&test);
@@ -363,21 +375,21 @@ TEST_F(VideoSendStreamTest, SupportsVideoContentType) {
public:
VideoContentTypeObserver()
: SendTest(kDefaultTimeoutMs), first_frame_sent_(false) {
- EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
- kRtpExtensionVideoContentType, kVideoContentTypeExtensionId));
+ extensions_.Register<VideoContentTypeExtension>(
+ kVideoContentTypeExtensionId);
}
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
// Only the last packet of the key-frame must have the extension.
- if (!header.markerBit || first_frame_sent_)
+ if (!rtp_packet.Marker() || first_frame_sent_)
return SEND_PACKET;
// First marker bit seen means that the first frame is sent.
first_frame_sent_ = true;
- EXPECT_TRUE(header.extension.hasVideoContentType);
- EXPECT_TRUE(videocontenttypehelpers::IsScreenshare(
- header.extension.videoContentType));
+ VideoContentType type;
+ EXPECT_TRUE(rtp_packet.GetExtension<VideoContentTypeExtension>(&type));
+ EXPECT_TRUE(videocontenttypehelpers::IsScreenshare(type));
observation_complete_.Set();
return SEND_PACKET;
}
@@ -398,6 +410,7 @@ TEST_F(VideoSendStreamTest, SupportsVideoContentType) {
private:
bool first_frame_sent_;
+ RtpHeaderExtensionMap extensions_;
} test;
RunBaseTest(&test);
@@ -408,19 +421,18 @@ TEST_F(VideoSendStreamTest, SupportsVideoTimingFrames) {
public:
VideoTimingObserver()
: SendTest(kDefaultTimeoutMs), first_frame_sent_(false) {
- EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(kRtpExtensionVideoTiming,
- kVideoTimingExtensionId));
+ extensions_.Register<VideoTimingExtension>(kVideoTimingExtensionId);
}
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
// Only the last packet of the frame must have the extension.
// Also don't check packets of the second frame if they happen to get
// through before the test terminates.
- if (!header.markerBit || first_frame_sent_)
+ if (!rtp_packet.Marker() || first_frame_sent_)
return SEND_PACKET;
- EXPECT_TRUE(header.extension.has_video_timing);
+ EXPECT_TRUE(rtp_packet.HasExtension<VideoTimingExtension>());
observation_complete_.Set();
first_frame_sent_ = true;
return SEND_PACKET;
@@ -440,6 +452,7 @@ TEST_F(VideoSendStreamTest, SupportsVideoTimingFrames) {
}
private:
+ RtpHeaderExtensionMap extensions_;
bool first_frame_sent_;
} test;
@@ -489,21 +502,20 @@ class UlpfecObserver : public test::EndToEndTest {
sent_media_(false),
sent_ulpfec_(false),
header_extensions_enabled_(header_extensions_enabled) {
- parser_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
- kAbsSendTimeExtensionId);
- parser_->RegisterRtpHeaderExtension(kRtpExtensionTransportSequenceNumber,
- kTransportSequenceNumberExtensionId);
+ extensions_.Register<AbsoluteSendTime>(kAbsSendTimeExtensionId);
+ extensions_.Register<TransportSequenceNumber>(
+ kTransportSequenceNumberExtensionId);
}
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
int encapsulated_payload_type = -1;
- if (header.payloadType == VideoSendStreamTest::kRedPayloadType) {
+ if (rtp_packet.PayloadType() == VideoSendStreamTest::kRedPayloadType) {
EXPECT_TRUE(expect_red_);
- encapsulated_payload_type = static_cast<int>(packet[header.headerLength]);
+ encapsulated_payload_type = rtp_packet.payload()[0];
if (encapsulated_payload_type !=
VideoSendStreamTest::kFakeVideoSendPayloadType) {
EXPECT_EQ(VideoSendStreamTest::kUlpfecPayloadType,
@@ -511,9 +523,8 @@ class UlpfecObserver : public test::EndToEndTest {
}
} else {
EXPECT_EQ(VideoSendStreamTest::kFakeVideoSendPayloadType,
- header.payloadType);
- if (static_cast<size_t>(header.headerLength + header.paddingLength) <
- length) {
+ rtp_packet.PayloadType());
+ if (rtp_packet.payload_size() > 0) {
// Not padding-only, media received outside of RED.
EXPECT_FALSE(expect_red_);
sent_media_ = true;
@@ -521,21 +532,27 @@ class UlpfecObserver : public test::EndToEndTest {
}
if (header_extensions_enabled_) {
- EXPECT_TRUE(header.extension.hasAbsoluteSendTime);
- uint32_t kHalf24BitsSpace = 0xFFFFFF / 2;
- if (header.extension.absoluteSendTime <= kHalf24BitsSpace &&
- prev_header_.extension.absoluteSendTime > kHalf24BitsSpace) {
- // 24 bits wrap.
- EXPECT_GT(prev_header_.extension.absoluteSendTime,
- header.extension.absoluteSendTime);
- } else {
- EXPECT_GE(header.extension.absoluteSendTime,
- prev_header_.extension.absoluteSendTime);
+ uint32_t abs_send_time;
+ EXPECT_TRUE(rtp_packet.GetExtension<AbsoluteSendTime>(&abs_send_time));
+ uint16_t transport_seq_num;
+ EXPECT_TRUE(
+ rtp_packet.GetExtension<TransportSequenceNumber>(&transport_seq_num));
+ if (!first_packet_) {
+ uint32_t kHalf24BitsSpace = 0xFFFFFF / 2;
+ if (abs_send_time <= kHalf24BitsSpace &&
+ prev_abs_send_time_ > kHalf24BitsSpace) {
+ // 24 bits wrap.
+ EXPECT_GT(prev_abs_send_time_, abs_send_time);
+ } else {
+ EXPECT_GE(abs_send_time, prev_abs_send_time_);
+ }
+
+ uint16_t seq_num_diff = transport_seq_num - prev_transport_seq_num_;
+ EXPECT_EQ(1, seq_num_diff);
}
- EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
- uint16_t seq_num_diff = header.extension.transportSequenceNumber -
- prev_header_.extension.transportSequenceNumber;
- EXPECT_EQ(1, seq_num_diff);
+ first_packet_ = false;
+ prev_abs_send_time_ = abs_send_time;
+ prev_transport_seq_num_ = transport_seq_num;
}
if (encapsulated_payload_type != -1) {
@@ -552,8 +569,6 @@ class UlpfecObserver : public test::EndToEndTest {
observation_complete_.Set();
}
- prev_header_ = header;
-
return SEND_PACKET;
}
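
The absolute-send-time comparison above has to account for the extension being only 24 bits wide, so the value wraps from 0xFFFFFF back to 0. The wrap test used there, isolated into a predicate (name illustrative):

    // Detects a 24-bit wrap: the new value landing in the lower half of the
    // space right after the previous value was in the upper half is treated
    // as a wrap rather than as time moving backwards. Example:
    //   prev = 0xFFFFF0, now = 0x000010  ->  wrapped, so now < prev is fine.
    bool AbsSendTimeWrapped(uint32_t prev, uint32_t now) {
      constexpr uint32_t kHalf24BitsSpace = 0xFFFFFF / 2;
      return now <= kHalf24BitsSpace && prev > kHalf24BitsSpace;
    }
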
@@ -609,14 +624,17 @@ class UlpfecObserver : public test::EndToEndTest {
}
VideoEncoderFactory* encoder_factory_;
- std::string payload_name_;
+ RtpHeaderExtensionMap extensions_;
+ const std::string payload_name_;
const bool use_nack_;
const bool expect_red_;
const bool expect_ulpfec_;
bool sent_media_;
bool sent_ulpfec_;
- bool header_extensions_enabled_;
- RTPHeader prev_header_;
+ const bool header_extensions_enabled_;
+ bool first_packet_ = true;
+ uint32_t prev_abs_send_time_ = 0;
+ uint16_t prev_transport_seq_num_ = 0;
};
TEST_F(VideoSendStreamTest, SupportsUlpfecWithExtensions) {
@@ -713,12 +731,10 @@ class FlexfecObserver : public test::EndToEndTest {
sent_flexfec_(false),
header_extensions_enabled_(header_extensions_enabled),
num_video_streams_(num_video_streams) {
- parser_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
- kAbsSendTimeExtensionId);
- parser_->RegisterRtpHeaderExtension(kRtpExtensionTransmissionTimeOffset,
- kTimestampOffsetExtensionId);
- parser_->RegisterRtpHeaderExtension(kRtpExtensionTransportSequenceNumber,
- kTransportSequenceNumberExtensionId);
+ extensions_.Register<AbsoluteSendTime>(kAbsSendTimeExtensionId);
+ extensions_.Register<TransmissionOffset>(kTimestampOffsetExtensionId);
+ extensions_.Register<TransportSequenceNumber>(
+ kTransportSequenceNumberExtensionId);
}
size_t GetNumFlexfecStreams() const override { return 1; }
@@ -726,25 +742,25 @@ class FlexfecObserver : public test::EndToEndTest {
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet(&extensions_);
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
- if (header.payloadType == VideoSendStreamTest::kFlexfecPayloadType) {
- EXPECT_EQ(VideoSendStreamTest::kFlexfecSendSsrc, header.ssrc);
+ if (rtp_packet.PayloadType() == VideoSendStreamTest::kFlexfecPayloadType) {
+ EXPECT_EQ(VideoSendStreamTest::kFlexfecSendSsrc, rtp_packet.Ssrc());
sent_flexfec_ = true;
} else {
EXPECT_EQ(VideoSendStreamTest::kFakeVideoSendPayloadType,
- header.payloadType);
+ rtp_packet.PayloadType());
EXPECT_THAT(::testing::make_tuple(VideoSendStreamTest::kVideoSendSsrcs,
num_video_streams_),
- ::testing::Contains(header.ssrc));
+ ::testing::Contains(rtp_packet.Ssrc()));
sent_media_ = true;
}
if (header_extensions_enabled_) {
- EXPECT_TRUE(header.extension.hasAbsoluteSendTime);
- EXPECT_TRUE(header.extension.hasTransmissionTimeOffset);
- EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
+ EXPECT_TRUE(rtp_packet.HasExtension<AbsoluteSendTime>());
+ EXPECT_TRUE(rtp_packet.HasExtension<TransmissionOffset>());
+ EXPECT_TRUE(rtp_packet.HasExtension<TransportSequenceNumber>());
}
if (sent_media_ && sent_flexfec_) {
@@ -815,7 +831,8 @@ class FlexfecObserver : public test::EndToEndTest {
}
VideoEncoderFactory* encoder_factory_;
- std::string payload_name_;
+ RtpHeaderExtensionMap extensions_;
+ const std::string payload_name_;
const bool use_nack_;
bool sent_media_;
bool sent_flexfec_;
@@ -910,15 +927,15 @@ void VideoSendStreamTest::TestNackRetransmission(
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
// NACK packets two times at some arbitrary points.
const int kNackedPacketsAtOnceCount = 3;
const int kRetransmitTarget = kNackedPacketsAtOnceCount * 2;
// Skip padding packets because they will never be retransmitted.
- if (header.paddingLength + header.headerLength == length) {
+ if (rtp_packet.payload_size() == 0) {
return SEND_PACKET;
}
@@ -949,12 +966,12 @@ void VideoSendStreamTest::TestNackRetransmission(
&nacked_sequence_numbers_.front()));
}
- uint16_t sequence_number = header.sequenceNumber;
- if (header.ssrc == retransmit_ssrc_ &&
+ uint16_t sequence_number = rtp_packet.SequenceNumber();
+ if (rtp_packet.Ssrc() == retransmit_ssrc_ &&
retransmit_ssrc_ != kVideoSendSsrcs[0]) {
// Not kVideoSendSsrcs[0], assume correct RTX packet. Extract sequence
// number.
- const uint8_t* rtx_header = packet + header.headerLength;
+ const uint8_t* rtx_header = rtp_packet.payload().data();
sequence_number = (rtx_header[0] << 8) + rtx_header[1];
}
@@ -963,8 +980,8 @@ void VideoSendStreamTest::TestNackRetransmission(
nacked_sequence_numbers_.erase(found);
if (++retransmit_count_ == kRetransmitTarget) {
- EXPECT_EQ(retransmit_ssrc_, header.ssrc);
- EXPECT_EQ(retransmit_payload_type_, header.payloadType);
+ EXPECT_EQ(retransmit_ssrc_, rtp_packet.Ssrc());
+ EXPECT_EQ(retransmit_payload_type_, rtp_packet.PayloadType());
observation_complete_.Set();
}
} else {
@@ -994,8 +1011,8 @@ void VideoSendStreamTest::TestNackRetransmission(
std::unique_ptr<internal::TransportAdapter> transport_adapter_;
int send_count_;
int retransmit_count_;
- uint32_t retransmit_ssrc_;
- uint8_t retransmit_payload_type_;
+ const uint32_t retransmit_ssrc_;
+ const uint8_t retransmit_payload_type_;
std::vector<uint16_t> nacked_sequence_numbers_;
std::vector<uint16_t> non_padding_sequence_numbers_;
} test(retransmit_ssrc, retransmit_payload_type);
@@ -1057,14 +1074,14 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
private:
Action OnSendRtp(const uint8_t* packet, size_t size) override {
size_t length = size;
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
EXPECT_LE(length, max_packet_size_);
- if (use_fec_) {
- uint8_t payload_type = packet[header.headerLength];
- bool is_fec = header.payloadType == kRedPayloadType &&
+ if (use_fec_ && rtp_packet.payload_size() > 0) {
+ uint8_t payload_type = rtp_packet.payload()[0];
+ bool is_fec = rtp_packet.PayloadType() == kRedPayloadType &&
payload_type == kUlpfecPayloadType;
if (is_fec) {
fec_packet_received_ = true;
@@ -1075,10 +1092,10 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
accumulated_size_ += length;
if (use_fec_)
- TriggerLossReport(header);
+ TriggerLossReport(rtp_packet);
if (test_generic_packetization_) {
- size_t overhead = header.headerLength + header.paddingLength;
+ size_t overhead = rtp_packet.headers_size() + rtp_packet.padding_size();
// Only remove payload header and RED header if the packet actually
// contains payload.
if (length > overhead) {
@@ -1091,7 +1108,7 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
}
// Marker bit set indicates last packet of a frame.
- if (header.markerBit) {
+ if (rtp_packet.Marker()) {
if (use_fec_ && accumulated_payload_ == current_size_rtp_ - 1) {
// With FEC enabled, frame size is incremented asynchronously, so
// "old" frames one byte too small may arrive. Accept, but don't
@@ -1132,7 +1149,7 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
return SEND_PACKET;
}
- void TriggerLossReport(const RTPHeader& header) {
+ void TriggerLossReport(const RtpPacket& rtp_packet) {
// Send lossy receive reports to trigger FEC enabling.
const int kLossPercent = 5;
if (++packet_count_ % (100 / kLossPercent) == 0) {
@@ -1144,7 +1161,7 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
uint8_t loss_ratio =
static_cast<uint8_t>(loss_delta * 255 / packets_delta);
FakeReceiveStatistics lossy_receive_stats(
- kVideoSendSsrcs[0], header.sequenceNumber,
+ kVideoSendSsrcs[0], rtp_packet.SequenceNumber(),
packets_lost_, // Cumulative lost.
loss_ratio); // Loss percent.
RtpRtcp::Configuration config;
@@ -1309,23 +1326,23 @@ TEST_F(VideoSendStreamTest, SuspendBelowMinBitrate) {
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock(&crit_);
++rtp_count_;
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
- last_sequence_number_ = header.sequenceNumber;
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
+ last_sequence_number_ = rtp_packet.SequenceNumber();
if (test_state_ == kBeforeSuspend) {
// The stream has started. Try to suspend it.
SendRtcpFeedback(low_remb_bps_);
test_state_ = kDuringSuspend;
} else if (test_state_ == kDuringSuspend) {
- if (header.paddingLength == 0) {
+ if (rtp_packet.padding_size() == 0) {
// Received non-padding packet during suspension period. Reset the
// counter.
suspended_frame_count_ = 0;
}
SendRtcpFeedback(0); // REMB is only sent if value is > 0.
} else if (test_state_ == kWaitingForPacket) {
- if (header.paddingLength == 0) {
+ if (rtp_packet.padding_size() == 0) {
// Non-padding packet observed. Test is almost complete. Will just
// have to wait for the stats to change.
test_state_ = kWaitingForStats;
@@ -1441,7 +1458,6 @@ TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) {
NoPaddingWhenVideoIsMuted()
: SendTest(kDefaultTimeoutMs),
clock_(Clock::GetRealTimeClock()),
- last_packet_time_ms_(-1),
capturer_(nullptr) {}
private:
@@ -1449,10 +1465,9 @@ TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) {
rtc::CritScope lock(&crit_);
last_packet_time_ms_ = clock_->TimeInMilliseconds();
- RTPHeader header;
- parser_->Parse(packet, length, &header);
- const bool only_padding =
- header.headerLength + header.paddingLength == length;
+ RtpPacket rtp_packet;
+ rtp_packet.Parse(packet, length);
+ const bool only_padding = rtp_packet.payload_size() == 0;
if (test_state_ == kBeforeStopCapture) {
// Packets are flowing, stop camera.
@@ -1478,8 +1493,8 @@ TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) {
rtc::CritScope lock(&crit_);
const int kNoPacketsThresholdMs = 2000;
if (test_state_ == kWaitingForNoPackets &&
- (last_packet_time_ms_ > 0 &&
- clock_->TimeInMilliseconds() - last_packet_time_ms_ >
+ (last_packet_time_ms_ &&
+ clock_->TimeInMilliseconds() - last_packet_time_ms_.value() >
kNoPacketsThresholdMs)) {
// No packets seen for |kNoPacketsThresholdMs|, restart camera.
capturer_->Start();
@@ -1517,9 +1532,8 @@ TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) {
TestState test_state_ = kBeforeStopCapture;
Clock* const clock_;
- std::unique_ptr<internal::TransportAdapter> transport_adapter_;
rtc::CriticalSection crit_;
- int64_t last_packet_time_ms_ RTC_GUARDED_BY(crit_);
+ absl::optional<int64_t> last_packet_time_ms_ RTC_GUARDED_BY(crit_);
test::FrameGeneratorCapturer* capturer_ RTC_GUARDED_BY(crit_);
} test;
@@ -1545,9 +1559,9 @@ TEST_F(VideoSendStreamTest, PaddingIsPrimarilyRetransmissions) {
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock(&crit_);
- RTPHeader header;
- parser_->Parse(packet, length, &header);
- padding_length_ += header.paddingLength;
+ RtpPacket rtp_packet;
+ rtp_packet.Parse(packet, length);
+ padding_length_ += rtp_packet.padding_size();
total_length_ += length;
return SEND_PACKET;
}
@@ -1624,8 +1638,8 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) {
if (RtpHeaderParser::IsRtcp(packet, length))
return DROP_PACKET;
- RTPHeader header;
- if (!parser_->Parse(packet, length, &header))
+ RtpPacket rtp_packet;
+ if (!rtp_packet.Parse(packet, length))
return DROP_PACKET;
RTC_DCHECK(stream_);
VideoSendStream::Stats stats = stream_->GetStats();
@@ -1637,8 +1651,7 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) {
"bitrate_bps", static_cast<size_t>(total_bitrate_bps),
"bps", false);
if (total_bitrate_bps > kHighBitrateBps) {
- rtp_rtcp_->SetRemb(kRembBitrateBps,
- std::vector<uint32_t>(1, header.ssrc));
+ rtp_rtcp_->SetRemb(kRembBitrateBps, {rtp_packet.Ssrc()});
rtp_rtcp_->Process();
bitrate_capped_ = true;
} else if (bitrate_capped_ &&
@@ -1699,8 +1712,13 @@ TEST_F(VideoSendStreamTest, ChangingNetworkRoute) {
call_(nullptr) {
module_process_thread_.Detach();
task_queue_thread_.Detach();
- EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
- kRtpExtensionTransportSequenceNumber, kExtensionId));
+ extensions_.Register<TransportSequenceNumber>(kExtensionId);
+ }
+
+ ~ChangingNetworkRouteTest() {
+ // Block until all already posted tasks run, to avoid a use-after-free
+ // when such a task accesses |this|.
+ SendTask(RTC_FROM_HERE, task_queue_, [] {});
+ }
void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
@@ -1754,8 +1772,8 @@ TEST_F(VideoSendStreamTest, ChangingNetworkRoute) {
void PerformTest() override {
rtc::NetworkRoute new_route;
new_route.connected = true;
- new_route.local_network_id = 10;
- new_route.remote_network_id = 20;
+ new_route.local = rtc::RouteEndpoint::CreateWithNetworkId(10);
+ new_route.remote = rtc::RouteEndpoint::CreateWithNetworkId(20);
BitrateConstraints bitrate_config;
SendTask(RTC_FROM_HERE, task_queue_,
@@ -1781,7 +1799,8 @@ TEST_F(VideoSendStreamTest, ChangingNetworkRoute) {
// TODO(holmer): We should set the last sent packet id here and
// verify that we correctly ignore any packet loss reported prior to
// that id.
- ++new_route.local_network_id;
+ new_route.local = rtc::RouteEndpoint::CreateWithNetworkId(
+ new_route.local.network_id() + 1);
call_->GetTransportControllerSend()->OnNetworkRouteChanged(
"transport", new_route);
EXPECT_GE(call_->GetStats().send_bandwidth_bps, kStartBitrateBps);
@@ -1792,7 +1811,113 @@ TEST_F(VideoSendStreamTest, ChangingNetworkRoute) {
webrtc::SequenceChecker module_process_thread_;
webrtc::SequenceChecker task_queue_thread_;
TaskQueueBase* const task_queue_;
+ RtpHeaderExtensionMap extensions_;
+ Call* call_ RTC_GUARDED_BY(task_queue_thread_);
+ } test(task_queue());
+
+ RunBaseTest(&test);
+}
+
+// Test that if specified, relay cap is lifted on transition to direct
+// connection.
+TEST_F(VideoSendStreamTest, RelayToDirectRoute) {
+ static const int kStartBitrateBps = 300000;
+ static const int kRelayBandwidthCapBps = 800000;
+ static const int kMinPacketsToSend = 100;
+ webrtc::test::ScopedFieldTrials field_trials(
+ std::string(field_trial::GetFieldTrialString()) +
+ "WebRTC-Bwe-NetworkRouteConstraints/relay_cap:" +
+ std::to_string(kRelayBandwidthCapBps) + "bps/");
+
+ class RelayToDirectRouteTest : public test::EndToEndTest {
+ public:
+ explicit RelayToDirectRouteTest(TaskQueueBase* task_queue)
+ : EndToEndTest(test::CallTest::kDefaultTimeoutMs),
+ task_queue_(task_queue),
+ call_(nullptr),
+ packets_sent_(0),
+ relayed_phase_(true) {
+ module_process_thread_.Detach();
+ task_queue_thread_.Detach();
+ }
+
+ ~RelayToDirectRouteTest() {
+ // Block until all already posted tasks run, to avoid a use-after-free
+ // when such a task accesses |this|.
+ SendTask(RTC_FROM_HERE, task_queue_, [] {});
+ }
+
+ void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ RTC_DCHECK(!call_);
+ call_ = sender_call;
+ }
+
+ Action OnSendRtp(const uint8_t* packet, size_t length) override {
+ RTC_DCHECK_RUN_ON(&module_process_thread_);
+ task_queue_->PostTask(ToQueuedTask([this]() {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ if (!call_)
+ return;
+ bool had_time_to_exceed_cap_in_relayed_phase =
+ relayed_phase_ && ++packets_sent_ > kMinPacketsToSend;
+ bool did_exceed_cap =
+ call_->GetStats().send_bandwidth_bps > kRelayBandwidthCapBps;
+ if (did_exceed_cap || had_time_to_exceed_cap_in_relayed_phase)
+ observation_complete_.Set();
+ }));
+ return SEND_PACKET;
+ }
+
+ void OnStreamsStopped() override {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ call_ = nullptr;
+ }
+
+ void PerformTest() override {
+ rtc::NetworkRoute route;
+ route.connected = true;
+ route.local = rtc::RouteEndpoint::CreateWithNetworkId(10);
+ route.remote = rtc::RouteEndpoint::CreateWithNetworkId(20);
+
+ SendTask(RTC_FROM_HERE, task_queue_, [this, &route]() {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ relayed_phase_ = true;
+ route.remote = route.remote.CreateWithTurn(true);
+ call_->GetTransportControllerSend()->OnNetworkRouteChanged("transport",
+ route);
+ BitrateConstraints bitrate_config;
+ bitrate_config.start_bitrate_bps = kStartBitrateBps;
+
+ call_->GetTransportControllerSend()->SetSdpBitrateParameters(
+ bitrate_config);
+ });
+
+ EXPECT_TRUE(Wait())
+ << "Timeout waiting for sufficient packets sent count.";
+
+ SendTask(RTC_FROM_HERE, task_queue_, [this, &route]() {
+ RTC_DCHECK_RUN_ON(&task_queue_thread_);
+ EXPECT_LE(call_->GetStats().send_bandwidth_bps, kRelayBandwidthCapBps);
+
+ route.remote = route.remote.CreateWithTurn(false);
+ call_->GetTransportControllerSend()->OnNetworkRouteChanged("transport",
+ route);
+ relayed_phase_ = false;
+ observation_complete_.Reset();
+ });
+
+ EXPECT_TRUE(Wait())
+ << "Timeout while waiting for bandwidth to outgrow relay cap.";
+ }
+
+ private:
+ webrtc::SequenceChecker module_process_thread_;
+ webrtc::SequenceChecker task_queue_thread_;
+ TaskQueueBase* const task_queue_;
Call* call_ RTC_GUARDED_BY(task_queue_thread_);
+ int packets_sent_ RTC_GUARDED_BY(task_queue_thread_);
+ bool relayed_phase_ RTC_GUARDED_BY(task_queue_thread_);
} test(task_queue());
RunBaseTest(&test);
@@ -1887,6 +2012,12 @@ class MaxPaddingSetTest : public test::SendTest {
task_queue_thread_.Detach();
}
+ ~MaxPaddingSetTest() {
+ // Block until all already posted tasks run, to avoid a use-after-free
+ // when such a task accesses |this|.
+ SendTask(RTC_FROM_HERE, task_queue_, [] {});
+ }
+
void ModifyVideoConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
@@ -2332,22 +2463,6 @@ TEST_F(VideoSendStreamTest, VideoSendStreamUpdateActiveSimulcastLayers) {
});
}
-VideoFrame CreateVideoFrame(int width, int height, uint8_t data) {
- const int kSizeY = width * height * 2;
- std::unique_ptr<uint8_t[]> buffer(new uint8_t[kSizeY]);
- memset(buffer.get(), data, kSizeY);
- VideoFrame frame =
- webrtc::VideoFrame::Builder()
- .set_video_frame_buffer(I420Buffer::Create(width, height))
- .set_rotation(webrtc::kVideoRotation_0)
- .set_timestamp_us(data)
- .build();
- frame.set_timestamp(data);
- // Use data as a ms timestamp.
- frame.set_timestamp_us(data * rtc::kNumMicrosecsPerMillisec);
- return frame;
-}
-
TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {
class EncoderStateObserver : public test::SendTest, public VideoEncoder {
public:
@@ -2444,8 +2559,7 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {
stream_->ReconfigureVideoEncoder(std::move(encoder_config_));
EXPECT_EQ(0u, num_releases());
stream_->Stop();
- // Encoder should not be released before destroying the
- // VideoSendStream.
+ // Encoder should not be released before destroying the VideoSendStream.
EXPECT_FALSE(IsReleased());
EXPECT_TRUE(IsReadyForEncode());
stream_->Start();
@@ -2669,7 +2783,14 @@ TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp9Config) {
RunBaseTest(&test);
}
-TEST_F(VideoSendStreamTest, EncoderSetupPropagatesH264Config) {
+// Fails on MSAN: https://bugs.chromium.org/p/webrtc/issues/detail?id=11376.
+#if defined(MEMORY_SANITIZER)
+#define MAYBE_EncoderSetupPropagatesH264Config \
+ DISABLED_EncoderSetupPropagatesH264Config
+#else
+#define MAYBE_EncoderSetupPropagatesH264Config EncoderSetupPropagatesH264Config
+#endif
+TEST_F(VideoSendStreamTest, MAYBE_EncoderSetupPropagatesH264Config) {
VideoCodecConfigObserver<VideoCodecH264> test(kVideoCodecH264, "H264");
RunBaseTest(&test);
}
@@ -2685,10 +2806,10 @@ TEST_F(VideoSendStreamTest, RtcpSenderReportContainsMediaBytesSent) {
private:
Action OnSendRtp(const uint8_t* packet, size_t length) override {
rtc::CritScope lock(&crit_);
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
++rtp_packets_sent_;
- media_bytes_sent_ += length - header.headerLength - header.paddingLength;
+ media_bytes_sent_ += rtp_packet.payload_size();
return SEND_PACKET;
}
@@ -2844,9 +2965,8 @@ TEST_F(VideoSendStreamTest, ReconfigureBitratesSetsEncoderBitratesCorrectly) {
} else if (num_rate_allocator_creations_ == 2) {
EXPECT_EQ(static_cast<unsigned int>(kIncreasedMaxBitrateKbps),
codec.maxBitrate);
- // The start bitrate will be whatever the rate BitRateController
- // has currently configured but in the span of the set max and min
- // bitrate.
+ // The start bitrate will be whatever the rate BitRateController has
+ // currently configured but in the span of the set max and min bitrate.
}
++num_rate_allocator_creations_;
create_rate_allocator_event_.Set();
@@ -3128,39 +3248,47 @@ class Vp9HeaderObserver : public test::SendTest {
}
void PerformTest() override {
- EXPECT_TRUE(Wait()) << "Test timed out waiting for VP9 packet, num frames "
+ bool wait = Wait();
+ {
+ // In case of a timeout, OnSendRtp might still access frames_sent_.
+ rtc::CritScope lock(&crit_);
+ EXPECT_TRUE(wait) << "Test timed out waiting for VP9 packet, num frames "
<< frames_sent_;
+ }
}
Action OnSendRtp(const uint8_t* packet, size_t length) override {
- RTPHeader header;
- EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ RtpPacket rtp_packet;
+ EXPECT_TRUE(rtp_packet.Parse(packet, length));
- EXPECT_EQ(kVp9PayloadType, header.payloadType);
- const uint8_t* payload = packet + header.headerLength;
- size_t payload_length = length - header.headerLength - header.paddingLength;
+ EXPECT_EQ(kVp9PayloadType, rtp_packet.PayloadType());
+ rtc::ArrayView<const uint8_t> rtp_payload = rtp_packet.payload();
bool new_packet = packets_sent_ == 0 ||
- IsNewerSequenceNumber(header.sequenceNumber,
- last_header_.sequenceNumber);
- if (payload_length > 0 && new_packet) {
- RtpDepacketizer::ParsedPayload parsed;
- RtpDepacketizerVp9 depacketizer;
- EXPECT_TRUE(depacketizer.Parse(&parsed, payload, payload_length));
- EXPECT_EQ(VideoCodecType::kVideoCodecVP9, parsed.video_header().codec);
+ IsNewerSequenceNumber(rtp_packet.SequenceNumber(),
+ last_packet_sequence_number_);
+ if (!rtp_payload.empty() && new_packet) {
+ RTPVideoHeader video_header;
+ EXPECT_NE(
+ VideoRtpDepacketizerVp9::ParseRtpPayload(rtp_payload, &video_header),
+ 0);
+ EXPECT_EQ(VideoCodecType::kVideoCodecVP9, video_header.codec);
// Verify common fields for all configurations.
const auto& vp9_header =
- absl::get<RTPVideoHeaderVP9>(parsed.video_header().video_type_header);
+ absl::get<RTPVideoHeaderVP9>(video_header.video_type_header);
VerifyCommonHeader(vp9_header);
- CompareConsecutiveFrames(header, parsed.video_header());
+ CompareConsecutiveFrames(rtp_packet, video_header);
// Verify configuration specific settings.
InspectHeader(vp9_header);
++packets_sent_;
- if (header.markerBit) {
+ if (rtp_packet.Marker()) {
+ rtc::CritScope lock(&crit_);
++frames_sent_;
}
- last_header_ = header;
+ last_packet_marker_ = rtp_packet.Marker();
+ last_packet_sequence_number_ = rtp_packet.SequenceNumber();
+ last_packet_timestamp_ = rtp_packet.Timestamp();
last_vp9_ = vp9_header;
}
return SEND_PACKET;
@@ -3344,17 +3472,18 @@ class Vp9HeaderObserver : public test::SendTest {
}
}
- void CompareConsecutiveFrames(const RTPHeader& header,
+ void CompareConsecutiveFrames(const RtpPacket& rtp_packet,
const RTPVideoHeader& video) const {
const auto& vp9_header =
absl::get<RTPVideoHeaderVP9>(video.video_type_header);
- bool new_frame = packets_sent_ == 0 ||
- IsNewerTimestamp(header.timestamp, last_header_.timestamp);
+ bool new_frame =
+ packets_sent_ == 0 ||
+ IsNewerTimestamp(rtp_packet.Timestamp(), last_packet_timestamp_);
EXPECT_EQ(new_frame, video.is_first_packet_in_frame);
if (!new_frame) {
- EXPECT_FALSE(last_header_.markerBit);
- EXPECT_EQ(last_header_.timestamp, header.timestamp);
+ EXPECT_FALSE(last_packet_marker_);
+ EXPECT_EQ(last_packet_timestamp_, rtp_packet.Timestamp());
EXPECT_EQ(last_vp9_.picture_id, vp9_header.picture_id);
EXPECT_EQ(last_vp9_.temporal_idx, vp9_header.temporal_idx);
EXPECT_EQ(last_vp9_.tl0_pic_idx, vp9_header.tl0_pic_idx);
@@ -3368,7 +3497,7 @@ class Vp9HeaderObserver : public test::SendTest {
if (frames_sent_ == 0)
return;
EXPECT_TRUE(last_vp9_.end_of_frame);
- EXPECT_TRUE(last_header_.markerBit);
+ EXPECT_TRUE(last_packet_marker_);
EXPECT_TRUE(ContinuousPictureId(vp9_header));
VerifyTl0Idx(vp9_header);
}
@@ -3376,9 +3505,12 @@ class Vp9HeaderObserver : public test::SendTest {
test::FunctionVideoEncoderFactory encoder_factory_;
VideoCodecVP9 vp9_settings_;
webrtc::VideoEncoderConfig encoder_config_;
- RTPHeader last_header_;
+ bool last_packet_marker_ = false;
+ uint16_t last_packet_sequence_number_ = 0;
+ uint32_t last_packet_timestamp_ = 0;
RTPVideoHeaderVP9 last_vp9_;
size_t packets_sent_;
+ rtc::CriticalSection crit_;
size_t frames_sent_;
int expected_width_;
int expected_height_;
@@ -3453,6 +3585,7 @@ void VideoSendStreamTest::TestVp9NonFlexMode(uint8_t num_temporal_layers,
vp9_settings_.flexibleMode = false;
vp9_settings_.frameDroppingOn = false;
+ vp9_settings_.automaticResizeOn = false;
vp9_settings_.keyFrameInterval = kKeyFrameInterval;
vp9_settings_.numberOfTemporalLayers = num_temporal_layers_;
vp9_settings_.numberOfSpatialLayers = num_spatial_layers_;
diff --git a/chromium/third_party/webrtc/video/video_source_sink_controller.cc b/chromium/third_party/webrtc/video/video_source_sink_controller.cc
new file mode 100644
index 00000000000..a649adc68c1
--- /dev/null
+++ b/chromium/third_party/webrtc/video/video_source_sink_controller.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_source_sink_controller.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+VideoSourceSinkController::VideoSourceSinkController(
+ rtc::VideoSinkInterface<VideoFrame>* sink,
+ rtc::VideoSourceInterface<VideoFrame>* source)
+ : sink_(sink), source_(source) {
+ RTC_DCHECK(sink_);
+}
+
+void VideoSourceSinkController::SetSource(
+ rtc::VideoSourceInterface<VideoFrame>* source) {
+ rtc::VideoSourceInterface<VideoFrame>* old_source;
+ rtc::VideoSinkWants wants;
+ {
+ rtc::CritScope lock(&crit_);
+ old_source = source_;
+ source_ = source;
+ wants = CurrentSettingsToSinkWants();
+ }
+ if (old_source != source && old_source)
+ old_source->RemoveSink(sink_);
+ if (!source)
+ return;
+ source->AddOrUpdateSink(sink_, wants);
+}
+
+void VideoSourceSinkController::PushSourceSinkSettings() {
+ rtc::CritScope lock(&crit_);
+ if (!source_)
+ return;
+ source_->AddOrUpdateSink(sink_, CurrentSettingsToSinkWants());
+}
+
+VideoSourceRestrictions VideoSourceSinkController::restrictions() const {
+ rtc::CritScope lock(&crit_);
+ return restrictions_;
+}
+
+absl::optional<size_t> VideoSourceSinkController::pixels_per_frame_upper_limit()
+ const {
+ rtc::CritScope lock(&crit_);
+ return pixels_per_frame_upper_limit_;
+}
+
+absl::optional<double> VideoSourceSinkController::frame_rate_upper_limit()
+ const {
+ rtc::CritScope lock(&crit_);
+ return frame_rate_upper_limit_;
+}
+
+bool VideoSourceSinkController::rotation_applied() const {
+ rtc::CritScope lock(&crit_);
+ return rotation_applied_;
+}
+
+int VideoSourceSinkController::resolution_alignment() const {
+ rtc::CritScope lock(&crit_);
+ return resolution_alignment_;
+}
+
+void VideoSourceSinkController::SetRestrictions(
+ VideoSourceRestrictions restrictions) {
+ rtc::CritScope lock(&crit_);
+ restrictions_ = std::move(restrictions);
+}
+
+void VideoSourceSinkController::SetPixelsPerFrameUpperLimit(
+ absl::optional<size_t> pixels_per_frame_upper_limit) {
+ rtc::CritScope lock(&crit_);
+ pixels_per_frame_upper_limit_ = std::move(pixels_per_frame_upper_limit);
+}
+
+void VideoSourceSinkController::SetFrameRateUpperLimit(
+ absl::optional<double> frame_rate_upper_limit) {
+ rtc::CritScope lock(&crit_);
+ frame_rate_upper_limit_ = std::move(frame_rate_upper_limit);
+}
+
+void VideoSourceSinkController::SetRotationApplied(bool rotation_applied) {
+ rtc::CritScope lock(&crit_);
+ rotation_applied_ = rotation_applied;
+}
+
+void VideoSourceSinkController::SetResolutionAlignment(
+ int resolution_alignment) {
+ rtc::CritScope lock(&crit_);
+ resolution_alignment_ = resolution_alignment;
+}
+
+// RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_)
+rtc::VideoSinkWants VideoSourceSinkController::CurrentSettingsToSinkWants()
+ const {
+ rtc::VideoSinkWants wants;
+ wants.rotation_applied = rotation_applied_;
+ // |wants.black_frames| is not used; it always keeps its default value, false.
+ wants.max_pixel_count =
+ rtc::dchecked_cast<int>(restrictions_.max_pixels_per_frame().value_or(
+ std::numeric_limits<int>::max()));
+ wants.target_pixel_count =
+ restrictions_.target_pixels_per_frame().has_value()
+ ? absl::optional<int>(rtc::dchecked_cast<int>(
+ restrictions_.target_pixels_per_frame().value()))
+ : absl::nullopt;
+ wants.max_framerate_fps =
+ restrictions_.max_frame_rate().has_value()
+ ? static_cast<int>(restrictions_.max_frame_rate().value())
+ : std::numeric_limits<int>::max();
+ wants.resolution_alignment = resolution_alignment_;
+ wants.max_pixel_count =
+ std::min(wants.max_pixel_count,
+ rtc::dchecked_cast<int>(pixels_per_frame_upper_limit_.value_or(
+ std::numeric_limits<int>::max())));
+ wants.max_framerate_fps =
+ std::min(wants.max_framerate_fps,
+ frame_rate_upper_limit_.has_value()
+ ? static_cast<int>(frame_rate_upper_limit_.value())
+ : std::numeric_limits<int>::max());
+ return wants;
+}
+
+} // namespace webrtc
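
The conversion in CurrentSettingsToSinkWants() above follows one rule per field: take the restriction if set, otherwise "unconstrained", then clamp by the corresponding upper limit. The max-pixel computation in isolation, as a simplified sketch (the helper is illustrative):

    #include <algorithm>
    #include <limits>

    #include "absl/types/optional.h"

    int EffectiveMaxPixelCount(absl::optional<size_t> max_pixels_per_frame,
                               absl::optional<size_t> pixels_upper_limit) {
      constexpr int kUnconstrained = std::numeric_limits<int>::max();
      // Missing values mean "no constraint" and fall back to INT_MAX.
      const int restricted =
          static_cast<int>(max_pixels_per_frame.value_or(kUnconstrained));
      const int limit =
          static_cast<int>(pixels_upper_limit.value_or(kUnconstrained));
      return std::min(restricted, limit);
    }
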
diff --git a/chromium/third_party/webrtc/video/video_source_sink_controller.h b/chromium/third_party/webrtc/video/video_source_sink_controller.h
new file mode 100644
index 00000000000..4811b2866ef
--- /dev/null
+++ b/chromium/third_party/webrtc/video/video_source_sink_controller.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_
+#define VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_
+
+#include "absl/types/optional.h"
+#include "api/video/video_frame.h"
+#include "api/video/video_sink_interface.h"
+#include "api/video/video_source_interface.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "rtc_base/critical_section.h"
+
+namespace webrtc {
+
+// Responsible for configuring source/sink settings, i.e. performing
+// rtc::VideoSourceInterface<VideoFrame>::AddOrUpdateSink(). It does this by
+// storing settings internally, which are converted to rtc::VideoSinkWants
+// when PushSourceSinkSettings() is called.
+class VideoSourceSinkController {
+ public:
+ VideoSourceSinkController(rtc::VideoSinkInterface<VideoFrame>* sink,
+ rtc::VideoSourceInterface<VideoFrame>* source);
+
+ void SetSource(rtc::VideoSourceInterface<VideoFrame>* source);
+ // Must be called in order for changes to settings to have an effect. This
+ // allows you to modify multiple properties in a single push to the sink.
+ void PushSourceSinkSettings();
+
+ VideoSourceRestrictions restrictions() const;
+ absl::optional<size_t> pixels_per_frame_upper_limit() const;
+ absl::optional<double> frame_rate_upper_limit() const;
+ bool rotation_applied() const;
+ int resolution_alignment() const;
+
+ // Updates the settings stored internally. In order for these settings to be
+ // applied to the sink, PushSourceSinkSettings() must subsequently be called.
+ void SetRestrictions(VideoSourceRestrictions restrictions);
+ void SetPixelsPerFrameUpperLimit(
+ absl::optional<size_t> pixels_per_frame_upper_limit);
+ void SetFrameRateUpperLimit(absl::optional<double> frame_rate_upper_limit);
+ void SetRotationApplied(bool rotation_applied);
+ void SetResolutionAlignment(int resolution_alignment);
+
+ private:
+ rtc::VideoSinkWants CurrentSettingsToSinkWants() const
+ RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+ // TODO(hbos): If everything is handled on the same sequence (i.e.
+ // VideoStreamEncoder's encoder queue) then |crit_| can be replaced by
+ // sequence checker. Investigate if we want to do this.
+ mutable rtc::CriticalSection crit_;
+ rtc::VideoSinkInterface<VideoFrame>* const sink_;
+ rtc::VideoSourceInterface<VideoFrame>* source_ RTC_GUARDED_BY(&crit_);
+ // Pixel and frame rate restrictions.
+ VideoSourceRestrictions restrictions_ RTC_GUARDED_BY(&crit_);
+ // Ensures that even if we are not restricted, the sink is never configured
+ // above this limit. Example: We are not CPU limited (no |restrictions_|) but
+ // our encoder is capped at 30 fps (= |frame_rate_upper_limit_|).
+ absl::optional<size_t> pixels_per_frame_upper_limit_ RTC_GUARDED_BY(&crit_);
+ absl::optional<double> frame_rate_upper_limit_ RTC_GUARDED_BY(&crit_);
+ bool rotation_applied_ RTC_GUARDED_BY(&crit_) = false;
+ int resolution_alignment_ RTC_GUARDED_BY(&crit_) = 1;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_
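
Usage follows a stage-then-push pattern: the setters only record state, and the source sees nothing until PushSourceSinkSettings() folds everything into a single rtc::VideoSinkWants. A sketch against this header (the sink and source instances are assumed to exist):

    webrtc::VideoSourceSinkController controller(&sink, &source);
    // Stage several changes; none of these touch the source yet.
    controller.SetRotationApplied(true);
    controller.SetFrameRateUpperLimit(30.0);
    controller.SetResolutionAlignment(2);
    // One AddOrUpdateSink() call now applies all staged settings together.
    controller.PushSourceSinkSettings();
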
diff --git a/chromium/third_party/webrtc/video/video_source_sink_controller_unittest.cc b/chromium/third_party/webrtc/video/video_source_sink_controller_unittest.cc
new file mode 100644
index 00000000000..c4e2ea11d2f
--- /dev/null
+++ b/chromium/third_party/webrtc/video/video_source_sink_controller_unittest.cc
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_source_sink_controller.h"
+
+#include <limits>
+
+#include "api/video/video_frame.h"
+#include "api/video/video_source_interface.h"
+#include "call/adaptation/video_source_restrictions.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using testing::_;
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kIntUnconstrained = std::numeric_limits<int>::max();
+
+class MockVideoSinkWithVideoFrame : public rtc::VideoSinkInterface<VideoFrame> {
+ public:
+ ~MockVideoSinkWithVideoFrame() override {}
+
+ MOCK_METHOD1(OnFrame, void(const VideoFrame& frame));
+ MOCK_METHOD0(OnDiscardedFrame, void());
+};
+
+class MockVideoSourceWithVideoFrame
+ : public rtc::VideoSourceInterface<VideoFrame> {
+ public:
+ ~MockVideoSourceWithVideoFrame() override {}
+
+ MOCK_METHOD2(AddOrUpdateSink,
+ void(rtc::VideoSinkInterface<VideoFrame>*,
+ const rtc::VideoSinkWants&));
+ MOCK_METHOD1(RemoveSink, void(rtc::VideoSinkInterface<VideoFrame>*));
+};
+
+} // namespace
+
+TEST(VideoSourceSinkControllerTest, UnconstrainedByDefault) {
+ MockVideoSinkWithVideoFrame sink;
+ MockVideoSourceWithVideoFrame source;
+ VideoSourceSinkController controller(&sink, &source);
+ EXPECT_EQ(controller.restrictions(), VideoSourceRestrictions());
+ EXPECT_FALSE(controller.pixels_per_frame_upper_limit().has_value());
+ EXPECT_FALSE(controller.frame_rate_upper_limit().has_value());
+ EXPECT_FALSE(controller.rotation_applied());
+ EXPECT_EQ(controller.resolution_alignment(), 1);
+
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_FALSE(wants.rotation_applied);
+ EXPECT_EQ(wants.max_pixel_count, kIntUnconstrained);
+ EXPECT_EQ(wants.target_pixel_count, absl::nullopt);
+ EXPECT_EQ(wants.max_framerate_fps, kIntUnconstrained);
+ EXPECT_EQ(wants.resolution_alignment, 1);
+ });
+ controller.PushSourceSinkSettings();
+}
+
+TEST(VideoSourceSinkControllerTest, VideoRestrictionsToSinkWants) {
+ MockVideoSinkWithVideoFrame sink;
+ MockVideoSourceWithVideoFrame source;
+ VideoSourceSinkController controller(&sink, &source);
+
+ VideoSourceRestrictions restrictions = controller.restrictions();
+ // max_pixels_per_frame() maps to |max_pixel_count|.
+ restrictions.set_max_pixels_per_frame(42u);
+ // target_pixels_per_frame() maps to |target_pixel_count|.
+ restrictions.set_target_pixels_per_frame(200u);
+ // max_frame_rate() maps to |max_framerate_fps|.
+ restrictions.set_max_frame_rate(30.0);
+ controller.SetRestrictions(restrictions);
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_EQ(wants.max_pixel_count, 42);
+ EXPECT_EQ(wants.target_pixel_count, 200);
+ EXPECT_EQ(wants.max_framerate_fps, 30);
+ });
+ controller.PushSourceSinkSettings();
+
+ // pixels_per_frame_upper_limit() caps |max_pixel_count|.
+ controller.SetPixelsPerFrameUpperLimit(24);
+ // frame_rate_upper_limit() caps |max_framerate_fps|.
+ controller.SetFrameRateUpperLimit(10.0);
+
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_EQ(wants.max_pixel_count, 24);
+ EXPECT_EQ(wants.max_framerate_fps, 10);
+ });
+ controller.PushSourceSinkSettings();
+}
+
+TEST(VideoSourceSinkControllerTest, RotationApplied) {
+ MockVideoSinkWithVideoFrame sink;
+ MockVideoSourceWithVideoFrame source;
+ VideoSourceSinkController controller(&sink, &source);
+ controller.SetRotationApplied(true);
+ EXPECT_TRUE(controller.rotation_applied());
+
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_TRUE(wants.rotation_applied);
+ });
+ controller.PushSourceSinkSettings();
+}
+
+TEST(VideoSourceSinkControllerTest, ResolutionAlignment) {
+ MockVideoSinkWithVideoFrame sink;
+ MockVideoSourceWithVideoFrame source;
+ VideoSourceSinkController controller(&sink, &source);
+ controller.SetResolutionAlignment(13);
+ EXPECT_EQ(controller.resolution_alignment(), 13);
+
+ EXPECT_CALL(source, AddOrUpdateSink(_, _))
+ .WillOnce([](rtc::VideoSinkInterface<VideoFrame>* sink,
+ const rtc::VideoSinkWants& wants) {
+ EXPECT_EQ(wants.resolution_alignment, 13);
+ });
+ controller.PushSourceSinkSettings();
+}
+
+TEST(VideoSourceSinkControllerTest,
+ PushSourceSinkSettingsWithoutSourceDoesNotCrash) {
+ MockVideoSinkWithVideoFrame sink;
+ VideoSourceSinkController controller(&sink, nullptr);
+ controller.PushSourceSinkSettings();
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/video/video_stream_decoder_impl.cc b/chromium/third_party/webrtc/video/video_stream_decoder_impl.cc
index 0477be0c545..1e11d380508 100644
--- a/chromium/third_party/webrtc/video/video_stream_decoder_impl.cc
+++ b/chromium/third_party/webrtc/video/video_stream_decoder_impl.cc
@@ -24,48 +24,41 @@ VideoStreamDecoderImpl::VideoStreamDecoderImpl(
VideoDecoderFactory* decoder_factory,
TaskQueueFactory* task_queue_factory,
std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings)
- : callbacks_(callbacks),
+ : timing_(Clock::GetRealTimeClock()),
+ decode_callbacks_(this),
+ next_frame_timestamps_index_(0),
+ callbacks_(callbacks),
+ keyframe_required_(true),
decoder_factory_(decoder_factory),
decoder_settings_(std::move(decoder_settings)),
+ shut_down_(false),
+ frame_buffer_(Clock::GetRealTimeClock(), &timing_, nullptr),
bookkeeping_queue_(task_queue_factory->CreateTaskQueue(
"video_stream_decoder_bookkeeping_queue",
TaskQueueFactory::Priority::NORMAL)),
- decode_thread_(&DecodeLoop,
- this,
- "video_stream_decoder_decode_thread",
- rtc::kHighestPriority),
- timing_(Clock::GetRealTimeClock()),
- frame_buffer_(Clock::GetRealTimeClock(), &timing_, nullptr),
- next_frame_timestamps_index_(0) {
+ decode_queue_(task_queue_factory->CreateTaskQueue(
+ "video_stream_decoder_decode_queue",
+ TaskQueueFactory::Priority::NORMAL)) {
frame_timestamps_.fill({-1, -1, -1});
- decode_thread_.Start();
+ bookkeeping_queue_.PostTask([this]() {
+ RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
+ StartNextDecode();
+ });
}
VideoStreamDecoderImpl::~VideoStreamDecoderImpl() {
- frame_buffer_.Stop();
- decode_thread_.Stop();
+ rtc::CritScope lock(&shut_down_crit_);
+ shut_down_ = true;
}
void VideoStreamDecoderImpl::OnFrame(
std::unique_ptr<video_coding::EncodedFrame> frame) {
if (!bookkeeping_queue_.IsCurrent()) {
- struct OnFrameTask : QueuedTask {
- OnFrameTask(std::unique_ptr<video_coding::EncodedFrame> frame,
- VideoStreamDecoderImpl* video_stream_decoder)
- : frame_(std::move(frame)),
- video_stream_decoder_(video_stream_decoder) {}
-
- bool Run() override {
- video_stream_decoder_->OnFrame(std::move(frame_));
- return true;
- }
-
- std::unique_ptr<video_coding::EncodedFrame> frame_;
- VideoStreamDecoderImpl* video_stream_decoder_;
- };
+ bookkeeping_queue_.PostTask([this, frame = std::move(frame)]() mutable {
+ OnFrame(std::move(frame));
+ return true;
+ });
- bookkeeping_queue_.PostTask(
- std::make_unique<OnFrameTask>(std::move(frame), this));
return;
}
@@ -120,7 +113,8 @@ VideoDecoder* VideoStreamDecoderImpl::GetDecoder(int payload_type) {
return nullptr;
}
- int32_t register_result = decoder->RegisterDecodeCompleteCallback(this);
+ int32_t register_result =
+ decoder->RegisterDecodeCompleteCallback(&decode_callbacks_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
RTC_LOG(LS_WARNING) << "Failed to register decode callback.";
return nullptr;
@@ -131,102 +125,113 @@ VideoDecoder* VideoStreamDecoderImpl::GetDecoder(int payload_type) {
return decoder_.get();
}
-// static
-void VideoStreamDecoderImpl::DecodeLoop(void* ptr) {
- // TODO(philipel): Remove this and use rtc::Event::kForever when it's
- // supported by the |frame_buffer_|.
- static constexpr int kForever = 100000000;
-
- int max_wait_time_ms = kForever;
- bool keyframe_required = true;
- auto* vs_decoder = static_cast<VideoStreamDecoderImpl*>(ptr);
- while (true) {
- DecodeResult decode_result =
- vs_decoder->DecodeNextFrame(max_wait_time_ms, keyframe_required);
-
- switch (decode_result) {
- case kOk: {
- max_wait_time_ms = kForever;
- keyframe_required = false;
- break;
- }
- case kDecodeFailure: {
- max_wait_time_ms = 0;
- keyframe_required = true;
- break;
- }
- case kNoFrame: {
- max_wait_time_ms = kForever;
- // If we end up here it means that we got a decoding error and there is
- // no keyframe available in the |frame_buffer_|.
- vs_decoder->bookkeeping_queue_.PostTask([vs_decoder]() {
- RTC_DCHECK_RUN_ON(&vs_decoder->bookkeeping_queue_);
- vs_decoder->callbacks_->OnNonDecodableState();
- });
- break;
- }
- case kNoDecoder: {
- max_wait_time_ms = kForever;
- break;
- }
- case kShutdown: {
- return;
- }
- }
- }
+void VideoStreamDecoderImpl::SaveFrameTimestamps(
+ const video_coding::EncodedFrame& frame) {
+ FrameTimestamps* frame_timestamps =
+ &frame_timestamps_[next_frame_timestamps_index_];
+ frame_timestamps->timestamp = frame.Timestamp();
+ frame_timestamps->decode_start_time_ms = rtc::TimeMillis();
+ frame_timestamps->render_time_us = frame.RenderTimeMs() * 1000;
+
+ next_frame_timestamps_index_ =
+ Add<kFrameTimestampsMemory>(next_frame_timestamps_index_, 1);
}
-VideoStreamDecoderImpl::DecodeResult VideoStreamDecoderImpl::DecodeNextFrame(
- int max_wait_time_ms,
- bool keyframe_required) {
- std::unique_ptr<video_coding::EncodedFrame> frame;
- video_coding::FrameBuffer::ReturnReason res =
- frame_buffer_.NextFrame(max_wait_time_ms, &frame, keyframe_required);
-
- if (res == video_coding::FrameBuffer::ReturnReason::kStopped)
- return kShutdown;
-
- if (frame) {
- VideoDecoder* decoder = GetDecoder(frame->PayloadType());
- if (!decoder) {
- RTC_LOG(LS_WARNING) << "Failed to get decoder, dropping frame ("
- << frame->id.picture_id << ":"
- << frame->id.spatial_layer << ").";
- return kNoDecoder;
- }
+void VideoStreamDecoderImpl::StartNextDecode() {
+ int64_t max_wait_time = keyframe_required_ ? 200 : 3000;
- int64_t decode_start_time_ms = rtc::TimeMillis();
- int64_t timestamp = frame->Timestamp();
- int64_t render_time_us = frame->RenderTimeMs() * 1000;
- bookkeeping_queue_.PostTask(
- [this, decode_start_time_ms, timestamp, render_time_us]() {
+ frame_buffer_.NextFrame(
+ max_wait_time, keyframe_required_, &bookkeeping_queue_,
+ [this](std::unique_ptr<video_coding::EncodedFrame> frame,
+ video_coding::FrameBuffer::ReturnReason res) mutable {
+ RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
+ OnNextFrameCallback(std::move(frame), res);
+ });
+}
+
+void VideoStreamDecoderImpl::OnNextFrameCallback(
+ std::unique_ptr<video_coding::EncodedFrame> frame,
+ video_coding::FrameBuffer::ReturnReason result) {
+ switch (result) {
+ case video_coding::FrameBuffer::kFrameFound: {
+ RTC_DCHECK(frame);
+ SaveFrameTimestamps(*frame);
+
+ rtc::CritScope lock(&shut_down_crit_);
+ if (shut_down_) {
+ return;
+ }
+
+ decode_queue_.PostTask([this, frame = std::move(frame)]() mutable {
+ RTC_DCHECK_RUN_ON(&decode_queue_);
+ DecodeResult decode_result = DecodeFrame(std::move(frame));
+ bookkeeping_queue_.PostTask([this, decode_result]() {
RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
- // Saving decode start time this way wont work if we decode spatial
- // layers sequentially.
- FrameTimestamps* frame_timestamps =
- &frame_timestamps_[next_frame_timestamps_index_];
- frame_timestamps->timestamp = timestamp;
- frame_timestamps->decode_start_time_ms = decode_start_time_ms;
- frame_timestamps->render_time_us = render_time_us;
-
- next_frame_timestamps_index_ =
- Add<kFrameTimestampsMemory>(next_frame_timestamps_index_, 1);
+ switch (decode_result) {
+ case kOk: {
+ keyframe_required_ = false;
+ break;
+ }
+ case kOkRequestKeyframe: {
+ callbacks_->OnNonDecodableState();
+ keyframe_required_ = false;
+ break;
+ }
+ case kDecodeFailure: {
+ callbacks_->OnNonDecodableState();
+ keyframe_required_ = true;
+ break;
+ }
+ }
+ StartNextDecode();
});
+ });
+ break;
+ }
+ case video_coding::FrameBuffer::kTimeout: {
+ callbacks_->OnNonDecodableState();
+ // The |frame_buffer_| requires the frame callback function to complete
+ // before NextFrame is called again. For this reason we call
+ // StartNextDecode in a later task to allow this task to complete first.
+ bookkeeping_queue_.PostTask([this]() {
+ RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
+ StartNextDecode();
+ });
+ break;
+ }
+ case video_coding::FrameBuffer::kStopped: {
+ // We are shutting down, do nothing.
+ break;
+ }
+ }
+}
- int32_t decode_result = decoder->Decode(frame->EncodedImage(),
- false, // missing_frame
- frame->RenderTimeMs());
+VideoStreamDecoderImpl::DecodeResult VideoStreamDecoderImpl::DecodeFrame(
+ std::unique_ptr<video_coding::EncodedFrame> frame) {
+ RTC_DCHECK(frame);
- return decode_result == WEBRTC_VIDEO_CODEC_OK ? kOk : kDecodeFailure;
+ VideoDecoder* decoder = GetDecoder(frame->PayloadType());
+ if (!decoder) {
+ return kDecodeFailure;
}
- return kNoFrame;
+ int32_t decode_result = decoder->Decode(frame->EncodedImage(), //
+ /*missing_frames=*/false, //
+ frame->RenderTimeMs());
+ switch (decode_result) {
+ case WEBRTC_VIDEO_CODEC_OK: {
+ return kOk;
+ }
+ case WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME: {
+ return kOkRequestKeyframe;
+ }
+ default:
+ return kDecodeFailure;
+ }
}
VideoStreamDecoderImpl::FrameTimestamps*
VideoStreamDecoderImpl::GetFrameTimestamps(int64_t timestamp) {
- RTC_DCHECK_RUN_ON(&bookkeeping_queue_);
-
int start_time_index = next_frame_timestamps_index_;
for (int i = 0; i < kFrameTimestampsMemory; ++i) {
start_time_index = Subtract<kFrameTimestampsMemory>(start_time_index, 1);
@@ -238,23 +243,10 @@ VideoStreamDecoderImpl::GetFrameTimestamps(int64_t timestamp) {
return nullptr;
}
-// VideoDecoder::DecodedImageCallback
-int32_t VideoStreamDecoderImpl::Decoded(VideoFrame& decoded_image) {
- Decoded(decoded_image, absl::nullopt, absl::nullopt);
- return WEBRTC_VIDEO_CODEC_OK;
-}
-
-// VideoDecoder::DecodedImageCallback
-int32_t VideoStreamDecoderImpl::Decoded(VideoFrame& decoded_image,
- int64_t decode_time_ms) {
- Decoded(decoded_image, decode_time_ms, absl::nullopt);
- return WEBRTC_VIDEO_CODEC_OK;
-}
-
-// VideoDecoder::DecodedImageCallback
-void VideoStreamDecoderImpl::Decoded(VideoFrame& decoded_image,
- absl::optional<int32_t> decode_time_ms,
- absl::optional<uint8_t> qp) {
+void VideoStreamDecoderImpl::OnDecodedFrameCallback(
+ VideoFrame& decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
int64_t decode_stop_time_ms = rtc::TimeMillis();
bookkeeping_queue_.PostTask([this, decode_stop_time_ms, decoded_image,
@@ -284,4 +276,28 @@ void VideoStreamDecoderImpl::Decoded(VideoFrame& decoded_image,
});
}
+VideoStreamDecoderImpl::DecodeCallbacks::DecodeCallbacks(
+ VideoStreamDecoderImpl* video_stream_decoder_impl)
+ : video_stream_decoder_impl_(video_stream_decoder_impl) {}
+
+int32_t VideoStreamDecoderImpl::DecodeCallbacks::Decoded(
+ VideoFrame& decoded_image) {
+ Decoded(decoded_image, absl::nullopt, absl::nullopt);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t VideoStreamDecoderImpl::DecodeCallbacks::Decoded(
+ VideoFrame& decoded_image,
+ int64_t decode_time_ms) {
+ Decoded(decoded_image, decode_time_ms, absl::nullopt);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void VideoStreamDecoderImpl::DecodeCallbacks::Decoded(
+ VideoFrame& decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) {
+ video_stream_decoder_impl_->OnDecodedFrameCallback(decoded_image,
+ decode_time_ms, qp);
+}
} // namespace webrtc
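
The refactor above replaces private inheritance of DecodedImageCallback with a
nested DecodeCallbacks delegate that forwards every Decoded() overload to a
single handler on the owning object. A minimal sketch of that forwarding
pattern, using hypothetical names (Callback, Owner and Forwarder are
illustrative, not WebRTC types):

// Sketch only; Callback, Owner and Forwarder are illustrative names.
class Callback {
 public:
  virtual ~Callback() = default;
  virtual void OnDone(int result) = 0;
};

class Owner {
 public:
  Owner() : forwarder_(this) {}
  Callback* callback() { return &forwarder_; }

 private:
  class Forwarder : public Callback {
   public:
    explicit Forwarder(Owner* owner) : owner_(owner) {}
    // Forward to the owner instead of making Owner inherit Callback,
    // keeping the callback methods out of Owner's own interface.
    void OnDone(int result) override { owner_->HandleDone(result); }

   private:
    Owner* const owner_;
  };

  void HandleDone(int result) { /* bookkeeping */ }
  Forwarder forwarder_;
};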
diff --git a/chromium/third_party/webrtc/video/video_stream_decoder_impl.h b/chromium/third_party/webrtc/video/video_stream_decoder_impl.h
index c439be5c162..f3f09e4a794 100644
--- a/chromium/third_party/webrtc/video/video_stream_decoder_impl.h
+++ b/chromium/third_party/webrtc/video/video_stream_decoder_impl.h
@@ -26,8 +26,7 @@
namespace webrtc {
-class VideoStreamDecoderImpl : public VideoStreamDecoderInterface,
- private DecodedImageCallback {
+class VideoStreamDecoderImpl : public VideoStreamDecoderInterface {
public:
VideoStreamDecoderImpl(
VideoStreamDecoderInterface::Callbacks* callbacks,
@@ -43,12 +42,23 @@ class VideoStreamDecoderImpl : public VideoStreamDecoderInterface,
void SetMaxPlayoutDelay(TimeDelta max_delay) override;
private:
+ class DecodeCallbacks : public DecodedImageCallback {
+ public:
+ explicit DecodeCallbacks(VideoStreamDecoderImpl* video_stream_decoder_impl);
+ int32_t Decoded(VideoFrame& decodedImage) override;
+ int32_t Decoded(VideoFrame& decodedImage, int64_t decode_time_ms) override;
+ void Decoded(VideoFrame& decodedImage,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override;
+
+ private:
+ VideoStreamDecoderImpl* const video_stream_decoder_impl_;
+ };
+
enum DecodeResult {
kOk,
+ kOkRequestKeyframe,
kDecodeFailure,
- kNoFrame,
- kNoDecoder,
- kShutdown,
};
struct FrameTimestamps {
@@ -57,36 +67,25 @@ class VideoStreamDecoderImpl : public VideoStreamDecoderInterface,
int64_t render_time_us;
};
- VideoDecoder* GetDecoder(int payload_type);
- static void DecodeLoop(void* ptr);
- DecodeResult DecodeNextFrame(int max_wait_time_ms, bool keyframe_required);
-
- FrameTimestamps* GetFrameTimestamps(int64_t timestamp);
-
- // Implements DecodedImageCallback interface
- int32_t Decoded(VideoFrame& decodedImage) override;
- int32_t Decoded(VideoFrame& decodedImage, int64_t decode_time_ms) override;
- void Decoded(VideoFrame& decodedImage,
- absl::optional<int32_t> decode_time_ms,
- absl::optional<uint8_t> qp) override;
-
- VideoStreamDecoderInterface::Callbacks* const callbacks_
- RTC_PT_GUARDED_BY(bookkeeping_queue_);
- VideoDecoderFactory* const decoder_factory_;
- std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings_;
+ void SaveFrameTimestamps(const video_coding::EncodedFrame& frame)
+ RTC_RUN_ON(bookkeeping_queue_);
+ FrameTimestamps* GetFrameTimestamps(int64_t timestamp)
+ RTC_RUN_ON(bookkeeping_queue_);
+ void StartNextDecode() RTC_RUN_ON(bookkeeping_queue_);
+ void OnNextFrameCallback(std::unique_ptr<video_coding::EncodedFrame> frame,
+ video_coding::FrameBuffer::ReturnReason res)
+ RTC_RUN_ON(bookkeeping_queue_);
+ void OnDecodedFrameCallback(VideoFrame& decodedImage, // NOLINT
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp);
+
+ VideoDecoder* GetDecoder(int payload_type) RTC_RUN_ON(decode_queue_);
+ VideoStreamDecoderImpl::DecodeResult DecodeFrame(
+ std::unique_ptr<video_coding::EncodedFrame> frame)
+ RTC_RUN_ON(decode_queue_);
- // The |bookkeeping_queue_| is used to:
- // - Make |callbacks_|.
- // - Insert/extract frames from the |frame_buffer_|
- // - Synchronize with whatever thread that makes the Decoded callback.
- rtc::TaskQueue bookkeeping_queue_;
-
- rtc::PlatformThread decode_thread_;
VCMTiming timing_;
- video_coding::FrameBuffer frame_buffer_;
- video_coding::VideoLayerFrameId last_continuous_id_;
- absl::optional<int> current_payload_type_;
- std::unique_ptr<VideoDecoder> decoder_;
+ DecodeCallbacks decode_callbacks_;
// Some decoders are pipelined so it is not sufficient to save frame info
// for the last frame only.
@@ -94,6 +93,31 @@ class VideoStreamDecoderImpl : public VideoStreamDecoderInterface,
std::array<FrameTimestamps, kFrameTimestampsMemory> frame_timestamps_
RTC_GUARDED_BY(bookkeeping_queue_);
int next_frame_timestamps_index_ RTC_GUARDED_BY(bookkeeping_queue_);
+ VideoStreamDecoderInterface::Callbacks* const callbacks_
+ RTC_PT_GUARDED_BY(bookkeeping_queue_);
+ video_coding::VideoLayerFrameId last_continuous_id_
+ RTC_GUARDED_BY(bookkeeping_queue_);
+ bool keyframe_required_ RTC_GUARDED_BY(bookkeeping_queue_);
+
+ absl::optional<int> current_payload_type_ RTC_GUARDED_BY(decode_queue_);
+ VideoDecoderFactory* const decoder_factory_ RTC_PT_GUARDED_BY(decode_queue_);
+ std::map<int, std::pair<SdpVideoFormat, int>> decoder_settings_
+ RTC_GUARDED_BY(decode_queue_);
+
+  // The |bookkeeping_queue_| uses the |frame_buffer_| and also posts tasks to
+  // the |decode_queue_|. The |decode_queue_| in turn uses the |decoder_| to
+  // decode frames. When the |decoder_| is done it will post back to the
+  // |bookkeeping_queue_| with the decoded frame. During shutdown we start by
+  // isolating the |bookkeeping_queue_| from the |decode_queue_|, after which
+  // it is safe for the |decode_queue_| to be destructed. After that the
+  // |decoder_| can be destructed, and then the |bookkeeping_queue_|. Finally
+  // the |frame_buffer_| can be destructed.
+ rtc::CriticalSection shut_down_crit_;
+ bool shut_down_ RTC_GUARDED_BY(shut_down_crit_);
+ video_coding::FrameBuffer frame_buffer_ RTC_GUARDED_BY(bookkeeping_queue_);
+ rtc::TaskQueue bookkeeping_queue_;
+ std::unique_ptr<VideoDecoder> decoder_ RTC_GUARDED_BY(decode_queue_);
+ rtc::TaskQueue decode_queue_;
};
} // namespace webrtc
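
The shutdown comment above is the key invariant of the new threading model:
the |bookkeeping_queue_| must be isolated from the |decode_queue_| before the
latter is destroyed, and the member declaration order (bookkeeping queue
before decode queue) guarantees the reverse destruction order. A minimal
sketch of how the guard in OnNextFrameCallback enforces this, assuming
rtc::TaskQueue-style queues (Frame, Decode and HandleResult are placeholders):

void PostDecode(std::unique_ptr<Frame> frame) {
  rtc::CritScope lock(&shut_down_crit_);
  if (shut_down_)
    return;  // Once shutdown starts, nothing new reaches |decode_queue_|.
  // The lock is held across the post, so shutdown cannot slip in between
  // the check and the PostTask.
  decode_queue_.PostTask([this, frame = std::move(frame)]() mutable {
    int result = Decode(std::move(frame));
    // Posting back is safe: the bookkeeping queue outlives the decode queue.
    bookkeeping_queue_.PostTask([this, result] { HandleResult(result); });
  });
}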
diff --git a/chromium/third_party/webrtc/video/video_stream_decoder_impl_unittest.cc b/chromium/third_party/webrtc/video/video_stream_decoder_impl_unittest.cc
new file mode 100644
index 00000000000..44e914001d4
--- /dev/null
+++ b/chromium/third_party/webrtc/video/video_stream_decoder_impl_unittest.cc
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "video/video_stream_decoder_impl.h"
+
+#include <vector>
+
+#include "api/video/i420_buffer.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+namespace webrtc {
+namespace {
+using ::testing::_;
+using ::testing::ByMove;
+using ::testing::NiceMock;
+using ::testing::Return;
+
+class MockVideoStreamDecoderCallbacks
+ : public VideoStreamDecoderInterface::Callbacks {
+ public:
+ MOCK_METHOD0(OnNonDecodableState, void());
+ MOCK_METHOD1(OnContinuousUntil,
+ void(const video_coding::VideoLayerFrameId& key));
+ MOCK_METHOD1(OnEncodedFrame, void(const video_coding::EncodedFrame& frame));
+ MOCK_METHOD3(OnDecodedFrame,
+ void(VideoFrame decodedImage,
+ absl::optional<int> decode_time_ms,
+ absl::optional<int> qp));
+};
+
+class StubVideoDecoder : public VideoDecoder {
+ public:
+ MOCK_METHOD2(InitDecode,
+ int32_t(const VideoCodec* codec_settings,
+ int32_t number_of_cores));
+
+ int32_t Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms) override {
+ int32_t ret_code = DecodeCall(input_image, missing_frames, render_time_ms);
+ if (ret_code == WEBRTC_VIDEO_CODEC_OK ||
+ ret_code == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) {
+ VideoFrame frame = VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(1, 1))
+ .build();
+ callback_->Decoded(frame);
+ }
+ return ret_code;
+ }
+
+ MOCK_METHOD3(DecodeCall,
+ int32_t(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms));
+
+ int32_t Release() override { return 0; }
+
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override {
+ callback_ = callback;
+ return 0;
+ }
+
+ private:
+  DecodedImageCallback* callback_ = nullptr;
+};
+
+class WrappedVideoDecoder : public VideoDecoder {
+ public:
+ explicit WrappedVideoDecoder(StubVideoDecoder* decoder) : decoder_(decoder) {}
+
+ int32_t InitDecode(const VideoCodec* codec_settings,
+ int32_t number_of_cores) override {
+ return decoder_->InitDecode(codec_settings, number_of_cores);
+ }
+ int32_t Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ int64_t render_time_ms) override {
+ return decoder_->Decode(input_image, missing_frames, render_time_ms);
+ }
+ int32_t Release() override { return decoder_->Release(); }
+
+ int32_t RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) override {
+ return decoder_->RegisterDecodeCompleteCallback(callback);
+ }
+
+ private:
+ StubVideoDecoder* decoder_;
+};
+
+class FakeVideoDecoderFactory : public VideoDecoderFactory {
+ public:
+ std::vector<SdpVideoFormat> GetSupportedFormats() const override {
+ return {};
+ }
+ std::unique_ptr<VideoDecoder> CreateVideoDecoder(
+ const SdpVideoFormat& format) override {
+ if (format.name == "VP8") {
+ return std::make_unique<WrappedVideoDecoder>(&vp8_decoder_);
+ }
+
+ if (format.name == "AV1") {
+ return std::make_unique<WrappedVideoDecoder>(&av1_decoder_);
+ }
+
+ return {};
+ }
+
+ StubVideoDecoder& Vp8Decoder() { return vp8_decoder_; }
+ StubVideoDecoder& Av1Decoder() { return av1_decoder_; }
+
+ private:
+ NiceMock<StubVideoDecoder> vp8_decoder_;
+ NiceMock<StubVideoDecoder> av1_decoder_;
+};
+
+class FakeEncodedFrame : public video_coding::EncodedFrame {
+ public:
+ int64_t ReceivedTime() const override { return 0; }
+ int64_t RenderTime() const override { return 0; }
+
+ // Setters for protected variables.
+ void SetPayloadType(int payload_type) { _payloadType = payload_type; }
+};
+
+class FrameBuilder {
+ public:
+ FrameBuilder() : frame_(std::make_unique<FakeEncodedFrame>()) {}
+
+ FrameBuilder& WithPayloadType(int payload_type) {
+ frame_->SetPayloadType(payload_type);
+ return *this;
+ }
+
+ FrameBuilder& WithPictureId(int picture_id) {
+ frame_->id.picture_id = picture_id;
+ return *this;
+ }
+
+ std::unique_ptr<FakeEncodedFrame> Build() { return std::move(frame_); }
+
+ private:
+ std::unique_ptr<FakeEncodedFrame> frame_;
+};
+
+class VideoStreamDecoderImplTest : public ::testing::Test {
+ public:
+ VideoStreamDecoderImplTest()
+ : time_controller_(Timestamp::Seconds(0)),
+ video_stream_decoder_(&callbacks_,
+ &decoder_factory_,
+ time_controller_.GetTaskQueueFactory(),
+ {{1, std::make_pair(SdpVideoFormat("VP8"), 1)},
+ {2, std::make_pair(SdpVideoFormat("AV1"), 1)}}) {
+ }
+
+ NiceMock<MockVideoStreamDecoderCallbacks> callbacks_;
+ FakeVideoDecoderFactory decoder_factory_;
+ GlobalSimulatedTimeController time_controller_;
+ VideoStreamDecoderImpl video_stream_decoder_;
+};
+
+TEST_F(VideoStreamDecoderImplTest, InsertAndDecodeFrame) {
+ video_stream_decoder_.OnFrame(FrameBuilder().WithPayloadType(1).Build());
+ EXPECT_CALL(callbacks_, OnDecodedFrame);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+}
+
+TEST_F(VideoStreamDecoderImplTest, NonDecodableStateWaitingForKeyframe) {
+ EXPECT_CALL(callbacks_, OnNonDecodableState);
+ time_controller_.AdvanceTime(TimeDelta::Millis(200));
+}
+
+TEST_F(VideoStreamDecoderImplTest, NonDecodableStateWaitingForDeltaFrame) {
+ video_stream_decoder_.OnFrame(FrameBuilder().WithPayloadType(1).Build());
+ EXPECT_CALL(callbacks_, OnDecodedFrame);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+ EXPECT_CALL(callbacks_, OnNonDecodableState);
+ time_controller_.AdvanceTime(TimeDelta::Millis(3000));
+}
+
+TEST_F(VideoStreamDecoderImplTest, InsertAndDecodeFrameWithKeyframeRequest) {
+ video_stream_decoder_.OnFrame(FrameBuilder().WithPayloadType(1).Build());
+ EXPECT_CALL(decoder_factory_.Vp8Decoder(), DecodeCall)
+ .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME));
+ EXPECT_CALL(callbacks_, OnDecodedFrame);
+ EXPECT_CALL(callbacks_, OnNonDecodableState);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+}
+
+TEST_F(VideoStreamDecoderImplTest, FailToInitDecoder) {
+ video_stream_decoder_.OnFrame(FrameBuilder().WithPayloadType(1).Build());
+ ON_CALL(decoder_factory_.Vp8Decoder(), InitDecode)
+ .WillByDefault(Return(WEBRTC_VIDEO_CODEC_ERROR));
+ EXPECT_CALL(callbacks_, OnNonDecodableState);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+}
+
+TEST_F(VideoStreamDecoderImplTest, FailToDecodeFrame) {
+ video_stream_decoder_.OnFrame(FrameBuilder().WithPayloadType(1).Build());
+ ON_CALL(decoder_factory_.Vp8Decoder(), DecodeCall)
+ .WillByDefault(Return(WEBRTC_VIDEO_CODEC_ERROR));
+ EXPECT_CALL(callbacks_, OnNonDecodableState);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+}
+
+TEST_F(VideoStreamDecoderImplTest, ChangeFramePayloadType) {
+ video_stream_decoder_.OnFrame(
+ FrameBuilder().WithPayloadType(1).WithPictureId(0).Build());
+ EXPECT_CALL(decoder_factory_.Vp8Decoder(), DecodeCall);
+ EXPECT_CALL(callbacks_, OnDecodedFrame);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+
+ video_stream_decoder_.OnFrame(
+ FrameBuilder().WithPayloadType(2).WithPictureId(1).Build());
+ EXPECT_CALL(decoder_factory_.Av1Decoder(), DecodeCall);
+ EXPECT_CALL(callbacks_, OnDecodedFrame);
+ time_controller_.AdvanceTime(TimeDelta::Millis(1));
+}
+
+} // namespace
+} // namespace webrtc
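
The new tests run entirely on simulated time: expectations are set first, and
AdvanceTime() then executes every task the decoder posted within that window,
so no real sleeping or polling is needed. Reduced to its core (mock and
decoder construction elided), the pattern is:

// Sketch of the simulated-time test pattern used above.
GlobalSimulatedTimeController time_controller(Timestamp::Seconds(0));
// The object under test is created with
// time_controller.GetTaskQueueFactory().
EXPECT_CALL(callbacks, OnDecodedFrame);
time_controller.AdvanceTime(TimeDelta::Millis(1));  // Runs posted tasks.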
diff --git a/chromium/third_party/webrtc/video/video_stream_encoder.cc b/chromium/third_party/webrtc/video/video_stream_encoder.cc
index 8af9a7d8c0f..83c213a0c16 100644
--- a/chromium/third_party/webrtc/video/video_stream_encoder.cc
+++ b/chromium/third_party/webrtc/video/video_stream_encoder.cc
@@ -18,6 +18,7 @@
#include <utility>
#include "absl/algorithm/container.h"
+#include "absl/types/optional.h"
#include "api/video/encoded_image.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_bitrate_allocator_factory.h"
@@ -25,19 +26,17 @@
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/codecs/vp9/svc_rate_allocator.h"
#include "modules/video_coding/include/video_codec_initializer.h"
-#include "modules/video_coding/utility/default_video_bitrate_allocator.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/alr_experiment.h"
-#include "rtc_base/experiments/quality_scaling_experiment.h"
#include "rtc_base/experiments/rate_control_settings.h"
#include "rtc_base/location.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
-#include "rtc_base/system/fallthrough.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/field_trial.h"
+#include "video/adaptation/resource_adaptation_processor.h"
namespace webrtc {
@@ -45,21 +44,12 @@ namespace {
// Time interval for logging frame counts.
const int64_t kFrameLogIntervalMs = 60000;
-const int kMinFramerateFps = 2;
// Time to keep a single cached pending frame in paused state.
const int64_t kPendingFrameTimeoutMs = 1000;
-const char kInitialFramedropFieldTrial[] = "WebRTC-InitialFramedrop";
constexpr char kFrameDropperFieldTrial[] = "WebRTC-FrameDropper";
-// The maximum number of frames to drop at beginning of stream
-// to try and achieve desired bitrate.
-const int kMaxInitialFramedrop = 4;
-// When the first change in BWE above this threshold occurs,
-// enable DropFrameDueToSize logic.
-const float kFramedropThreshold = 0.3;
-
// Averaging window spanning 90 frames at default 30fps, matching old media
// optimization module defaults.
const int64_t kFrameRateAvergingWindowSizeMs = (1000 / 30) * 90;
@@ -71,40 +61,6 @@ const int64_t kParameterUpdateIntervalMs = 1000;
// Animation is capped to 720p.
constexpr int kMaxAnimationPixels = 1280 * 720;
-uint32_t abs_diff(uint32_t a, uint32_t b) {
- return (a < b) ? b - a : a - b;
-}
-
-bool IsResolutionScalingEnabled(DegradationPreference degradation_preference) {
- return degradation_preference == DegradationPreference::MAINTAIN_FRAMERATE ||
- degradation_preference == DegradationPreference::BALANCED;
-}
-
-bool IsFramerateScalingEnabled(DegradationPreference degradation_preference) {
- return degradation_preference == DegradationPreference::MAINTAIN_RESOLUTION ||
- degradation_preference == DegradationPreference::BALANCED;
-}
-
-// TODO(pbos): Lower these thresholds (to closer to 100%) when we handle
-// pipelining encoders better (multiple input frames before something comes
-// out). This should effectively turn off CPU adaptations for systems that
-// remotely cope with the load right now.
-CpuOveruseOptions GetCpuOveruseOptions(
- const VideoStreamEncoderSettings& settings,
- bool full_overuse_time) {
- CpuOveruseOptions options;
-
- if (full_overuse_time) {
- options.low_encode_usage_threshold_percent = 150;
- options.high_encode_usage_threshold_percent = 200;
- }
- if (settings.experiment_cpu_load_estimator) {
- options.filter_time_ms = 5 * rtc::kNumMillisecsPerSec;
- }
-
- return options;
-}
-
bool RequiresEncoderReset(const VideoCodec& prev_send_codec,
const VideoCodec& new_send_codec,
bool was_encode_called_since_last_initialization) {
@@ -154,6 +110,8 @@ bool RequiresEncoderReset(const VideoCodec& prev_send_codec,
prev_send_codec.simulcastStream[i].width ||
new_send_codec.simulcastStream[i].height !=
prev_send_codec.simulcastStream[i].height ||
+ new_send_codec.simulcastStream[i].maxFramerate !=
+ prev_send_codec.simulcastStream[i].maxFramerate ||
new_send_codec.simulcastStream[i].numberOfTemporalLayers !=
prev_send_codec.simulcastStream[i].numberOfTemporalLayers ||
new_send_codec.simulcastStream[i].qpMax !=
@@ -211,263 +169,8 @@ VideoBitrateAllocation UpdateAllocationFromEncoderInfo(
new_allocation.set_bw_limited(allocation.is_bw_limited());
return new_allocation;
}
-} // namespace
-
-// VideoSourceProxy is responsible ensuring thread safety between calls to
-// VideoStreamEncoder::SetSource that will happen on libjingle's worker thread
-// when a video capturer is connected to the encoder and the encoder task queue
-// (encoder_queue_) where the encoder reports its VideoSinkWants.
-class VideoStreamEncoder::VideoSourceProxy {
- public:
- explicit VideoSourceProxy(VideoStreamEncoder* video_stream_encoder)
- : video_stream_encoder_(video_stream_encoder),
- degradation_preference_(DegradationPreference::DISABLED),
- source_(nullptr),
- max_framerate_(std::numeric_limits<int>::max()),
- max_pixels_(std::numeric_limits<int>::max()) {}
-
- void SetSource(rtc::VideoSourceInterface<VideoFrame>* source,
- const DegradationPreference& degradation_preference) {
- // Called on libjingle's worker thread.
- RTC_DCHECK_RUN_ON(&main_checker_);
- rtc::VideoSourceInterface<VideoFrame>* old_source = nullptr;
- rtc::VideoSinkWants wants;
- {
- rtc::CritScope lock(&crit_);
- degradation_preference_ = degradation_preference;
- old_source = source_;
- source_ = source;
- wants = GetActiveSinkWantsInternal();
- }
-
- if (old_source != source && old_source != nullptr) {
- old_source->RemoveSink(video_stream_encoder_);
- }
-
- if (!source) {
- return;
- }
-
- source->AddOrUpdateSink(video_stream_encoder_, wants);
- }
-
- void SetMaxFramerate(int max_framerate) {
- RTC_DCHECK_GT(max_framerate, 0);
- rtc::CritScope lock(&crit_);
- if (max_framerate == max_framerate_)
- return;
-
- RTC_LOG(LS_INFO) << "Set max framerate: " << max_framerate;
- max_framerate_ = max_framerate;
- if (source_) {
- source_->AddOrUpdateSink(video_stream_encoder_,
- GetActiveSinkWantsInternal());
- }
- }
-
- void SetWantsRotationApplied(bool rotation_applied) {
- rtc::CritScope lock(&crit_);
- sink_wants_.rotation_applied = rotation_applied;
- if (source_) {
- source_->AddOrUpdateSink(video_stream_encoder_,
- GetActiveSinkWantsInternal());
- }
- }
-
- rtc::VideoSinkWants GetActiveSinkWants() {
- rtc::CritScope lock(&crit_);
- return GetActiveSinkWantsInternal();
- }
-
- void ResetPixelFpsCount() {
- rtc::CritScope lock(&crit_);
- sink_wants_.max_pixel_count = std::numeric_limits<int>::max();
- sink_wants_.target_pixel_count.reset();
- sink_wants_.max_framerate_fps = std::numeric_limits<int>::max();
- if (source_)
- source_->AddOrUpdateSink(video_stream_encoder_,
- GetActiveSinkWantsInternal());
- }
-
- bool RequestResolutionLowerThan(int pixel_count,
- int min_pixels_per_frame,
- bool* min_pixels_reached) {
- // Called on the encoder task queue.
- rtc::CritScope lock(&crit_);
- if (!source_ || !IsResolutionScalingEnabled(degradation_preference_)) {
- // This can happen since |degradation_preference_| is set on libjingle's
- // worker thread but the adaptation is done on the encoder task queue.
- return false;
- }
- // The input video frame size will have a resolution less than or equal to
- // |max_pixel_count| depending on how the source can scale the frame size.
- const int pixels_wanted = (pixel_count * 3) / 5;
- if (pixels_wanted >= sink_wants_.max_pixel_count) {
- return false;
- }
- if (pixels_wanted < min_pixels_per_frame) {
- *min_pixels_reached = true;
- return false;
- }
- RTC_LOG(LS_INFO) << "Scaling down resolution, max pixels: "
- << pixels_wanted;
- sink_wants_.max_pixel_count = pixels_wanted;
- sink_wants_.target_pixel_count = absl::nullopt;
- source_->AddOrUpdateSink(video_stream_encoder_,
- GetActiveSinkWantsInternal());
- return true;
- }
-
- int RequestFramerateLowerThan(int fps) {
- // Called on the encoder task queue.
- // The input video frame rate will be scaled down to 2/3, rounding down.
- int framerate_wanted = (fps * 2) / 3;
- return RestrictFramerate(framerate_wanted) ? framerate_wanted : -1;
- }
-
- int GetHigherResolutionThan(int pixel_count) const {
- // On step down we request at most 3/5 the pixel count of the previous
- // resolution, so in order to take "one step up" we request a resolution
- // as close as possible to 5/3 of the current resolution. The actual pixel
- // count selected depends on the capabilities of the source. In order to
- // not take a too large step up, we cap the requested pixel count to be at
- // most four time the current number of pixels.
- return (pixel_count * 5) / 3;
- }
-
- bool RequestHigherResolutionThan(int pixel_count) {
- // Called on the encoder task queue.
- rtc::CritScope lock(&crit_);
- if (!source_ || !IsResolutionScalingEnabled(degradation_preference_)) {
- // This can happen since |degradation_preference_| is set on libjingle's
- // worker thread but the adaptation is done on the encoder task queue.
- return false;
- }
- int max_pixels_wanted = pixel_count;
- if (max_pixels_wanted != std::numeric_limits<int>::max())
- max_pixels_wanted = pixel_count * 4;
-
- if (max_pixels_wanted <= sink_wants_.max_pixel_count)
- return false;
-
- sink_wants_.max_pixel_count = max_pixels_wanted;
- if (max_pixels_wanted == std::numeric_limits<int>::max()) {
- // Remove any constraints.
- sink_wants_.target_pixel_count.reset();
- } else {
- sink_wants_.target_pixel_count = GetHigherResolutionThan(pixel_count);
- }
- RTC_LOG(LS_INFO) << "Scaling up resolution, max pixels: "
- << max_pixels_wanted;
- source_->AddOrUpdateSink(video_stream_encoder_,
- GetActiveSinkWantsInternal());
- return true;
- }
-
- // Request upgrade in framerate. Returns the new requested frame, or -1 if
- // no change requested. Note that maxint may be returned if limits due to
- // adaptation requests are removed completely. In that case, consider
- // |max_framerate_| to be the current limit (assuming the capturer complies).
- int RequestHigherFramerateThan(int fps) {
- // Called on the encoder task queue.
- // The input frame rate will be scaled up to the last step, with rounding.
- int framerate_wanted = fps;
- if (fps != std::numeric_limits<int>::max())
- framerate_wanted = (fps * 3) / 2;
-
- return IncreaseFramerate(framerate_wanted) ? framerate_wanted : -1;
- }
-
- bool RestrictFramerate(int fps) {
- // Called on the encoder task queue.
- rtc::CritScope lock(&crit_);
- if (!source_ || !IsFramerateScalingEnabled(degradation_preference_))
- return false;
-
- const int fps_wanted = std::max(kMinFramerateFps, fps);
- if (fps_wanted >= sink_wants_.max_framerate_fps)
- return false;
-
- RTC_LOG(LS_INFO) << "Scaling down framerate: " << fps_wanted;
- sink_wants_.max_framerate_fps = fps_wanted;
- source_->AddOrUpdateSink(video_stream_encoder_,
- GetActiveSinkWantsInternal());
- return true;
- }
-
- bool IncreaseFramerate(int fps) {
- // Called on the encoder task queue.
- rtc::CritScope lock(&crit_);
- if (!source_ || !IsFramerateScalingEnabled(degradation_preference_))
- return false;
-
- const int fps_wanted = std::max(kMinFramerateFps, fps);
- if (fps_wanted <= sink_wants_.max_framerate_fps)
- return false;
-
- RTC_LOG(LS_INFO) << "Scaling up framerate: " << fps_wanted;
- sink_wants_.max_framerate_fps = fps_wanted;
- source_->AddOrUpdateSink(video_stream_encoder_,
- GetActiveSinkWantsInternal());
- return true;
- }
-
- // Used in automatic animation detection for screenshare.
- bool RestrictPixels(int max_pixels) {
- // Called on the encoder task queue.
- rtc::CritScope lock(&crit_);
- if (!source_ || !IsResolutionScalingEnabled(degradation_preference_)) {
- // This can happen since |degradation_preference_| is set on libjingle's
- // worker thread but the adaptation is done on the encoder task queue.
- return false;
- }
- max_pixels_ = max_pixels;
- RTC_LOG(LS_INFO) << "Applying max pixel restriction: " << max_pixels;
- source_->AddOrUpdateSink(video_stream_encoder_,
- GetActiveSinkWantsInternal());
- return true;
- }
-
- private:
- rtc::VideoSinkWants GetActiveSinkWantsInternal()
- RTC_EXCLUSIVE_LOCKS_REQUIRED(&crit_) {
- rtc::VideoSinkWants wants = sink_wants_;
- // Clear any constraints from the current sink wants that don't apply to
- // the used degradation_preference.
- switch (degradation_preference_) {
- case DegradationPreference::BALANCED:
- break;
- case DegradationPreference::MAINTAIN_FRAMERATE:
- wants.max_framerate_fps = std::numeric_limits<int>::max();
- break;
- case DegradationPreference::MAINTAIN_RESOLUTION:
- wants.max_pixel_count = std::numeric_limits<int>::max();
- wants.target_pixel_count.reset();
- break;
- case DegradationPreference::DISABLED:
- wants.max_pixel_count = std::numeric_limits<int>::max();
- wants.target_pixel_count.reset();
- wants.max_framerate_fps = std::numeric_limits<int>::max();
- }
- // Limit to configured max framerate.
- wants.max_framerate_fps = std::min(max_framerate_, wants.max_framerate_fps);
- // Limit resolution due to automatic animation detection for screenshare.
- wants.max_pixel_count = std::min(max_pixels_, wants.max_pixel_count);
-
- return wants;
- }
- rtc::CriticalSection crit_;
- SequenceChecker main_checker_;
- VideoStreamEncoder* const video_stream_encoder_;
- rtc::VideoSinkWants sink_wants_ RTC_GUARDED_BY(&crit_);
- DegradationPreference degradation_preference_ RTC_GUARDED_BY(&crit_);
- rtc::VideoSourceInterface<VideoFrame>* source_ RTC_GUARDED_BY(&crit_);
- int max_framerate_ RTC_GUARDED_BY(&crit_);
- int max_pixels_ RTC_GUARDED_BY(&crit_);
-
- RTC_DISALLOW_COPY_AND_ASSIGN(VideoSourceProxy);
-};
+} // namespace
VideoStreamEncoder::EncoderRateSettings::EncoderRateSettings()
: rate_control(),
@@ -505,18 +208,11 @@ VideoStreamEncoder::VideoStreamEncoder(
TaskQueueFactory* task_queue_factory)
: shutdown_event_(true /* manual_reset */, false),
number_of_cores_(number_of_cores),
- initial_framedrop_(0),
- initial_framedrop_on_bwe_enabled_(
- webrtc::field_trial::IsEnabled(kInitialFramedropFieldTrial)),
- quality_rampup_done_(false),
- quality_rampup_experiment_(QualityRampupExperiment::ParseSettings()),
quality_scaling_experiment_enabled_(QualityScalingExperiment::Enabled()),
- source_proxy_(new VideoSourceProxy(this)),
sink_(nullptr),
settings_(settings),
rate_control_settings_(RateControlSettings::ParseFromFieldTrials()),
- quality_scaler_settings_(QualityScalerSettings::ParseFromFieldTrials()),
- overuse_detector_(std::move(overuse_detector)),
+ encoder_selector_(settings.encoder_factory->GetEncoderSelector()),
encoder_stats_observer_(encoder_stats_observer),
encoder_initialized_(false),
max_framerate_(-1),
@@ -524,23 +220,20 @@ VideoStreamEncoder::VideoStreamEncoder(
pending_encoder_creation_(false),
crop_width_(0),
crop_height_(0),
- encoder_start_bitrate_bps_(0),
- set_start_bitrate_bps_(0),
- set_start_bitrate_time_ms_(0),
- has_seen_first_bwe_drop_(false),
+ encoder_target_bitrate_bps_(absl::nullopt),
max_data_payload_length_(0),
encoder_paused_and_dropped_frame_(false),
was_encode_called_since_last_initialization_(false),
encoder_failed_(false),
clock_(clock),
- degradation_preference_(DegradationPreference::DISABLED),
posted_frames_waiting_for_encode_(0),
last_captured_timestamp_(0),
delta_ntp_internal_ms_(clock_->CurrentNtpInMilliseconds() -
clock_->TimeInMilliseconds()),
last_frame_log_ms_(clock_->TimeInMilliseconds()),
captured_frame_count_(0),
- dropped_frame_count_(0),
+ dropped_frame_cwnd_pushback_count_(0),
+ dropped_frame_encoder_block_count_(0),
pending_frame_post_time_us_(0),
accumulated_update_rect_{0, 0, 0, 0},
accumulated_update_rect_is_valid_(true),
@@ -552,6 +245,7 @@ VideoStreamEncoder::VideoStreamEncoder(
force_disable_frame_dropper_(false),
input_framerate_(kFrameRateAvergingWindowSizeMs, 1000),
pending_frame_drops_(0),
+ cwnd_frame_counter_(0),
next_frame_types_(1, VideoFrameType::kVideoFrameDelta),
frame_encode_metadata_writer_(this),
experiment_groups_(GetExperimentGroups()),
@@ -560,11 +254,20 @@ VideoStreamEncoder::VideoStreamEncoder(
automatic_animation_detection_experiment_(
ParseAutomatincAnimationDetectionFieldTrial()),
encoder_switch_requested_(false),
+ video_source_sink_controller_(std::make_unique<VideoSourceSinkController>(
+ /*sink=*/this,
+ /*source=*/nullptr)),
+ resource_adaptation_processor_(
+ std::make_unique<ResourceAdaptationProcessor>(
+ clock_,
+ settings_.experiment_cpu_load_estimator,
+ std::move(overuse_detector),
+ encoder_stats_observer,
+ /*adaptation_listener=*/this)),
encoder_queue_(task_queue_factory->CreateTaskQueue(
"EncoderQueue",
TaskQueueFactory::Priority::NORMAL)) {
RTC_DCHECK(encoder_stats_observer);
- RTC_DCHECK(overuse_detector_);
RTC_DCHECK_GE(number_of_cores, 1);
for (auto& state : encoder_buffer_state_)
@@ -579,14 +282,13 @@ VideoStreamEncoder::~VideoStreamEncoder() {
void VideoStreamEncoder::Stop() {
RTC_DCHECK_RUN_ON(&thread_checker_);
- source_proxy_->SetSource(nullptr, DegradationPreference());
+ video_source_sink_controller_->SetSource(nullptr);
encoder_queue_.PostTask([this] {
RTC_DCHECK_RUN_ON(&encoder_queue_);
- overuse_detector_->StopCheckForOveruse();
+ resource_adaptation_processor_->StopResourceAdaptation();
rate_allocator_ = nullptr;
bitrate_observer_ = nullptr;
ReleaseEncoder();
- quality_scaler_ = nullptr;
shutdown_event_.Set();
});
@@ -619,37 +321,21 @@ void VideoStreamEncoder::SetSource(
rtc::VideoSourceInterface<VideoFrame>* source,
const DegradationPreference& degradation_preference) {
RTC_DCHECK_RUN_ON(&thread_checker_);
- source_proxy_->SetSource(source, degradation_preference);
- encoder_queue_.PostTask([this, degradation_preference] {
+ video_source_sink_controller_->SetSource(source);
+ encoder_queue_.PostTask([this, source, degradation_preference] {
RTC_DCHECK_RUN_ON(&encoder_queue_);
- if (degradation_preference_ != degradation_preference) {
- // Reset adaptation state, so that we're not tricked into thinking there's
- // an already pending request of the same type.
- last_adaptation_request_.reset();
- if (degradation_preference == DegradationPreference::BALANCED ||
- degradation_preference_ == DegradationPreference::BALANCED) {
- // TODO(asapersson): Consider removing |adapt_counters_| map and use one
- // AdaptCounter for all modes.
- source_proxy_->ResetPixelFpsCount();
- adapt_counters_.clear();
- }
- }
- degradation_preference_ = degradation_preference;
-
+ resource_adaptation_processor_->SetHasInputVideo(source);
+ resource_adaptation_processor_->SetDegradationPreference(
+ degradation_preference);
if (encoder_)
- ConfigureQualityScaler(encoder_->GetEncoderInfo());
-
- if (!IsFramerateScalingEnabled(degradation_preference) &&
- max_framerate_ != -1) {
- // If frame rate scaling is no longer allowed, remove any potential
- // allowance for longer frame intervals.
- overuse_detector_->OnTargetFramerateUpdated(max_framerate_);
- }
+ resource_adaptation_processor_->ConfigureQualityScaler(
+ encoder_->GetEncoderInfo());
});
}
void VideoStreamEncoder::SetSink(EncoderSink* sink, bool rotation_applied) {
- source_proxy_->SetWantsRotationApplied(rotation_applied);
+ video_source_sink_controller_->SetRotationApplied(rotation_applied);
+ video_source_sink_controller_->PushSourceSinkSettings();
encoder_queue_.PostTask([this, sink] {
RTC_DCHECK_RUN_ON(&encoder_queue_);
sink_ = sink;
@@ -659,9 +345,11 @@ void VideoStreamEncoder::SetSink(EncoderSink* sink, bool rotation_applied) {
void VideoStreamEncoder::SetStartBitrate(int start_bitrate_bps) {
encoder_queue_.PostTask([this, start_bitrate_bps] {
RTC_DCHECK_RUN_ON(&encoder_queue_);
- encoder_start_bitrate_bps_ = start_bitrate_bps;
- set_start_bitrate_bps_ = start_bitrate_bps;
- set_start_bitrate_time_ms_ = clock_->TimeInMilliseconds();
+ encoder_target_bitrate_bps_ =
+ start_bitrate_bps != 0 ? absl::optional<uint32_t>(start_bitrate_bps)
+ : absl::nullopt;
+ resource_adaptation_processor_->SetStartBitrate(
+ DataRate::BitsPerSec(start_bitrate_bps));
});
}
@@ -691,50 +379,14 @@ void VideoStreamEncoder::ConfigureEncoder(VideoEncoderConfig config,
codec_info_ = settings_.encoder_factory->QueryVideoEncoder(
encoder_config_.video_format);
if (HasInternalSource()) {
- last_frame_info_ = VideoFrameInfo(176, 144, false);
+ last_frame_info_ = VideoFrameInfo(kDefaultInputPixelsWidth,
+ kDefaultInputPixelsHeight, false);
ReconfigureEncoder();
}
}
});
}
-static absl::optional<VideoEncoder::ResolutionBitrateLimits>
-GetEncoderBitrateLimits(const VideoEncoder::EncoderInfo& encoder_info,
- int frame_size_pixels) {
- std::vector<VideoEncoder::ResolutionBitrateLimits> bitrate_limits =
- encoder_info.resolution_bitrate_limits;
-
- // Sort the list of bitrate limits by resolution.
- sort(bitrate_limits.begin(), bitrate_limits.end(),
- [](const VideoEncoder::ResolutionBitrateLimits& lhs,
- const VideoEncoder::ResolutionBitrateLimits& rhs) {
- return lhs.frame_size_pixels < rhs.frame_size_pixels;
- });
-
- for (size_t i = 0; i < bitrate_limits.size(); ++i) {
- RTC_DCHECK_GT(bitrate_limits[i].min_bitrate_bps, 0);
- RTC_DCHECK_GT(bitrate_limits[i].min_start_bitrate_bps, 0);
- RTC_DCHECK_GE(bitrate_limits[i].max_bitrate_bps,
- bitrate_limits[i].min_bitrate_bps);
- if (i > 0) {
- // The bitrate limits aren't expected to decrease with resolution.
- RTC_DCHECK_GE(bitrate_limits[i].min_bitrate_bps,
- bitrate_limits[i - 1].min_bitrate_bps);
- RTC_DCHECK_GE(bitrate_limits[i].min_start_bitrate_bps,
- bitrate_limits[i - 1].min_start_bitrate_bps);
- RTC_DCHECK_GE(bitrate_limits[i].max_bitrate_bps,
- bitrate_limits[i - 1].max_bitrate_bps);
- }
-
- if (bitrate_limits[i].frame_size_pixels >= frame_size_pixels) {
- return absl::optional<VideoEncoder::ResolutionBitrateLimits>(
- bitrate_limits[i]);
- }
- }
-
- return absl::nullopt;
-}
-
// TODO(bugs.webrtc.org/8807): Currently this always does a hard
// reconfiguration, but this isn't always necessary. Add in logic to only update
// the VideoBitrateAllocator and call OnEncoderConfigurationChanged with a
@@ -742,7 +394,8 @@ GetEncoderBitrateLimits(const VideoEncoder::EncoderInfo& encoder_info,
void VideoStreamEncoder::ReconfigureEncoder() {
RTC_DCHECK(pending_encoder_reconfiguration_);
- if (encoder_switch_experiment_.IsPixelCountBelowThreshold(
+ if (!encoder_selector_ &&
+ encoder_switch_experiment_.IsPixelCountBelowThreshold(
last_frame_info_->width * last_frame_info_->height) &&
!encoder_switch_requested_ && settings_.encoder_switch_request_callback) {
EncoderSwitchRequestCallback::Config conf;
@@ -758,6 +411,16 @@ void VideoStreamEncoder::ReconfigureEncoder() {
encoder_config_.video_stream_factory->CreateEncoderStreams(
last_frame_info_->width, last_frame_info_->height, encoder_config_);
+ // Check that the higher layers do not try to set number of temporal layers
+ // to less than 1.
+ // TODO(brandtr): Get rid of the wrapping optional as it serves no purpose
+ // at this layer.
+#if RTC_DCHECK_IS_ON
+ for (const auto& stream : streams) {
+ RTC_DCHECK_GE(stream.num_temporal_layers.value_or(1), 1);
+ }
+#endif
+
// TODO(ilnik): If configured resolution is significantly less than provided,
// e.g. because there are not enough SSRCs for all simulcast streams,
// signal new resolutions via SinkWants to video source.
@@ -789,6 +452,10 @@ void VideoStreamEncoder::ReconfigureEncoder() {
// or just discard incoming frames?
RTC_CHECK(encoder_);
+ if (encoder_selector_) {
+ encoder_selector_->OnCurrentEncoder(encoder_config_.video_format);
+ }
+
encoder_->SetFecControllerOverride(fec_controller_override_);
codec_info_ = settings_.encoder_factory->QueryVideoEncoder(
@@ -797,21 +464,50 @@ void VideoStreamEncoder::ReconfigureEncoder() {
encoder_reset_required = true;
}
- encoder_bitrate_limits_ = GetEncoderBitrateLimits(
- encoder_->GetEncoderInfo(),
- last_frame_info_->width * last_frame_info_->height);
+ encoder_bitrate_limits_ =
+ encoder_->GetEncoderInfo().GetEncoderBitrateLimitsForResolution(
+ last_frame_info_->width * last_frame_info_->height);
if (streams.size() == 1 && encoder_bitrate_limits_) {
- // Use bitrate limits recommended by encoder only if app didn't set any of
- // them.
- if (encoder_config_.max_bitrate_bps <= 0 &&
- (encoder_config_.simulcast_layers.empty() ||
- encoder_config_.simulcast_layers[0].min_bitrate_bps <= 0)) {
- streams.back().min_bitrate_bps = encoder_bitrate_limits_->min_bitrate_bps;
- streams.back().max_bitrate_bps = encoder_bitrate_limits_->max_bitrate_bps;
+    // Bitrate limits can be set by the app (in SDP or RtpEncodingParameters)
+    // and/or provided by the encoder. When both sets of limits are present,
+    // the final set is derived as their intersection.
+ int min_bitrate_bps;
+ if (encoder_config_.simulcast_layers.empty() ||
+ encoder_config_.simulcast_layers[0].min_bitrate_bps <= 0) {
+ min_bitrate_bps = encoder_bitrate_limits_->min_bitrate_bps;
+ } else {
+ min_bitrate_bps = std::max(encoder_bitrate_limits_->min_bitrate_bps,
+ streams.back().min_bitrate_bps);
+ }
+
+ int max_bitrate_bps;
+ // We don't check encoder_config_.simulcast_layers[0].max_bitrate_bps
+ // here since encoder_config_.max_bitrate_bps is derived from it (as
+ // well as from other inputs).
+ if (encoder_config_.max_bitrate_bps <= 0) {
+ max_bitrate_bps = encoder_bitrate_limits_->max_bitrate_bps;
+ } else {
+ max_bitrate_bps = std::min(encoder_bitrate_limits_->max_bitrate_bps,
+ streams.back().max_bitrate_bps);
+ }
+
+ if (min_bitrate_bps < max_bitrate_bps) {
+ streams.back().min_bitrate_bps = min_bitrate_bps;
+ streams.back().max_bitrate_bps = max_bitrate_bps;
streams.back().target_bitrate_bps =
std::min(streams.back().target_bitrate_bps,
encoder_bitrate_limits_->max_bitrate_bps);
+ } else {
+ RTC_LOG(LS_WARNING) << "Bitrate limits provided by encoder"
+ << " (min="
+ << encoder_bitrate_limits_->min_bitrate_bps
+ << ", max="
+                          << encoder_bitrate_limits_->max_bitrate_bps
+ << ") do not intersect with limits set by app"
+ << " (min=" << streams.back().min_bitrate_bps
+ << ", max=" << encoder_config_.max_bitrate_bps
+ << "). The app bitrate limits will be used.";
}
}
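
Worked example of the intersection rule above, with made-up numbers: if the
app sets min=300 kbps and leaves max unset while the encoder recommends
min=150 kbps and max=1700 kbps, the stream ends up with
min = max(150k, 300k) = 300 kbps and max = 1700 kbps. If the app's min
exceeded the encoder's max, the two ranges would not intersect, the warning
would be logged, and the app limits would be kept unchanged.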
@@ -820,18 +516,6 @@ void VideoStreamEncoder::ReconfigureEncoder() {
RTC_LOG(LS_ERROR) << "Failed to create encoder configuration.";
}
- // Set min_bitrate_bps, max_bitrate_bps, and max padding bit rate for VP9.
- if (encoder_config_.codec_type == kVideoCodecVP9) {
- // Lower max bitrate to the level codec actually can produce.
- streams[0].max_bitrate_bps =
- std::min(streams[0].max_bitrate_bps,
- SvcRateAllocator::GetMaxBitrate(codec).bps<int>());
- streams[0].min_bitrate_bps = codec.spatialLayers[0].minBitrate * 1000;
- // target_bitrate_bps specifies the maximum padding bitrate.
- streams[0].target_bitrate_bps =
- SvcRateAllocator::GetPaddingBitrate(codec).bps<int>();
- }
-
char log_stream_buf[4 * 1024];
rtc::SimpleStringBuilder log_stream(log_stream_buf);
log_stream << "ReconfigureEncoder:\n";
@@ -843,6 +527,7 @@ void VideoStreamEncoder::ReconfigureEncoder() {
<< " min_bps: " << codec.simulcastStream[i].minBitrate
<< " target_bps: " << codec.simulcastStream[i].targetBitrate
<< " max_bps: " << codec.simulcastStream[i].maxBitrate
+ << " max_fps: " << codec.simulcastStream[i].maxFramerate
<< " max_qp: " << codec.simulcastStream[i].qpMax
<< " num_tl: " << codec.simulcastStream[i].numberOfTemporalLayers
<< " active: "
@@ -866,8 +551,8 @@ void VideoStreamEncoder::ReconfigureEncoder() {
}
RTC_LOG(LS_INFO) << log_stream.str();
- codec.startBitrate =
- std::max(encoder_start_bitrate_bps_ / 1000, codec.minBitrate);
+ codec.startBitrate = std::max(encoder_target_bitrate_bps_.value_or(0) / 1000,
+ codec.minBitrate);
codec.startBitrate = std::min(codec.startBitrate, codec.maxBitrate);
codec.expect_encode_from_texture = last_frame_info_->is_texture;
// Make sure the start bit rate is sane...
@@ -879,7 +564,14 @@ void VideoStreamEncoder::ReconfigureEncoder() {
for (const auto& stream : streams) {
max_framerate = std::max(stream.max_framerate, max_framerate);
}
- source_proxy_->SetMaxFramerate(max_framerate);
+ int alignment = encoder_->GetEncoderInfo().requested_resolution_alignment;
+ if (max_framerate !=
+ video_source_sink_controller_->frame_rate_upper_limit() ||
+ alignment != video_source_sink_controller_->resolution_alignment()) {
+ video_source_sink_controller_->SetFrameRateUpperLimit(max_framerate);
+ video_source_sink_controller_->SetResolutionAlignment(alignment);
+ video_source_sink_controller_->PushSourceSinkSettings();
+ }
if (codec.maxBitrate == 0) {
// max is one bit per pixel
@@ -910,8 +602,6 @@ void VideoStreamEncoder::ReconfigureEncoder() {
send_codec_ = codec;
encoder_switch_experiment_.SetCodec(send_codec_.codecType);
- quality_rampup_experiment_.SetMaxBitrate(
- last_frame_info_->width * last_frame_info_->height, codec.maxBitrate);
// Keep the same encoder, as long as the video_format is unchanged.
// Encoder creation block is split in two since EncoderInfo needed to start
@@ -945,6 +635,9 @@ void VideoStreamEncoder::ReconfigureEncoder() {
was_encode_called_since_last_initialization_ = false;
}
+ resource_adaptation_processor_->SetEncoderSettings(EncoderSettings(
+ encoder_->GetEncoderInfo(), encoder_config_.Copy(), send_codec_));
+
if (success) {
next_frame_types_.clear();
next_frame_types_.resize(
@@ -960,12 +653,8 @@ void VideoStreamEncoder::ReconfigureEncoder() {
}
if (pending_encoder_creation_) {
- overuse_detector_->StopCheckForOveruse();
- overuse_detector_->StartCheckForOveruse(
- &encoder_queue_,
- GetCpuOveruseOptions(
- settings_, encoder_->GetEncoderInfo().is_hardware_accelerated),
- this);
+ resource_adaptation_processor_->StopResourceAdaptation();
+ resource_adaptation_processor_->StartResourceAdaptation(this);
pending_encoder_creation_ = false;
}
@@ -1016,68 +705,29 @@ void VideoStreamEncoder::ReconfigureEncoder() {
pending_encoder_reconfiguration_ = false;
- sink_->OnEncoderConfigurationChanged(
- std::move(streams), encoder_config_.content_type,
- encoder_config_.min_transmit_bitrate_bps);
-
- // Get the current target framerate, ie the maximum framerate as specified by
- // the current codec configuration, or any limit imposed by cpu adaption in
- // maintain-resolution or balanced mode. This is used to make sure overuse
- // detection doesn't needlessly trigger in low and/or variable framerate
- // scenarios.
- int target_framerate = std::min(
- max_framerate_, source_proxy_->GetActiveSinkWants().max_framerate_fps);
- overuse_detector_->OnTargetFramerateUpdated(target_framerate);
-
- ConfigureQualityScaler(info);
-}
-
-void VideoStreamEncoder::ConfigureQualityScaler(
- const VideoEncoder::EncoderInfo& encoder_info) {
- RTC_DCHECK_RUN_ON(&encoder_queue_);
- const auto scaling_settings = encoder_info.scaling_settings;
- const bool quality_scaling_allowed =
- IsResolutionScalingEnabled(degradation_preference_) &&
- scaling_settings.thresholds;
-
- if (quality_scaling_allowed) {
- if (quality_scaler_ == nullptr) {
- // Quality scaler has not already been configured.
-
- // Use experimental thresholds if available.
- absl::optional<VideoEncoder::QpThresholds> experimental_thresholds;
- if (quality_scaling_experiment_enabled_) {
- experimental_thresholds = QualityScalingExperiment::GetQpThresholds(
- encoder_config_.codec_type);
- }
- // Since the interface is non-public, std::make_unique can't do this
- // upcast.
- AdaptationObserverInterface* observer = this;
- quality_scaler_ = std::make_unique<QualityScaler>(
- &encoder_queue_, observer,
- experimental_thresholds ? *experimental_thresholds
- : *(scaling_settings.thresholds));
- has_seen_first_significant_bwe_change_ = false;
- initial_framedrop_ = 0;
- }
- } else {
- quality_scaler_.reset(nullptr);
- initial_framedrop_ = kMaxInitialFramedrop;
+ bool is_svc = false;
+ // Set min_bitrate_bps, max_bitrate_bps, and max padding bit rate for VP9
+ // and leave only one stream containing all necessary information.
+ if (encoder_config_.codec_type == kVideoCodecVP9) {
+ // Lower max bitrate to the level codec actually can produce.
+ streams[0].max_bitrate_bps =
+ std::min(streams[0].max_bitrate_bps,
+ SvcRateAllocator::GetMaxBitrate(codec).bps<int>());
+ streams[0].min_bitrate_bps = codec.spatialLayers[0].minBitrate * 1000;
+ // target_bitrate_bps specifies the maximum padding bitrate.
+ streams[0].target_bitrate_bps =
+ SvcRateAllocator::GetPaddingBitrate(codec).bps<int>();
+ streams[0].width = streams.back().width;
+ streams[0].height = streams.back().height;
+ is_svc = codec.VP9()->numberOfSpatialLayers > 1;
+ streams.resize(1);
}
- if (degradation_preference_ == DegradationPreference::BALANCED &&
- quality_scaler_ && last_frame_info_) {
- absl::optional<VideoEncoder::QpThresholds> thresholds =
- balanced_settings_.GetQpThresholds(encoder_config_.codec_type,
- last_frame_info_->pixel_count());
- if (thresholds) {
- quality_scaler_->SetQpThresholds(*thresholds);
- }
- }
+ sink_->OnEncoderConfigurationChanged(
+ std::move(streams), is_svc, encoder_config_.content_type,
+ encoder_config_.min_transmit_bitrate_bps);
- encoder_stats_observer_->OnAdaptationChanged(
- VideoStreamEncoderObserver::AdaptationReason::kNone,
- GetActiveCounts(kCpu), GetActiveCounts(kQuality));
+ resource_adaptation_processor_->ConfigureQualityScaler(info);
}
void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame) {
@@ -1145,26 +795,40 @@ void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame) {
posted_frames_waiting_for_encode_.fetch_sub(1);
RTC_DCHECK_GT(posted_frames_waiting_for_encode, 0);
CheckForAnimatedContent(incoming_frame, post_time_us);
- if (posted_frames_waiting_for_encode == 1) {
+ bool cwnd_frame_drop =
+ cwnd_frame_drop_interval_ &&
+ (cwnd_frame_counter_++ % cwnd_frame_drop_interval_.value() == 0);
+ if (posted_frames_waiting_for_encode == 1 && !cwnd_frame_drop) {
MaybeEncodeVideoFrame(incoming_frame, post_time_us);
} else {
- // There is a newer frame in flight. Do not encode this frame.
- RTC_LOG(LS_VERBOSE)
- << "Incoming frame dropped due to that the encoder is blocked.";
- ++dropped_frame_count_;
- encoder_stats_observer_->OnFrameDropped(
- VideoStreamEncoderObserver::DropReason::kEncoderQueue);
+ if (cwnd_frame_drop) {
+            // Frame dropped by congestion window pushback. Do not encode
+            // this frame.
+ ++dropped_frame_cwnd_pushback_count_;
+ encoder_stats_observer_->OnFrameDropped(
+ VideoStreamEncoderObserver::DropReason::kCongestionWindow);
+ } else {
+ // There is a newer frame in flight. Do not encode this frame.
+ RTC_LOG(LS_VERBOSE)
+ << "Incoming frame dropped due to that the encoder is blocked.";
+ ++dropped_frame_encoder_block_count_;
+ encoder_stats_observer_->OnFrameDropped(
+ VideoStreamEncoderObserver::DropReason::kEncoderQueue);
+ }
accumulated_update_rect_.Union(incoming_frame.update_rect());
accumulated_update_rect_is_valid_ &= incoming_frame.has_update_rect();
}
if (log_stats) {
RTC_LOG(LS_INFO) << "Number of frames: captured "
<< captured_frame_count_
+ << ", dropped (due to congestion window pushback) "
+ << dropped_frame_cwnd_pushback_count_
<< ", dropped (due to encoder blocked) "
- << dropped_frame_count_ << ", interval_ms "
- << kFrameLogIntervalMs;
+ << dropped_frame_encoder_block_count_
+ << ", interval_ms " << kFrameLogIntervalMs;
captured_frame_count_ = 0;
- dropped_frame_count_ = 0;
+ dropped_frame_cwnd_pushback_count_ = 0;
+ dropped_frame_encoder_block_count_ = 0;
}
});
}
@@ -1237,9 +901,10 @@ VideoStreamEncoder::UpdateBitrateAllocationAndNotifyObserver(
// target in order to sustain the min bitrate of the video codec. In this
// case, make sure the bandwidth allocation is at least equal the allocation
// as that is part of the document contract for that field.
- new_rate_settings.rate_control.bandwidth_allocation = std::max(
- new_rate_settings.rate_control.bandwidth_allocation,
- DataRate::bps(new_rate_settings.rate_control.bitrate.get_sum_bps()));
+ new_rate_settings.rate_control.bandwidth_allocation =
+ std::max(new_rate_settings.rate_control.bandwidth_allocation,
+ DataRate::BitsPerSec(
+ new_rate_settings.rate_control.bitrate.get_sum_bps()));
if (bitrate_adjuster_) {
VideoBitrateAllocation adjusted_allocation =
@@ -1299,12 +964,14 @@ void VideoStreamEncoder::SetEncoderRates(
frame_encode_metadata_writer_.OnSetRates(
rate_settings.rate_control.bitrate,
static_cast<uint32_t>(rate_settings.rate_control.framerate_fps + 0.5));
+ resource_adaptation_processor_->SetEncoderRates(rate_settings.rate_control);
}
}
void VideoStreamEncoder::MaybeEncodeVideoFrame(const VideoFrame& video_frame,
int64_t time_when_posted_us) {
RTC_DCHECK_RUN_ON(&encoder_queue_);
+ resource_adaptation_processor_->OnFrame(video_frame);
if (!last_frame_info_ || video_frame.width() != last_frame_info_->width ||
video_frame.height() != last_frame_info_->height ||
@@ -1366,18 +1033,7 @@ void VideoStreamEncoder::MaybeEncodeVideoFrame(const VideoFrame& video_frame,
if (DropDueToSize(video_frame.size())) {
RTC_LOG(LS_INFO) << "Dropping frame. Too large for target bitrate.";
- int fps_count = GetConstAdaptCounter().FramerateCount(kQuality);
- int res_count = GetConstAdaptCounter().ResolutionCount(kQuality);
- AdaptDown(kQuality);
- if (degradation_preference_ == DegradationPreference::BALANCED &&
- GetConstAdaptCounter().FramerateCount(kQuality) > fps_count) {
- // Adapt framerate in same step as resolution.
- AdaptDown(kQuality);
- }
- if (GetConstAdaptCounter().ResolutionCount(kQuality) > res_count) {
- encoder_stats_observer_->OnInitialQualityResolutionAdaptDown();
- }
- ++initial_framedrop_;
+ resource_adaptation_processor_->OnFrameDroppedDueToSize();
// Storing references to a native buffer risks blocking frame capture.
if (video_frame.video_frame_buffer()->type() !=
VideoFrameBuffer::Type::kNative) {
@@ -1391,17 +1047,7 @@ void VideoStreamEncoder::MaybeEncodeVideoFrame(const VideoFrame& video_frame,
}
return;
}
- initial_framedrop_ = kMaxInitialFramedrop;
-
- if (!quality_rampup_done_ && TryQualityRampup(now_ms) &&
- GetConstAdaptCounter().ResolutionCount(kQuality) > 0 &&
- GetConstAdaptCounter().TotalCount(kCpu) == 0) {
- RTC_LOG(LS_INFO) << "Reset quality limitations.";
- last_adaptation_request_.reset();
- source_proxy_->ResetPixelFpsCount();
- adapt_counters_.clear();
- quality_rampup_done_ = true;
- }
+ resource_adaptation_processor_->OnMaybeEncodeFrame();
if (EncoderPaused()) {
// Storing references to a native buffer risks blocking frame capture.
@@ -1433,7 +1079,7 @@ void VideoStreamEncoder::MaybeEncodeVideoFrame(const VideoFrame& video_frame,
if (frame_dropping_enabled && frame_dropper_.DropFrame()) {
RTC_LOG(LS_VERBOSE)
<< "Drop Frame: "
- << "target bitrate "
+ "target bitrate "
<< (last_encoder_rate_settings_
? last_encoder_rate_settings_->encoder_target.bps()
: 0)
@@ -1472,6 +1118,8 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
}
if (encoder_info_ != info) {
+ resource_adaptation_processor_->SetEncoderSettings(EncoderSettings(
+ encoder_->GetEncoderInfo(), encoder_config_.Copy(), send_codec_));
RTC_LOG(LS_INFO) << "Encoder settings changed from "
<< encoder_info_.ToString() << " to " << info.ToString();
}
@@ -1583,7 +1231,8 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", video_frame.render_time_ms(),
"Encode");
- overuse_detector_->FrameCaptured(out_frame, time_when_posted_us);
+ resource_adaptation_processor_->OnEncodeStarted(out_frame,
+ time_when_posted_us);
RTC_DCHECK_LE(send_codec_.width, out_frame.width());
RTC_DCHECK_LE(send_codec_.height, out_frame.height());
@@ -1606,9 +1255,17 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
if (encode_status == WEBRTC_VIDEO_CODEC_ENCODER_FAILURE) {
RTC_LOG(LS_ERROR) << "Encoder failed, failing encoder format: "
<< encoder_config_.video_format.ToString();
+
if (settings_.encoder_switch_request_callback) {
- encoder_failed_ = true;
- settings_.encoder_switch_request_callback->RequestEncoderFallback();
+ if (encoder_selector_) {
+ if (auto encoder = encoder_selector_->OnEncoderBroken()) {
+ settings_.encoder_switch_request_callback->RequestEncoderSwitch(
+ *encoder);
+ }
+ } else {
+ encoder_failed_ = true;
+ settings_.encoder_switch_request_callback->RequestEncoderFallback();
+ }
} else {
RTC_LOG(LS_ERROR)
<< "Encoder failed but no encoder fallback callback is registered";
@@ -1708,6 +1365,37 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
RTC_CHECK(videocontenttypehelpers::SetSimulcastId(
&image_copy.content_type_, static_cast<uint8_t>(spatial_idx + 1)));
+  // Currently the internal quality scaler is used for VP9 instead of the
+  // WebRTC QP scaler (in the no-SVC case, or if only a single spatial layer
+  // is encoded). This has to be explicitly detected and reported to the
+  // adaptation metrics. Post a task because |send_codec_| requires the
+  // |encoder_queue_| lock.
+ unsigned int image_width = image_copy._encodedWidth;
+ unsigned int image_height = image_copy._encodedHeight;
+ VideoCodecType codec = codec_specific_info
+ ? codec_specific_info->codecType
+ : VideoCodecType::kVideoCodecGeneric;
+ encoder_queue_.PostTask([this, codec, image_width, image_height] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ if (codec == VideoCodecType::kVideoCodecVP9 &&
+ send_codec_.VP9()->automaticResizeOn) {
+ unsigned int expected_width = send_codec_.width;
+ unsigned int expected_height = send_codec_.height;
+ int num_active_layers = 0;
+ for (int i = 0; i < send_codec_.VP9()->numberOfSpatialLayers; ++i) {
+ if (send_codec_.spatialLayers[i].active) {
+ ++num_active_layers;
+ expected_width = send_codec_.spatialLayers[i].width;
+ expected_height = send_codec_.spatialLayers[i].height;
+ }
+ }
+ RTC_DCHECK_LE(num_active_layers, 1)
+ << "VP9 quality scaling is enabled for "
+ "SVC with several active layers.";
+ encoder_stats_observer_->OnEncoderInternalScalerUpdate(
+ image_width < expected_width || image_height < expected_height);
+ }
+ });
+
  // Encoded is called on whatever thread the real encoder implementation runs
// on. In the case of hardware encoders, there might be several encoders
// running in parallel on different threads.
@@ -1772,7 +1460,7 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
// We are only interested in propagating the meta-data about the image, not
// encoded data itself, to the post encode function. Since we cannot be sure
// the pointer will still be valid when run on the task queue, set it to null.
- DataSize frame_size = DataSize::bytes(image_copy.size());
+ DataSize frame_size = DataSize::Bytes(image_copy.size());
image_copy.ClearEncodedData();
int temporal_index = 0;
@@ -1810,51 +1498,88 @@ void VideoStreamEncoder::OnDroppedFrame(DropReason reason) {
case DropReason::kDroppedByMediaOptimizations:
encoder_stats_observer_->OnFrameDropped(
VideoStreamEncoderObserver::DropReason::kMediaOptimization);
- encoder_queue_.PostTask([this] {
- RTC_DCHECK_RUN_ON(&encoder_queue_);
- if (quality_scaler_)
- quality_scaler_->ReportDroppedFrameByMediaOpt();
- });
break;
case DropReason::kDroppedByEncoder:
encoder_stats_observer_->OnFrameDropped(
VideoStreamEncoderObserver::DropReason::kEncoder);
- encoder_queue_.PostTask([this] {
- RTC_DCHECK_RUN_ON(&encoder_queue_);
- if (quality_scaler_)
- quality_scaler_->ReportDroppedFrameByEncoder();
- });
break;
}
sink_->OnDroppedFrame(reason);
+ encoder_queue_.PostTask([this, reason] {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ resource_adaptation_processor_->OnFrameDropped(reason);
+ });
+}
+
+DataRate VideoStreamEncoder::UpdateTargetBitrate(DataRate target_bitrate,
+ double cwnd_reduce_ratio) {
+ RTC_DCHECK_RUN_ON(&encoder_queue_);
+ DataRate updated_target_bitrate = target_bitrate;
+
+  // Drop frames when the congestion window pushback ratio is larger than 1
+  // percent and the target bitrate is larger than the codec min bitrate.
+  // A target_bitrate of 0 means the codec is paused, so skip frame dropping.
+ if (cwnd_reduce_ratio > 0.01 && target_bitrate.bps() > 0 &&
+ target_bitrate.bps() > send_codec_.minBitrate * 1000) {
+ int reduce_bitrate_bps = std::min(
+ static_cast<int>(target_bitrate.bps() * cwnd_reduce_ratio),
+ static_cast<int>(target_bitrate.bps() - send_codec_.minBitrate * 1000));
+ if (reduce_bitrate_bps > 0) {
+      // At maximum, the congestion window can drop 1/2 of the frames.
+ cwnd_frame_drop_interval_ = std::max(
+ 2, static_cast<int>(target_bitrate.bps() / reduce_bitrate_bps));
+ // Reduce target bitrate accordingly.
+ updated_target_bitrate =
+ target_bitrate - (target_bitrate / cwnd_frame_drop_interval_.value());
+ return updated_target_bitrate;
+ }
+ }
+ cwnd_frame_drop_interval_.reset();
+ return updated_target_bitrate;
}
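A worked example of the arithmetic in UpdateTargetBitrate(), with illustrative numbers that are not taken from the patch:

    // target_bitrate = 1000000 bps, send_codec_.minBitrate = 30 kbps,
    // cwnd_reduce_ratio = 0.10:
    //   reduce_bitrate_bps        = min(1000000 * 0.10, 1000000 - 30000)
    //                             = 100000
    //   cwnd_frame_drop_interval_ = max(2, 1000000 / 100000) = 10
    //   updated_target_bitrate    = 1000000 - 1000000 / 10 = 900000 bps
    // That is, one frame in ten is dropped and the target is cut by 10%.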
void VideoStreamEncoder::OnBitrateUpdated(DataRate target_bitrate,
DataRate stable_target_bitrate,
DataRate link_allocation,
uint8_t fraction_lost,
- int64_t round_trip_time_ms) {
+ int64_t round_trip_time_ms,
+ double cwnd_reduce_ratio) {
RTC_DCHECK_GE(link_allocation, target_bitrate);
if (!encoder_queue_.IsCurrent()) {
encoder_queue_.PostTask([this, target_bitrate, stable_target_bitrate,
- link_allocation, fraction_lost,
- round_trip_time_ms] {
- OnBitrateUpdated(target_bitrate, stable_target_bitrate, link_allocation,
- fraction_lost, round_trip_time_ms);
+ link_allocation, fraction_lost, round_trip_time_ms,
+ cwnd_reduce_ratio] {
+ DataRate updated_target_bitrate =
+ UpdateTargetBitrate(target_bitrate, cwnd_reduce_ratio);
+ OnBitrateUpdated(updated_target_bitrate, stable_target_bitrate,
+ link_allocation, fraction_lost, round_trip_time_ms,
+ cwnd_reduce_ratio);
});
return;
}
RTC_DCHECK_RUN_ON(&encoder_queue_);
- if (encoder_switch_experiment_.IsBitrateBelowThreshold(target_bitrate) &&
- settings_.encoder_switch_request_callback && !encoder_switch_requested_) {
- EncoderSwitchRequestCallback::Config conf;
- conf.codec_name = encoder_switch_experiment_.to_codec;
- conf.param = encoder_switch_experiment_.to_param;
- conf.value = encoder_switch_experiment_.to_value;
- settings_.encoder_switch_request_callback->RequestEncoderSwitch(conf);
+ const bool video_is_suspended = target_bitrate == DataRate::Zero();
+ const bool video_suspension_changed = video_is_suspended != EncoderPaused();
- encoder_switch_requested_ = true;
+ if (!video_is_suspended && settings_.encoder_switch_request_callback) {
+ if (encoder_selector_) {
+ if (auto encoder =
+ encoder_selector_->OnAvailableBitrate(link_allocation)) {
+ settings_.encoder_switch_request_callback->RequestEncoderSwitch(
+ *encoder);
+ }
+ } else if (encoder_switch_experiment_.IsBitrateBelowThreshold(
+ target_bitrate) &&
+ !encoder_switch_requested_) {
+ EncoderSwitchRequestCallback::Config conf;
+ conf.codec_name = encoder_switch_experiment_.to_codec;
+ conf.param = encoder_switch_experiment_.to_param;
+ conf.value = encoder_switch_experiment_.to_value;
+ settings_.encoder_switch_request_callback->RequestEncoderSwitch(conf);
+
+ encoder_switch_requested_ = true;
+ }
}
RTC_DCHECK(sink_) << "sink_ must be set before the encoder is active.";
@@ -1865,35 +1590,6 @@ void VideoStreamEncoder::OnBitrateUpdated(DataRate target_bitrate,
<< " packet loss " << static_cast<int>(fraction_lost)
<< " rtt " << round_trip_time_ms;
- // On significant changes to BWE at the start of the call,
- // enable frame drops to quickly react to jumps in available bandwidth.
- if (encoder_start_bitrate_bps_ != 0 &&
- !has_seen_first_significant_bwe_change_ && quality_scaler_ &&
- initial_framedrop_on_bwe_enabled_ &&
- abs_diff(target_bitrate.bps(), encoder_start_bitrate_bps_) >=
- kFramedropThreshold * encoder_start_bitrate_bps_) {
- // Reset initial framedrop feature when first real BW estimate arrives.
- // TODO(kthelgason): Update BitrateAllocator to not call OnBitrateUpdated
- // without an actual BW estimate.
- initial_framedrop_ = 0;
- has_seen_first_significant_bwe_change_ = true;
- }
- if (set_start_bitrate_bps_ > 0 && !has_seen_first_bwe_drop_ &&
- quality_scaler_ && quality_scaler_settings_.InitialBitrateIntervalMs() &&
- quality_scaler_settings_.InitialBitrateFactor()) {
- int64_t diff_ms = clock_->TimeInMilliseconds() - set_start_bitrate_time_ms_;
- if (diff_ms < quality_scaler_settings_.InitialBitrateIntervalMs().value() &&
- (target_bitrate.bps() <
- (set_start_bitrate_bps_ *
- quality_scaler_settings_.InitialBitrateFactor().value()))) {
- RTC_LOG(LS_INFO) << "Reset initial_framedrop_. Start bitrate: "
- << set_start_bitrate_bps_
- << ", target bitrate: " << target_bitrate.bps();
- initial_framedrop_ = 0;
- has_seen_first_bwe_drop_ = true;
- }
- }
-
if (encoder_) {
encoder_->OnPacketLossRateUpdate(static_cast<float>(fraction_lost) / 256.f);
encoder_->OnRttUpdate(round_trip_time_ms);
@@ -1901,17 +1597,16 @@ void VideoStreamEncoder::OnBitrateUpdated(DataRate target_bitrate,
uint32_t framerate_fps = GetInputFramerateFps();
frame_dropper_.SetRates((target_bitrate.bps() + 500) / 1000, framerate_fps);
- const bool video_is_suspended = target_bitrate == DataRate::Zero();
- const bool video_suspension_changed = video_is_suspended != EncoderPaused();
EncoderRateSettings new_rate_settings{
VideoBitrateAllocation(), static_cast<double>(framerate_fps),
link_allocation, target_bitrate, stable_target_bitrate};
SetEncoderRates(UpdateBitrateAllocationAndNotifyObserver(new_rate_settings));
- encoder_start_bitrate_bps_ = target_bitrate.bps() != 0
- ? target_bitrate.bps()
- : encoder_start_bitrate_bps_;
+ if (target_bitrate.bps() != 0)
+ encoder_target_bitrate_bps_ = target_bitrate.bps();
+
+ resource_adaptation_processor_->SetTargetBitrate(target_bitrate);
if (video_suspension_changed) {
RTC_LOG(LS_INFO) << "Video suspend state changed to: "
@@ -1928,321 +1623,43 @@ void VideoStreamEncoder::OnBitrateUpdated(DataRate target_bitrate,
}
bool VideoStreamEncoder::DropDueToSize(uint32_t pixel_count) const {
- if (initial_framedrop_ >= kMaxInitialFramedrop ||
- encoder_start_bitrate_bps_ == 0) {
+ bool simulcast_or_svc =
+ (send_codec_.codecType == VideoCodecType::kVideoCodecVP9 &&
+ send_codec_.VP9().numberOfSpatialLayers > 1) ||
+ send_codec_.numberOfSimulcastStreams > 1;
+
+ if (simulcast_or_svc ||
+ !resource_adaptation_processor_->DropInitialFrames() ||
+ !encoder_target_bitrate_bps_.has_value()) {
return false;
}
absl::optional<VideoEncoder::ResolutionBitrateLimits> encoder_bitrate_limits =
- GetEncoderBitrateLimits(encoder_->GetEncoderInfo(), pixel_count);
+ encoder_->GetEncoderInfo().GetEncoderBitrateLimitsForResolution(
+ pixel_count);
if (encoder_bitrate_limits.has_value()) {
// Use bitrate limits provided by encoder.
- return encoder_start_bitrate_bps_ <
+ return encoder_target_bitrate_bps_.value() <
static_cast<uint32_t>(encoder_bitrate_limits->min_start_bitrate_bps);
}
- if (encoder_start_bitrate_bps_ < 300000 /* qvga */) {
+ if (encoder_target_bitrate_bps_.value() < 300000 /* qvga */) {
return pixel_count > 320 * 240;
- } else if (encoder_start_bitrate_bps_ < 500000 /* vga */) {
+ } else if (encoder_target_bitrate_bps_.value() < 500000 /* vga */) {
return pixel_count > 640 * 480;
}
return false;
}
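The fallback thresholds at the end of DropDueToSize() can be restated as a standalone helper; this sketch only mirrors the logic above, and the helper name is hypothetical.

    bool DropDueToSizeFallback(uint32_t target_bps, uint32_t pixel_count) {
      if (target_bps < 300000)  // Budget below ~QVGA.
        return pixel_count > 320 * 240;
      if (target_bps < 500000)  // Budget below ~VGA.
        return pixel_count > 640 * 480;
      return false;  // Enough bitrate; do not drop due to size.
    }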
-bool VideoStreamEncoder::TryQualityRampup(int64_t now_ms) {
- if (!quality_scaler_)
- return false;
-
- uint32_t bw_kbps = last_encoder_rate_settings_
- ? last_encoder_rate_settings_->rate_control
- .bandwidth_allocation.kbps()
- : 0;
-
- if (quality_rampup_experiment_.BwHigh(now_ms, bw_kbps)) {
- // Verify that encoder is at max bitrate and the QP is low.
- if (encoder_start_bitrate_bps_ == send_codec_.maxBitrate * 1000 &&
- quality_scaler_->QpFastFilterLow()) {
- return true;
- }
- }
- return false;
-}
-
-bool VideoStreamEncoder::AdaptDown(AdaptReason reason) {
+void VideoStreamEncoder::OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions) {
RTC_DCHECK_RUN_ON(&encoder_queue_);
- AdaptationRequest adaptation_request = {
- last_frame_info_->pixel_count(),
- encoder_stats_observer_->GetInputFrameRate(),
- AdaptationRequest::Mode::kAdaptDown};
-
- bool downgrade_requested =
- last_adaptation_request_ &&
- last_adaptation_request_->mode_ == AdaptationRequest::Mode::kAdaptDown;
-
- bool did_adapt = true;
-
- switch (EffectiveDegradataionPreference()) {
- case DegradationPreference::BALANCED:
- break;
- case DegradationPreference::MAINTAIN_FRAMERATE:
- if (downgrade_requested &&
- adaptation_request.input_pixel_count_ >=
- last_adaptation_request_->input_pixel_count_) {
- // Don't request lower resolution if the current resolution is not
- // lower than the last time we asked for the resolution to be lowered.
- return true;
- }
- break;
- case DegradationPreference::MAINTAIN_RESOLUTION:
- if (adaptation_request.framerate_fps_ <= 0 ||
- (downgrade_requested &&
- adaptation_request.framerate_fps_ < kMinFramerateFps)) {
- // If no input fps estimate available, can't determine how to scale down
- // framerate. Otherwise, don't request lower framerate if we don't have
- // a valid frame rate. Since framerate, unlike resolution, is a measure
- // we have to estimate, and can fluctuate naturally over time, don't
- // make the same kind of limitations as for resolution, but trust the
- // overuse detector to not trigger too often.
- return true;
- }
- break;
- case DegradationPreference::DISABLED:
- return true;
- }
-
- switch (EffectiveDegradataionPreference()) {
- case DegradationPreference::BALANCED: {
- // Try scale down framerate, if lower.
- int fps = balanced_settings_.MinFps(encoder_config_.codec_type,
- last_frame_info_->pixel_count());
- if (source_proxy_->RestrictFramerate(fps)) {
- GetAdaptCounter().IncrementFramerate(reason);
- // Check if requested fps is higher (or close to) input fps.
- absl::optional<int> min_diff =
- balanced_settings_.MinFpsDiff(last_frame_info_->pixel_count());
- if (min_diff && adaptation_request.framerate_fps_ > 0) {
- int fps_diff = adaptation_request.framerate_fps_ - fps;
- if (fps_diff < min_diff.value()) {
- did_adapt = false;
- }
- }
- break;
- }
- // Scale down resolution.
- RTC_FALLTHROUGH();
- }
- case DegradationPreference::MAINTAIN_FRAMERATE: {
- // Scale down resolution.
- bool min_pixels_reached = false;
- if (!source_proxy_->RequestResolutionLowerThan(
- adaptation_request.input_pixel_count_,
- encoder_->GetEncoderInfo().scaling_settings.min_pixels_per_frame,
- &min_pixels_reached)) {
- if (min_pixels_reached)
- encoder_stats_observer_->OnMinPixelLimitReached();
- return true;
- }
- GetAdaptCounter().IncrementResolution(reason);
- break;
- }
- case DegradationPreference::MAINTAIN_RESOLUTION: {
- // Scale down framerate.
- const int requested_framerate = source_proxy_->RequestFramerateLowerThan(
- adaptation_request.framerate_fps_);
- if (requested_framerate == -1)
- return true;
- RTC_DCHECK_NE(max_framerate_, -1);
- overuse_detector_->OnTargetFramerateUpdated(
- std::min(max_framerate_, requested_framerate));
- GetAdaptCounter().IncrementFramerate(reason);
- break;
- }
- case DegradationPreference::DISABLED:
- RTC_NOTREACHED();
- }
-
- last_adaptation_request_.emplace(adaptation_request);
-
- UpdateAdaptationStats(reason);
-
- RTC_LOG(LS_INFO) << GetConstAdaptCounter().ToString();
- return did_adapt;
-}
-
-void VideoStreamEncoder::AdaptUp(AdaptReason reason) {
- RTC_DCHECK_RUN_ON(&encoder_queue_);
-
- const AdaptCounter& adapt_counter = GetConstAdaptCounter();
- int num_downgrades = adapt_counter.TotalCount(reason);
- if (num_downgrades == 0)
- return;
- RTC_DCHECK_GT(num_downgrades, 0);
-
- AdaptationRequest adaptation_request = {
- last_frame_info_->pixel_count(),
- encoder_stats_observer_->GetInputFrameRate(),
- AdaptationRequest::Mode::kAdaptUp};
-
- bool adapt_up_requested =
- last_adaptation_request_ &&
- last_adaptation_request_->mode_ == AdaptationRequest::Mode::kAdaptUp;
-
- if (EffectiveDegradataionPreference() ==
- DegradationPreference::MAINTAIN_FRAMERATE) {
- if (adapt_up_requested &&
- adaptation_request.input_pixel_count_ <=
- last_adaptation_request_->input_pixel_count_) {
- // Don't request higher resolution if the current resolution is not
- // higher than the last time we asked for the resolution to be higher.
- return;
- }
- }
-
- switch (EffectiveDegradataionPreference()) {
- case DegradationPreference::BALANCED: {
- // Check if quality should be increased based on bitrate.
- if (reason == kQuality &&
- !balanced_settings_.CanAdaptUp(last_frame_info_->pixel_count(),
- encoder_start_bitrate_bps_)) {
- return;
- }
- // Try scale up framerate, if higher.
- int fps = balanced_settings_.MaxFps(encoder_config_.codec_type,
- last_frame_info_->pixel_count());
- if (source_proxy_->IncreaseFramerate(fps)) {
- GetAdaptCounter().DecrementFramerate(reason, fps);
- // Reset framerate in case of fewer fps steps down than up.
- if (adapt_counter.FramerateCount() == 0 &&
- fps != std::numeric_limits<int>::max()) {
- RTC_LOG(LS_INFO) << "Removing framerate down-scaling setting.";
- source_proxy_->IncreaseFramerate(std::numeric_limits<int>::max());
- }
- break;
- }
- // Check if resolution should be increased based on bitrate.
- if (reason == kQuality &&
- !balanced_settings_.CanAdaptUpResolution(
- last_frame_info_->pixel_count(), encoder_start_bitrate_bps_)) {
- return;
- }
- // Scale up resolution.
- RTC_FALLTHROUGH();
- }
- case DegradationPreference::MAINTAIN_FRAMERATE: {
- // Check if resolution should be increased based on bitrate and
- // limits specified by encoder capabilities.
- if (reason == kQuality &&
- !CanAdaptUpResolution(last_frame_info_->pixel_count(),
- encoder_start_bitrate_bps_)) {
- return;
- }
-
- // Scale up resolution.
- int pixel_count = adaptation_request.input_pixel_count_;
- if (adapt_counter.ResolutionCount() == 1) {
- RTC_LOG(LS_INFO) << "Removing resolution down-scaling setting.";
- pixel_count = std::numeric_limits<int>::max();
- }
- if (!source_proxy_->RequestHigherResolutionThan(pixel_count))
- return;
- GetAdaptCounter().DecrementResolution(reason);
- break;
- }
- case DegradationPreference::MAINTAIN_RESOLUTION: {
- // Scale up framerate.
- int fps = adaptation_request.framerate_fps_;
- if (adapt_counter.FramerateCount() == 1) {
- RTC_LOG(LS_INFO) << "Removing framerate down-scaling setting.";
- fps = std::numeric_limits<int>::max();
- }
-
- const int requested_framerate =
- source_proxy_->RequestHigherFramerateThan(fps);
- if (requested_framerate == -1) {
- overuse_detector_->OnTargetFramerateUpdated(max_framerate_);
- return;
- }
- overuse_detector_->OnTargetFramerateUpdated(
- std::min(max_framerate_, requested_framerate));
- GetAdaptCounter().DecrementFramerate(reason);
- break;
- }
- case DegradationPreference::DISABLED:
- return;
- }
-
- last_adaptation_request_.emplace(adaptation_request);
-
- UpdateAdaptationStats(reason);
-
- RTC_LOG(LS_INFO) << adapt_counter.ToString();
-}
-
-bool VideoStreamEncoder::CanAdaptUpResolution(int pixels,
- uint32_t bitrate_bps) const {
- absl::optional<VideoEncoder::ResolutionBitrateLimits> bitrate_limits =
- GetEncoderBitrateLimits(encoder_info_,
- source_proxy_->GetHigherResolutionThan(pixels));
- if (!bitrate_limits.has_value() || bitrate_bps == 0) {
- return true; // No limit configured or bitrate provided.
- }
- RTC_DCHECK_GE(bitrate_limits->frame_size_pixels, pixels);
- return bitrate_bps >=
- static_cast<uint32_t>(bitrate_limits->min_start_bitrate_bps);
-}
-
-// TODO(nisse): Delete, once AdaptReason and AdaptationReason are merged.
-void VideoStreamEncoder::UpdateAdaptationStats(AdaptReason reason) {
- switch (reason) {
- case kCpu:
- encoder_stats_observer_->OnAdaptationChanged(
- VideoStreamEncoderObserver::AdaptationReason::kCpu,
- GetActiveCounts(kCpu), GetActiveCounts(kQuality));
- break;
- case kQuality:
- encoder_stats_observer_->OnAdaptationChanged(
- VideoStreamEncoderObserver::AdaptationReason::kQuality,
- GetActiveCounts(kCpu), GetActiveCounts(kQuality));
- break;
- }
-}
-
-VideoStreamEncoderObserver::AdaptationSteps VideoStreamEncoder::GetActiveCounts(
- AdaptReason reason) {
- VideoStreamEncoderObserver::AdaptationSteps counts =
- GetConstAdaptCounter().Counts(reason);
- switch (reason) {
- case kCpu:
- if (!IsFramerateScalingEnabled(degradation_preference_))
- counts.num_framerate_reductions = absl::nullopt;
- if (!IsResolutionScalingEnabled(degradation_preference_))
- counts.num_resolution_reductions = absl::nullopt;
- break;
- case kQuality:
- if (!IsFramerateScalingEnabled(degradation_preference_) ||
- !quality_scaler_) {
- counts.num_framerate_reductions = absl::nullopt;
- }
- if (!IsResolutionScalingEnabled(degradation_preference_) ||
- !quality_scaler_) {
- counts.num_resolution_reductions = absl::nullopt;
- }
- break;
- }
- return counts;
-}
-
-VideoStreamEncoder::AdaptCounter& VideoStreamEncoder::GetAdaptCounter() {
- return adapt_counters_[degradation_preference_];
-}
-
-const VideoStreamEncoder::AdaptCounter&
-VideoStreamEncoder::GetConstAdaptCounter() {
- return adapt_counters_[degradation_preference_];
+ video_source_sink_controller_->SetRestrictions(std::move(restrictions));
+ video_source_sink_controller_->PushSourceSinkSettings();
}
-void VideoStreamEncoder::RunPostEncode(EncodedImage encoded_image,
+void VideoStreamEncoder::RunPostEncode(const EncodedImage& encoded_image,
int64_t time_sent_us,
int temporal_index,
DataSize frame_size) {
@@ -2284,12 +1701,8 @@ void VideoStreamEncoder::RunPostEncode(EncodedImage encoded_image,
}
}
- overuse_detector_->FrameSent(
- encoded_image.Timestamp(), time_sent_us,
- encoded_image.capture_time_ms_ * rtc::kNumMicrosecsPerMillisec,
- encode_duration_us);
- if (quality_scaler_ && encoded_image.qp_ >= 0)
- quality_scaler_->ReportQp(encoded_image.qp_, time_sent_us);
+ resource_adaptation_processor_->OnEncodeCompleted(encoded_image, time_sent_us,
+ encode_duration_us);
if (bitrate_adjuster_) {
bitrate_adjuster_->OnEncodedFrame(encoded_image, temporal_index);
}
@@ -2310,120 +1723,10 @@ void VideoStreamEncoder::ReleaseEncoder() {
TRACE_EVENT0("webrtc", "VCMGenericEncoder::Release");
}
-// Class holding adaptation information.
-VideoStreamEncoder::AdaptCounter::AdaptCounter() {
- fps_counters_.resize(kScaleReasonSize);
- resolution_counters_.resize(kScaleReasonSize);
- static_assert(kScaleReasonSize == 2, "Update MoveCount.");
-}
-
-VideoStreamEncoder::AdaptCounter::~AdaptCounter() {}
-
-std::string VideoStreamEncoder::AdaptCounter::ToString() const {
- rtc::StringBuilder ss;
- ss << "Downgrade counts: fps: {" << ToString(fps_counters_);
- ss << "}, resolution: {" << ToString(resolution_counters_) << "}";
- return ss.Release();
-}
-
-VideoStreamEncoderObserver::AdaptationSteps
-VideoStreamEncoder::AdaptCounter::Counts(int reason) const {
- VideoStreamEncoderObserver::AdaptationSteps counts;
- counts.num_framerate_reductions = fps_counters_[reason];
- counts.num_resolution_reductions = resolution_counters_[reason];
- return counts;
-}
-
-void VideoStreamEncoder::AdaptCounter::IncrementFramerate(int reason) {
- ++(fps_counters_[reason]);
-}
-
-void VideoStreamEncoder::AdaptCounter::IncrementResolution(int reason) {
- ++(resolution_counters_[reason]);
-}
-
-void VideoStreamEncoder::AdaptCounter::DecrementFramerate(int reason) {
- if (fps_counters_[reason] == 0) {
- // Balanced mode: Adapt up is in a different order, switch reason.
- // E.g. framerate adapt down: quality (2), framerate adapt up: cpu (3).
- // 1. Down resolution (cpu): res={quality:0,cpu:1}, fps={quality:0,cpu:0}
- // 2. Down fps (quality): res={quality:0,cpu:1}, fps={quality:1,cpu:0}
- // 3. Up fps (cpu): res={quality:1,cpu:0}, fps={quality:0,cpu:0}
- // 4. Up resolution (quality): res={quality:0,cpu:0}, fps={quality:0,cpu:0}
- RTC_DCHECK_GT(TotalCount(reason), 0) << "No downgrade for reason.";
- RTC_DCHECK_GT(FramerateCount(), 0) << "Framerate not downgraded.";
- MoveCount(&resolution_counters_, reason);
- MoveCount(&fps_counters_, (reason + 1) % kScaleReasonSize);
- }
- --(fps_counters_[reason]);
- RTC_DCHECK_GE(fps_counters_[reason], 0);
-}
-
-void VideoStreamEncoder::AdaptCounter::DecrementResolution(int reason) {
- if (resolution_counters_[reason] == 0) {
- // Balanced mode: Adapt up is in a different order, switch reason.
- RTC_DCHECK_GT(TotalCount(reason), 0) << "No downgrade for reason.";
- RTC_DCHECK_GT(ResolutionCount(), 0) << "Resolution not downgraded.";
- MoveCount(&fps_counters_, reason);
- MoveCount(&resolution_counters_, (reason + 1) % kScaleReasonSize);
- }
- --(resolution_counters_[reason]);
- RTC_DCHECK_GE(resolution_counters_[reason], 0);
-}
-
-void VideoStreamEncoder::AdaptCounter::DecrementFramerate(int reason,
- int cur_fps) {
- DecrementFramerate(reason);
- // Reset if at max fps (i.e. in case of fewer steps up than down).
- if (cur_fps == std::numeric_limits<int>::max())
- absl::c_fill(fps_counters_, 0);
-}
-
-int VideoStreamEncoder::AdaptCounter::FramerateCount() const {
- return Count(fps_counters_);
-}
-
-int VideoStreamEncoder::AdaptCounter::ResolutionCount() const {
- return Count(resolution_counters_);
-}
-
-int VideoStreamEncoder::AdaptCounter::FramerateCount(int reason) const {
- return fps_counters_[reason];
-}
-
-int VideoStreamEncoder::AdaptCounter::ResolutionCount(int reason) const {
- return resolution_counters_[reason];
-}
-
-int VideoStreamEncoder::AdaptCounter::TotalCount(int reason) const {
- return FramerateCount(reason) + ResolutionCount(reason);
-}
-
-int VideoStreamEncoder::AdaptCounter::Count(
- const std::vector<int>& counters) const {
- return absl::c_accumulate(counters, 0);
-}
-
-void VideoStreamEncoder::AdaptCounter::MoveCount(std::vector<int>* counters,
- int from_reason) {
- int to_reason = (from_reason + 1) % kScaleReasonSize;
- ++((*counters)[to_reason]);
- --((*counters)[from_reason]);
-}
-
-std::string VideoStreamEncoder::AdaptCounter::ToString(
- const std::vector<int>& counters) const {
- rtc::StringBuilder ss;
- for (size_t reason = 0; reason < kScaleReasonSize; ++reason) {
- ss << (reason ? " cpu" : "quality") << ":" << counters[reason];
- }
- return ss.Release();
-}
-
bool VideoStreamEncoder::EncoderSwitchExperiment::IsBitrateBelowThreshold(
const DataRate& target_bitrate) {
- DataRate rate =
- DataRate::kbps(bitrate_filter.Apply(1.0, target_bitrate.kbps()));
+ DataRate rate = DataRate::KilobitsPerSec(
+ bitrate_filter.Apply(1.0, target_bitrate.kbps()));
return current_thresholds.bitrate && rate < *current_thresholds.bitrate;
}
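IsBitrateBelowThreshold() compares a smoothed, not instantaneous, bitrate against the per-codec threshold, so short dips do not trigger a switch. A minimal sketch of the smoothing idea, assuming a plain exponentially weighted moving average; rtc::ExpFilter's exact recurrence and coefficient may differ:

    double SmoothedKbps(double previous_kbps, double sample_kbps) {
      constexpr double kAlpha = 0.9;  // Illustrative smoothing factor.
      return kAlpha * previous_kbps + (1.0 - kAlpha) * sample_kbps;
    }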
@@ -2489,7 +1792,8 @@ VideoStreamEncoder::ParseEncoderSwitchFieldTrial() const {
rtc::FromString(thresholds_split[2], &pixel_count);
if (bitrate_kbps > 0) {
- result.codec_thresholds[codec].bitrate = DataRate::kbps(bitrate_kbps);
+ result.codec_thresholds[codec].bitrate =
+ DataRate::KilobitsPerSec(bitrate_kbps);
}
if (pixel_count > 0) {
@@ -2505,8 +1809,8 @@ VideoStreamEncoder::ParseEncoderSwitchFieldTrial() const {
rtc::StringBuilder ss;
ss << "Successfully parsed WebRTC-NetworkCondition-EncoderSwitch field "
"trial."
- << " to_codec:" << result.to_codec
- << " to_param:" << result.to_param.value_or("<none>")
+ " to_codec:"
+ << result.to_codec << " to_param:" << result.to_param.value_or("<none>")
<< " to_value:" << result.to_value.value_or("<none>")
<< " codec_thresholds:";
@@ -2539,7 +1843,8 @@ VideoStreamEncoder::ParseAutomatincAnimationDetectionFieldTrial() const {
}
RTC_LOG(LS_INFO) << "Automatic animation detection experiment settings:"
- << " min_duration_ms=" << result.min_duration_ms
+ " min_duration_ms="
+ << result.min_duration_ms
<< " min_area_ration=" << result.min_area_ratio
<< " min_fps=" << result.min_fps;
@@ -2552,7 +1857,8 @@ void VideoStreamEncoder::CheckForAnimatedContent(
if (!automatic_animation_detection_experiment_.enabled ||
encoder_config_.content_type !=
VideoEncoderConfig::ContentType::kScreen ||
- degradation_preference_ != DegradationPreference::BALANCED) {
+ resource_adaptation_processor_->degradation_preference() !=
+ DegradationPreference::BALANCED) {
return;
}
@@ -2583,10 +1889,10 @@ void VideoStreamEncoder::CheckForAnimatedContent(
} else if ((!last_update_rect_ ||
frame.update_rect() != *last_update_rect_)) {
last_update_rect_ = frame.update_rect();
- animation_start_time_ = Timestamp::us(time_when_posted_in_us);
+ animation_start_time_ = Timestamp::Micros(time_when_posted_in_us);
} else {
TimeDelta animation_duration =
- Timestamp::us(time_when_posted_in_us) - animation_start_time_;
+ Timestamp::Micros(time_when_posted_in_us) - animation_start_time_;
float area_ratio = static_cast<float>(last_update_rect_->width *
last_update_rect_->height) /
(frame.width() * frame.height());
@@ -2609,23 +1915,16 @@ void VideoStreamEncoder::CheckForAnimatedContent(
RTC_LOG(LS_INFO) << "Removing resolution cap due to no consistent "
"animation detection.";
}
- source_proxy_->RestrictPixels(should_cap_resolution
- ? kMaxAnimationPixels
- : std::numeric_limits<int>::max());
+ video_source_sink_controller_->SetPixelsPerFrameUpperLimit(
+ should_cap_resolution ? absl::optional<size_t>(kMaxAnimationPixels)
+ : absl::nullopt);
+ video_source_sink_controller_->PushSourceSinkSettings();
}
}
-
-DegradationPreference VideoStreamEncoder::EffectiveDegradataionPreference()
- const {
- // Balanced mode for screenshare works via automatic animation detection:
- // Resolution is capped for fullscreen animated content.
- // Adapatation is done only via framerate downgrade.
- // Thus effective degradation preference is MAINTAIN_RESOLUTION.
- return (encoder_config_.content_type ==
- VideoEncoderConfig::ContentType::kScreen &&
- degradation_preference_ == DegradationPreference::BALANCED)
- ? DegradationPreference::MAINTAIN_RESOLUTION
- : degradation_preference_;
+void VideoStreamEncoder::InjectAdaptationResource(
+ Resource* resource,
+ AdaptationObserverInterface::AdaptReason reason) {
+ resource_adaptation_processor_->AddResource(resource, reason);
}
} // namespace webrtc
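Taken together with the unit-test changes below, the intended flow is: an injected Resource signals overuse, the ResourceAdaptationProcessor adapts down, and the encoder learns the outcome through OnVideoSourceRestrictionsUpdated(), which pushes the restrictions into the VideoSourceSinkController. A test-side sketch, using the FakeResource type from the tests below; the encoder handle and call sequence are illustrative (the real tests do this from a VideoStreamEncoder subclass, since InjectAdaptationResource() is protected):

    auto cpu_resource = std::make_unique<FakeResource>(
        ResourceUsageState::kStable, "FakeResource[CPU]");
    encoder_under_test->InjectAdaptationResource(
        cpu_resource.get(), AdaptationObserverInterface::AdaptReason::kCpu);
    // Signalling overuse makes the processor adapt down; the new
    // VideoSourceRestrictions then reach the sink controller.
    cpu_resource->set_usage_state(ResourceUsageState::kOveruse);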
diff --git a/chromium/third_party/webrtc/video/video_stream_encoder.h b/chromium/third_party/webrtc/video/video_stream_encoder.h
index 95179440492..4963fb81410 100644
--- a/chromium/third_party/webrtc/video/video_stream_encoder.h
+++ b/chromium/third_party/webrtc/video/video_stream_encoder.h
@@ -26,13 +26,11 @@
#include "api/video/video_stream_encoder_settings.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
+#include "call/adaptation/resource_adaptation_processor_interface.h"
+#include "call/adaptation/video_source_restrictions.h"
#include "modules/video_coding/utility/frame_dropper.h"
-#include "modules/video_coding/utility/quality_scaler.h"
#include "rtc_base/critical_section.h"
#include "rtc_base/event.h"
-#include "rtc_base/experiments/balanced_degradation_settings.h"
-#include "rtc_base/experiments/quality_rampup_experiment.h"
-#include "rtc_base/experiments/quality_scaler_settings.h"
#include "rtc_base/experiments/rate_control_settings.h"
#include "rtc_base/numerics/exp_filter.h"
#include "rtc_base/race_checker.h"
@@ -40,9 +38,10 @@
#include "rtc_base/synchronization/sequence_checker.h"
#include "rtc_base/task_queue.h"
#include "system_wrappers/include/clock.h"
+#include "video/adaptation/resource_adaptation_processor.h"
#include "video/encoder_bitrate_adjuster.h"
#include "video/frame_encode_metadata_writer.h"
-#include "video/overuse_frame_detector.h"
+#include "video/video_source_sink_controller.h"
namespace webrtc {
@@ -56,8 +55,7 @@ namespace webrtc {
// Call Stop() when done.
class VideoStreamEncoder : public VideoStreamEncoderInterface,
private EncodedImageCallback,
- // Protected only to provide access to tests.
- protected AdaptationObserverInterface {
+ public ResourceAdaptationProcessorListener {
public:
VideoStreamEncoder(Clock* clock,
uint32_t number_of_cores,
@@ -97,21 +95,27 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
DataRate stable_target_bitrate,
DataRate target_headroom,
uint8_t fraction_lost,
- int64_t round_trip_time_ms) override;
+ int64_t round_trip_time_ms,
+ double cwnd_reduce_ratio) override;
+
+ DataRate UpdateTargetBitrate(DataRate target_bitrate,
+ double cwnd_reduce_ratio);
protected:
  // Used for testing. For example, the |ScalingObserverInterface| methods must
// be called on |encoder_queue_|.
rtc::TaskQueue* encoder_queue() { return &encoder_queue_; }
- // AdaptationObserverInterface implementation.
- // These methods are protected for easier testing.
- void AdaptUp(AdaptReason reason) override;
- bool AdaptDown(AdaptReason reason) override;
+ void OnVideoSourceRestrictionsUpdated(
+ VideoSourceRestrictions restrictions) override;
- private:
- class VideoSourceProxy;
+ // Used for injected test resources.
+ // TODO(eshr): Move all adaptation tests out of VideoStreamEncoder tests.
+ void InjectAdaptationResource(Resource* resource,
+ AdaptationObserverInterface::AdaptReason reason)
+ RTC_RUN_ON(&encoder_queue_);
+ private:
class VideoFrameInfo {
public:
VideoFrameInfo(int width, int height, bool is_texture)
@@ -145,8 +149,6 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
void ReconfigureEncoder() RTC_RUN_ON(&encoder_queue_);
- void ConfigureQualityScaler(const VideoEncoder::EncoderInfo& encoder_info);
-
// Implements VideoSinkInterface.
void OnFrame(const VideoFrame& video_frame) override;
void OnDiscardedFrame() override;
@@ -159,7 +161,6 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
  // Indicates whether a frame should be dropped because the pixel count is too
// large for the current bitrate configuration.
bool DropDueToSize(uint32_t pixel_count) const RTC_RUN_ON(&encoder_queue_);
- bool TryQualityRampup(int64_t now_ms) RTC_RUN_ON(&encoder_queue_);
// Implements EncodedImageCallback.
EncodedImageCallback::Result OnEncodedImage(
@@ -183,51 +184,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
void SetEncoderRates(const EncoderRateSettings& rate_settings)
RTC_RUN_ON(&encoder_queue_);
- // Class holding adaptation information.
- class AdaptCounter final {
- public:
- AdaptCounter();
- ~AdaptCounter();
-
- // Get number of adaptation downscales for |reason|.
- VideoStreamEncoderObserver::AdaptationSteps Counts(int reason) const;
-
- std::string ToString() const;
-
- void IncrementFramerate(int reason);
- void IncrementResolution(int reason);
- void DecrementFramerate(int reason);
- void DecrementResolution(int reason);
- void DecrementFramerate(int reason, int cur_fps);
-
- // Gets the total number of downgrades (for all adapt reasons).
- int FramerateCount() const;
- int ResolutionCount() const;
-
- // Gets the total number of downgrades for |reason|.
- int FramerateCount(int reason) const;
- int ResolutionCount(int reason) const;
- int TotalCount(int reason) const;
-
- private:
- std::string ToString(const std::vector<int>& counters) const;
- int Count(const std::vector<int>& counters) const;
- void MoveCount(std::vector<int>* counters, int from_reason);
-
- // Degradation counters holding number of framerate/resolution reductions
- // per adapt reason.
- std::vector<int> fps_counters_;
- std::vector<int> resolution_counters_;
- };
-
- AdaptCounter& GetAdaptCounter() RTC_RUN_ON(&encoder_queue_);
- const AdaptCounter& GetConstAdaptCounter() RTC_RUN_ON(&encoder_queue_);
- void UpdateAdaptationStats(AdaptReason reason) RTC_RUN_ON(&encoder_queue_);
- VideoStreamEncoderObserver::AdaptationSteps GetActiveCounts(
- AdaptReason reason) RTC_RUN_ON(&encoder_queue_);
- bool CanAdaptUpResolution(int pixels, uint32_t bitrate_bps) const
- RTC_RUN_ON(&encoder_queue_);
- void RunPostEncode(EncodedImage encoded_image,
+ void RunPostEncode(const EncodedImage& encoded_image,
int64_t time_sent_us,
int temporal_index,
DataSize frame_size);
@@ -238,34 +195,18 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
int64_t time_when_posted_in_ms)
RTC_RUN_ON(&encoder_queue_);
- // Calculates degradation preference used in adaptation down or up.
- DegradationPreference EffectiveDegradataionPreference() const
- RTC_RUN_ON(&encoder_queue_);
-
rtc::Event shutdown_event_;
const uint32_t number_of_cores_;
- // Counts how many frames we've dropped in the initial framedrop phase.
- int initial_framedrop_;
- const bool initial_framedrop_on_bwe_enabled_;
- bool has_seen_first_significant_bwe_change_ = false;
- bool quality_rampup_done_ RTC_GUARDED_BY(&encoder_queue_);
- QualityRampupExperiment quality_rampup_experiment_
- RTC_GUARDED_BY(&encoder_queue_);
const bool quality_scaling_experiment_enabled_;
- const std::unique_ptr<VideoSourceProxy> source_proxy_;
EncoderSink* sink_;
const VideoStreamEncoderSettings settings_;
const RateControlSettings rate_control_settings_;
- const QualityScalerSettings quality_scaler_settings_;
-
- const std::unique_ptr<OveruseFrameDetector> overuse_detector_
- RTC_PT_GUARDED_BY(&encoder_queue_);
- std::unique_ptr<QualityScaler> quality_scaler_ RTC_GUARDED_BY(&encoder_queue_)
- RTC_PT_GUARDED_BY(&encoder_queue_);
+ std::unique_ptr<VideoEncoderFactory::EncoderSelectorInterface> const
+ encoder_selector_;
VideoStreamEncoderObserver* const encoder_stats_observer_;
// |thread_checker_| checks that public methods that are related to lifetime
// of VideoStreamEncoder are called on the same thread.
@@ -277,8 +218,6 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
bool encoder_initialized_;
std::unique_ptr<VideoBitrateAllocator> rate_allocator_
RTC_GUARDED_BY(&encoder_queue_) RTC_PT_GUARDED_BY(&encoder_queue_);
- // The maximum frame rate of the current codec configuration, as determined
- // at the last ReconfigureEncoder() call.
int max_framerate_ RTC_GUARDED_BY(&encoder_queue_);
// Set when ConfigureEncoder has been called in order to lazy reconfigure the
@@ -292,10 +231,8 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
RTC_GUARDED_BY(&encoder_queue_);
int crop_width_ RTC_GUARDED_BY(&encoder_queue_);
int crop_height_ RTC_GUARDED_BY(&encoder_queue_);
- uint32_t encoder_start_bitrate_bps_ RTC_GUARDED_BY(&encoder_queue_);
- int set_start_bitrate_bps_ RTC_GUARDED_BY(&encoder_queue_);
- int64_t set_start_bitrate_time_ms_ RTC_GUARDED_BY(&encoder_queue_);
- bool has_seen_first_bwe_drop_ RTC_GUARDED_BY(&encoder_queue_);
+ absl::optional<uint32_t> encoder_target_bitrate_bps_
+ RTC_GUARDED_BY(&encoder_queue_);
size_t max_data_payload_length_ RTC_GUARDED_BY(&encoder_queue_);
absl::optional<EncoderRateSettings> last_encoder_rate_settings_
RTC_GUARDED_BY(&encoder_queue_);
@@ -308,30 +245,6 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
bool encoder_failed_ RTC_GUARDED_BY(&encoder_queue_);
Clock* const clock_;
- // Counters used for deciding if the video resolution or framerate is
- // currently restricted, and if so, why, on a per degradation preference
- // basis.
- // TODO(sprang): Replace this with a state holding a relative overuse measure
- // instead, that can be translated into suitable down-scale or fps limit.
- std::map<const DegradationPreference, AdaptCounter> adapt_counters_
- RTC_GUARDED_BY(&encoder_queue_);
- // Set depending on degradation preferences.
- DegradationPreference degradation_preference_ RTC_GUARDED_BY(&encoder_queue_);
-
- const BalancedDegradationSettings balanced_settings_;
-
- struct AdaptationRequest {
- // The pixel count produced by the source at the time of the adaptation.
- int input_pixel_count_;
- // Framerate received from the source at the time of the adaptation.
- int framerate_fps_;
- // Indicates if request was to adapt up or down.
- enum class Mode { kAdaptUp, kAdaptDown } mode_;
- };
- // Stores a snapshot of the last adaptation request triggered by an AdaptUp
- // or AdaptDown signal.
- absl::optional<AdaptationRequest> last_adaptation_request_
- RTC_GUARDED_BY(&encoder_queue_);
rtc::RaceChecker incoming_frame_race_checker_
RTC_GUARDED_BY(incoming_frame_race_checker_);
@@ -344,7 +257,8 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
int64_t last_frame_log_ms_ RTC_GUARDED_BY(incoming_frame_race_checker_);
int captured_frame_count_ RTC_GUARDED_BY(&encoder_queue_);
- int dropped_frame_count_ RTC_GUARDED_BY(&encoder_queue_);
+ int dropped_frame_cwnd_pushback_count_ RTC_GUARDED_BY(&encoder_queue_);
+ int dropped_frame_encoder_block_count_ RTC_GUARDED_BY(&encoder_queue_);
absl::optional<VideoFrame> pending_frame_ RTC_GUARDED_BY(&encoder_queue_);
int64_t pending_frame_post_time_us_ RTC_GUARDED_BY(&encoder_queue_);
@@ -392,6 +306,12 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
// the worker thread.
std::atomic<int> pending_frame_drops_;
+ // Congestion window frame drop ratio (drop 1 in every
+ // cwnd_frame_drop_interval_ frames).
+ absl::optional<int> cwnd_frame_drop_interval_ RTC_GUARDED_BY(&encoder_queue_);
+ // Frame counter for congestion window frame drop.
+ int cwnd_frame_counter_ RTC_GUARDED_BY(&encoder_queue_);
+
std::unique_ptr<EncoderBitrateAdjuster> bitrate_adjuster_
RTC_GUARDED_BY(&encoder_queue_);
@@ -473,6 +393,22 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
// track of whether a request has been made or not.
bool encoder_switch_requested_ RTC_GUARDED_BY(&encoder_queue_);
+  // The controller updates the sink wants based on restrictions that come
+  // from the resource adaptation processor or from bandwidth adaptation.
+ //
+ // This is used on the encoder queue, with a few exceptions:
+ // - VideoStreamEncoder::SetSource() invokes SetSource().
+ // - VideoStreamEncoder::SetSink() invokes SetRotationApplied() and
+ // PushSourceSinkSettings().
+ // - VideoStreamEncoder::Stop() invokes SetSource().
+ // TODO(hbos): If these can be moved to the encoder queue,
+ // VideoSourceSinkController can be made single-threaded, and its lock can be
+ // replaced with a sequence checker.
+ std::unique_ptr<VideoSourceSinkController> video_source_sink_controller_;
+ std::unique_ptr<ResourceAdaptationProcessor> resource_adaptation_processor_
+ RTC_GUARDED_BY(&encoder_queue_);
+
  // All public methods are proxied to |encoder_queue_|. It must be
// destroyed first to make sure no tasks are run that use other members.
rtc::TaskQueue encoder_queue_;
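The two congestion-window members declared above are meant to be read as "drop one frame in every cwnd_frame_drop_interval_ frames". A hypothetical consumption site, assuming the counter is tested per interval (the actual drop site lives in the frame path and is not shown in this hunk):

    bool ShouldDropForCongestionWindow() {
      if (!cwnd_frame_drop_interval_.has_value())
        return false;  // Pushback inactive; never drop.
      ++cwnd_frame_counter_;
      // Drop one frame in every |cwnd_frame_drop_interval_| frames.
      return cwnd_frame_counter_ % cwnd_frame_drop_interval_.value() == 0;
    }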
diff --git a/chromium/third_party/webrtc/video/video_stream_encoder_unittest.cc b/chromium/third_party/webrtc/video/video_stream_encoder_unittest.cc
index f2e023db987..bb85776500e 100644
--- a/chromium/third_party/webrtc/video/video_stream_encoder_unittest.cc
+++ b/chromium/third_party/webrtc/video/video_stream_encoder_unittest.cc
@@ -18,17 +18,18 @@
#include "absl/memory/memory.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "api/test/mock_fec_controller_override.h"
+#include "api/test/mock_video_encoder.h"
#include "api/video/builtin_video_bitrate_allocator_factory.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_bitrate_allocation.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/vp8_temporal_layers.h"
#include "api/video_codecs/vp8_temporal_layers_factory.h"
+#include "call/adaptation/test/fake_resource.h"
#include "common_video/h264/h264_common.h"
#include "common_video/include/video_frame_buffer.h"
#include "media/base/video_adapter.h"
#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
-#include "modules/video_coding/utility/default_video_bitrate_allocator.h"
#include "modules/video_coding/utility/simulcast_rate_allocator.h"
#include "rtc_base/fake_clock.h"
#include "rtc_base/logging.h"
@@ -39,7 +40,7 @@
#include "test/encoder_settings.h"
#include "test/fake_encoder.h"
#include "test/field_trial.h"
-#include "test/frame_generator.h"
+#include "test/frame_forwarder.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/video_encoder_proxy_factory.h"
@@ -51,6 +52,9 @@ using ScaleReason = AdaptationObserverInterface::AdaptReason;
using ::testing::_;
using ::testing::AllOf;
using ::testing::Field;
+using ::testing::Matcher;
+using ::testing::NiceMock;
+using ::testing::Return;
using ::testing::StrictMock;
namespace {
@@ -153,19 +157,57 @@ class VideoStreamEncoderUnderTest : public VideoStreamEncoder {
std::unique_ptr<OveruseFrameDetector>(
overuse_detector_proxy_ =
new CpuOveruseDetectorProxy(stats_proxy)),
- task_queue_factory) {}
+ task_queue_factory),
+ fake_cpu_resource_(
+ std::make_unique<FakeResource>(ResourceUsageState::kStable,
+ "FakeResource[CPU]")),
+ fake_quality_resource_(
+ std::make_unique<FakeResource>(ResourceUsageState::kStable,
+ "FakeResource[QP]")) {
+ InjectAdaptationResource(
+ fake_quality_resource_.get(),
+ AdaptationObserverInterface::AdaptReason::kQuality);
+ InjectAdaptationResource(fake_cpu_resource_.get(),
+ AdaptationObserverInterface::AdaptReason::kCpu);
+ }
- void PostTaskAndWait(bool down, AdaptReason reason) {
+ void PostTaskAndWait(bool down,
+ AdaptationObserverInterface::AdaptReason reason) {
PostTaskAndWait(down, reason, /*expected_results=*/true);
}
- void PostTaskAndWait(bool down, AdaptReason reason, bool expected_results) {
+ void PostTaskAndWait(bool down,
+ AdaptationObserverInterface::AdaptReason reason,
+ bool expected_results) {
rtc::Event event;
encoder_queue()->PostTask([this, &event, reason, down, expected_results] {
- if (down)
- EXPECT_EQ(expected_results, AdaptDown(reason));
- else
- AdaptUp(reason);
+ ResourceUsageState usage_state =
+ down ? ResourceUsageState::kOveruse : ResourceUsageState::kUnderuse;
+
+ FakeResource* resource = nullptr;
+ switch (reason) {
+ case AdaptationObserverInterface::kQuality:
+ resource = fake_quality_resource_.get();
+ break;
+ case AdaptationObserverInterface::kCpu:
+ resource = fake_cpu_resource_.get();
+ break;
+ default:
+ RTC_NOTREACHED();
+ }
+
+ resource->set_usage_state(usage_state);
+ if (!expected_results) {
+ ASSERT_EQ(AdaptationObserverInterface::kQuality, reason)
+ << "We can only assert adaptation result for quality resources";
+ EXPECT_EQ(
+ ResourceListenerResponse::kQualityScalerShouldIncreaseFrequency,
+ resource->last_response());
+ } else {
+ EXPECT_EQ(ResourceListenerResponse::kNothing,
+ resource->last_response());
+ }
+
event.Set();
});
ASSERT_TRUE(event.Wait(5000));
@@ -180,27 +222,34 @@ class VideoStreamEncoderUnderTest : public VideoStreamEncoder {
}
void TriggerCpuOveruse() {
- PostTaskAndWait(/*down=*/true, AdaptReason::kCpu);
+ PostTaskAndWait(/*down=*/true,
+ AdaptationObserverInterface::AdaptReason::kCpu);
}
void TriggerCpuNormalUsage() {
- PostTaskAndWait(/*down=*/false, AdaptReason::kCpu);
+ PostTaskAndWait(/*down=*/false,
+ AdaptationObserverInterface::AdaptReason::kCpu);
}
void TriggerQualityLow() {
- PostTaskAndWait(/*down=*/true, AdaptReason::kQuality);
+ PostTaskAndWait(/*down=*/true,
+ AdaptationObserverInterface::AdaptReason::kQuality);
}
void TriggerQualityLowExpectFalse() {
- PostTaskAndWait(/*down=*/true, AdaptReason::kQuality,
+ PostTaskAndWait(/*down=*/true,
+ AdaptationObserverInterface::AdaptReason::kQuality,
/*expected_results=*/false);
}
void TriggerQualityHigh() {
- PostTaskAndWait(/*down=*/false, AdaptReason::kQuality);
+ PostTaskAndWait(/*down=*/false,
+ AdaptationObserverInterface::AdaptReason::kQuality);
}
CpuOveruseDetectorProxy* overuse_detector_proxy_;
+ std::unique_ptr<FakeResource> fake_cpu_resource_;
+ std::unique_ptr<FakeResource> fake_quality_resource_;
};
class VideoStreamFactory
@@ -377,6 +426,15 @@ class MockBitrateObserver : public VideoBitrateAllocationObserver {
MOCK_METHOD1(OnBitrateAllocationUpdated, void(const VideoBitrateAllocation&));
};
+class MockEncoderSelector
+ : public VideoEncoderFactory::EncoderSelectorInterface {
+ public:
+ MOCK_METHOD1(OnCurrentEncoder, void(const SdpVideoFormat& format));
+ MOCK_METHOD1(OnAvailableBitrate,
+ absl::optional<SdpVideoFormat>(const DataRate& rate));
+ MOCK_METHOD0(OnEncoderBroken, absl::optional<SdpVideoFormat>());
+};
+
} // namespace
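A sketch of how the mock above can be wired into a test; the expectation values are illustrative, not taken from the patch:

    NiceMock<MockEncoderSelector> encoder_selector;
    EXPECT_CALL(encoder_selector, OnEncoderBroken())
        .WillOnce(Return(SdpVideoFormat("VP8")));
    // With the selector installed, a failing encoder should surface as a
    // RequestEncoderSwitch() call instead of a plain fallback request.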
class VideoStreamEncoderTest : public ::testing::Test {
@@ -417,7 +475,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
video_encoder_config.video_stream_factory->CreateEncoderStreams(
codec_width_, codec_height_, video_encoder_config);
max_framerate_ = streams[0].max_framerate;
- fake_clock_.SetTime(Timestamp::us(1234));
+ fake_clock_.SetTime(Timestamp::Micros(1234));
ConfigureEncoder(std::move(video_encoder_config));
}
@@ -538,8 +596,9 @@ class VideoStreamEncoderTest : public ::testing::Test {
EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(expected_bitrate))
.Times(1);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(
CreateFrame(1, codec_width_, codec_height_));
@@ -634,9 +693,11 @@ class VideoStreamEncoderTest : public ::testing::Test {
// attempting to scale resolution.
int fps_limit = wants.max_framerate_fps;
if (last_frame_pixels <= 320 * 240) {
- EXPECT_TRUE(7 <= fps_limit && fps_limit <= 10);
+ EXPECT_LE(7, fps_limit);
+ EXPECT_LE(fps_limit, 10);
} else if (last_frame_pixels <= 480 * 270) {
- EXPECT_TRUE(10 <= fps_limit && fps_limit <= 15);
+ EXPECT_LE(10, fps_limit);
+ EXPECT_LE(fps_limit, 15);
} else if (last_frame_pixels <= 640 * 480) {
EXPECT_LE(15, fps_limit);
} else {
@@ -646,28 +707,28 @@ class VideoStreamEncoderTest : public ::testing::Test {
void WaitForEncodedFrame(int64_t expected_ntp_time) {
sink_.WaitForEncodedFrame(expected_ntp_time);
- fake_clock_.AdvanceTime(TimeDelta::seconds(1) / max_framerate_);
+ fake_clock_.AdvanceTime(TimeDelta::Seconds(1) / max_framerate_);
}
bool TimedWaitForEncodedFrame(int64_t expected_ntp_time, int64_t timeout_ms) {
bool ok = sink_.TimedWaitForEncodedFrame(expected_ntp_time, timeout_ms);
- fake_clock_.AdvanceTime(TimeDelta::seconds(1) / max_framerate_);
+ fake_clock_.AdvanceTime(TimeDelta::Seconds(1) / max_framerate_);
return ok;
}
void WaitForEncodedFrame(uint32_t expected_width, uint32_t expected_height) {
sink_.WaitForEncodedFrame(expected_width, expected_height);
- fake_clock_.AdvanceTime(TimeDelta::seconds(1) / max_framerate_);
+ fake_clock_.AdvanceTime(TimeDelta::Seconds(1) / max_framerate_);
}
void ExpectDroppedFrame() {
sink_.ExpectDroppedFrame();
- fake_clock_.AdvanceTime(TimeDelta::seconds(1) / max_framerate_);
+ fake_clock_.AdvanceTime(TimeDelta::Seconds(1) / max_framerate_);
}
bool WaitForFrame(int64_t timeout_ms) {
bool ok = sink_.WaitForFrame(timeout_ms);
- fake_clock_.AdvanceTime(TimeDelta::seconds(1) / max_framerate_);
+ fake_clock_.AdvanceTime(TimeDelta::Seconds(1) / max_framerate_);
return ok;
}
@@ -703,6 +764,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
}
info.resolution_bitrate_limits = resolution_bitrate_limits_;
+ info.requested_resolution_alignment = requested_resolution_alignment_;
return info;
}
@@ -727,6 +789,11 @@ class VideoStreamEncoderTest : public ::testing::Test {
quality_scaling_ = b;
}
+ void SetRequestedResolutionAlignment(int requested_resolution_alignment) {
+ rtc::CritScope lock(&local_crit_sect_);
+ requested_resolution_alignment_ = requested_resolution_alignment;
+ }
+
void SetIsHardwareAccelerated(bool is_hardware_accelerated) {
rtc::CritScope lock(&local_crit_sect_);
is_hardware_accelerated_ = is_hardware_accelerated;
@@ -914,6 +981,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
int last_input_width_ RTC_GUARDED_BY(local_crit_sect_) = 0;
int last_input_height_ RTC_GUARDED_BY(local_crit_sect_) = 0;
bool quality_scaling_ RTC_GUARDED_BY(local_crit_sect_) = true;
+ int requested_resolution_alignment_ RTC_GUARDED_BY(local_crit_sect_) = 1;
bool is_hardware_accelerated_ RTC_GUARDED_BY(local_crit_sect_) = false;
std::unique_ptr<Vp8FrameBufferController> frame_buffer_controller_
RTC_GUARDED_BY(local_crit_sect_);
@@ -931,7 +999,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
bool expect_null_frame_ = false;
EncodedImageCallback* encoded_image_callback_
RTC_GUARDED_BY(local_crit_sect_) = nullptr;
- MockFecControllerOverride fec_controller_override_;
+ NiceMock<MockFecControllerOverride> fec_controller_override_;
int num_encoder_initializations_ RTC_GUARDED_BY(local_crit_sect_) = 0;
std::vector<ResolutionBitrateLimits> resolution_bitrate_limits_
RTC_GUARDED_BY(local_crit_sect_);
@@ -980,6 +1048,18 @@ class VideoStreamEncoderTest : public ::testing::Test {
EXPECT_EQ(expected_width, width);
}
+ void CheckLastFrameSizeIsMultipleOf(int resolution_alignment) {
+ int width = 0;
+ int height = 0;
+ {
+ rtc::CritScope lock(&crit_);
+ width = last_width_;
+ height = last_height_;
+ }
+ EXPECT_EQ(width % resolution_alignment, 0);
+ EXPECT_EQ(height % resolution_alignment, 0);
+ }
+
void CheckLastFrameRotationMatches(VideoRotation expected_rotation) {
VideoRotation rotation;
{
@@ -1061,6 +1141,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
void OnEncoderConfigurationChanged(
std::vector<VideoStream> streams,
+ bool is_svc,
VideoEncoderConfig::ContentType content_type,
int min_transmit_bitrate_bps) override {
rtc::CritScope lock(&crit_);
@@ -1129,8 +1210,9 @@ class VideoStreamEncoderTest : public ::testing::Test {
TEST_F(VideoStreamEncoderTest, EncodeOneFrame) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
rtc::Event frame_destroyed_event;
video_source_.IncomingCapturedFrame(CreateFrame(1, &frame_destroyed_event));
WaitForEncodedFrame(1);
@@ -1149,8 +1231,9 @@ TEST_F(VideoStreamEncoderTest, DropsFramesBeforeFirstOnBitrateUpdated) {
EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeoutMs));
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// The pending frame should be received.
WaitForEncodedFrame(2);
@@ -1162,13 +1245,15 @@ TEST_F(VideoStreamEncoderTest, DropsFramesBeforeFirstOnBitrateUpdated) {
TEST_F(VideoStreamEncoderTest, DropsFramesWhenRateSetToZero) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
WaitForEncodedFrame(1);
- video_stream_encoder_->OnBitrateUpdated(DataRate::bps(0), DataRate::bps(0),
- DataRate::bps(0), 0, 0);
+ video_stream_encoder_->OnBitrateUpdated(DataRate::BitsPerSec(0),
+ DataRate::BitsPerSec(0),
+ DataRate::BitsPerSec(0), 0, 0, 0);
// The encoder will cache up to one frame for a short duration. Adding two
// frames means that the first frame will be dropped and the second frame will
// be sent when the encoder is resumed.
@@ -1176,8 +1261,9 @@ TEST_F(VideoStreamEncoderTest, DropsFramesWhenRateSetToZero) {
video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
WaitForEncodedFrame(3);
video_source_.IncomingCapturedFrame(CreateFrame(4, nullptr));
WaitForEncodedFrame(4);
@@ -1186,8 +1272,9 @@ TEST_F(VideoStreamEncoderTest, DropsFramesWhenRateSetToZero) {
TEST_F(VideoStreamEncoderTest, DropsFramesWithSameOrOldNtpTimestamp) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
WaitForEncodedFrame(1);
@@ -1201,8 +1288,9 @@ TEST_F(VideoStreamEncoderTest, DropsFramesWithSameOrOldNtpTimestamp) {
TEST_F(VideoStreamEncoderTest, DropsFrameAfterStop) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
WaitForEncodedFrame(1);
@@ -1216,8 +1304,9 @@ TEST_F(VideoStreamEncoderTest, DropsFrameAfterStop) {
TEST_F(VideoStreamEncoderTest, DropsPendingFramesOnSlowEncode) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
fake_encoder_.BlockNextEncode();
video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
@@ -1234,8 +1323,9 @@ TEST_F(VideoStreamEncoderTest, DropsPendingFramesOnSlowEncode) {
TEST_F(VideoStreamEncoderTest, DropFrameWithFailedI420Conversion) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
rtc::Event frame_destroyed_event;
video_source_.IncomingCapturedFrame(
@@ -1255,8 +1345,9 @@ TEST_F(VideoStreamEncoderTest, DropFrameWithFailedI420ConversionWithCrop) {
// Capture a frame at codec_width_/codec_height_.
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
WaitForEncodedFrame(1);
// The encoder will have been configured once.
@@ -1274,11 +1365,37 @@ TEST_F(VideoStreamEncoderTest, DropFrameWithFailedI420ConversionWithCrop) {
video_stream_encoder_->Stop();
}
+TEST_F(VideoStreamEncoderTest, DropsFramesWhenCongestionWindowPushbackSet) {
+ video_stream_encoder_->OnBitrateUpdated(
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+
+ video_stream_encoder_->OnBitrateUpdated(
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0.5);
+  // The congestion window pushback is set to 0.5, which should drop half of
+  // the frames (a sketch of this pushback logic follows the test). Adding two
+  // frames means that the first frame will be dropped and the second frame
+  // will be sent to the encoder.
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr));
+ WaitForEncodedFrame(3);
+ video_source_.IncomingCapturedFrame(CreateFrame(4, nullptr));
+ video_source_.IncomingCapturedFrame(CreateFrame(5, nullptr));
+ WaitForEncodedFrame(5);
+ EXPECT_EQ(2u, stats_proxy_->GetStats().frames_dropped_by_congestion_window);
+ video_stream_encoder_->Stop();
+}
+
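The pushback behavior exercised above can be modeled as a fractional drop accumulator. A minimal sketch, assuming a drop ratio in [0, 1]; the exact phase (whether the first or second frame of a pair is dropped) depends on how the accumulator is seeded:

// Sketch: drop roughly `ratio` of incoming frames. With ratio = 0.5, every
// other frame is dropped, matching the two drops out of four frames that the
// test asserts. Illustrative only; not the production pushback controller.
class CongestionWindowPushbackSketch {
 public:
  explicit CongestionWindowPushbackSketch(double ratio)
      : ratio_(ratio), accumulator_(ratio) {}

  bool ShouldDropFrame() {
    accumulator_ += ratio_;
    if (accumulator_ >= 1.0) {
      accumulator_ -= 1.0;
      return true;  // Drop this frame.
    }
    return false;  // Forward this frame to the encoder.
  }

 private:
  const double ratio_;
  double accumulator_;
};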
TEST_F(VideoStreamEncoderTest,
ConfigureEncoderTriggersOnEncoderConfigurationChanged) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
EXPECT_EQ(0, sink_.number_of_reconfigurations());
// Capture a frame and wait for it to synchronize with the encoder thread.
@@ -1305,8 +1422,9 @@ TEST_F(VideoStreamEncoderTest,
TEST_F(VideoStreamEncoderTest, FrameResolutionChangeReconfigureEncoder) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Capture a frame and wait for it to synchronize with the encoder thread.
video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
@@ -1332,8 +1450,9 @@ TEST_F(VideoStreamEncoderTest, FrameResolutionChangeReconfigureEncoder) {
TEST_F(VideoStreamEncoderTest,
EncoderInstanceDestroyedBeforeAnotherInstanceCreated) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Capture a frame and wait for it to synchronize with the encoder thread.
video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
@@ -1355,8 +1474,9 @@ TEST_F(VideoStreamEncoderTest,
TEST_F(VideoStreamEncoderTest, BitrateLimitsChangeReconfigureRateAllocator) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
VideoEncoderConfig video_encoder_config;
test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
@@ -1399,93 +1519,84 @@ TEST_F(VideoStreamEncoderTest, BitrateLimitsChangeReconfigureRateAllocator) {
}
TEST_F(VideoStreamEncoderTest,
- EncoderRecommendedBitrateLimitsDoNotOverrideAppBitrateLimits) {
+ IntersectionOfEncoderAndAppBitrateLimitsUsedWhenBothProvided) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
+
+ const uint32_t kMinEncBitrateKbps = 100;
+ const uint32_t kMaxEncBitrateKbps = 1000;
+ const VideoEncoder::ResolutionBitrateLimits encoder_bitrate_limits(
+ /*frame_size_pixels=*/codec_width_ * codec_height_,
+ /*min_start_bitrate_bps=*/0,
+ /*min_bitrate_bps=*/kMinEncBitrateKbps * 1000,
+ /*max_bitrate_bps=*/kMaxEncBitrateKbps * 1000);
+ fake_encoder_.SetResolutionBitrateLimits({encoder_bitrate_limits});
VideoEncoderConfig video_encoder_config;
test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
- video_encoder_config.max_bitrate_bps = 0;
- video_encoder_config.simulcast_layers[0].min_bitrate_bps = 0;
+ video_encoder_config.max_bitrate_bps = (kMaxEncBitrateKbps + 1) * 1000;
+ video_encoder_config.simulcast_layers[0].min_bitrate_bps =
+ (kMinEncBitrateKbps + 1) * 1000;
video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
kMaxPayloadLength);
- video_source_.IncomingCapturedFrame(CreateFrame(1, 360, 180));
+  // When both the encoder and the app provide bitrate limits, the
+  // intersection of the provided ranges should be used (a sketch of this
+  // rule follows the test).
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
WaitForEncodedFrame(1);
-
- // Get the default bitrate limits and use them as baseline for custom
- // application and encoder recommended limits.
- const uint32_t kDefaultMinBitrateKbps =
- bitrate_allocator_factory_.codec_config().minBitrate;
- const uint32_t kDefaultMaxBitrateKbps =
- bitrate_allocator_factory_.codec_config().maxBitrate;
- const uint32_t kEncMinBitrateKbps = kDefaultMinBitrateKbps * 2;
- const uint32_t kEncMaxBitrateKbps = kDefaultMaxBitrateKbps * 2;
- const uint32_t kAppMinBitrateKbps = kDefaultMinBitrateKbps * 3;
- const uint32_t kAppMaxBitrateKbps = kDefaultMaxBitrateKbps * 3;
-
- const VideoEncoder::ResolutionBitrateLimits encoder_bitrate_limits(
- codec_width_ * codec_height_, kEncMinBitrateKbps * 1000,
- kEncMinBitrateKbps * 1000, kEncMaxBitrateKbps * 1000);
- fake_encoder_.SetResolutionBitrateLimits({encoder_bitrate_limits});
-
- // Change resolution. This will trigger encoder re-configuration and video
- // stream encoder will pick up the bitrate limits recommended by encoder.
- video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360));
- WaitForEncodedFrame(2);
- video_source_.IncomingCapturedFrame(CreateFrame(3, 360, 180));
- WaitForEncodedFrame(3);
-
- // App bitrate limits are not set - bitrate limits recommended by encoder
- // should be used.
- EXPECT_EQ(kEncMaxBitrateKbps,
+ EXPECT_EQ(kMaxEncBitrateKbps,
bitrate_allocator_factory_.codec_config().maxBitrate);
- EXPECT_EQ(kEncMinBitrateKbps,
+ EXPECT_EQ(kMinEncBitrateKbps + 1,
bitrate_allocator_factory_.codec_config().minBitrate);
- video_encoder_config.max_bitrate_bps = kAppMaxBitrateKbps * 1000;
- video_encoder_config.simulcast_layers[0].min_bitrate_bps = 0;
+ video_encoder_config.max_bitrate_bps = (kMaxEncBitrateKbps - 1) * 1000;
+ video_encoder_config.simulcast_layers[0].min_bitrate_bps =
+ (kMinEncBitrateKbps - 1) * 1000;
video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
kMaxPayloadLength);
- video_source_.IncomingCapturedFrame(CreateFrame(4, nullptr));
- WaitForEncodedFrame(4);
-
- // App limited the max bitrate - bitrate limits recommended by encoder should
- // not be applied.
- EXPECT_EQ(kAppMaxBitrateKbps,
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr));
+ WaitForEncodedFrame(2);
+ EXPECT_EQ(kMaxEncBitrateKbps - 1,
bitrate_allocator_factory_.codec_config().maxBitrate);
- EXPECT_EQ(kDefaultMinBitrateKbps,
+ EXPECT_EQ(kMinEncBitrateKbps,
bitrate_allocator_factory_.codec_config().minBitrate);
- video_encoder_config.max_bitrate_bps = 0;
- video_encoder_config.simulcast_layers[0].min_bitrate_bps =
- kAppMinBitrateKbps * 1000;
- video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
- kMaxPayloadLength);
- video_source_.IncomingCapturedFrame(CreateFrame(5, nullptr));
- WaitForEncodedFrame(5);
+ video_stream_encoder_->Stop();
+}
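A minimal sketch of the limit-combining rule these two tests cover, with hypothetical names: when both sources provide limits, the ranges are intersected, and when they do not overlap, the encoder's limits are ignored (as the next test shows):

#include <algorithm>

struct BitrateLimits {
  int min_bps;
  int max_bps;
};

// Sketch: intersect app- and encoder-provided limits; fall back to the app
// limits when the ranges do not overlap. Illustrative only.
BitrateLimits CombineLimits(BitrateLimits app, BitrateLimits encoder) {
  BitrateLimits merged{std::max(app.min_bps, encoder.min_bps),
                       std::min(app.max_bps, encoder.max_bps)};
  if (merged.min_bps > merged.max_bps) {
    return app;  // No intersection: encoder limits are ignored.
  }
  return merged;
}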
- // App limited the min bitrate - bitrate limits recommended by encoder should
- // not be applied.
- EXPECT_EQ(kDefaultMaxBitrateKbps,
- bitrate_allocator_factory_.codec_config().maxBitrate);
- EXPECT_EQ(kAppMinBitrateKbps,
- bitrate_allocator_factory_.codec_config().minBitrate);
+TEST_F(VideoStreamEncoderTest,
+ EncoderAndAppLimitsDontIntersectEncoderLimitsIgnored) {
+ video_stream_encoder_->OnBitrateUpdated(
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
+
+ const uint32_t kMinAppBitrateKbps = 100;
+ const uint32_t kMaxAppBitrateKbps = 200;
+ const uint32_t kMinEncBitrateKbps = kMaxAppBitrateKbps + 1;
+ const uint32_t kMaxEncBitrateKbps = kMaxAppBitrateKbps * 2;
+ const VideoEncoder::ResolutionBitrateLimits encoder_bitrate_limits(
+ /*frame_size_pixels=*/codec_width_ * codec_height_,
+ /*min_start_bitrate_bps=*/0,
+ /*min_bitrate_bps=*/kMinEncBitrateKbps * 1000,
+ /*max_bitrate_bps=*/kMaxEncBitrateKbps * 1000);
+ fake_encoder_.SetResolutionBitrateLimits({encoder_bitrate_limits});
- video_encoder_config.max_bitrate_bps = kAppMaxBitrateKbps * 1000;
+ VideoEncoderConfig video_encoder_config;
+ test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
+ video_encoder_config.max_bitrate_bps = kMaxAppBitrateKbps * 1000;
video_encoder_config.simulcast_layers[0].min_bitrate_bps =
- kAppMinBitrateKbps * 1000;
- video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
+ kMinAppBitrateKbps * 1000;
+ video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(),
kMaxPayloadLength);
- video_source_.IncomingCapturedFrame(CreateFrame(6, nullptr));
- WaitForEncodedFrame(6);
- // App limited both min and max bitrates - bitrate limits recommended by
- // encoder should not be applied.
- EXPECT_EQ(kAppMaxBitrateKbps,
+ video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(kMaxAppBitrateKbps,
bitrate_allocator_factory_.codec_config().maxBitrate);
- EXPECT_EQ(kAppMinBitrateKbps,
+ EXPECT_EQ(kMinAppBitrateKbps,
bitrate_allocator_factory_.codec_config().minBitrate);
video_stream_encoder_->Stop();
@@ -1494,8 +1605,9 @@ TEST_F(VideoStreamEncoderTest,
TEST_F(VideoStreamEncoderTest,
EncoderRecommendedMaxAndMinBitratesUsedForGivenResolution) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
const VideoEncoder::ResolutionBitrateLimits encoder_bitrate_limits_270p(
480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000);
@@ -1563,8 +1675,9 @@ TEST_F(VideoStreamEncoderTest,
TEST_F(VideoStreamEncoderTest, EncoderRecommendedMaxBitrateCapsTargetBitrate) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
VideoEncoderConfig video_encoder_config;
test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config);
@@ -1623,6 +1736,34 @@ TEST_F(VideoStreamEncoderTest, SinkWantsRotationApplied) {
video_stream_encoder_->Stop();
}
+TEST_F(VideoStreamEncoderTest, SinkWantsResolutionAlignment) {
+ constexpr int kRequestedResolutionAlignment = 7;
+ video_source_.set_adaptation_enabled(true);
+ fake_encoder_.SetRequestedResolutionAlignment(kRequestedResolutionAlignment);
+ video_stream_encoder_->OnBitrateUpdated(
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
+
+ // On the 1st frame, we should have initialized the encoder and
+ // asked for its resolution requirements.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(1, codec_width_, codec_height_));
+ WaitForEncodedFrame(1);
+ EXPECT_EQ(video_source_.sink_wants().resolution_alignment,
+ kRequestedResolutionAlignment);
+
+  // On the 2nd frame, we should be receiving a correctly aligned resolution.
+  // (It's up to the encoder to potentially drop the previous frame,
+  // to avoid coding back-to-back keyframes.) A sketch of the alignment
+  // arithmetic follows this test.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(2, codec_width_, codec_height_));
+ WaitForEncodedFrame(2);
+ sink_.CheckLastFrameSizeIsMultipleOf(kRequestedResolutionAlignment);
+
+ video_stream_encoder_->Stop();
+}
+
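A minimal sketch of the alignment arithmetic referenced above, assuming dimensions are adjusted by cropping down to the nearest multiple (the production code may pick a different strategy):

// Sketch: align a resolution to a multiple of `alignment` by cropping down.
// With alignment = 7, a 1280x720 input becomes 1274x714, which is the kind of
// size CheckLastFrameSizeIsMultipleOf() accepts. Illustrative only.
struct Resolution {
  int width;
  int height;
};

Resolution AlignDown(Resolution in, int alignment) {
  return {in.width - (in.width % alignment),
          in.height - (in.height % alignment)};
}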
TEST_F(VideoStreamEncoderTest, TestCpuDowngrades_BalancedMode) {
const int kFramerateFps = 30;
const int kWidth = 1280;
@@ -1634,8 +1775,9 @@ TEST_F(VideoStreamEncoderTest, TestCpuDowngrades_BalancedMode) {
// Enable BALANCED preference, no initial limitation.
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_stream_encoder_->SetSource(&video_source_,
webrtc::DegradationPreference::BALANCED);
VerifyNoLimitation(video_source_.sink_wants());
@@ -1716,10 +1858,12 @@ TEST_F(VideoStreamEncoderTest, TestCpuDowngrades_BalancedMode) {
video_stream_encoder_->Stop();
}
+
TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
VerifyNoLimitation(video_source_.sink_wants());
const int kFrameWidth = 1280;
@@ -1750,7 +1894,12 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
test::FrameForwarder new_video_source;
video_stream_encoder_->SetSource(
&new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
-
+ // Give the encoder queue time to process the change in degradation preference
+ // by waiting for an encoded frame.
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameWidth));
+ sink_.WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
// Initially no degradation registered.
VerifyFpsMaxResolutionMax(new_video_source.sink_wants());
@@ -1776,6 +1925,12 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
// Turn off degradation completely.
video_stream_encoder_->SetSource(&new_video_source,
webrtc::DegradationPreference::DISABLED);
+ // Give the encoder queue time to process the change in degradation preference
+ // by waiting for an encoded frame.
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameWidth));
+ sink_.WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
VerifyFpsMaxResolutionMax(new_video_source.sink_wants());
video_stream_encoder_->TriggerCpuOveruse();
@@ -1790,6 +1945,12 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
  // Calling SetSource with resolution scaling enabled applies the old SinkWants.
video_stream_encoder_->SetSource(
&new_video_source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ // Give the encoder queue time to process the change in degradation preference
+ // by waiting for an encoded frame.
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameWidth));
+ sink_.WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
EXPECT_LT(new_video_source.sink_wants().max_pixel_count,
kFrameWidth * kFrameHeight);
EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
@@ -1798,6 +1959,12 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
  // Calling SetSource with framerate scaling enabled applies the old SinkWants.
video_stream_encoder_->SetSource(
&new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+ // Give the encoder queue time to process the change in degradation preference
+ // by waiting for an encoded frame.
+ new_video_source.IncomingCapturedFrame(
+ CreateFrame(frame_timestamp, kFrameWidth, kFrameWidth));
+ sink_.WaitForEncodedFrame(frame_timestamp);
+ frame_timestamp += kFrameIntervalMs;
EXPECT_FALSE(new_video_source.sink_wants().target_pixel_count);
EXPECT_EQ(std::numeric_limits<int>::max(),
new_video_source.sink_wants().max_pixel_count);
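For reference, a max_pixel_count of std::numeric_limits<int>::max() together with an absent target_pixel_count is the "no restriction" state that VerifyFpsMaxResolutionMax() and the assertions above check for. A hedged helper illustrating that convention; the field names mirror the test's sink_wants() accessors, but the struct itself is hypothetical:

#include <limits>
#include <optional>

struct WantsSketch {
  std::optional<int> target_pixel_count;
  int max_pixel_count = std::numeric_limits<int>::max();
};

// Sketch: a source is unrestricted when no target is set and the pixel cap
// is still at its sentinel maximum. Illustrative only.
bool IsUnrestricted(const WantsSketch& wants) {
  return !wants.target_pixel_count &&
         wants.max_pixel_count == std::numeric_limits<int>::max();
}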
@@ -1808,8 +1975,9 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) {
TEST_F(VideoStreamEncoderTest, StatsTracksQualityAdaptationStats) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
const int kWidth = 1280;
const int kHeight = 720;
@@ -1843,8 +2011,9 @@ TEST_F(VideoStreamEncoderTest, StatsTracksQualityAdaptationStats) {
TEST_F(VideoStreamEncoderTest, StatsTracksCpuAdaptationStats) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
const int kWidth = 1280;
const int kHeight = 720;
@@ -1878,8 +2047,9 @@ TEST_F(VideoStreamEncoderTest, StatsTracksCpuAdaptationStats) {
TEST_F(VideoStreamEncoderTest, SwitchingSourceKeepsCpuAdaptation) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
const int kWidth = 1280;
const int kHeight = 720;
@@ -1948,8 +2118,9 @@ TEST_F(VideoStreamEncoderTest, SwitchingSourceKeepsCpuAdaptation) {
TEST_F(VideoStreamEncoderTest, SwitchingSourceKeepsQualityAdaptation) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
const int kWidth = 1280;
const int kHeight = 720;
@@ -2010,8 +2181,9 @@ TEST_F(VideoStreamEncoderTest, SwitchingSourceKeepsQualityAdaptation) {
TEST_F(VideoStreamEncoderTest,
QualityAdaptationStatsAreResetWhenScalerIsDisabled) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
const int kWidth = 1280;
const int kHeight = 720;
@@ -2065,10 +2237,78 @@ TEST_F(VideoStreamEncoderTest,
}
TEST_F(VideoStreamEncoderTest,
+ StatsTracksCpuAdaptationStatsWhenSwitchingSource_Balanced) {
+ video_stream_encoder_->OnBitrateUpdated(
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
+
+ const int kWidth = 1280;
+ const int kHeight = 720;
+ int sequence = 1;
+
+ // Enable BALANCED preference, no initial limitation.
+ test::FrameForwarder source;
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+ source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ VideoSendStream::Stats stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(0, stats.number_of_cpu_adapt_changes);
+
+ // Trigger CPU overuse, should now adapt down.
+ video_stream_encoder_->TriggerCpuOveruse();
+ source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+  // Setting a new degradation preference should clear restrictions since we
+  // changed from BALANCED.
+ video_stream_encoder_->SetSource(
+ &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE);
+ source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(1, stats.number_of_cpu_adapt_changes);
+
+ // Force an input frame rate to be available, or the adaptation call won't
+ // know what framerate to adapt from.
+ VideoSendStream::Stats mock_stats = stats_proxy_->GetStats();
+ mock_stats.input_frame_rate = 30;
+ stats_proxy_->SetMockStats(mock_stats);
+ video_stream_encoder_->TriggerCpuOveruse();
+ stats_proxy_->ResetMockStats();
+ source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+
+ // We have now adapted once.
+ stats = stats_proxy_->GetStats();
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ // Back to BALANCED, should clear the restrictions again.
+ video_stream_encoder_->SetSource(&source,
+ webrtc::DegradationPreference::BALANCED);
+ source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight));
+ WaitForEncodedFrame(sequence++);
+ stats = stats_proxy_->GetStats();
+ EXPECT_FALSE(stats.cpu_limited_resolution);
+ EXPECT_FALSE(stats.cpu_limited_framerate);
+ EXPECT_EQ(2, stats.number_of_cpu_adapt_changes);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest,
StatsTracksCpuAdaptationStatsWhenSwitchingSource) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
const int kWidth = 1280;
const int kHeight = 720;
@@ -2205,8 +2445,9 @@ TEST_F(VideoStreamEncoderTest,
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Expect no scaling to begin with.
VerifyNoLimitation(video_source_.sink_wants());
@@ -2255,8 +2496,9 @@ TEST_F(VideoStreamEncoderTest,
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable MAINTAIN_FRAMERATE preference, no initial limitation.
test::FrameForwarder source;
@@ -2289,8 +2531,9 @@ TEST_F(VideoStreamEncoderTest, SkipsSameOrLargerAdaptDownRequest_BalancedMode) {
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable BALANCED preference, no initial limitation.
test::FrameForwarder source;
@@ -2331,8 +2574,9 @@ TEST_F(VideoStreamEncoderTest,
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable MAINTAIN_FRAMERATE preference, no initial limitation.
test::FrameForwarder source;
@@ -2359,8 +2603,9 @@ TEST_F(VideoStreamEncoderTest,
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable MAINTAIN_RESOLUTION preference, no initial limitation.
test::FrameForwarder source;
@@ -2386,8 +2631,9 @@ TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_BalancedMode) {
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable BALANCED preference, no initial limitation.
test::FrameForwarder source;
@@ -2415,8 +2661,9 @@ TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_DisabledMode) {
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable DISABLED preference, no initial limitation.
test::FrameForwarder source;
@@ -2445,8 +2692,9 @@ TEST_F(VideoStreamEncoderTest,
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable MAINTAIN_FRAMERATE preference, no initial limitation.
AdaptingFrameForwarder source;
@@ -2484,8 +2732,9 @@ TEST_F(VideoStreamEncoderTest,
const int kHeight = 720;
const int kInputFps = 30;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
VideoSendStream::Stats stats = stats_proxy_->GetStats();
stats.input_frame_rate = kInputFps;
@@ -2506,12 +2755,16 @@ TEST_F(VideoStreamEncoderTest,
test::FrameForwarder new_video_source;
video_stream_encoder_->SetSource(
&new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
+ // Give the encoder queue time to process the change in degradation preference
+ // by waiting for an encoded frame.
+ new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(3);
VerifyFpsMaxResolutionMax(new_video_source.sink_wants());
// Trigger adapt down, expect reduced framerate.
video_stream_encoder_->TriggerQualityLow();
- new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
- sink_.WaitForEncodedFrame(3);
+ new_video_source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight));
+ sink_.WaitForEncodedFrame(4);
VerifyFpsLtResolutionMax(new_video_source.sink_wants(), kInputFps);
// Trigger adapt up, expect no restriction.
@@ -2527,8 +2780,9 @@ TEST_F(VideoStreamEncoderTest, DoesNotScaleBelowSetResolutionLimit) {
const size_t kNumFrames = 10;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
  // Enable the adapter; expected input resolutions when downscaling:
// 1280x720 -> 960x540 -> 640x360 -> 480x270 -> 320x180 (kMinPixelsPerFrame)
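The ladder in the comment above can be reproduced by alternating 3/4 and 2/3 per-dimension scale factors; this sketch just regenerates the listed steps, whereas the production adapter derives them from requested pixel counts:

#include <cstdio>

// Sketch: print 1280x720 -> 960x540 -> 640x360 -> 480x270 -> 320x180 by
// alternating 3/4 and 2/3 scale factors per dimension. Illustrative only.
int main() {
  int w = 1280;
  int h = 720;
  bool three_quarters = true;
  std::printf("%dx%d\n", w, h);
  while (w > 320) {
    if (three_quarters) {
      w = w * 3 / 4;
      h = h * 3 / 4;
    } else {
      w = w * 2 / 3;
      h = h * 2 / 3;
    }
    three_quarters = !three_quarters;
    std::printf("%dx%d\n", w, h);
  }
  return 0;
}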
@@ -2564,8 +2818,9 @@ TEST_F(VideoStreamEncoderTest,
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable MAINTAIN_FRAMERATE preference, no initial limitation.
AdaptingFrameForwarder source;
@@ -2624,8 +2879,9 @@ TEST_F(VideoStreamEncoderTest,
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable BALANCED preference, no initial limitation.
AdaptingFrameForwarder source;
@@ -2684,9 +2940,10 @@ TEST_F(VideoStreamEncoderTest, AdaptUpIfBwEstimateIsHigherThanMinBitrate) {
{kEncoderBitrateLimits540p, kEncoderBitrateLimits720p});
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kEncoderBitrateLimits720p.min_start_bitrate_bps),
- DataRate::bps(kEncoderBitrateLimits720p.min_start_bitrate_bps),
- DataRate::bps(kEncoderBitrateLimits720p.min_start_bitrate_bps), 0, 0);
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps), 0,
+ 0, 0);
// Enable MAINTAIN_FRAMERATE preference, no initial limitation.
AdaptingFrameForwarder source;
@@ -2701,9 +2958,10 @@ TEST_F(VideoStreamEncoderTest, AdaptUpIfBwEstimateIsHigherThanMinBitrate) {
// Reduce bitrate and trigger adapt down.
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kEncoderBitrateLimits540p.min_start_bitrate_bps),
- DataRate::bps(kEncoderBitrateLimits540p.min_start_bitrate_bps),
- DataRate::bps(kEncoderBitrateLimits540p.min_start_bitrate_bps), 0, 0);
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps), 0,
+ 0, 0);
video_stream_encoder_->TriggerQualityLow();
// Insert 720p frame. It should be downscaled and encoded.
@@ -2718,9 +2976,10 @@ TEST_F(VideoStreamEncoderTest, AdaptUpIfBwEstimateIsHigherThanMinBitrate) {
// Increase bitrate.
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kEncoderBitrateLimits720p.min_start_bitrate_bps),
- DataRate::bps(kEncoderBitrateLimits720p.min_start_bitrate_bps),
- DataRate::bps(kEncoderBitrateLimits720p.min_start_bitrate_bps), 0, 0);
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits720p.min_start_bitrate_bps), 0,
+ 0, 0);
// Trigger adapt up. Higher resolution should be requested.
video_stream_encoder_->TriggerQualityHigh();
@@ -2735,9 +2994,10 @@ TEST_F(VideoStreamEncoderTest, DropFirstFramesIfBwEstimateIsTooLow) {
// Set bitrate equal to min bitrate of 540p.
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kEncoderBitrateLimits540p.min_start_bitrate_bps),
- DataRate::bps(kEncoderBitrateLimits540p.min_start_bitrate_bps),
- DataRate::bps(kEncoderBitrateLimits540p.min_start_bitrate_bps), 0, 0);
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps),
+ DataRate::BitsPerSec(kEncoderBitrateLimits540p.min_start_bitrate_bps), 0,
+ 0, 0);
// Enable MAINTAIN_FRAMERATE preference, no initial limitation.
AdaptingFrameForwarder source;
@@ -2773,9 +3033,9 @@ class BalancedDegradationTest : public VideoStreamEncoderTest {
}
void OnBitrateUpdated(int bitrate_bps) {
- video_stream_encoder_->OnBitrateUpdated(DataRate::bps(bitrate_bps),
- DataRate::bps(bitrate_bps),
- DataRate::bps(bitrate_bps), 0, 0);
+ video_stream_encoder_->OnBitrateUpdated(
+ DataRate::BitsPerSec(bitrate_bps), DataRate::BitsPerSec(bitrate_bps),
+ DataRate::BitsPerSec(bitrate_bps), 0, 0, 0);
}
void InsertFrame() {
@@ -3060,8 +3320,9 @@ TEST_F(VideoStreamEncoderTest,
const int kWidth = 1280;
const int kHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable MAINTAIN_FRAMERATE preference, no initial limitation.
AdaptingFrameForwarder source;
@@ -3199,8 +3460,9 @@ TEST_F(VideoStreamEncoderTest, CpuLimitedHistogramIsReported) {
const int kHeight = 360;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
for (int i = 1; i <= SendStatisticsProxy::kMinRequiredMetricsSamples; ++i) {
video_source_.IncomingCapturedFrame(CreateFrame(i, kWidth, kHeight));
@@ -3218,17 +3480,18 @@ TEST_F(VideoStreamEncoderTest, CpuLimitedHistogramIsReported) {
video_stream_encoder_.reset();
stats_proxy_.reset();
- EXPECT_EQ(1,
- metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
- EXPECT_EQ(
+ EXPECT_METRIC_EQ(
+ 1, metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent"));
+ EXPECT_METRIC_EQ(
1, metrics::NumEvents("WebRTC.Video.CpuLimitedResolutionInPercent", 50));
}
TEST_F(VideoStreamEncoderTest,
CpuLimitedHistogramIsNotReportedForDisabledDegradation) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
const int kWidth = 640;
const int kHeight = 360;
@@ -3254,15 +3517,16 @@ TEST_F(VideoStreamEncoderTest, CallsBitrateObserver) {
const int kDefaultFps = 30;
const VideoBitrateAllocation expected_bitrate =
- DefaultVideoBitrateAllocator(fake_encoder_.codec_config())
+ SimulcastRateAllocator(fake_encoder_.codec_config())
.Allocate(VideoBitrateAllocationParameters(kLowTargetBitrateBps,
kDefaultFps));
EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(expected_bitrate))
.Times(1);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kLowTargetBitrateBps), DataRate::bps(kLowTargetBitrateBps),
- DataRate::bps(kLowTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kLowTargetBitrateBps),
+ DataRate::BitsPerSec(kLowTargetBitrateBps),
+ DataRate::BitsPerSec(kLowTargetBitrateBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(
CreateFrame(rtc::TimeMillis(), codec_width_, codec_height_));
@@ -3273,7 +3537,7 @@ TEST_F(VideoStreamEncoderTest, CallsBitrateObserver) {
EXPECT_EQ(bitrate_allocation.get_sum_bps(), kLowTargetBitrateBps);
  // TODO(srte): The use of millisecs here looks like an error, but the tests
  // fail when using seconds; this should be investigated.
- fake_clock_.AdvanceTime(TimeDelta::ms(1) / kDefaultFps);
+ fake_clock_.AdvanceTime(TimeDelta::Millis(1) / kDefaultFps);
// Not called on second frame.
EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(expected_bitrate))
@@ -3281,7 +3545,7 @@ TEST_F(VideoStreamEncoderTest, CallsBitrateObserver) {
video_source_.IncomingCapturedFrame(
CreateFrame(rtc::TimeMillis(), codec_width_, codec_height_));
WaitForEncodedFrame(rtc::TimeMillis());
- fake_clock_.AdvanceTime(TimeDelta::ms(1) / kDefaultFps);
+ fake_clock_.AdvanceTime(TimeDelta::Millis(1) / kDefaultFps);
// Called after a process interval.
EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(expected_bitrate))
@@ -3291,7 +3555,7 @@ TEST_F(VideoStreamEncoderTest, CallsBitrateObserver) {
video_source_.IncomingCapturedFrame(
CreateFrame(rtc::TimeMillis(), codec_width_, codec_height_));
WaitForEncodedFrame(rtc::TimeMillis());
- fake_clock_.AdvanceTime(TimeDelta::ms(1) / kDefaultFps);
+ fake_clock_.AdvanceTime(TimeDelta::Millis(1) / kDefaultFps);
}
// Since rates are unchanged, encoder should not be reconfigured.
@@ -3369,8 +3633,9 @@ TEST_F(VideoStreamEncoderTest, OveruseDetectorUpdatedOnReconfigureAndAdaption) {
const int kFramerate = 24;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
test::FrameForwarder source;
video_stream_encoder_->SetSource(
&source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
@@ -3431,8 +3696,9 @@ TEST_F(VideoStreamEncoderTest,
const int kHighFramerate = 25;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
test::FrameForwarder source;
video_stream_encoder_->SetSource(
&source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
@@ -3496,8 +3762,9 @@ TEST_F(VideoStreamEncoderTest,
const int kFramerate = 24;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
test::FrameForwarder source;
video_stream_encoder_->SetSource(
&source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
@@ -3543,9 +3810,9 @@ TEST_F(VideoStreamEncoderTest,
TEST_F(VideoStreamEncoderTest, DropsFramesAndScalesWhenBitrateIsTooLow) {
const int kTooLowBitrateForFrameSizeBps = 10000;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTooLowBitrateForFrameSizeBps),
- DataRate::bps(kTooLowBitrateForFrameSizeBps),
- DataRate::bps(kTooLowBitrateForFrameSizeBps), 0, 0);
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps), 0, 0, 0);
const int kWidth = 640;
const int kHeight = 360;
@@ -3575,9 +3842,9 @@ TEST_F(VideoStreamEncoderTest,
NumberOfDroppedFramesLimitedWhenBitrateIsTooLow) {
const int kTooLowBitrateForFrameSizeBps = 10000;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTooLowBitrateForFrameSizeBps),
- DataRate::bps(kTooLowBitrateForFrameSizeBps),
- DataRate::bps(kTooLowBitrateForFrameSizeBps), 0, 0);
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps), 0, 0, 0);
const int kWidth = 640;
const int kHeight = 360;
@@ -3602,8 +3869,9 @@ TEST_F(VideoStreamEncoderTest,
const int kWidth = 640;
const int kHeight = 360;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kLowTargetBitrateBps), DataRate::bps(kLowTargetBitrateBps),
- DataRate::bps(kLowTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kLowTargetBitrateBps),
+ DataRate::BitsPerSec(kLowTargetBitrateBps),
+ DataRate::BitsPerSec(kLowTargetBitrateBps), 0, 0, 0);
// Set degradation preference.
video_stream_encoder_->SetSource(
@@ -3628,8 +3896,9 @@ TEST_F(VideoStreamEncoderTest, InitialFrameDropOffWhenEncoderDisabledScaling) {
video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config),
kMaxPayloadLength);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kLowTargetBitrateBps), DataRate::bps(kLowTargetBitrateBps),
- DataRate::bps(kLowTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kLowTargetBitrateBps),
+ DataRate::BitsPerSec(kLowTargetBitrateBps),
+ DataRate::BitsPerSec(kLowTargetBitrateBps), 0, 0, 0);
// Force quality scaler reconfiguration by resetting the source.
video_stream_encoder_->SetSource(&video_source_,
@@ -3643,35 +3912,6 @@ TEST_F(VideoStreamEncoderTest, InitialFrameDropOffWhenEncoderDisabledScaling) {
fake_encoder_.SetQualityScaling(true);
}
-TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenBWEstimateReady) {
- webrtc::test::ScopedFieldTrials field_trials(
- "WebRTC-InitialFramedrop/Enabled/");
- // Reset encoder for field trials to take effect.
- ConfigureEncoder(video_encoder_config_.Copy());
- const int kTooLowBitrateForFrameSizeBps = 10000;
- const int kWidth = 640;
- const int kHeight = 360;
-
- video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
- video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
- // Frame should not be dropped.
- WaitForEncodedFrame(1);
-
- video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTooLowBitrateForFrameSizeBps),
- DataRate::bps(kTooLowBitrateForFrameSizeBps),
- DataRate::bps(kTooLowBitrateForFrameSizeBps), 0, 0);
- video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
- // Expect to drop this frame, the wait should time out.
- ExpectDroppedFrame();
-
- // Expect the sink_wants to specify a scaled frame.
- EXPECT_LT(video_source_.sink_wants().max_pixel_count, kWidth * kHeight);
- video_stream_encoder_->Stop();
-}
-
TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenBweDrops) {
webrtc::test::ScopedFieldTrials field_trials(
"WebRTC-Video-QualityScalerSettings/"
@@ -3684,24 +3924,25 @@ TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenBweDrops) {
const int kHeight = 360;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight));
// Frame should not be dropped.
WaitForEncodedFrame(1);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kNotTooLowBitrateForFrameSizeBps),
- DataRate::bps(kNotTooLowBitrateForFrameSizeBps),
- DataRate::bps(kNotTooLowBitrateForFrameSizeBps), 0, 0);
+ DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight));
// Frame should not be dropped.
WaitForEncodedFrame(2);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTooLowBitrateForFrameSizeBps),
- DataRate::bps(kTooLowBitrateForFrameSizeBps),
- DataRate::bps(kTooLowBitrateForFrameSizeBps), 0, 0);
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps),
+ DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight));
// Expect to drop this frame, the wait should time out.
ExpectDroppedFrame();
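The "too low bitrate for the frame size" condition driving these drops can be approximated as a bits-per-pixel floor. A hedged sketch; the threshold constant here is made up for illustration, and the real heuristic lives in the initial frame-drop / quality-scaler logic:

// Sketch: drop early frames when the target bitrate is implausibly low for
// the frame size. kMinBitsPerPixelPerSecond is a hypothetical constant; the
// test's 10 kbps against 640x360 fails any reasonable threshold.
bool ShouldDropInitialFrame(int target_bitrate_bps, int width, int height) {
  const double kMinBitsPerPixelPerSecond = 0.2;  // Hypothetical.
  return target_bitrate_bps <
         kMinBitsPerPixelPerSecond * width * height;
}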
@@ -3729,9 +3970,10 @@ TEST_F(VideoStreamEncoderTest, RampsUpInQualityWhenBwIsHigh) {
// Start at low bitrate.
const int kLowBitrateBps = 200000;
- video_stream_encoder_->OnBitrateUpdated(DataRate::bps(kLowBitrateBps),
- DataRate::bps(kLowBitrateBps),
- DataRate::bps(kLowBitrateBps), 0, 0);
+ video_stream_encoder_->OnBitrateUpdated(DataRate::BitsPerSec(kLowBitrateBps),
+ DataRate::BitsPerSec(kLowBitrateBps),
+ DataRate::BitsPerSec(kLowBitrateBps),
+ 0, 0, 0);
// Expect first frame to be dropped and resolution to be limited.
const int kWidth = 1280;
@@ -3743,10 +3985,10 @@ TEST_F(VideoStreamEncoderTest, RampsUpInQualityWhenBwIsHigh) {
EXPECT_LT(source.sink_wants().max_pixel_count, kWidth * kHeight);
// Increase bitrate to encoder max.
- video_stream_encoder_->OnBitrateUpdated(DataRate::bps(config.max_bitrate_bps),
- DataRate::bps(config.max_bitrate_bps),
- DataRate::bps(config.max_bitrate_bps),
- 0, 0);
+ video_stream_encoder_->OnBitrateUpdated(
+ DataRate::BitsPerSec(config.max_bitrate_bps),
+ DataRate::BitsPerSec(config.max_bitrate_bps),
+ DataRate::BitsPerSec(config.max_bitrate_bps), 0, 0, 0);
// Insert frames and advance |min_duration_ms|.
for (size_t i = 1; i <= 10; i++) {
@@ -3757,7 +3999,7 @@ TEST_F(VideoStreamEncoderTest, RampsUpInQualityWhenBwIsHigh) {
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution);
EXPECT_LT(source.sink_wants().max_pixel_count, kWidth * kHeight);
- fake_clock_.AdvanceTime(TimeDelta::ms(2000));
+ fake_clock_.AdvanceTime(TimeDelta::Millis(2000));
  // Inserting a frame should trigger high BW and release the quality limitation.
timestamp_ms += kFrameIntervalMs;
@@ -3779,8 +4021,9 @@ TEST_F(VideoStreamEncoderTest,
const int kTooSmallWidth = 10;
const int kTooSmallHeight = 10;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable MAINTAIN_FRAMERATE preference, no initial limitation.
test::FrameForwarder source;
@@ -3806,8 +4049,9 @@ TEST_F(VideoStreamEncoderTest,
const int kTooSmallHeight = 10;
const int kFpsLimit = 7;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable BALANCED preference, no initial limitation.
test::FrameForwarder source;
@@ -3841,8 +4085,9 @@ TEST_F(VideoStreamEncoderTest,
TEST_F(VideoStreamEncoderTest, FailingInitEncodeDoesntCauseCrash) {
fake_encoder_.ForceInitEncodeFailure(true);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
ResetEncoder("VP8", 2, 1, 1, false);
const int kFrameWidth = 1280;
const int kFrameHeight = 720;
@@ -3856,8 +4101,9 @@ TEST_F(VideoStreamEncoderTest, FailingInitEncodeDoesntCauseCrash) {
TEST_F(VideoStreamEncoderTest,
AdaptsResolutionOnOveruse_MaintainFramerateMode) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
const int kFrameWidth = 1280;
const int kFrameHeight = 720;
@@ -3891,8 +4137,9 @@ TEST_F(VideoStreamEncoderTest,
const int kFrameHeight = 720;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_stream_encoder_->SetSource(
&video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
video_source_.set_adaptation_enabled(true);
@@ -3994,8 +4241,9 @@ TEST_F(VideoStreamEncoderTest, DoesntAdaptDownPastMinFramerate) {
ResetEncoder("VP8", 1, 2, 1, true);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_stream_encoder_->SetSource(
&video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION);
video_source_.set_adaptation_enabled(true);
@@ -4015,7 +4263,7 @@ TEST_F(VideoStreamEncoderTest, DoesntAdaptDownPastMinFramerate) {
sink_.WaitForEncodedFrame(timestamp_ms);
}
timestamp_ms += kFrameIntervalMs;
- fake_clock_.AdvanceTime(TimeDelta::ms(kFrameIntervalMs));
+ fake_clock_.AdvanceTime(TimeDelta::Millis(kFrameIntervalMs));
}
// ...and then try to adapt again.
video_stream_encoder_->TriggerCpuOveruse();
@@ -4034,8 +4282,9 @@ TEST_F(VideoStreamEncoderTest,
const int64_t kFrameIntervalMs = 150;
int64_t timestamp_ms = kFrameIntervalMs;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable BALANCED preference, no initial limitation.
AdaptingFrameForwarder source;
@@ -4216,8 +4465,9 @@ TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) {
const int64_t kFrameIntervalMs = 150;
int64_t timestamp_ms = kFrameIntervalMs;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable BALANCED preference, no initial limitation.
AdaptingFrameForwarder source;
@@ -4331,8 +4581,9 @@ TEST_F(VideoStreamEncoderTest,
const int64_t kFrameIntervalMs = 150;
int64_t timestamp_ms = kFrameIntervalMs;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Enable BALANCED preference, no initial limitation.
AdaptingFrameForwarder source;
@@ -4421,8 +4672,9 @@ TEST_F(VideoStreamEncoderTest, AcceptsFullHdAdaptedDownSimulcastFrames) {
const int kFramerate = 24;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
  // Trigger encoder reconfiguration (without resetting the entire instance).
VideoEncoderConfig video_encoder_config;
video_encoder_config.codec_type = kVideoCodecVP8;
@@ -4456,8 +4708,9 @@ TEST_F(VideoStreamEncoderTest, PeriodicallyUpdatesChannelParameters) {
const int kHighFps = 30;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec;
max_framerate_ = kLowFps;
@@ -4472,8 +4725,9 @@ TEST_F(VideoStreamEncoderTest, PeriodicallyUpdatesChannelParameters) {
// Make sure encoder is updated with new target.
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(
CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight));
WaitForEncodedFrame(timestamp_ms);
@@ -4511,8 +4765,9 @@ TEST_F(VideoStreamEncoderTest, DoesNotUpdateBitrateAllocationWhenSuspended) {
MockBitrateObserver bitrate_observer;
video_stream_encoder_->SetBitrateAllocationObserver(&bitrate_observer);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_stream_encoder_->WaitUntilTaskQueueIsIdle();
  // Insert a first video frame; this causes another bitrate update.
@@ -4523,12 +4778,13 @@ TEST_F(VideoStreamEncoderTest, DoesNotUpdateBitrateAllocationWhenSuspended) {
WaitForEncodedFrame(timestamp_ms);
// Next, simulate video suspension due to pacer queue overrun.
- video_stream_encoder_->OnBitrateUpdated(DataRate::bps(0), DataRate::bps(0),
- DataRate::bps(0), 0, 1);
+ video_stream_encoder_->OnBitrateUpdated(DataRate::BitsPerSec(0),
+ DataRate::BitsPerSec(0),
+ DataRate::BitsPerSec(0), 0, 1, 0);
  // Skip ahead until a new periodic parameter update should have occurred.
timestamp_ms += kProcessIntervalMs;
- fake_clock_.AdvanceTime(TimeDelta::ms(kProcessIntervalMs));
+ fake_clock_.AdvanceTime(TimeDelta::Millis(kProcessIntervalMs));
// Bitrate observer should not be called.
EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(_)).Times(0);
@@ -4545,8 +4801,9 @@ TEST_F(VideoStreamEncoderTest,
const int kFrameHeight = 720;
const CpuOveruseOptions default_options;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(
CreateFrame(1, kFrameWidth, kFrameHeight));
WaitForEncodedFrame(1);
@@ -4569,8 +4826,9 @@ TEST_F(VideoStreamEncoderTest,
fake_encoder_.SetIsHardwareAccelerated(true);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_source_.IncomingCapturedFrame(
CreateFrame(1, kFrameWidth, kFrameHeight));
WaitForEncodedFrame(1);
@@ -4591,8 +4849,9 @@ TEST_F(VideoStreamEncoderTest, DropsFramesWhenEncoderOvershoots) {
const int kNumFramesInRun = kFps * 5; // Runs of five seconds.
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec;
max_framerate_ = kFps;
@@ -4627,9 +4886,9 @@ TEST_F(VideoStreamEncoderTest, DropsFramesWhenEncoderOvershoots) {
}
fake_encoder_.SimulateOvershoot(overshoot_factor);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps + 1000),
- DataRate::bps(kTargetBitrateBps + 1000),
- DataRate::bps(kTargetBitrateBps + 1000), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps + 1000),
+ DataRate::BitsPerSec(kTargetBitrateBps + 1000),
+ DataRate::BitsPerSec(kTargetBitrateBps + 1000), 0, 0, 0);
num_dropped = 0;
for (int i = 0; i < kNumFramesInRun; ++i) {
video_source_.IncomingCapturedFrame(
@@ -4642,8 +4901,9 @@ TEST_F(VideoStreamEncoderTest, DropsFramesWhenEncoderOvershoots) {
}
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
  // Target framerate should still be near the expected target, despite
  // the frame drops.
@@ -4666,8 +4926,9 @@ TEST_F(VideoStreamEncoderTest, ConfiguresCorrectFrameRate) {
int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec;
max_framerate_ = kActualInputFps;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Insert 3 seconds of video, with an input fps lower than configured max.
for (int i = 0; i < kActualInputFps * 3; ++i) {
@@ -4686,8 +4947,9 @@ TEST_F(VideoStreamEncoderTest, ConfiguresCorrectFrameRate) {
TEST_F(VideoStreamEncoderTest, AccumulatesUpdateRectOnDroppedFrames) {
VideoFrame::UpdateRect rect;
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
fake_encoder_.BlockNextEncode();
video_source_.IncomingCapturedFrame(
@@ -4731,8 +4993,9 @@ TEST_F(VideoStreamEncoderTest, AccumulatesUpdateRectOnDroppedFrames) {
TEST_F(VideoStreamEncoderTest, SetsFrameTypes) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// First frame is always a keyframe.
video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
@@ -4763,9 +5026,9 @@ TEST_F(VideoStreamEncoderTest, SetsFrameTypesSimulcast) {
// Setup simulcast with three streams.
ResetEncoder("VP8", 3, 1, 1, false);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kSimulcastTargetBitrateBps),
- DataRate::bps(kSimulcastTargetBitrateBps),
- DataRate::bps(kSimulcastTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kSimulcastTargetBitrateBps),
+ DataRate::BitsPerSec(kSimulcastTargetBitrateBps),
+ DataRate::BitsPerSec(kSimulcastTargetBitrateBps), 0, 0, 0);
// Wait for all three layers before triggering event.
sink_.SetNumExpectedLayers(3);
@@ -4806,8 +5069,9 @@ TEST_F(VideoStreamEncoderTest, RequestKeyframeInternalSource) {
encoder_factory_.SetHasInternalSource(true);
ResetEncoder("VP8", 1, 1, 1, false);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Call encoder directly, simulating internal source where encoded frame
// callback in VideoStreamEncoder is called despite no OnFrame().
@@ -4844,8 +5108,9 @@ TEST_F(VideoStreamEncoderTest, AdjustsTimestampInternalSource) {
encoder_factory_.SetHasInternalSource(true);
ResetEncoder("VP8", 1, 1, 1, false);
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
int64_t timestamp = 1;
EncodedImage image;
@@ -4936,8 +5201,9 @@ TEST_F(VideoStreamEncoderTest, CopiesVideoFrameMetadataAfterDownscale) {
const int kTargetBitrateBps = 300000;  // Too low for HD resolution.
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_stream_encoder_->WaitUntilTaskQueueIsIdle();
// Insert a first video frame. It should be dropped because of downscale in
@@ -4976,11 +5242,12 @@ TEST_F(VideoStreamEncoderTest, BandwidthAllocationLowerBound) {
// Initial rate.
video_stream_encoder_->OnBitrateUpdated(
- /*target_bitrate=*/DataRate::kbps(300),
- /*stable_target_bitrate=*/DataRate::kbps(300),
- /*link_allocation=*/DataRate::kbps(300),
+ /*target_bitrate=*/DataRate::KilobitsPerSec(300),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(300),
+ /*link_allocation=*/DataRate::KilobitsPerSec(300),
/*fraction_lost=*/0,
- /*rtt_ms=*/0);
+ /*rtt_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
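The annotated call above implies the updated OnBitrateUpdated signature sketched below; the three DataRate parameters are evident from the diff, while the scalar parameter types are assumptions based on the literal arguments:

void OnBitrateUpdated(DataRate target_bitrate,
                      DataRate stable_target_bitrate,
                      DataRate link_allocation,
                      uint8_t fraction_lost,
                      int64_t round_trip_time_ms,
                      double cwnd_reduce_ratio);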
// Insert a first video frame so that encoder gets configured.
int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec;
@@ -4991,30 +5258,33 @@ TEST_F(VideoStreamEncoderTest, BandwidthAllocationLowerBound) {
// Set a target rate below the minimum allowed by the codec settings.
VideoCodec codec_config = fake_encoder_.codec_config();
- DataRate min_rate = DataRate::kbps(codec_config.minBitrate);
- DataRate target_rate = min_rate - DataRate::kbps(1);
+ DataRate min_rate = DataRate::KilobitsPerSec(codec_config.minBitrate);
+ DataRate target_rate = min_rate - DataRate::KilobitsPerSec(1);
video_stream_encoder_->OnBitrateUpdated(
/*target_bitrate=*/target_rate,
/*stable_target_bitrate=*/target_rate,
/*link_allocation=*/target_rate,
/*fraction_lost=*/0,
- /*rtt_ms=*/0);
+ /*rtt_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
video_stream_encoder_->WaitUntilTaskQueueIsIdle();
// Target bitrate and bandwidth allocation should both be capped at min_rate.
auto rate_settings = fake_encoder_.GetAndResetLastRateControlSettings();
ASSERT_TRUE(rate_settings.has_value());
- DataRate allocation_sum = DataRate::bps(rate_settings->bitrate.get_sum_bps());
+ DataRate allocation_sum =
+ DataRate::BitsPerSec(rate_settings->bitrate.get_sum_bps());
EXPECT_EQ(min_rate, allocation_sum);
EXPECT_EQ(rate_settings->bandwidth_allocation, min_rate);
video_stream_encoder_->Stop();
}
-TEST_F(VideoStreamEncoderTest, EncoderRatesPropegatedOnReconfigure) {
+TEST_F(VideoStreamEncoderTest, EncoderRatesPropagatedOnReconfigure) {
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
// Capture a frame and wait for it to synchronize with the encoder thread.
int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec;
video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, nullptr));
@@ -5055,6 +5325,8 @@ TEST_F(VideoStreamEncoderTest, EncoderRatesPropegatedOnReconfigure) {
struct MockEncoderSwitchRequestCallback : public EncoderSwitchRequestCallback {
MOCK_METHOD0(RequestEncoderFallback, void());
MOCK_METHOD1(RequestEncoderSwitch, void(const Config& conf));
+ MOCK_METHOD1(RequestEncoderSwitch,
+ void(const webrtc::SdpVideoFormat& format));
};
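Because RequestEncoderSwitch is now overloaded, a bare EXPECT_CALL on it would be ambiguous; the updated expectations below therefore pin each one to a single overload with testing::Matcher<T>. A minimal sketch of the pattern, using the mock defined above:

MockEncoderSwitchRequestCallback callback;
EXPECT_CALL(callback,
            RequestEncoderSwitch(
                testing::Matcher<const webrtc::SdpVideoFormat&>(
                    testing::Field(&webrtc::SdpVideoFormat::name, "AV1"))));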
TEST_F(VideoStreamEncoderTest, BitrateEncoderSwitch) {
@@ -5078,17 +5350,53 @@ TEST_F(VideoStreamEncoderTest, BitrateEncoderSwitch) {
CreateFrame(kDontCare, kDontCare, kDontCare));
using Config = EncoderSwitchRequestCallback::Config;
- EXPECT_CALL(switch_callback,
- RequestEncoderSwitch(AllOf(Field(&Config::codec_name, "AV1"),
+ EXPECT_CALL(switch_callback, RequestEncoderSwitch(Matcher<const Config&>(
+ AllOf(Field(&Config::codec_name, "AV1"),
Field(&Config::param, "ping"),
- Field(&Config::value, "pong"))));
+ Field(&Config::value, "pong")))));
video_stream_encoder_->OnBitrateUpdated(
- /*target_bitrate=*/DataRate::kbps(50),
- /*stable_target_bitrate=*/DataRate::kbps(kDontCare),
- /*link_allocation=*/DataRate::kbps(kDontCare),
+ /*target_bitrate=*/DataRate::KilobitsPerSec(50),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(kDontCare),
+ /*link_allocation=*/DataRate::KilobitsPerSec(kDontCare),
/*fraction_lost=*/0,
- /*rtt_ms=*/0);
+ /*rtt_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+
+ video_stream_encoder_->Stop();
+}
+
+TEST_F(VideoStreamEncoderTest, VideoSuspendedNoEncoderSwitch) {
+ constexpr int kDontCare = 100;
+
+ StrictMock<MockEncoderSwitchRequestCallback> switch_callback;
+ video_send_config_.encoder_settings.encoder_switch_request_callback =
+ &switch_callback;
+ VideoEncoderConfig encoder_config = video_encoder_config_.Copy();
+ encoder_config.codec_type = kVideoCodecVP8;
+ webrtc::test::ScopedFieldTrials field_trial(
+ "WebRTC-NetworkCondition-EncoderSwitch/"
+ "codec_thresholds:VP8;100;-1|H264;-1;30000,"
+ "to_codec:AV1,to_param:ping,to_value:pong,window:2.0/");
+
+ // Reset encoder for new configuration to take effect.
+ ConfigureEncoder(std::move(encoder_config));
+
+ // Send one frame to trigger ReconfigureEncoder.
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(kDontCare, kDontCare, kDontCare));
+
+ using Config = EncoderSwitchRequestCallback::Config;
+ EXPECT_CALL(switch_callback, RequestEncoderSwitch(Matcher<const Config&>(_)))
+ .Times(0);
+
+ video_stream_encoder_->OnBitrateUpdated(
+ /*target_bitrate=*/DataRate::KilobitsPerSec(0),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(0),
+ /*link_allocation=*/DataRate::KilobitsPerSec(kDontCare),
+ /*fraction_lost=*/0,
+ /*rtt_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
video_stream_encoder_->Stop();
}
@@ -5115,21 +5423,23 @@ TEST_F(VideoStreamEncoderTest, ResolutionEncoderSwitch) {
// setting some bitrate so that subsequent calls to WaitForEncodedFrame do
// not fail.
video_stream_encoder_->OnBitrateUpdated(
- /*target_bitrate=*/DataRate::kbps(kSufficientBitrateToNotDrop),
- /*stable_target_bitrate=*/DataRate::kbps(kSufficientBitrateToNotDrop),
- /*link_allocation=*/DataRate::kbps(kSufficientBitrateToNotDrop),
+ /*target_bitrate=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
+ /*stable_target_bitrate=*/
+ DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
+ /*link_allocation=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
/*fraction_lost=*/0,
- /*rtt_ms=*/0);
+ /*rtt_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
// Send one frame to trigger ReconfigureEncoder.
video_source_.IncomingCapturedFrame(CreateFrame(1, kHighRes, kHighRes));
WaitForEncodedFrame(1);
using Config = EncoderSwitchRequestCallback::Config;
- EXPECT_CALL(switch_callback,
- RequestEncoderSwitch(AllOf(Field(&Config::codec_name, "AV1"),
+ EXPECT_CALL(switch_callback, RequestEncoderSwitch(Matcher<const Config&>(
+ AllOf(Field(&Config::codec_name, "AV1"),
Field(&Config::param, "ping"),
- Field(&Config::value, "pong"))));
+ Field(&Config::value, "pong")))));
video_source_.IncomingCapturedFrame(CreateFrame(2, kLowRes, kLowRes));
WaitForEncodedFrame(2);
@@ -5137,19 +5447,127 @@ TEST_F(VideoStreamEncoderTest, ResolutionEncoderSwitch) {
video_stream_encoder_->Stop();
}
+TEST_F(VideoStreamEncoderTest, EncoderSelectorCurrentEncoderIsSignaled) {
+ constexpr int kDontCare = 100;
+ StrictMock<MockEncoderSelector> encoder_selector;
+ auto encoder_factory = std::make_unique<test::VideoEncoderProxyFactory>(
+ &fake_encoder_, &encoder_selector);
+ video_send_config_.encoder_settings.encoder_factory = encoder_factory.get();
+
+ // Reset encoder for new configuration to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+
+ EXPECT_CALL(encoder_selector, OnCurrentEncoder(_));
+
+ video_source_.IncomingCapturedFrame(
+ CreateFrame(kDontCare, kDontCare, kDontCare));
+ video_stream_encoder_->Stop();
+
+ // The encoders produced by the VideoEncoderProxyFactory have a pointer back
+ // to their factory, so in order for the encoder instance in
+ // |video_stream_encoder_| to be destroyed before |encoder_factory|, we
+ // reset |video_stream_encoder_| here.
+ video_stream_encoder_.reset();
+}
+
+TEST_F(VideoStreamEncoderTest, EncoderSelectorBitrateSwitch) {
+ constexpr int kDontCare = 100;
+
+ NiceMock<MockEncoderSelector> encoder_selector;
+ StrictMock<MockEncoderSwitchRequestCallback> switch_callback;
+ video_send_config_.encoder_settings.encoder_switch_request_callback =
+ &switch_callback;
+ auto encoder_factory = std::make_unique<test::VideoEncoderProxyFactory>(
+ &fake_encoder_, &encoder_selector);
+ video_send_config_.encoder_settings.encoder_factory = encoder_factory.get();
+
+ // Reset encoder for new configuration to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+
+ ON_CALL(encoder_selector, OnAvailableBitrate(_))
+ .WillByDefault(Return(SdpVideoFormat("AV1")));
+ EXPECT_CALL(switch_callback,
+ RequestEncoderSwitch(Matcher<const SdpVideoFormat&>(
+ Field(&SdpVideoFormat::name, "AV1"))));
+
+ video_stream_encoder_->OnBitrateUpdated(
+ /*target_bitrate=*/DataRate::KilobitsPerSec(50),
+ /*stable_target_bitrate=*/DataRate::KilobitsPerSec(kDontCare),
+ /*link_allocation=*/DataRate::KilobitsPerSec(kDontCare),
+ /*fraction_lost=*/0,
+ /*rtt_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+
+ video_stream_encoder_->Stop();
+}
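Taken together, the EncoderSelector tests imply a selector interface roughly like the sketch below; the method names match the mock calls in these tests, while the absl::optional return types are assumptions:

class EncoderSelectorInterface {
 public:
  virtual ~EncoderSelectorInterface() = default;
  // Informs the selector which encoder implementation is currently in use.
  virtual void OnCurrentEncoder(const SdpVideoFormat& format) = 0;
  // May propose a different codec when the available bitrate changes.
  virtual absl::optional<SdpVideoFormat> OnAvailableBitrate(
      const DataRate& rate) = 0;
  // May propose a replacement after the running encoder reports failure.
  virtual absl::optional<SdpVideoFormat> OnEncoderBroken() = 0;
};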
+
+TEST_F(VideoStreamEncoderTest, EncoderSelectorBrokenEncoderSwitch) {
+ constexpr int kSufficientBitrateToNotDrop = 1000;
+ constexpr int kDontCare = 100;
+
+ NiceMock<MockVideoEncoder> video_encoder;
+ NiceMock<MockEncoderSelector> encoder_selector;
+ StrictMock<MockEncoderSwitchRequestCallback> switch_callback;
+ video_send_config_.encoder_settings.encoder_switch_request_callback =
+ &switch_callback;
+ auto encoder_factory = std::make_unique<test::VideoEncoderProxyFactory>(
+ &video_encoder, &encoder_selector);
+ video_send_config_.encoder_settings.encoder_factory = encoder_factory.get();
+
+ // Reset encoder for new configuration to take effect.
+ ConfigureEncoder(video_encoder_config_.Copy());
+
+ // The VideoStreamEncoder needs some bitrate before it can start encoding,
+ // so set one here so that subsequent calls to WaitForEncodedFrame do not
+ // fail.
+ video_stream_encoder_->OnBitrateUpdated(
+ /*target_bitrate=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
+ /*stable_target_bitrate=*/
+ DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
+ /*link_allocation=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop),
+ /*fraction_lost=*/0,
+ /*rtt_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
+
+ ON_CALL(video_encoder, Encode(_, _))
+ .WillByDefault(Return(WEBRTC_VIDEO_CODEC_ENCODER_FAILURE));
+ ON_CALL(encoder_selector, OnEncoderBroken())
+ .WillByDefault(Return(SdpVideoFormat("AV2")));
+
+ rtc::Event encode_attempted;
+ EXPECT_CALL(switch_callback,
+ RequestEncoderSwitch(Matcher<const SdpVideoFormat&>(_)))
+ .WillOnce([&encode_attempted](const SdpVideoFormat& format) {
+ EXPECT_EQ(format.name, "AV2");
+ encode_attempted.Set();
+ });
+
+ video_source_.IncomingCapturedFrame(CreateFrame(1, kDontCare, kDontCare));
+ encode_attempted.Wait(3000);
+
+ video_stream_encoder_->Stop();
+
+ // The encoders produced by the VideoEncoderProxyFactory have a pointer back
+ // to their factory, so in order for the encoder instance in
+ // |video_stream_encoder_| to be destroyed before |encoder_factory|, we
+ // reset |video_stream_encoder_| here.
+ video_stream_encoder_.reset();
+}
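One note on the synchronization pattern above: rtc::Event::Wait() returns whether the event was signaled within the timeout, and that result is discarded in the test. A stricter variant (parameter name assumed) would make a missed switch request fail immediately rather than silently timing out:

EXPECT_TRUE(encode_attempted.Wait(/*give_up_after_ms=*/3000));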
+
TEST_F(VideoStreamEncoderTest,
- AllocationPropegratedToEncoderWhenTargetRateChanged) {
+ AllocationPropagatedToEncoderWhenTargetRateChanged) {
const int kFrameWidth = 320;
const int kFrameHeight = 180;
// Set initial rate.
- auto rate = DataRate::kbps(100);
+ auto rate = DataRate::KilobitsPerSec(100);
video_stream_encoder_->OnBitrateUpdated(
/*target_bitrate=*/rate,
/*stable_target_bitrate=*/rate,
/*link_allocation=*/rate,
/*fraction_lost=*/0,
- /*rtt_ms=*/0);
+ /*rtt_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
// Insert a first video frame so that encoder gets configured.
int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec;
@@ -5160,31 +5578,33 @@ TEST_F(VideoStreamEncoderTest,
EXPECT_EQ(1, fake_encoder_.GetNumSetRates());
// Change of target bitrate propagates to the encoder.
- auto new_stable_rate = rate - DataRate::kbps(5);
+ auto new_stable_rate = rate - DataRate::KilobitsPerSec(5);
video_stream_encoder_->OnBitrateUpdated(
/*target_bitrate=*/new_stable_rate,
/*stable_target_bitrate=*/new_stable_rate,
/*link_allocation=*/rate,
/*fraction_lost=*/0,
- /*rtt_ms=*/0);
+ /*rtt_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
video_stream_encoder_->WaitUntilTaskQueueIsIdle();
EXPECT_EQ(2, fake_encoder_.GetNumSetRates());
video_stream_encoder_->Stop();
}
TEST_F(VideoStreamEncoderTest,
- AllocationNotPropegratedToEncoderWhenTargetRateUnchanged) {
+ AllocationNotPropagatedToEncoderWhenTargetRateUnchanged) {
const int kFrameWidth = 320;
const int kFrameHeight = 180;
// Set initial rate.
- auto rate = DataRate::kbps(100);
+ auto rate = DataRate::KilobitsPerSec(100);
video_stream_encoder_->OnBitrateUpdated(
/*target_bitrate=*/rate,
/*stable_target_bitrate=*/rate,
/*link_allocation=*/rate,
/*fraction_lost=*/0,
- /*rtt_ms=*/0);
+ /*rtt_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
// Insert a first video frame so that encoder gets configured.
int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec;
@@ -5196,13 +5616,14 @@ TEST_F(VideoStreamEncoderTest,
// Set a new stable target rate without changing the target rate or the
// link_allocation. Should not reset the encoder's rates.
- auto new_stable_rate = rate - DataRate::kbps(5);
+ auto new_stable_rate = rate - DataRate::KilobitsPerSec(5);
video_stream_encoder_->OnBitrateUpdated(
/*target_bitrate=*/rate,
/*stable_target_bitrate=*/new_stable_rate,
/*link_allocation=*/rate,
/*fraction_lost=*/0,
- /*rtt_ms=*/0);
+ /*rtt_ms=*/0,
+ /*cwnd_reduce_ratio=*/0);
video_stream_encoder_->WaitUntilTaskQueueIsIdle();
EXPECT_EQ(1, fake_encoder_.GetNumSetRates());
video_stream_encoder_->Stop();
@@ -5224,8 +5645,9 @@ TEST_F(VideoStreamEncoderTest, AutomaticAnimationDetection) {
// BALANCED degradation preference is required for this feature.
video_stream_encoder_->OnBitrateUpdated(
- DataRate::bps(kTargetBitrateBps), DataRate::bps(kTargetBitrateBps),
- DataRate::bps(kTargetBitrateBps), 0, 0);
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps),
+ DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
video_stream_encoder_->SetSource(&video_source_,
webrtc::DegradationPreference::BALANCED);
VerifyNoLimitation(video_source_.sink_wants());