summaryrefslogtreecommitdiff
path: root/chromium/media
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/media')
-rw-r--r--chromium/media/BUILD.gn7
-rw-r--r--chromium/media/audio/BUILD.gn5
-rw-r--r--chromium/media/audio/alsa/alsa_wrapper.cc10
-rw-r--r--chromium/media/audio/alsa/alsa_wrapper.h3
-rw-r--r--chromium/media/audio/android/audio_track_output_stream.cc1
-rw-r--r--chromium/media/audio/audio_device_thread.cc2
-rw-r--r--chromium/media/audio/audio_device_thread.h3
-rw-r--r--chromium/media/audio/audio_encoders_unittest.cc119
-rw-r--r--chromium/media/audio/audio_features.cc7
-rw-r--r--chromium/media/audio/audio_features.h4
-rw-r--r--chromium/media/audio/audio_input_delegate.h3
-rw-r--r--chromium/media/audio/audio_io.cc19
-rw-r--r--chromium/media/audio/audio_io.h6
-rw-r--r--chromium/media/audio/audio_manager_base.cc7
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.cc4
-rw-r--r--chromium/media/audio/cras/audio_manager_chromeos.cc8
-rw-r--r--chromium/media/audio/cras/audio_manager_chromeos_unittest.cc1
-rw-r--r--chromium/media/audio/fuchsia/DIR_METADATA1
-rw-r--r--chromium/media/audio/fuchsia/audio_manager_fuchsia.cc16
-rw-r--r--chromium/media/audio/fuchsia/audio_output_stream_fuchsia.cc2
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac.cc51
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac.h8
-rw-r--r--chromium/media/audio/mac/audio_input_mac.h4
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac.cc48
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac.h8
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.cc12
-rw-r--r--chromium/media/audio/system_glitch_reporter.cc93
-rw-r--r--chromium/media/audio/system_glitch_reporter.h62
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.cc122
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.h18
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.cc59
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.h18
-rw-r--r--chromium/media/audio/win/core_audio_util_win.cc50
-rw-r--r--chromium/media/audio/win/core_audio_util_win.h5
-rw-r--r--chromium/media/base/BUILD.gn5
-rw-r--r--chromium/media/base/android/BUILD.gn2
-rw-r--r--chromium/media/base/android/media_codec_util.cc8
-rw-r--r--chromium/media/base/android/media_codec_util.h7
-rw-r--r--chromium/media/base/android/media_player_bridge.cc10
-rw-r--r--chromium/media/base/android/media_player_bridge.h3
-rw-r--r--chromium/media/base/audio_codecs.cc4
-rw-r--r--chromium/media/base/audio_latency.cc2
-rw-r--r--chromium/media/base/audio_parameters.cc3
-rw-r--r--chromium/media/base/audio_parameters.h5
-rw-r--r--chromium/media/base/audio_shifter.cc7
-rw-r--r--chromium/media/base/audio_shifter.h12
-rw-r--r--chromium/media/base/audio_timestamp_helper.cc2
-rw-r--r--chromium/media/base/bitstream_buffer.cc19
-rw-r--r--chromium/media/base/bitstream_buffer.h32
-rw-r--r--chromium/media/base/callback_registry.h4
-rw-r--r--chromium/media/base/decoder.cc2
-rw-r--r--chromium/media/base/decoder.h19
-rw-r--r--chromium/media/base/decoder_buffer.cc61
-rw-r--r--chromium/media/base/decoder_buffer.h43
-rw-r--r--chromium/media/base/decoder_buffer_unittest.cc16
-rw-r--r--chromium/media/base/decoder_status.cc3
-rw-r--r--chromium/media/base/decoder_status.h3
-rw-r--r--chromium/media/base/eme_constants.h11
-rw-r--r--chromium/media/base/fake_audio_renderer_sink.cc1
-rw-r--r--chromium/media/base/key_system_properties.h9
-rw-r--r--chromium/media/base/key_systems.cc26
-rw-r--r--chromium/media/base/key_systems.h2
-rw-r--r--chromium/media/base/key_systems_unittest.cc8
-rw-r--r--chromium/media/base/limits.h128
-rw-r--r--chromium/media/base/mac/color_space_util_mac.mm34
-rw-r--r--chromium/media/base/media_log.cc3
-rw-r--r--chromium/media/base/media_log.h4
-rw-r--r--chromium/media/base/media_serializers.h3
-rw-r--r--chromium/media/base/media_serializers_base.h12
-rw-r--r--chromium/media/base/media_switches.cc143
-rw-r--r--chromium/media/base/media_switches.h48
-rw-r--r--chromium/media/base/media_util.cc16
-rw-r--r--chromium/media/base/media_util.h6
-rw-r--r--chromium/media/base/mime_util_internal.cc3
-rw-r--r--chromium/media/base/mock_filters.h3
-rw-r--r--chromium/media/base/moving_average.cc3
-rw-r--r--chromium/media/base/offloading_video_encoder.cc2
-rw-r--r--chromium/media/base/pipeline.h9
-rw-r--r--chromium/media/base/pipeline_impl.cc37
-rw-r--r--chromium/media/base/pipeline_impl.h10
-rw-r--r--chromium/media/base/renderer.cc4
-rw-r--r--chromium/media/base/renderer.h11
-rw-r--r--chromium/media/base/renderer_client.h3
-rw-r--r--chromium/media/base/scoped_async_trace.cc32
-rw-r--r--chromium/media/base/scoped_async_trace.h58
-rw-r--r--chromium/media/base/sinc_resampler.h31
-rw-r--r--chromium/media/base/sinc_resampler_unittest.cc4
-rw-r--r--chromium/media/base/status.h34
-rw-r--r--chromium/media/base/status.md33
-rw-r--r--chromium/media/base/status_unittest.cc28
-rw-r--r--chromium/media/base/stream_parser.cc16
-rw-r--r--chromium/media/base/supported_types.cc34
-rw-r--r--chromium/media/base/unaligned_shared_memory.cc186
-rw-r--r--chromium/media/base/unaligned_shared_memory.h132
-rw-r--r--chromium/media/base/unaligned_shared_memory_unittest.cc254
-rw-r--r--chromium/media/base/video_bitrate_allocation.cc4
-rw-r--r--chromium/media/base/video_bitrate_allocation.h3
-rw-r--r--chromium/media/base/video_bitrate_allocation_unittest.cc6
-rw-r--r--chromium/media/base/video_codecs.cc98
-rw-r--r--chromium/media/base/video_codecs.h12
-rw-r--r--chromium/media/base/video_codecs_unittest.cc86
-rw-r--r--chromium/media/base/video_encoder.cc9
-rw-r--r--chromium/media/base/video_frame.h4
-rw-r--r--chromium/media/base/video_frame_metadata.cc2
-rw-r--r--chromium/media/base/video_frame_metadata.h33
-rw-r--r--chromium/media/base/video_frame_unittest.cc6
-rw-r--r--chromium/media/base/video_util.cc6
-rw-r--r--chromium/media/base/video_util.h3
-rw-r--r--chromium/media/base/win/BUILD.gn5
-rw-r--r--chromium/media/base/win/DEPS3
-rw-r--r--chromium/media/base/win/dcomp_texture_wrapper.h6
-rw-r--r--chromium/media/base/win/dxgi_device_manager.cc10
-rw-r--r--chromium/media/base/win/mf_helpers.cc34
-rw-r--r--chromium/media/base/win/mf_helpers.h16
-rw-r--r--chromium/media/base/win/overlay_state_observer_subscription.h42
-rw-r--r--chromium/media/capabilities/BUILD.gn3
-rw-r--r--chromium/media/capabilities/pending_operations.cc111
-rw-r--r--chromium/media/capabilities/pending_operations.h92
-rw-r--r--chromium/media/capabilities/pending_operations_unittest.cc115
-rw-r--r--chromium/media/capabilities/video_decode_stats_db_impl.cc147
-rw-r--r--chromium/media/capabilities/video_decode_stats_db_impl.h62
-rw-r--r--chromium/media/capabilities/video_decode_stats_db_impl_unittest.cc14
-rw-r--r--chromium/media/capabilities/webrtc_video_stats_db_impl.cc166
-rw-r--r--chromium/media/capabilities/webrtc_video_stats_db_impl.h69
-rw-r--r--chromium/media/capabilities/webrtc_video_stats_db_impl_unittest.cc14
-rw-r--r--chromium/media/capture/content/android/BUILD.gn2
-rw-r--r--chromium/media/capture/mojom/video_capture_buffer.mojom8
-rw-r--r--chromium/media/capture/mojom/video_capture_types.mojom2
-rw-r--r--chromium/media/capture/video/DEPS1
-rw-r--r--chromium/media/capture/video/android/BUILD.gn2
-rw-r--r--chromium/media/capture/video/chromeos/camera_app_device_bridge_impl.cc4
-rw-r--r--chromium/media/capture/video/chromeos/camera_app_device_bridge_impl.h4
-rw-r--r--chromium/media/capture/video/chromeos/camera_app_device_impl.cc15
-rw-r--r--chromium/media/capture/video/chromeos/camera_app_device_impl.h8
-rw-r--r--chromium/media/capture/video/chromeos/camera_app_device_provider_impl.cc12
-rw-r--r--chromium/media/capture/video/chromeos/camera_app_device_provider_impl.h8
-rw-r--r--chromium/media/capture/video/chromeos/camera_device_delegate.cc8
-rw-r--r--chromium/media/capture/video/chromeos/camera_device_delegate_unittest.cc4
-rw-r--r--chromium/media/capture/video/chromeos/camera_hal_delegate.cc6
-rw-r--r--chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc17
-rw-r--r--chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.h5
-rw-r--r--chromium/media/capture/video/chromeos/mojom/camera_app.mojom8
-rw-r--r--chromium/media/capture/video/chromeos/request_manager.cc8
-rw-r--r--chromium/media/capture/video/chromeos/request_manager_unittest.cc8
-rw-r--r--chromium/media/capture/video/chromeos/token_manager.cc5
-rw-r--r--chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia.cc27
-rw-r--r--chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia.h2
-rw-r--r--chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia_test.cc38
-rw-r--r--chromium/media/capture/video/fuchsia/video_capture_device_fuchsia_test.cc5
-rw-r--r--chromium/media/capture/video/linux/fake_v4l2_impl.cc2
-rw-r--r--chromium/media/capture/video/linux/video_capture_device_factory_linux_unittest.cc2
-rw-r--r--chromium/media/capture/video/linux/video_capture_device_linux.cc79
-rw-r--r--chromium/media/capture/video/linux/video_capture_device_linux.h9
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_mac.mm50
-rw-r--r--chromium/media/capture/video/mock_video_capture_device_client.cc5
-rw-r--r--chromium/media/capture/video/video_capture_buffer_pool_util.cc15
-rw-r--r--chromium/media/capture/video/video_capture_device.h3
-rw-r--r--chromium/media/capture/video/video_capture_device_client.cc26
-rw-r--r--chromium/media/capture/video/video_capture_device_unittest.cc20
-rw-r--r--chromium/media/capture/video/win/video_capture_device_factory_win.cc4
-rw-r--r--chromium/media/capture/video/win/video_capture_device_mf_win.cc116
-rw-r--r--chromium/media/capture/video/win/video_capture_device_mf_win.h5
-rw-r--r--chromium/media/capture/video/win/video_capture_device_mf_win_unittest.cc50
-rw-r--r--chromium/media/capture/video/win/video_capture_device_win.cc20
-rw-r--r--chromium/media/capture/video/win/video_capture_device_win.h5
-rw-r--r--chromium/media/cast/BUILD.gn134
-rw-r--r--chromium/media/cast/cast_callbacks.h26
-rw-r--r--chromium/media/cast/cast_sender.h10
-rw-r--r--chromium/media/cast/cast_sender_impl.cc2
-rw-r--r--chromium/media/cast/common/encoded_frame.cc28
-rw-r--r--chromium/media/cast/common/encoded_frame.h94
-rw-r--r--chromium/media/cast/common/expanded_value_base.h30
-rw-r--r--chromium/media/cast/common/sender_encoded_frame.cc (renamed from chromium/media/cast/sender/sender_encoded_frame.cc)6
-rw-r--r--chromium/media/cast/common/sender_encoded_frame.h (renamed from chromium/media/cast/sender/sender_encoded_frame.h)17
-rw-r--r--chromium/media/cast/common/video_frame_factory.h (renamed from chromium/media/cast/sender/video_frame_factory.h)9
-rw-r--r--chromium/media/cast/encoding/audio_encoder.cc (renamed from chromium/media/cast/sender/audio_encoder.cc)122
-rw-r--r--chromium/media/cast/encoding/audio_encoder.h (renamed from chromium/media/cast/sender/audio_encoder.h)12
-rw-r--r--chromium/media/cast/encoding/audio_encoder_unittest.cc (renamed from chromium/media/cast/sender/audio_encoder_unittest.cc)15
-rw-r--r--chromium/media/cast/encoding/av1_encoder.cc (renamed from chromium/media/cast/sender/av1_encoder.cc)9
-rw-r--r--chromium/media/cast/encoding/av1_encoder.h (renamed from chromium/media/cast/sender/av1_encoder.h)12
-rw-r--r--chromium/media/cast/encoding/external_video_encoder.cc (renamed from chromium/media/cast/sender/external_video_encoder.cc)29
-rw-r--r--chromium/media/cast/encoding/external_video_encoder.h (renamed from chromium/media/cast/sender/external_video_encoder.h)18
-rw-r--r--chromium/media/cast/encoding/external_video_encoder_unittest.cc (renamed from chromium/media/cast/sender/external_video_encoder_unittest.cc)21
-rw-r--r--chromium/media/cast/encoding/fake_software_video_encoder.cc (renamed from chromium/media/cast/sender/fake_software_video_encoder.cc)9
-rw-r--r--chromium/media/cast/encoding/fake_software_video_encoder.h (renamed from chromium/media/cast/sender/fake_software_video_encoder.h)11
-rw-r--r--chromium/media/cast/encoding/h264_vt_encoder.cc (renamed from chromium/media/cast/sender/h264_vt_encoder.cc)44
-rw-r--r--chromium/media/cast/encoding/h264_vt_encoder.h (renamed from chromium/media/cast/sender/h264_vt_encoder.h)16
-rw-r--r--chromium/media/cast/encoding/h264_vt_encoder_unittest.cc (renamed from chromium/media/cast/sender/h264_vt_encoder_unittest.cc)19
-rw-r--r--chromium/media/cast/encoding/size_adaptable_video_encoder_base.cc (renamed from chromium/media/cast/sender/size_adaptable_video_encoder_base.cc)11
-rw-r--r--chromium/media/cast/encoding/size_adaptable_video_encoder_base.h (renamed from chromium/media/cast/sender/size_adaptable_video_encoder_base.h)18
-rw-r--r--chromium/media/cast/encoding/software_video_encoder.h (renamed from chromium/media/cast/sender/software_video_encoder.h)9
-rw-r--r--chromium/media/cast/encoding/video_encoder.cc (renamed from chromium/media/cast/sender/video_encoder.cc)18
-rw-r--r--chromium/media/cast/encoding/video_encoder.h (renamed from chromium/media/cast/sender/video_encoder.h)13
-rw-r--r--chromium/media/cast/encoding/video_encoder_impl.cc (renamed from chromium/media/cast/sender/video_encoder_impl.cc)9
-rw-r--r--chromium/media/cast/encoding/video_encoder_impl.h (renamed from chromium/media/cast/sender/video_encoder_impl.h)10
-rw-r--r--chromium/media/cast/encoding/video_encoder_unittest.cc (renamed from chromium/media/cast/sender/video_encoder_unittest.cc)28
-rw-r--r--chromium/media/cast/encoding/vpx_encoder.cc (renamed from chromium/media/cast/sender/vpx_encoder.cc)8
-rw-r--r--chromium/media/cast/encoding/vpx_encoder.h (renamed from chromium/media/cast/sender/vpx_encoder.h)9
-rw-r--r--chromium/media/cast/encoding/vpx_quantizer_parser.cc (renamed from chromium/media/cast/sender/vpx_quantizer_parser.cc)2
-rw-r--r--chromium/media/cast/encoding/vpx_quantizer_parser.h (renamed from chromium/media/cast/sender/vpx_quantizer_parser.h)6
-rw-r--r--chromium/media/cast/encoding/vpx_quantizer_parser_unittest.cc (renamed from chromium/media/cast/sender/vpx_quantizer_parser_unittest.cc)6
-rw-r--r--chromium/media/cast/logging/logging_defines.h2
-rw-r--r--chromium/media/cast/net/cast_transport.h7
-rw-r--r--chromium/media/cast/net/cast_transport_config.cc16
-rw-r--r--chromium/media/cast/net/cast_transport_config.h72
-rw-r--r--chromium/media/cast/net/cast_transport_impl.cc7
-rw-r--r--chromium/media/cast/net/cast_transport_impl.h2
-rw-r--r--chromium/media/cast/net/cast_transport_impl_unittest.cc13
-rw-r--r--chromium/media/cast/net/pacing/mock_paced_packet_sender.cc15
-rw-r--r--chromium/media/cast/net/pacing/mock_paced_packet_sender.h29
-rw-r--r--chromium/media/cast/net/rtcp/rtcp_builder_unittest.cc2
-rw-r--r--chromium/media/cast/net/rtcp/rtcp_utility_unittest.cc2
-rw-r--r--chromium/media/cast/net/rtcp/test_rtcp_packet_builder.cc276
-rw-r--r--chromium/media/cast/net/rtcp/test_rtcp_packet_builder.h115
-rw-r--r--chromium/media/cast/net/rtp/mock_rtp_feedback.h38
-rw-r--r--chromium/media/cast/net/rtp/mock_rtp_payload_feedback.cc15
-rw-r--r--chromium/media/cast/net/rtp/mock_rtp_payload_feedback.h25
-rw-r--r--chromium/media/cast/net/rtp/rtp_packet_builder.cc94
-rw-r--r--chromium/media/cast/net/rtp/rtp_packet_builder.h54
-rw-r--r--chromium/media/cast/net/rtp/rtp_packetizer.cc1
-rw-r--r--chromium/media/cast/net/rtp/rtp_packetizer.h1
-rw-r--r--chromium/media/cast/net/rtp/rtp_packetizer_unittest.cc1
-rw-r--r--chromium/media/cast/net/rtp/rtp_parser_unittest.cc2
-rw-r--r--chromium/media/cast/net/rtp/rtp_sender.cc1
-rw-r--r--chromium/media/cast/net/rtp/rtp_sender.h2
-rw-r--r--chromium/media/cast/net/transport_util.cc9
-rw-r--r--chromium/media/cast/net/transport_util.h2
-rw-r--r--chromium/media/cast/net/udp_transport_impl.cc8
-rw-r--r--chromium/media/cast/net/udp_transport_impl.h2
-rw-r--r--chromium/media/cast/sender/audio_sender.cc55
-rw-r--r--chromium/media/cast/sender/audio_sender.h27
-rw-r--r--chromium/media/cast/sender/audio_sender_unittest.cc6
-rw-r--r--chromium/media/cast/sender/frame_sender.cc479
-rw-r--r--chromium/media/cast/sender/frame_sender.h245
-rw-r--r--chromium/media/cast/sender/frame_sender_impl.cc559
-rw-r--r--chromium/media/cast/sender/frame_sender_impl.h182
-rw-r--r--chromium/media/cast/sender/performance_metrics_overlay.cc5
-rw-r--r--chromium/media/cast/sender/performance_metrics_overlay.h2
-rw-r--r--chromium/media/cast/sender/video_sender.cc120
-rw-r--r--chromium/media/cast/sender/video_sender.h43
-rw-r--r--chromium/media/cast/sender/video_sender_unittest.cc15
-rw-r--r--chromium/media/cdm/BUILD.gn2
-rw-r--r--chromium/media/cdm/aes_decryptor.cc7
-rw-r--r--chromium/media/cdm/api/content_decryption_module.h13
-rw-r--r--chromium/media/cdm/external_clear_key_test_helper.h2
-rw-r--r--chromium/media/cdm/win/media_foundation_cdm_factory.cc205
-rw-r--r--chromium/media/cdm/win/media_foundation_cdm_factory.h12
-rw-r--r--chromium/media/cdm/win/media_foundation_cdm_util.cc216
-rw-r--r--chromium/media/cdm/win/media_foundation_cdm_util.h33
-rw-r--r--chromium/media/device_monitors/device_monitor_udev.cc17
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common.cc70
-rw-r--r--chromium/media/filters/BUILD.gn57
-rw-r--r--chromium/media/filters/audio_video_metadata_extractor.cc4
-rw-r--r--chromium/media/filters/chunk_demuxer.cc6
-rw-r--r--chromium/media/filters/decoder_selector.cc83
-rw-r--r--chromium/media/filters/decoder_selector.h40
-rw-r--r--chromium/media/filters/decoder_selector_unittest.cc208
-rw-r--r--chromium/media/filters/decoder_stream.cc102
-rw-r--r--chromium/media/filters/decoder_stream.h14
-rw-r--r--chromium/media/filters/fuchsia/DIR_METADATA10
-rw-r--r--chromium/media/filters/mac/audio_toolbox_audio_encoder.cc322
-rw-r--r--chromium/media/filters/mac/audio_toolbox_audio_encoder.h65
-rw-r--r--chromium/media/filters/pipeline_controller.cc5
-rw-r--r--chromium/media/filters/pipeline_controller.h1
-rw-r--r--chromium/media/filters/pipeline_controller_unittest.cc1
-rw-r--r--chromium/media/filters/source_buffer_range.cc6
-rw-r--r--chromium/media/filters/source_buffer_state.cc47
-rw-r--r--chromium/media/filters/video_cadence_estimator.h1
-rw-r--r--chromium/media/filters/vp9_compressed_header_parser.cc2
-rw-r--r--chromium/media/filters/vp9_parser.cc2
-rw-r--r--chromium/media/filters/win/media_foundation_audio_decoder.cc463
-rw-r--r--chromium/media/filters/win/media_foundation_audio_decoder.h122
-rw-r--r--chromium/media/filters/win/media_foundation_utils.cc209
-rw-r--r--chromium/media/filters/win/media_foundation_utils.h36
-rw-r--r--chromium/media/formats/BUILD.gn22
-rw-r--r--chromium/media/formats/hls/common_playlist_unittest.cc163
-rw-r--r--chromium/media/formats/hls/items_unittest.cc46
-rw-r--r--chromium/media/formats/hls/media_playlist.cc257
-rw-r--r--chromium/media/formats/hls/media_playlist.h73
-rw-r--r--chromium/media/formats/hls/media_playlist_fuzzer.cc29
-rw-r--r--chromium/media/formats/hls/media_playlist_test_builder.cc59
-rw-r--r--chromium/media/formats/hls/media_playlist_test_builder.h211
-rw-r--r--chromium/media/formats/hls/media_playlist_unittest.cc1120
-rw-r--r--chromium/media/formats/hls/media_segment.cc8
-rw-r--r--chromium/media/formats/hls/media_segment.h26
-rw-r--r--chromium/media/formats/hls/multivariant_playlist.cc175
-rw-r--r--chromium/media/formats/hls/multivariant_playlist.h60
-rw-r--r--chromium/media/formats/hls/multivariant_playlist_fuzzer.cc33
-rw-r--r--chromium/media/formats/hls/multivariant_playlist_test_builder.cc64
-rw-r--r--chromium/media/formats/hls/multivariant_playlist_test_builder.h146
-rw-r--r--chromium/media/formats/hls/multivariant_playlist_unittest.cc280
-rw-r--r--chromium/media/formats/hls/parse_status.cc12
-rw-r--r--chromium/media/formats/hls/parse_status.h12
-rw-r--r--chromium/media/formats/hls/playlist.h4
-rw-r--r--chromium/media/formats/hls/playlist_common.cc4
-rw-r--r--chromium/media/formats/hls/playlist_common.h12
-rw-r--r--chromium/media/formats/hls/playlist_test_builder.h124
-rw-r--r--chromium/media/formats/hls/tag_name.cc10
-rw-r--r--chromium/media/formats/hls/tag_name.h14
-rw-r--r--chromium/media/formats/hls/tags.cc149
-rw-r--r--chromium/media/formats/hls/tags.h116
-rw-r--r--chromium/media/formats/hls/tags_unittest.cc174
-rw-r--r--chromium/media/formats/hls/test_util.h26
-rw-r--r--chromium/media/formats/hls/types.cc78
-rw-r--r--chromium/media/formats/hls/types.h91
-rw-r--r--chromium/media/formats/hls/types_unittest.cc311
-rw-r--r--chromium/media/formats/hls/variable_dictionary_unittest.cc21
-rw-r--r--chromium/media/formats/hls/variant_stream.cc31
-rw-r--r--chromium/media/formats/hls/variant_stream.h106
-rw-r--r--chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc8
-rw-r--r--chromium/media/formats/mp2t/ts_section_pmt.cc6
-rw-r--r--chromium/media/formats/mp4/avc.cc4
-rw-r--r--chromium/media/formats/mp4/box_definitions.cc6
-rw-r--r--chromium/media/formats/mp4/hevc.cc67
-rw-r--r--chromium/media/formats/mp4/hevc.h3
-rw-r--r--chromium/media/formats/mp4/mp4_stream_parser_unittest.cc14
-rw-r--r--chromium/media/formats/mp4/nalu_test_helper.cc2
-rw-r--r--chromium/media/formats/webm/webm_cluster_parser_unittest.cc4
-rw-r--r--chromium/media/fuchsia/DIR_METADATA1
-rw-r--r--chromium/media/fuchsia/audio/fuchsia_audio_output_device_test.cc9
-rw-r--r--chromium/media/fuchsia/common/vmo_buffer_writer_queue.cc40
-rw-r--r--chromium/media/fuchsia/common/vmo_buffer_writer_queue.h24
-rw-r--r--chromium/media/fuchsia/mojom/BUILD.gn9
-rw-r--r--chromium/media/fuchsia/mojom/DEPS2
-rw-r--r--chromium/media/fuchsia/mojom/fuchsia_media_resource_provider.mojom16
-rw-r--r--chromium/media/fuchsia/mojom/fuchsia_media_resource_provider_mojom_traits.h10
-rw-r--r--chromium/media/fuchsia/video/BUILD.gn52
-rw-r--r--chromium/media/fuchsia/video/DEPS (renamed from chromium/media/filters/fuchsia/DEPS)2
-rw-r--r--chromium/media/fuchsia/video/OWNERS (renamed from chromium/media/filters/fuchsia/OWNERS)1
-rw-r--r--chromium/media/fuchsia/video/fuchsia_decoder_factory.cc65
-rw-r--r--chromium/media/fuchsia/video/fuchsia_decoder_factory.h47
-rw-r--r--chromium/media/fuchsia/video/fuchsia_video_decoder.cc (renamed from chromium/media/filters/fuchsia/fuchsia_video_decoder.cc)85
-rw-r--r--chromium/media/fuchsia/video/fuchsia_video_decoder.h (renamed from chromium/media/filters/fuchsia/fuchsia_video_decoder.h)27
-rw-r--r--chromium/media/fuchsia/video/fuchsia_video_decoder_unittest.cc (renamed from chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc)66
-rw-r--r--chromium/media/gpu/BUILD.gn14
-rw-r--r--chromium/media/gpu/DEPS8
-rw-r--r--chromium/media/gpu/android/android_video_encode_accelerator.cc18
-rw-r--r--chromium/media/gpu/android/codec_image_unittest.cc15
-rw-r--r--chromium/media/gpu/android/codec_wrapper.cc32
-rw-r--r--chromium/media/gpu/android/codec_wrapper.h3
-rw-r--r--chromium/media/gpu/android/codec_wrapper_unittest.cc25
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder.cc43
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder.h6
-rw-r--r--chromium/media/gpu/android/ndk_video_encode_accelerator.cc711
-rw-r--r--chromium/media/gpu/android/ndk_video_encode_accelerator.h176
-rw-r--r--chromium/media/gpu/android/ndk_video_encode_accelerator_tests.cc321
-rw-r--r--chromium/media/gpu/android/video_frame_factory_impl.cc54
-rw-r--r--chromium/media/gpu/android/video_frame_factory_impl.h6
-rw-r--r--chromium/media/gpu/android/video_frame_factory_impl_unittest.cc2
-rw-r--r--chromium/media/gpu/chromeos/dmabuf_video_frame_pool.h5
-rw-r--r--chromium/media/gpu/chromeos/image_processor_test.cc1
-rw-r--r--chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc5
-rw-r--r--chromium/media/gpu/chromeos/oop_video_decoder.cc23
-rw-r--r--chromium/media/gpu/chromeos/oop_video_decoder.h19
-rw-r--r--chromium/media/gpu/chromeos/platform_video_frame_pool.cc35
-rw-r--r--chromium/media/gpu/chromeos/platform_video_frame_pool.h10
-rw-r--r--chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc11
-rw-r--r--chromium/media/gpu/chromeos/platform_video_frame_utils.cc56
-rw-r--r--chromium/media/gpu/chromeos/platform_video_frame_utils.h46
-rw-r--r--chromium/media/gpu/chromeos/platform_video_frame_utils_unittest.cc80
-rw-r--r--chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc7
-rw-r--r--chromium/media/gpu/chromeos/vd_video_decode_accelerator.h4
-rw-r--r--chromium/media/gpu/chromeos/video_decoder_pipeline.cc32
-rw-r--r--chromium/media/gpu/chromeos/video_decoder_pipeline.h4
-rw-r--r--chromium/media/gpu/gpu_video_accelerator_util.cc16
-rw-r--r--chromium/media/gpu/gpu_video_decode_accelerator_factory.cc8
-rw-r--r--chromium/media/gpu/gpu_video_encode_accelerator_factory.cc11
-rw-r--r--chromium/media/gpu/gpu_video_encode_accelerator_helpers.cc32
-rw-r--r--chromium/media/gpu/gpu_video_encode_accelerator_helpers.h12
-rw-r--r--chromium/media/gpu/h264_decoder.cc32
-rw-r--r--chromium/media/gpu/h264_decoder.h18
-rw-r--r--chromium/media/gpu/h264_decoder_unittest.cc49
-rw-r--r--chromium/media/gpu/h265_decoder.cc30
-rw-r--r--chromium/media/gpu/h265_decoder_unittest.cc3
-rw-r--r--chromium/media/gpu/ipc/common/media_param_traits.cc13
-rw-r--r--chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h2
-rw-r--r--chromium/media/gpu/ipc/service/media_gpu_channel.cc11
-rw-r--r--chromium/media/gpu/ipc/service/picture_buffer_manager.cc1
-rw-r--r--chromium/media/gpu/mac/vt_config_util.mm50
-rw-r--r--chromium/media/gpu/mac/vt_config_util_unittest.cc169
-rw-r--r--chromium/media/gpu/mac/vt_video_decode_accelerator_mac.cc87
-rw-r--r--chromium/media/gpu/mac/vt_video_decode_accelerator_mac.h14
-rw-r--r--chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc8
-rw-r--r--chromium/media/gpu/v4l2/BUILD.gn3
-rw-r--r--chromium/media/gpu/v4l2/v4l2_device.cc2
-rw-r--r--chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc109
-rw-r--r--chromium/media/gpu/v4l2/v4l2_image_processor_backend.h10
-rw-r--r--chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.cc115
-rw-r--r--chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h25
-rw-r--r--chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.cc39
-rw-r--r--chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc21
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc1
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_decoder.cc7
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc16
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_chromium.cc377
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_chromium.h58
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc109
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h4
-rw-r--r--chromium/media/gpu/vaapi/BUILD.gn2
-rw-r--r--chromium/media/gpu/vaapi/OWNERS1
-rw-r--r--chromium/media/gpu/vaapi/av1_vaapi_video_decoder_delegate.cc36
-rw-r--r--chromium/media/gpu/vaapi/av1_vaapi_video_decoder_delegate.h1
-rw-r--r--chromium/media/gpu/vaapi/h264_vaapi_video_decoder_delegate.cc26
-rw-r--r--chromium/media/gpu/vaapi/h264_vaapi_video_decoder_delegate.h11
-rw-r--r--chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate.cc212
-rw-r--r--chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate.h16
-rw-r--r--chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate_unittest.cc17
-rw-r--r--chromium/media/gpu/vaapi/vaapi_common.cc4
-rw-r--r--chromium/media/gpu/vaapi/vaapi_common.h8
-rw-r--r--chromium/media/gpu/vaapi/vaapi_image_processor_backend.cc1
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc83
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.h10
-rw-r--r--chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.cc20
-rw-r--r--chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.h4
-rw-r--r--chromium/media/gpu/vaapi/vaapi_unittest.cc66
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc1
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc23
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decoder.cc35
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decoder.h6
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc115
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h2
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_encode_accelerator_unittest.cc56
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_encoder_delegate.cc43
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_encoder_delegate.h40
-rw-r--r--chromium/media/gpu/vaapi/vaapi_wrapper.cc115
-rw-r--r--chromium/media/gpu/vaapi/vaapi_wrapper.h1
-rw-r--r--chromium/media/gpu/vaapi/vp8_vaapi_video_decoder_delegate.cc5
-rw-r--r--chromium/media/gpu/vaapi/vp8_vaapi_video_encoder_delegate.cc19
-rw-r--r--chromium/media/gpu/vaapi/vp9_vaapi_video_decoder_delegate.cc5
-rw-r--r--chromium/media/gpu/vaapi/vp9_vaapi_video_encoder_delegate.cc17
-rw-r--r--chromium/media/gpu/vaapi/vp9_vaapi_video_encoder_delegate_unittest.cc30
-rw-r--r--chromium/media/gpu/video_decode_accelerator_perf_tests.cc5
-rw-r--r--chromium/media/gpu/video_decode_accelerator_tests.cc8
-rw-r--r--chromium/media/gpu/video_encode_accelerator_perf_tests.cc3
-rw-r--r--chromium/media/gpu/video_encode_accelerator_tests.cc29
-rw-r--r--chromium/media/gpu/vp9_decoder.cc2
-rw-r--r--chromium/media/gpu/windows/d3d11_copying_texture_wrapper_unittest.cc4
-rw-r--r--chromium/media/gpu/windows/d3d11_decoder_configurator.cc4
-rw-r--r--chromium/media/gpu/windows/d3d11_texture_selector.cc2
-rw-r--r--chromium/media/gpu/windows/d3d11_texture_wrapper_unittest.cc7
-rw-r--r--chromium/media/gpu/windows/d3d11_video_decoder.cc17
-rw-r--r--chromium/media/gpu/windows/dxva_picture_buffer_win.cc33
-rw-r--r--chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc64
-rw-r--r--chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h35
-rw-r--r--chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.cc159
-rw-r--r--chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.h15
-rw-r--r--chromium/media/gpu/windows/mf_audio_encoder.cc1
-rw-r--r--chromium/media/gpu/windows/supported_profile_helpers.cc6
-rw-r--r--chromium/media/gpu/windows/supported_profile_helpers_unittest.cc23
-rw-r--r--chromium/media/learning/common/target_histogram.h5
-rw-r--r--chromium/media/learning/mojo/public/cpp/learning_mojom_traits.h4
-rw-r--r--chromium/media/media_options.gni65
-rw-r--r--chromium/media/midi/BUILD.gn7
-rw-r--r--chromium/media/mojo/clients/mojo_video_encode_accelerator.cc14
-rw-r--r--chromium/media/mojo/clients/mojo_video_encode_accelerator_unittest.cc33
-rw-r--r--chromium/media/mojo/clients/win/media_foundation_renderer_client.cc149
-rw-r--r--chromium/media/mojo/clients/win/media_foundation_renderer_client.h47
-rw-r--r--chromium/media/mojo/clients/win/media_foundation_renderer_client_factory.cc25
-rw-r--r--chromium/media/mojo/clients/win/media_foundation_renderer_client_factory.h9
-rw-r--r--chromium/media/mojo/common/audio_data_s16_converter.cc3
-rw-r--r--chromium/media/mojo/mojom/BUILD.gn95
-rw-r--r--chromium/media/mojo/mojom/audio_data.mojom24
-rw-r--r--chromium/media/mojo/mojom/media_foundation_rendering_mode_mojom_traits.h37
-rw-r--r--chromium/media/mojo/mojom/media_metrics_provider.mojom6
-rw-r--r--chromium/media/mojo/mojom/media_types.mojom27
-rw-r--r--chromium/media/mojo/mojom/media_types_enum_mojom_traits.h35
-rw-r--r--chromium/media/mojo/mojom/renderer_extensions.mojom25
-rw-r--r--chromium/media/mojo/mojom/speech_recognition.mojom197
-rw-r--r--chromium/media/mojo/mojom/speech_recognition_result_mojom_traits.h2
-rw-r--r--chromium/media/mojo/mojom/speech_recognition_service.mojom216
-rw-r--r--chromium/media/mojo/mojom/stable/stable_video_decoder_types.mojom15
-rw-r--r--chromium/media/mojo/mojom/stable/stable_video_decoder_types_mojom_traits.h54
-rw-r--r--chromium/media/mojo/mojom/status_mojom_traits.cc1
-rw-r--r--chromium/media/mojo/mojom/status_mojom_traits.h5
-rw-r--r--chromium/media/mojo/mojom/video_encode_accelerator.mojom10
-rw-r--r--chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits.cc59
-rw-r--r--chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits.h40
-rw-r--r--chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits_unittest.cc2
-rw-r--r--chromium/media/mojo/mojom/video_frame_metadata_mojom_traits.cc10
-rw-r--r--chromium/media/mojo/mojom/video_frame_metadata_mojom_traits.h9
-rw-r--r--chromium/media/mojo/mojom/video_frame_metadata_mojom_traits_unittest.cc8
-rw-r--r--chromium/media/mojo/services/BUILD.gn41
-rw-r--r--chromium/media/mojo/services/DEPS4
-rw-r--r--chromium/media/mojo/services/gpu_mojo_media_client.cc35
-rw-r--r--chromium/media/mojo/services/gpu_mojo_media_client.h17
-rw-r--r--chromium/media/mojo/services/gpu_mojo_media_client_android.cc7
-rw-r--r--chromium/media/mojo/services/gpu_mojo_media_client_cros.cc32
-rw-r--r--chromium/media/mojo/services/gpu_mojo_media_client_mac.cc13
-rw-r--r--chromium/media/mojo/services/gpu_mojo_media_client_stubs.cc8
-rw-r--r--chromium/media/mojo/services/gpu_mojo_media_client_win.cc30
-rw-r--r--chromium/media/mojo/services/interface_factory_impl.cc6
-rw-r--r--chromium/media/mojo/services/media_foundation_mojo_media_client.cc15
-rw-r--r--chromium/media/mojo/services/media_foundation_mojo_media_client.h4
-rw-r--r--chromium/media/mojo/services/media_foundation_renderer_wrapper.cc7
-rw-r--r--chromium/media/mojo/services/media_foundation_renderer_wrapper.h3
-rw-r--r--chromium/media/mojo/services/media_foundation_service.cc102
-rw-r--r--chromium/media/mojo/services/media_metrics_provider.cc10
-rw-r--r--chromium/media/mojo/services/media_metrics_provider.h1
-rw-r--r--chromium/media/mojo/services/media_service_factory.cc3
-rw-r--r--chromium/media/mojo/services/mojo_audio_input_stream.cc8
-rw-r--r--chromium/media/mojo/services/mojo_audio_input_stream.h1
-rw-r--r--chromium/media/mojo/services/mojo_audio_output_stream.cc3
-rw-r--r--chromium/media/mojo/services/mojo_media_client.cc3
-rw-r--r--chromium/media/mojo/services/mojo_media_client.h4
-rw-r--r--chromium/media/mojo/services/mojo_renderer_service.cc4
-rw-r--r--chromium/media/mojo/services/mojo_renderer_service.h1
-rw-r--r--chromium/media/mojo/services/mojo_video_decoder_service.cc21
-rw-r--r--chromium/media/mojo/services/mojo_video_decoder_service.h13
-rw-r--r--chromium/media/mojo/services/mojo_video_encode_accelerator_service.cc9
-rw-r--r--chromium/media/mojo/services/mojo_video_encode_accelerator_service.h3
-rw-r--r--chromium/media/mojo/services/mojo_video_encode_accelerator_service_unittest.cc13
-rw-r--r--chromium/media/mojo/services/stable_video_decoder_factory_service.cc114
-rw-r--r--chromium/media/mojo/services/stable_video_decoder_factory_service.h48
-rw-r--r--chromium/media/mojo/services/stable_video_decoder_service.cc143
-rw-r--r--chromium/media/mojo/services/stable_video_decoder_service.h132
-rw-r--r--chromium/media/mojo/services/stable_video_decoder_service_unittest.cc501
-rw-r--r--chromium/media/mojo/services/webrtc_video_perf_fuzzer_seed_corpus/update_record_and_get_perf.textproto77
-rw-r--r--chromium/media/mojo/services/webrtc_video_perf_history.cc13
-rw-r--r--chromium/media/mojo/services/webrtc_video_perf_history_unittest.cc9
-rw-r--r--chromium/media/mojo/services/webrtc_video_perf_mojolpm_fuzzer.cc226
-rw-r--r--chromium/media/mojo/services/webrtc_video_perf_mojolpm_fuzzer.proto36
-rw-r--r--chromium/media/muxers/webm_muxer_unittest.cc4
-rw-r--r--chromium/media/parsers/jpeg_parser.cc1
-rw-r--r--chromium/media/remoting/courier_renderer_unittest.cc1
-rw-r--r--chromium/media/remoting/demuxer_stream_adapter.cc17
-rw-r--r--chromium/media/remoting/demuxer_stream_adapter_unittest.cc24
-rw-r--r--chromium/media/remoting/fake_remoter.cc7
-rw-r--r--chromium/media/remoting/fake_remoter.h1
-rw-r--r--chromium/media/remoting/receiver.cc4
-rw-r--r--chromium/media/remoting/receiver.h1
-rw-r--r--chromium/media/remoting/renderer_controller.cc3
-rw-r--r--chromium/media/remoting/triggers.h12
-rw-r--r--chromium/media/renderers/BUILD.gn2
-rw-r--r--chromium/media/renderers/audio_renderer_impl.cc26
-rw-r--r--chromium/media/renderers/audio_renderer_impl_unittest.cc1
-rw-r--r--chromium/media/renderers/default_decoder_factory.cc25
-rw-r--r--chromium/media/renderers/paint_canvas_video_renderer.cc28
-rw-r--r--chromium/media/renderers/paint_canvas_video_renderer_unittest.cc6
-rw-r--r--chromium/media/renderers/renderer_impl.cc7
-rw-r--r--chromium/media/renderers/renderer_impl.h4
-rw-r--r--chromium/media/renderers/video_frame_rgba_to_yuva_converter.cc61
-rw-r--r--chromium/media/renderers/video_frame_rgba_to_yuva_converter.h4
-rw-r--r--chromium/media/renderers/video_renderer_impl.cc21
-rw-r--r--chromium/media/renderers/video_renderer_impl.h4
-rw-r--r--chromium/media/renderers/video_renderer_impl_unittest.cc1
-rw-r--r--chromium/media/renderers/video_resource_updater.cc75
-rw-r--r--chromium/media/renderers/video_resource_updater.h9
-rw-r--r--chromium/media/renderers/video_resource_updater_unittest.cc67
-rw-r--r--chromium/media/renderers/win/media_foundation_audio_stream.cc174
-rw-r--r--chromium/media/renderers/win/media_foundation_audio_stream.h1
-rw-r--r--chromium/media/renderers/win/media_foundation_renderer.cc51
-rw-r--r--chromium/media/renderers/win/media_foundation_renderer.h8
-rw-r--r--chromium/media/renderers/win/media_foundation_renderer_extension.h12
-rw-r--r--chromium/media/renderers/win/media_foundation_renderer_unittest.cc7
-rw-r--r--chromium/media/renderers/win/media_foundation_rendering_mode.cc31
-rw-r--r--chromium/media/renderers/win/media_foundation_rendering_mode.h24
-rw-r--r--chromium/media/renderers/win/media_foundation_source_wrapper.cc3
-rw-r--r--chromium/media/renderers/win/media_foundation_stream_wrapper.cc11
-rw-r--r--chromium/media/renderers/win/media_foundation_texture_pool_unittest.cc7
-rw-r--r--chromium/media/renderers/win/media_foundation_video_stream.cc45
-rw-r--r--chromium/media/video/BUILD.gn11
-rw-r--r--chromium/media/video/av1_video_encoder.cc2
-rw-r--r--chromium/media/video/fake_video_encode_accelerator.cc1
-rw-r--r--chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc9
-rw-r--r--chromium/media/video/h264_level_limits.cc1
-rw-r--r--chromium/media/video/h265_parser.cc122
-rw-r--r--chromium/media/video/h265_parser.h37
-rw-r--r--chromium/media/video/h265_poc_unittest.cc393
-rw-r--r--chromium/media/video/openh264_video_encoder.cc3
-rw-r--r--chromium/media/video/picture.cc3
-rw-r--r--chromium/media/video/picture.h7
-rw-r--r--chromium/media/video/renderable_gpu_memory_buffer_video_frame_pool.cc2
-rw-r--r--chromium/media/video/software_video_encoder_test.cc37
-rw-r--r--chromium/media/video/video_encode_accelerator.cc5
-rw-r--r--chromium/media/video/video_encode_accelerator.h38
-rw-r--r--chromium/media/video/video_encoder_fallback.cc1
-rw-r--r--chromium/media/video/video_encoder_fallback_test.cc111
-rw-r--r--chromium/media/video/vpx_video_encoder.cc2
-rw-r--r--chromium/media/webrtc/audio_processor.cc21
-rw-r--r--chromium/media/webrtc/audio_processor.h4
-rw-r--r--chromium/media/webrtc/audio_processor_test.cc6
-rw-r--r--chromium/media/webrtc/helpers_unittests.cc5
-rw-r--r--chromium/media/webrtc/webrtc_features.cc11
583 files changed, 15887 insertions, 8024 deletions
diff --git a/chromium/media/BUILD.gn b/chromium/media/BUILD.gn
index 01e0910666e..c8205385759 100644
--- a/chromium/media/BUILD.gn
+++ b/chromium/media/BUILD.gn
@@ -32,8 +32,7 @@ buildflag_header("media_buildflags") {
"ENABLE_FFMPEG=$media_use_ffmpeg",
"ENABLE_FFMPEG_VIDEO_DECODERS=$enable_ffmpeg_video_decoders",
"ENABLE_PLATFORM_HEVC=$enable_platform_hevc",
- "ENABLE_PLATFORM_HEVC_DECODING=$enable_platform_hevc_decoding",
- "ENABLE_PLATFORM_ENCRYPTED_HEVC=$enable_platform_encrypted_hevc",
+ "ENABLE_HEVC_PARSER_AND_HW_DECODER=$enable_hevc_parser_and_hw_decoder",
"ENABLE_HLS_SAMPLE_AES=$enable_hls_sample_aes",
"ENABLE_HLS_DEMUXER=$enable_hls_demuxer",
"ENABLE_LIBGAV1_DECODER=$enable_libgav1_decoder",
@@ -110,7 +109,6 @@ component("media") {
"//base:i18n",
"//base/third_party/dynamic_annotations",
"//cc/paint",
- "//crypto:platform",
"//gpu/command_buffer/client:gles2_interface",
"//gpu/command_buffer/common",
"//third_party/libyuv",
@@ -210,6 +208,7 @@ test("media_unittests") {
deps += [
"//media/fuchsia/audio:unittests",
"//media/fuchsia/cdm/service:unittests",
+ "//media/fuchsia/video:unittests",
]
use_cfv2 = false
@@ -295,6 +294,7 @@ component("shared_memory_support") {
sources = [
"base/audio_bus.cc",
"base/audio_bus.h",
+ "base/audio_codecs.h",
"base/audio_latency.cc",
"base/audio_latency.h",
"base/audio_parameters.cc",
@@ -305,6 +305,7 @@ component("shared_memory_support") {
"base/channel_layout.cc",
"base/channel_layout.h",
"base/limits.h",
+ "base/media_export.h",
"base/media_shmem_export.h",
"base/sample_format.cc",
"base/sample_format.h",
diff --git a/chromium/media/audio/BUILD.gn b/chromium/media/audio/BUILD.gn
index ebe37172d25..46faaaea997 100644
--- a/chromium/media/audio/BUILD.gn
+++ b/chromium/media/audio/BUILD.gn
@@ -78,6 +78,7 @@ source_set("audio") {
"audio_input_ipc.h",
"audio_input_stream_data_interceptor.cc",
"audio_input_stream_data_interceptor.h",
+ "audio_io.cc",
"audio_io.h",
"audio_manager.cc",
"audio_manager.h",
@@ -136,6 +137,8 @@ source_set("audio") {
"scoped_task_runner_observer.h",
"simple_sources.cc",
"simple_sources.h",
+ "system_glitch_reporter.cc",
+ "system_glitch_reporter.h",
"wav_audio_handler.cc",
"wav_audio_handler.h",
]
@@ -205,6 +208,8 @@ source_set("audio") {
"win/waveout_output_win.h",
]
+ deps += [ "//media/base/win:media_foundation_util" ]
+
libs += [
"dxguid.lib",
"setupapi.lib",
diff --git a/chromium/media/audio/alsa/alsa_wrapper.cc b/chromium/media/audio/alsa/alsa_wrapper.cc
index b24630dd6bf..a5ac06af062 100644
--- a/chromium/media/audio/alsa/alsa_wrapper.cc
+++ b/chromium/media/audio/alsa/alsa_wrapper.cc
@@ -350,6 +350,10 @@ int AlsaWrapper::MixerSelemHasPlaybackSwitch(snd_mixer_elem_t* elem) {
return snd_mixer_selem_has_playback_switch(elem);
}
+int AlsaWrapper::MixerSelemHasPlaybackVolume(snd_mixer_elem_t* elem) {
+ return snd_mixer_selem_has_playback_volume(elem);
+}
+
void AlsaWrapper::MixerSelemIdSetIndex(snd_mixer_selem_id_t* obj,
unsigned int val) {
snd_mixer_selem_id_set_index(obj, val);
@@ -367,6 +371,12 @@ int AlsaWrapper::MixerSelemSetPlaybackSwitch(
return snd_mixer_selem_set_playback_switch(elem, channel, value);
}
+int AlsaWrapper::MixerSelemSetPlaybackSwitchAll(
+ snd_mixer_elem_t* elem,
+ int value) {
+ return snd_mixer_selem_set_playback_switch_all(elem, value);
+}
+
int AlsaWrapper::MixerSelemSetPlaybackVolumeAll(snd_mixer_elem_t* elem,
long value) {
return snd_mixer_selem_set_playback_volume_all(elem, value);
diff --git a/chromium/media/audio/alsa/alsa_wrapper.h b/chromium/media/audio/alsa/alsa_wrapper.h
index 970327f43fe..11e0f207617 100644
--- a/chromium/media/audio/alsa/alsa_wrapper.h
+++ b/chromium/media/audio/alsa/alsa_wrapper.h
@@ -143,12 +143,15 @@ class MEDIA_EXPORT AlsaWrapper {
long* min,
long* max);
virtual int MixerSelemHasPlaybackSwitch(snd_mixer_elem_t* elem);
+ virtual int MixerSelemHasPlaybackVolume(snd_mixer_elem_t* elem);
virtual void MixerSelemIdSetIndex(snd_mixer_selem_id_t* obj,
unsigned int val);
virtual void MixerSelemIdSetName(snd_mixer_selem_id_t* obj, const char* val);
virtual int MixerSelemSetPlaybackSwitch(snd_mixer_elem_t* elem,
snd_mixer_selem_channel_id_t channel,
int value);
+ virtual int MixerSelemSetPlaybackSwitchAll(snd_mixer_elem_t* elem,
+ int value);
virtual int MixerSelemSetPlaybackVolumeAll(snd_mixer_elem_t* elem,
long value);
virtual int MixerSelemIdMalloc(snd_mixer_selem_id_t** ptr);
diff --git a/chromium/media/audio/android/audio_track_output_stream.cc b/chromium/media/audio/android/audio_track_output_stream.cc
index 3c5804f9d2f..0b7b11b94cf 100644
--- a/chromium/media/audio/android/audio_track_output_stream.cc
+++ b/chromium/media/audio/android/audio_track_output_stream.cc
@@ -69,6 +69,7 @@ bool AudioTrackOutputStream::Open() {
case AudioParameters::AUDIO_BITSTREAM_IEC61937:
format = kEncodingIec61937;
break;
+ case AudioParameters::AUDIO_BITSTREAM_DTSX_P2:
case AudioParameters::AUDIO_FAKE:
case AudioParameters::AUDIO_PCM_LINEAR:
case AudioParameters::AUDIO_PCM_LOW_LATENCY:
diff --git a/chromium/media/audio/audio_device_thread.cc b/chromium/media/audio/audio_device_thread.cc
index 2340cc137b5..eec6885d931 100644
--- a/chromium/media/audio/audio_device_thread.cc
+++ b/chromium/media/audio/audio_device_thread.cc
@@ -68,9 +68,11 @@ AudioDeviceThread::~AudioDeviceThread() {
base::PlatformThread::Join(thread_handle_);
}
+#if BUILDFLAG(IS_APPLE)
base::TimeDelta AudioDeviceThread::GetRealtimePeriod() {
return callback_->buffer_duration();
}
+#endif
void AudioDeviceThread::ThreadMain() {
base::PlatformThread::SetName(thread_name_);
diff --git a/chromium/media/audio/audio_device_thread.h b/chromium/media/audio/audio_device_thread.h
index dac3c99b7d7..2a8d9151966 100644
--- a/chromium/media/audio/audio_device_thread.h
+++ b/chromium/media/audio/audio_device_thread.h
@@ -11,6 +11,7 @@
#include "base/sync_socket.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_checker.h"
+#include "build/buildflag.h"
#include "media/base/audio_parameters.h"
#include "media/base/media_export.h"
@@ -82,7 +83,9 @@ class MEDIA_EXPORT AudioDeviceThread : public base::PlatformThread::Delegate {
~AudioDeviceThread() override;
private:
+#if BUILDFLAG(IS_APPLE)
base::TimeDelta GetRealtimePeriod() final;
+#endif
void ThreadMain() final;
const raw_ptr<Callback> callback_;
diff --git a/chromium/media/audio/audio_encoders_unittest.cc b/chromium/media/audio/audio_encoders_unittest.cc
index e3608591703..8db7f1e214b 100644
--- a/chromium/media/audio/audio_encoders_unittest.cc
+++ b/chromium/media/audio/audio_encoders_unittest.cc
@@ -20,6 +20,7 @@
#include "media/base/audio_parameters.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/status.h"
+#include "media/media_buildflags.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/opus/src/include/opus.h"
@@ -27,16 +28,21 @@
#include "base/win/scoped_com_initializer.h"
#include "base/win/windows_version.h"
#include "media/gpu/windows/mf_audio_encoder.h"
-#include "media/media_buildflags.h"
+#define HAS_AAC_ENCODER 1
+#endif
+
+#if BUILDFLAG(IS_MAC) && BUILDFLAG(USE_PROPRIETARY_CODECS)
+#include "media/filters/mac/audio_toolbox_audio_encoder.h"
+#define HAS_AAC_ENCODER 1
+#endif
-#if BUILDFLAG(ENABLE_FFMPEG)
+#if HAS_AAC_ENCODER
#include "media/base/audio_decoder.h"
#include "media/base/channel_layout.h"
#include "media/base/decoder_status.h"
#include "media/base/mock_media_log.h"
#include "media/filters/ffmpeg_audio_decoder.h"
-#endif // BUILDFLAG(ENABLE_FFMPEG)
-#endif // BUILDFLAG(IS_WIN)
+#endif
namespace media {
@@ -48,11 +54,11 @@ constexpr int kAudioSampleRateWithDelay = 647744;
// value of 2880 frames per buffer at a sample rate of 48 khz.
constexpr base::TimeDelta kOpusBufferDuration = base::Milliseconds(60);
-#if BUILDFLAG(IS_WIN)
+#if HAS_AAC_ENCODER
// AAC puts 1024 PCM samples into each AAC frame, which corresponds to a
// duration of 21 and 1/3 milliseconds at a sample rate of 48 khz.
constexpr int kAacFramesPerBuffer = 1024;
-#endif // BUILDFLAG(IS_WIN)
+#endif // HAS_AAC_ENCODER
struct TestAudioParams {
const AudioCodec codec;
@@ -76,13 +82,13 @@ constexpr TestAudioParams kTestAudioParamsOpus[] = {
{AudioCodec::kOpus, 2, kAudioSampleRateWithDelay},
};
-#if BUILDFLAG(IS_WIN)
+#if HAS_AAC_ENCODER
constexpr TestAudioParams kTestAudioParamsAAC[] = {
{AudioCodec::kAAC, 2, 48000}, {AudioCodec::kAAC, 6, 48000},
{AudioCodec::kAAC, 1, 48000}, {AudioCodec::kAAC, 2, 44100},
{AudioCodec::kAAC, 6, 44100}, {AudioCodec::kAAC, 1, 44100},
};
-#endif // BUILDFLAG(IS_WIN)
+#endif // HAS_AAC_ENCODER
std::string EncoderStatusCodeToString(EncoderStatus::Codes code) {
switch (code) {
@@ -165,6 +171,11 @@ class AudioEncodersTest : public ::testing::TestWithParam<TestAudioParams> {
frames_per_buffer_ = kAacFramesPerBuffer;
buffer_duration_ = AudioTimestampHelper::FramesToTime(
frames_per_buffer_, options_.sample_rate);
+#elif HAS_AAC_ENCODER && BUILDFLAG(IS_MAC)
+ encoder_ = std::make_unique<AudioToolboxAudioEncoder>();
+ frames_per_buffer_ = kAacFramesPerBuffer;
+ buffer_duration_ = AudioTimestampHelper::FramesToTime(
+ frames_per_buffer_, options_.sample_rate);
#else
NOTREACHED();
#endif
@@ -222,9 +233,10 @@ class AudioEncodersTest : public ::testing::TestWithParam<TestAudioParams> {
if (!done_cb) {
pending_callback_results_.emplace_back();
done_cb = base::BindLambdaForTesting([&](EncoderStatus error) {
- if (!error.is_ok())
+ if (!error.is_ok()) {
FAIL() << "Error code: " << EncoderStatusCodeToString(error.code())
<< "\nError message: " << error.message();
+ }
pending_callback_results_[pending_callback_count_].status_code =
error.code();
@@ -262,13 +274,34 @@ class AudioEncodersTest : public ::testing::TestWithParam<TestAudioParams> {
}
}
- void ValidateOutputDuration() {
- int64_t amount_of_padding =
+ // The amount of front padding that the encoder emits.
+ size_t GetExpectedPadding() {
+#if BUILDFLAG(IS_MAC)
+ if (options_.codec == AudioCodec::kAAC)
+ return 2112;
+#endif
+ return 0;
+ }
+
+ void ValidateOutputDuration(int64_t flush_count = 1) {
+ // Since encoders can only output buffers of size `frames_per_buffer_`, the
+ // number of outputs will be larger than the number of inputs.
+ int64_t frame_remainder =
frames_per_buffer_ -
(expected_duration_helper_->frame_count() % frames_per_buffer_);
+
+ int64_t amount_of_padding = GetExpectedPadding() + frame_remainder;
+
+ // Padding is re-emitted after each flush.
+ amount_of_padding *= flush_count;
+
+ int64_t number_of_outputs = std::ceil(
+ (expected_duration_helper_->frame_count() + amount_of_padding) /
+ static_cast<double>(frames_per_buffer_));
int64_t duration_of_padding_us =
- (amount_of_padding * base::Time::kMicrosecondsPerSecond) /
- options_.sample_rate;
+ number_of_outputs * AudioTimestampHelper::FramesToTime(
+ frames_per_buffer_, options_.sample_rate)
+ .InMicroseconds();
int64_t acceptable_diff = duration_of_padding_us + 10;
EXPECT_NEAR(expected_output_duration_.InMicroseconds(),
observed_output_duration_.InMicroseconds(), acceptable_diff);
@@ -404,7 +437,7 @@ TEST_P(AudioEncodersTest, EncodeAndFlushTwice) {
RunLoop();
EXPECT_TRUE(called_flush2);
ValidateDoneCallbacksRun();
- ValidateOutputDuration();
+ ValidateOutputDuration(/*flush_count=*/2);
}
// Instead of synchronously calling `Encode`, wait until `done_cb` is invoked
@@ -487,15 +520,18 @@ TEST_P(AudioEncodersTest, Timestamps) {
int num_frames =
AudioTimestampHelper::TimeToFrames(duration, options_.sample_rate);
+ size_t expected_padding = GetExpectedPadding();
+
// The encoder will have multiple outputs per input if `num_frames` is
// larger than `frames_per_buffer_`, and fewer outputs per input if it is
// smaller.
- size_t expected_outputs = (num_frames * kCount) / frames_per_buffer_;
+ size_t total_frames = num_frames * kCount + expected_padding;
+ size_t expected_outputs = total_frames / frames_per_buffer_;
// Round up if the division truncated. This is because the encoder will pad
// the final buffer to produce output, even if there aren't
// `frames_per_buffer_` left.
- if ((num_frames * kCount) % frames_per_buffer_ != 0)
+ if (total_frames % frames_per_buffer_ != 0)
expected_outputs++;
base::TimeTicks current_timestamp;
@@ -516,8 +552,8 @@ TEST_P(AudioEncodersTest, Timestamps) {
for (auto& observed_ts : timestamps) {
base::TimeTicks expected_ts =
timestamp_tracker.GetTimestamp() + base::TimeTicks();
- EXPECT_TRUE(
- TimesAreNear(expected_ts, observed_ts, base::Microseconds(1)));
+ EXPECT_TRUE(TimesAreNear(expected_ts, observed_ts, base::Microseconds(1)))
+ << "expected_ts: " << expected_ts << ", observed_ts: " << observed_ts;
timestamp_tracker.AddFrames(frames_per_buffer_);
}
}
@@ -561,10 +597,11 @@ TEST_P(AudioEncodersTest, TimeContinuityBreak) {
FlushAndVerifyStatus();
- ASSERT_EQ(3u, timestamps.size());
+ ASSERT_LE(3u, timestamps.size());
EXPECT_TRUE(TimesAreNear(ts0, timestamps[0], base::Microseconds(1)));
EXPECT_TRUE(TimesAreNear(ts1, timestamps[1], base::Microseconds(1)));
EXPECT_TRUE(TimesAreNear(ts2, timestamps[2], base::Microseconds(1)));
+ timestamps.clear();
// Reset output timestamp after Flush(), the encoder should start producing
// timestamps from new base 0.
@@ -578,9 +615,9 @@ TEST_P(AudioEncodersTest, TimeContinuityBreak) {
FlushAndVerifyStatus();
- ASSERT_EQ(5u, timestamps.size());
- EXPECT_TRUE(TimesAreNear(ts3, timestamps[3], base::Microseconds(1)));
- EXPECT_TRUE(TimesAreNear(ts4, timestamps[4], base::Microseconds(1)));
+ ASSERT_LE(2u, timestamps.size());
+ EXPECT_TRUE(TimesAreNear(ts3, timestamps[0], base::Microseconds(1)));
+ EXPECT_TRUE(TimesAreNear(ts4, timestamps[1], base::Microseconds(1)));
ValidateDoneCallbacksRun();
}
@@ -588,11 +625,11 @@ INSTANTIATE_TEST_SUITE_P(Opus,
AudioEncodersTest,
testing::ValuesIn(kTestAudioParamsOpus));
-#if BUILDFLAG(IS_WIN)
+#if HAS_AAC_ENCODER
INSTANTIATE_TEST_SUITE_P(AAC,
AudioEncodersTest,
testing::ValuesIn(kTestAudioParamsAAC));
-#endif // BUILDFLAG(IS_WIN)
+#endif // HAS_AAC_ENCODER
class AudioOpusEncoderTest : public AudioEncodersTest {
public:
@@ -736,13 +773,13 @@ INSTANTIATE_TEST_SUITE_P(Opus,
AudioOpusEncoderTest,
testing::ValuesIn(kTestAudioParamsOpus));
-#if BUILDFLAG(IS_WIN)
-class MFAudioEncoderTest : public AudioEncodersTest {
+#if HAS_AAC_ENCODER
+class AACAudioEncoderTest : public AudioEncodersTest {
public:
- MFAudioEncoderTest() = default;
- MFAudioEncoderTest(const MFAudioEncoderTest&) = delete;
- MFAudioEncoderTest& operator=(const MFAudioEncoderTest&) = delete;
- ~MFAudioEncoderTest() override = default;
+ AACAudioEncoderTest() = default;
+ AACAudioEncoderTest(const AACAudioEncoderTest&) = delete;
+ AACAudioEncoderTest& operator=(const AACAudioEncoderTest&) = delete;
+ ~AACAudioEncoderTest() override = default;
#if BUILDFLAG(ENABLE_FFMPEG) && BUILDFLAG(USE_PROPRIETARY_CODECS)
void InitializeDecoder() {
@@ -786,9 +823,10 @@ class MFAudioEncoderTest : public AudioEncodersTest {
#endif // BUILDFLAG(ENABLE_FFMPEG) && BUILDFLAG(USE_PROPRIETARY_CODECS)
};
+#if BUILDFLAG(IS_WIN)
// `MFAudioEncoder` requires `kMinSamplesForOutput` before `Flush` can be called
// successfully.
-TEST_P(MFAudioEncoderTest, FlushWithTooLittleInput) {
+TEST_P(AACAudioEncoderTest, FlushWithTooLittleInput) {
InitializeEncoder(base::DoNothing());
ProduceAudioAndEncode();
@@ -796,9 +834,10 @@ TEST_P(MFAudioEncoderTest, FlushWithTooLittleInput) {
ValidateDoneCallbacksRun();
}
+#endif
#if BUILDFLAG(ENABLE_FFMPEG) && BUILDFLAG(USE_PROPRIETARY_CODECS)
-TEST_P(MFAudioEncoderTest, FullCycleEncodeDecode) {
+TEST_P(AACAudioEncoderTest, FullCycleEncodeDecode) {
InitializeDecoder();
int encode_output_callback_count = 0;
@@ -823,18 +862,18 @@ TEST_P(MFAudioEncoderTest, FullCycleEncodeDecode) {
FlushAndVerifyStatus();
- // The encoder should produce three pieces of output.
- EXPECT_EQ(3, encode_output_callback_count);
- // We should get three status messages from the decoder.
- EXPECT_EQ(3, decode_status_callback_count);
- // The decoder should produce three pieces of output.
- EXPECT_EQ(3, decoder_output_callback_count);
+ int expected_outputs = 3 + std::ceil(GetExpectedPadding() /
+ static_cast<double>(frames_per_buffer_));
+
+ EXPECT_EQ(expected_outputs, encode_output_callback_count);
+ EXPECT_EQ(expected_outputs, decode_status_callback_count);
+ EXPECT_EQ(expected_outputs, decoder_output_callback_count);
}
#endif // BUILDFLAG(ENABLE_FFMPEG) && BUILDFLAG(USE_PROPRIETARY_CODECS)
INSTANTIATE_TEST_SUITE_P(AAC,
- MFAudioEncoderTest,
+ AACAudioEncoderTest,
testing::ValuesIn(kTestAudioParamsAAC));
-#endif // BUILDFLAG(IS_WIN)
+#endif // HAS_AAC_ENCODER
} // namespace media
diff --git a/chromium/media/audio/audio_features.cc b/chromium/media/audio/audio_features.cc
index 49e14b5ca40..3e60bf27e33 100644
--- a/chromium/media/audio/audio_features.cc
+++ b/chromium/media/audio/audio_features.cc
@@ -6,14 +6,9 @@
#include "base/feature_list.h"
#include "build/build_config.h"
-#include "build/chromeos_buildflags.h"
namespace features {
-// Allows usage of OS-level (platform) audio encoders.
-const base::Feature kPlatformAudioEncoder{"PlatformAudioEncoder",
- base::FEATURE_DISABLED_BY_DEFAULT};
-
// When the audio service in a separate process, kill it when a hang is
// detected. It will be restarted when needed.
const base::Feature kAudioServiceOutOfProcessKillAtHang{
@@ -38,7 +33,7 @@ const base::Feature kUseAAudioDriver{"UseAAudioDriver",
base::FEATURE_ENABLED_BY_DEFAULT};
#endif
-#if BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#if BUILDFLAG(IS_CHROMEOS)
const base::Feature kCrOSSystemAEC{"CrOSSystemAECWithBoardTuningsAllowed",
base::FEATURE_ENABLED_BY_DEFAULT};
const base::Feature kCrOSSystemAECDeactivatedGroups{
diff --git a/chromium/media/audio/audio_features.h b/chromium/media/audio/audio_features.h
index 6dd296b62e1..17b96b0ae0d 100644
--- a/chromium/media/audio/audio_features.h
+++ b/chromium/media/audio/audio_features.h
@@ -7,12 +7,10 @@
#include "base/feature_list.h"
#include "build/build_config.h"
-#include "build/chromeos_buildflags.h"
#include "media/base/media_export.h"
namespace features {
-MEDIA_EXPORT extern const base::Feature kPlatformAudioEncoder;
MEDIA_EXPORT extern const base::Feature kAudioServiceOutOfProcessKillAtHang;
MEDIA_EXPORT extern const base::Feature kDumpOnAudioServiceHang;
@@ -20,7 +18,7 @@ MEDIA_EXPORT extern const base::Feature kDumpOnAudioServiceHang;
MEDIA_EXPORT extern const base::Feature kUseAAudioDriver;
#endif
-#if BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#if BUILDFLAG(IS_CHROMEOS)
MEDIA_EXPORT extern const base::Feature kCrOSSystemAEC;
MEDIA_EXPORT extern const base::Feature kCrOSSystemAECDeactivatedGroups;
MEDIA_EXPORT extern const base::Feature kCrOSEnforceSystemAecNsAgc;
diff --git a/chromium/media/audio/audio_input_delegate.h b/chromium/media/audio/audio_input_delegate.h
index 3c97d8031aa..babd1b48128 100644
--- a/chromium/media/audio/audio_input_delegate.h
+++ b/chromium/media/audio/audio_input_delegate.h
@@ -33,9 +33,6 @@ class MEDIA_EXPORT AudioInputDelegate {
std::unique_ptr<base::CancelableSyncSocket> socket,
bool initially_muted) = 0;
- // Called when the microphone is muted/unmuted.
- virtual void OnMuted(int stream_id, bool is_muted) = 0;
-
// Called if stream encounters an error and has become unusable.
virtual void OnStreamError(int stream_id) = 0;
};
diff --git a/chromium/media/audio/audio_io.cc b/chromium/media/audio/audio_io.cc
new file mode 100644
index 00000000000..36f94b812d1
--- /dev/null
+++ b/chromium/media/audio/audio_io.cc
@@ -0,0 +1,19 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_io.h"
+
+namespace media {
+
+int AudioOutputStream::AudioSourceCallback::OnMoreData(
+ base::TimeDelta delay,
+ base::TimeTicks delay_timestamp,
+ int prior_frames_skipped,
+ AudioBus* dest,
+ bool is_mixing) {
+ // Ignore the `is_mixing` flag by default.
+ return OnMoreData(delay, delay_timestamp, prior_frames_skipped, dest);
+}
+
+} // namespace media \ No newline at end of file
diff --git a/chromium/media/audio/audio_io.h b/chromium/media/audio/audio_io.h
index 295be0994d3..09133b94fa9 100644
--- a/chromium/media/audio/audio_io.h
+++ b/chromium/media/audio/audio_io.h
@@ -74,6 +74,12 @@ class MEDIA_EXPORT AudioOutputStream {
int prior_frames_skipped,
AudioBus* dest) = 0;
+ virtual int OnMoreData(base::TimeDelta delay,
+ base::TimeTicks delay_timestamp,
+ int prior_frames_skipped,
+ AudioBus* dest,
+ bool is_mixing);
+
// There was an error while playing a buffer. Audio source cannot be
// destroyed yet. No direct action needed by the AudioStream, but it is
// a good place to stop accumulating sound data since is is likely that
diff --git a/chromium/media/audio/audio_manager_base.cc b/chromium/media/audio/audio_manager_base.cc
index 46f69f6455d..7090de5b88d 100644
--- a/chromium/media/audio/audio_manager_base.cc
+++ b/chromium/media/audio/audio_manager_base.cc
@@ -9,6 +9,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
+#include "base/memory/raw_ptr.h"
#include "base/metrics/histogram_macros.h"
#include "base/observer_list.h"
#include "base/strings/string_number_conversions.h"
@@ -27,7 +28,6 @@
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "base/logging.h"
-#include "build/chromeos_buildflags.h"
#include "media/audio/audio_input_stream_data_interceptor.h"
namespace media {
@@ -115,7 +115,7 @@ class AudioManagerBase::CompareByParams {
}
private:
- const DispatcherParams* dispatcher_;
+ raw_ptr<const DispatcherParams> dispatcher_;
};
AudioManagerBase::AudioManagerBase(std::unique_ptr<AudioThread> audio_thread,
@@ -231,6 +231,7 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
case AudioParameters::AUDIO_BITSTREAM_EAC3:
case AudioParameters::AUDIO_BITSTREAM_DTS:
case AudioParameters::AUDIO_BITSTREAM_DTS_HD:
+ case AudioParameters::AUDIO_BITSTREAM_DTSX_P2:
case AudioParameters::AUDIO_BITSTREAM_IEC61937:
stream = MakeBitstreamOutputStream(params, device_id, log_callback);
break;
@@ -347,7 +348,7 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
std::string output_device_id =
AudioDeviceDescription::IsDefaultDevice(device_id)
?
-#if BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#if BUILDFLAG(IS_CHROMEOS)
// On ChromeOS, it is expected that, if the default device is given,
// no specific device ID should be used since the actual output device
// should change dynamically if the system default device changes.
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.cc b/chromium/media/audio/audio_output_dispatcher_impl.cc
index 91b495f3a0e..72f824bcd7b 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.cc
+++ b/chromium/media/audio/audio_output_dispatcher_impl.cc
@@ -40,8 +40,8 @@ AudioOutputDispatcherImpl::~AudioOutputDispatcherImpl() {
DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
// Stop all active streams.
- for (auto& iter : proxy_to_physical_map_) {
- StopPhysicalStream(iter.second);
+ for (const auto& [proxy, stream] : proxy_to_physical_map_) {
+ StopPhysicalStream(stream);
}
// Close all idle streams immediately. The |close_timer_| will handle
diff --git a/chromium/media/audio/cras/audio_manager_chromeos.cc b/chromium/media/audio/cras/audio_manager_chromeos.cc
index f5a3a98b186..50bdd67e2ce 100644
--- a/chromium/media/audio/cras/audio_manager_chromeos.cc
+++ b/chromium/media/audio/cras/audio_manager_chromeos.cc
@@ -235,15 +235,15 @@ void AudioManagerChromeOS::GetAudioDeviceNamesImpl(
dev_idx_map[dev_index_of(device.id)].push_back(device);
}
- for (const auto& item : dev_idx_map) {
- if (1 == item.second.size()) {
- const AudioDevice& device = item.second.front();
+ for (const auto& [dev_idx, device_list] : dev_idx_map) {
+ if (1 == device_list.size()) {
+ const AudioDevice& device = device_list.front();
device_names->emplace_back(device.display_name,
base::NumberToString(device.id));
} else {
// Create virtual device name for audio nodes that share the same device
// index.
- ProcessVirtualDeviceName(device_names, item.second);
+ ProcessVirtualDeviceName(device_names, device_list);
}
}
}
diff --git a/chromium/media/audio/cras/audio_manager_chromeos_unittest.cc b/chromium/media/audio/cras/audio_manager_chromeos_unittest.cc
index d860cfac828..440f790e5fa 100644
--- a/chromium/media/audio/cras/audio_manager_chromeos_unittest.cc
+++ b/chromium/media/audio/cras/audio_manager_chromeos_unittest.cc
@@ -4,6 +4,7 @@
#include "media/audio/cras/audio_manager_chromeos.h"
#include "base/test/scoped_feature_list.h"
+#include "build/chromeos_buildflags.h"
#include "media/audio/audio_features.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/media/audio/fuchsia/DIR_METADATA b/chromium/media/audio/fuchsia/DIR_METADATA
index 5b3985ecc8b..7bba048979f 100644
--- a/chromium/media/audio/fuchsia/DIR_METADATA
+++ b/chromium/media/audio/fuchsia/DIR_METADATA
@@ -7,4 +7,3 @@
# https://source.chromium.org/chromium/infra/infra/+/main:go/src/infra/tools/dirmd/proto/dir_metadata.proto
mixins: "//build/fuchsia/COMMON_METADATA"
-os: FUCHSIA \ No newline at end of file
diff --git a/chromium/media/audio/fuchsia/audio_manager_fuchsia.cc b/chromium/media/audio/fuchsia/audio_manager_fuchsia.cc
index c7c5f94552f..d52af7ab995 100644
--- a/chromium/media/audio/fuchsia/audio_manager_fuchsia.cc
+++ b/chromium/media/audio/fuchsia/audio_manager_fuchsia.cc
@@ -67,16 +67,22 @@ AudioParameters AudioManagerFuchsia::GetInputStreamParameters(
//
// Use 16kHz sample rate with 10ms buffer, which is consistent with
// the default configuration used in the AudioCapturer implementation.
- // Assume that the system-provided AudioConsumer supports echo cancellation,
- // noise suppression and automatic gain control.
const size_t kSampleRate = 16000;
const size_t kPeriodSamples = AudioTimestampHelper::TimeToFrames(
base::kAudioSchedulingPeriod, kSampleRate);
AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
CHANNEL_LAYOUT_MONO, kSampleRate, kPeriodSamples);
- params.set_effects(AudioParameters::ECHO_CANCELLER |
- AudioParameters::NOISE_SUPPRESSION |
- AudioParameters::AUTOMATIC_GAIN_CONTROL);
+
+ // Some AudioCapturer implementations support echo cancellation, noise
+ // suppression and automatic gain control, but currently there is no way to
+ // detect it. For now the corresponding effect flags are set based on a
+ // command line switch.
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kAudioCapturerWithEchoCancellation)) {
+ params.set_effects(AudioParameters::ECHO_CANCELLER |
+ AudioParameters::NOISE_SUPPRESSION |
+ AudioParameters::AUTOMATIC_GAIN_CONTROL);
+ }
return params;
}
diff --git a/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.cc b/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.cc
index d2f112e6685..3ec94836325 100644
--- a/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.cc
+++ b/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.cc
@@ -24,6 +24,8 @@ const uint32_t kBufferId = 0;
fuchsia::media::AudioRenderUsage GetStreamUsage(
const AudioParameters& parameters) {
+ // TODO(crbug.com/1253010) In WebEngine: use `audio_renderer_usage` from the
+ // `FrameMediaSettings` for the current web frame.
if (parameters.latency_tag() == AudioLatency::LATENCY_RTC)
return fuchsia::media::AudioRenderUsage::COMMUNICATION;
return fuchsia::media::AudioRenderUsage::MEDIA;
diff --git a/chromium/media/audio/mac/audio_auhal_mac.cc b/chromium/media/audio/mac/audio_auhal_mac.cc
index 0c7c8d3ea94..86fd0d75e34 100644
--- a/chromium/media/audio/mac/audio_auhal_mac.cc
+++ b/chromium/media/audio/mac/audio_auhal_mac.cc
@@ -75,8 +75,8 @@ static bool SetStreamFormat(int channels,
AudioStreamBasicDescription* format) {
format->mSampleRate = sample_rate;
format->mFormatID = kAudioFormatLinearPCM;
- format->mFormatFlags =
- kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
+ format->mFormatFlags = AudioFormatFlags{kAudioFormatFlagsNativeFloatPacked} |
+ kLinearPCMFormatFlagIsNonInterleaved;
format->mBytesPerPacket = sizeof(Float32);
format->mFramesPerPacket = 1;
format->mBytesPerFrame = sizeof(Float32);
@@ -169,9 +169,7 @@ AUHALStream::AUHALStream(AudioManagerMac* manager,
current_lost_frames_(0),
last_sample_time_(0.0),
last_number_of_frames_(0),
- total_lost_frames_(0),
- largest_glitch_frames_(0),
- glitches_detected_(0),
+ glitch_reporter_(SystemGlitchReporter::StreamType::kRender),
log_callback_(log_callback) {
// We must have a manager.
DCHECK(manager_);
@@ -418,19 +416,13 @@ void AUHALStream::UpdatePlayoutTimestamp(const AudioTimeStamp* timestamp) {
if (last_sample_time_) {
DCHECK_NE(0U, last_number_of_frames_);
- UInt32 diff =
+ UInt32 sample_time_diff =
static_cast<UInt32>(timestamp->mSampleTime - last_sample_time_);
- if (diff != last_number_of_frames_) {
- DCHECK_GT(diff, last_number_of_frames_);
- // We're being asked to render samples post what we expected. Update the
- // glitch count etc and keep a record of the largest glitch.
- auto lost_frames = diff - last_number_of_frames_;
- total_lost_frames_ += lost_frames;
- current_lost_frames_ += lost_frames;
- if (lost_frames > largest_glitch_frames_)
- largest_glitch_frames_ = lost_frames;
- ++glitches_detected_;
- }
+ DCHECK_GE(sample_time_diff, last_number_of_frames_);
+ UInt32 lost_frames = sample_time_diff - last_number_of_frames_;
+ base::TimeDelta lost_audio_duration =
+ AudioTimestampHelper::FramesToTime(lost_frames, params_.sample_rate());
+ glitch_reporter_.UpdateStats(lost_audio_duration);
}
// Store the last sample time for use next time we get called back.
@@ -446,36 +438,25 @@ void AUHALStream::ReportAndResetStats() {
// A value of 0 indicates that we got the buffer size we asked for.
UMA_HISTOGRAM_COUNTS_1M("Media.Audio.Render.FramesRequested",
number_of_frames_requested_);
- // Even if there aren't any glitches, we want to record it to get a feel for
- // how often we get no glitches vs the alternative.
- UMA_HISTOGRAM_CUSTOM_COUNTS("Media.Audio.Render.Glitches", glitches_detected_,
- 1, 999999, 100);
- auto lost_frames_ms = (total_lost_frames_ * 1000) / params_.sample_rate();
+ SystemGlitchReporter::Stats stats =
+ glitch_reporter_.GetLongTermStatsAndReset();
std::string log_message = base::StringPrintf(
- "AU out: Total glitches=%d. Total frames lost=%d (%d ms).",
- glitches_detected_, total_lost_frames_, lost_frames_ms);
+ "AU out: (num_glitches_detected=[%d], cumulative_audio_lost=[%llu ms], "
+ "largest_glitch=[%llu ms])",
+ stats.glitches_detected, stats.total_glitch_duration.InMilliseconds(),
+ stats.largest_glitch_duration.InMilliseconds());
if (!log_callback_.is_null())
log_callback_.Run(log_message);
-
- if (glitches_detected_ != 0) {
- UMA_HISTOGRAM_COUNTS_1M("Media.Audio.Render.LostFramesInMs",
- lost_frames_ms);
- auto largest_glitch_ms =
- (largest_glitch_frames_ * 1000) / params_.sample_rate();
- UMA_HISTOGRAM_COUNTS_1M("Media.Audio.Render.LargestGlitchMs",
- largest_glitch_ms);
+ if (stats.glitches_detected > 0) {
DLOG(WARNING) << log_message;
}
number_of_frames_requested_ = 0;
- glitches_detected_ = 0;
last_sample_time_ = 0;
last_number_of_frames_ = 0;
- total_lost_frames_ = 0;
- largest_glitch_frames_ = 0;
}
bool AUHALStream::ConfigureAUHAL() {
diff --git a/chromium/media/audio/mac/audio_auhal_mac.h b/chromium/media/audio/mac/audio_auhal_mac.h
index d5d1903b813..32bf0833eb3 100644
--- a/chromium/media/audio/mac/audio_auhal_mac.h
+++ b/chromium/media/audio/mac/audio_auhal_mac.h
@@ -33,6 +33,7 @@
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
#include "media/audio/mac/scoped_audio_unit.h"
+#include "media/audio/system_glitch_reporter.h"
#include "media/base/audio_parameters.h"
namespace media {
@@ -200,9 +201,10 @@ class AUHALStream : public AudioOutputStream {
// NOTE: Float64 and UInt32 types are used for native API compatibility.
Float64 last_sample_time_ GUARDED_BY(lock_);
UInt32 last_number_of_frames_ GUARDED_BY(lock_);
- UInt32 total_lost_frames_ GUARDED_BY(lock_);
- UInt32 largest_glitch_frames_ GUARDED_BY(lock_);
- int glitches_detected_ GUARDED_BY(lock_);
+
+ // Used to aggregate and report glitch metrics to UMA (periodically) and to
+ // text logs (when a stream ends).
+ SystemGlitchReporter glitch_reporter_ GUARDED_BY(lock_);
// Used to defer Start() to workaround http://crbug.com/160920.
base::CancelableOnceClosure deferred_start_cb_;
diff --git a/chromium/media/audio/mac/audio_input_mac.h b/chromium/media/audio/mac/audio_input_mac.h
index cc5b72ea9a9..302e34d92c6 100644
--- a/chromium/media/audio/mac/audio_input_mac.h
+++ b/chromium/media/audio/mac/audio_input_mac.h
@@ -24,8 +24,8 @@ namespace media {
class AudioBus;
class AudioManagerMac;
-// Implementation of AudioInputStream for Mac OS X using the audio queue service
-// present in OS 10.5 and later. Design reflects PCMQueueOutAudioOutputStream.
+// Implementation of AudioInputStream for macOS using the Audio Queue service
+// in Audio Toolbox. Design reflects PCMQueueOutAudioOutputStream.
class PCMQueueInAudioInputStream : public AudioInputStream {
public:
// Parameters as per AudioManager::MakeAudioInputStream.
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.cc b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
index 0e842caf7b6..de981fa288c 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac.cc
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
@@ -228,9 +228,7 @@ AUAudioInputStream::AUAudioInputStream(
output_device_id_for_aec_(kAudioObjectUnknown),
last_sample_time_(0.0),
last_number_of_frames_(0),
- total_lost_frames_(0),
- largest_glitch_frames_(0),
- glitches_detected_(0),
+ glitch_reporter_(SystemGlitchReporter::StreamType::kCapture),
log_callback_(log_callback) {
DCHECK(manager_);
CHECK(log_callback_ != AudioManager::LogCallback());
@@ -1391,18 +1389,13 @@ void AUAudioInputStream::UpdateCaptureTimestamp(
if (last_sample_time_) {
DCHECK_NE(0U, last_number_of_frames_);
- UInt32 diff =
+ UInt32 sample_time_diff =
static_cast<UInt32>(timestamp->mSampleTime - last_sample_time_);
- if (diff != last_number_of_frames_) {
- DCHECK_GT(diff, last_number_of_frames_);
- // We were given samples post what we expected. Update the glitch count
- // etc. and keep a record of the largest glitch.
- auto lost_frames = diff - last_number_of_frames_;
- total_lost_frames_ += lost_frames;
- if (lost_frames > largest_glitch_frames_)
- largest_glitch_frames_ = lost_frames;
- ++glitches_detected_;
- }
+ DCHECK_GE(sample_time_diff, last_number_of_frames_);
+ UInt32 lost_frames = sample_time_diff - last_number_of_frames_;
+ base::TimeDelta lost_audio_duration = AudioTimestampHelper::FramesToTime(
+ lost_frames, input_params_.sample_rate());
+ glitch_reporter_.UpdateStats(lost_audio_duration);
}
// Store the last sample time for use next time we get called back.
@@ -1416,33 +1409,24 @@ void AUAudioInputStream::ReportAndResetStats() {
// A value of 0 indicates that we got the buffer size we asked for.
UMA_HISTOGRAM_COUNTS_10000("Media.Audio.Capture.FramesProvided",
number_of_frames_provided_);
- // Even if there aren't any glitches, we want to record it to get a feel for
- // how often we get no glitches vs the alternative.
- UMA_HISTOGRAM_COUNTS_1M("Media.Audio.Capture.Glitches", glitches_detected_);
- auto lost_frames_ms = (total_lost_frames_ * 1000) / format_.mSampleRate;
+ SystemGlitchReporter::Stats stats =
+ glitch_reporter_.GetLongTermStatsAndReset();
+
std::string log_message = base::StringPrintf(
- "AU in: Total glitches=%d. Total frames lost=%d (%.0lf ms).",
- glitches_detected_, total_lost_frames_, lost_frames_ms);
- log_callback_.Run(log_message);
+ "AU in: (num_glitches_detected=[%d], cumulative_audio_lost=[%llu ms], "
+ "largest_glitch=[%llu ms])",
+ stats.glitches_detected, stats.total_glitch_duration.InMilliseconds(),
+ stats.largest_glitch_duration.InMilliseconds());
- if (glitches_detected_ != 0) {
- UMA_HISTOGRAM_LONG_TIMES("Media.Audio.Capture.LostFramesInMs",
- base::Milliseconds(lost_frames_ms));
- auto largest_glitch_ms =
- (largest_glitch_frames_ * 1000) / format_.mSampleRate;
- UMA_HISTOGRAM_CUSTOM_TIMES("Media.Audio.Capture.LargestGlitchMs",
- base::Milliseconds(largest_glitch_ms),
- base::Milliseconds(1), base::Minutes(1), 50);
+ log_callback_.Run(log_message);
+ if (stats.glitches_detected != 0) {
DLOG(WARNING) << log_message;
}
number_of_frames_provided_ = 0;
- glitches_detected_ = 0;
last_sample_time_ = 0;
last_number_of_frames_ = 0;
- total_lost_frames_ = 0;
- largest_glitch_frames_ = 0;
}
// TODO(ossu): Ideally, we'd just use the mono stream directly. However, since
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.h b/chromium/media/audio/mac/audio_low_latency_input_mac.h
index 9e5b3a2f88b..4002c9b05b7 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac.h
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.h
@@ -50,6 +50,7 @@
#include "media/audio/agc_audio_stream.h"
#include "media/audio/audio_io.h"
#include "media/audio/mac/audio_manager_mac.h"
+#include "media/audio/system_glitch_reporter.h"
#include "media/base/audio_block_fifo.h"
#include "media/base/audio_parameters.h"
@@ -268,9 +269,10 @@ class MEDIA_EXPORT AUAudioInputStream
// NOTE: Float64 and UInt32 types are used for native API compatibility.
Float64 last_sample_time_;
UInt32 last_number_of_frames_;
- UInt32 total_lost_frames_;
- UInt32 largest_glitch_frames_;
- int glitches_detected_;
+
+ // Used to aggregate and report glitch metrics to UMA (periodically) and to
+ // text logs (when a stream ends).
+ SystemGlitchReporter glitch_reporter_;
// Callback to send statistics info.
AudioManager::LogCallback log_callback_;
diff --git a/chromium/media/audio/mac/audio_manager_mac.cc b/chromium/media/audio/mac/audio_manager_mac.cc
index b8805f91748..eb0aff29b2f 100644
--- a/chromium/media/audio/mac/audio_manager_mac.cc
+++ b/chromium/media/audio/mac/audio_manager_mac.cc
@@ -643,13 +643,11 @@ AudioParameters AudioManagerMac::GetInputStreamParameters(
params.set_effects(AudioParameters::NOISE_SUPPRESSION);
}
- // VoiceProcessingIO is only supported on MacOS 10.12 and cannot be used on
- // aggregate devices, since it creates an aggregate device itself. It also
- // only runs in mono, but we allow upmixing to stereo since we can't claim a
- // device works either in stereo without echo cancellation or mono with echo
- // cancellation.
- if (base::mac::IsAtLeastOS10_12() &&
- (params.channel_layout() == CHANNEL_LAYOUT_MONO ||
+ // VoiceProcessingIO cannot be used on aggregate devices, since it creates an
+ // aggregate device itself. It also only runs in mono, but we allow upmixing
+ // to stereo since we can't claim a device works either in stereo without echo
+ // cancellation or mono with echo cancellation.
+ if ((params.channel_layout() == CHANNEL_LAYOUT_MONO ||
params.channel_layout() == CHANNEL_LAYOUT_STEREO) &&
core_audio_mac::GetDeviceTransportType(device) !=
kAudioDeviceTransportTypeAggregate) {
diff --git a/chromium/media/audio/system_glitch_reporter.cc b/chromium/media/audio/system_glitch_reporter.cc
new file mode 100644
index 00000000000..fea682ccb61
--- /dev/null
+++ b/chromium/media/audio/system_glitch_reporter.cc
@@ -0,0 +1,93 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/system_glitch_reporter.h"
+
+#include "base/metrics/histogram_functions.h"
+#include "base/trace_event/trace_event.h"
+
+namespace media {
+
+namespace {
+// Logs once every 10s, assuming 10ms buffers.
+constexpr static int kCallbacksPerLogPeriod = 1000;
+} // namespace
+
+SystemGlitchReporter::SystemGlitchReporter(StreamType stream_type)
+ : num_glitches_detected_metric_name_(stream_type == StreamType::kCapture
+ ? "Media.Audio.Capture.Glitches2"
+ : "Media.Audio.Render.Glitches2"),
+ total_glitch_duration_metric_name_(
+ stream_type == StreamType::kCapture
+ ? "Media.Audio.Capture.LostFramesInMs2"
+ : "Media.Audio.Render.LostFramesInMs2"),
+ largest_glitch_duration_metric_name_(
+ stream_type == StreamType::kCapture
+ ? "Media.Audio.Capture.LargestGlitchMs2"
+ : "Media.Audio.Render.LargestGlitchMs2"),
+ early_glitch_detected_metric_name_(
+ stream_type == StreamType::kCapture
+ ? "Media.Audio.Capture.EarlyGlitchDetected"
+ : "Media.Audio.Render.EarlyGlitchDetected") {}
+
+SystemGlitchReporter::~SystemGlitchReporter() = default;
+
+SystemGlitchReporter::Stats SystemGlitchReporter::GetLongTermStatsAndReset() {
+ if (callback_count_ > 0) {
+ base::UmaHistogramBoolean(early_glitch_detected_metric_name_,
+ early_glitch_detected_);
+ }
+
+ Stats result = long_term_stats_;
+ callback_count_ = 0;
+ short_term_stats_ = {};
+ long_term_stats_ = {};
+ early_glitch_detected_ = false;
+ return result;
+}
+
+void SystemGlitchReporter::UpdateStats(base::TimeDelta glitch_duration) {
+ ++callback_count_;
+
+ if (glitch_duration.is_positive()) {
+ TRACE_EVENT_INSTANT1("audio", "OsGlitchDetected", TRACE_EVENT_SCOPE_THREAD,
+ "glitch_duration_ms",
+ glitch_duration.InMilliseconds());
+
+ if (callback_count_ <= kCallbacksPerLogPeriod)
+ early_glitch_detected_ = true;
+
+ ++short_term_stats_.glitches_detected;
+ ++long_term_stats_.glitches_detected;
+
+ short_term_stats_.total_glitch_duration += glitch_duration;
+ long_term_stats_.total_glitch_duration += glitch_duration;
+
+ short_term_stats_.largest_glitch_duration =
+ std::max(short_term_stats_.largest_glitch_duration, glitch_duration);
+ long_term_stats_.largest_glitch_duration =
+ std::max(long_term_stats_.largest_glitch_duration, glitch_duration);
+ }
+
+ if (callback_count_ % kCallbacksPerLogPeriod != 0)
+ return;
+
+ // We record the glitch count even if there aren't any glitches, to get a
+ // feel for how often we get no glitches vs the alternative.
+ base::UmaHistogramCounts1000(num_glitches_detected_metric_name_,
+ short_term_stats_.glitches_detected);
+
+ if (short_term_stats_.glitches_detected != 0) {
+ base::UmaHistogramCounts1M(
+ total_glitch_duration_metric_name_,
+ short_term_stats_.total_glitch_duration.InMilliseconds());
+ base::UmaHistogramCounts1M(
+ largest_glitch_duration_metric_name_,
+ short_term_stats_.largest_glitch_duration.InMilliseconds());
+ }
+
+ short_term_stats_ = {};
+}
+
+} // namespace media
diff --git a/chromium/media/audio/system_glitch_reporter.h b/chromium/media/audio/system_glitch_reporter.h
new file mode 100644
index 00000000000..02cddf84c30
--- /dev/null
+++ b/chromium/media/audio/system_glitch_reporter.h
@@ -0,0 +1,62 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SYSTEM_GLITCH_REPORTER_H_
+#define MEDIA_AUDIO_SYSTEM_GLITCH_REPORTER_H_
+
+#include <string>
+
+#include "base/time/time.h"
+
+namespace media {
+
+// Aggregates and reports glitch statistics.
+// Stats are aggregated and reported to UMA periodically every 1000th call to
+// UpdateStats(), and longer-term (manually reset) stats are available via
+// GetLongTermStatsAndReset().
+class SystemGlitchReporter {
+ public:
+ // Used to determine which UMA metrics to log.
+ enum class StreamType { kCapture, kRender };
+
+ struct Stats {
+ int glitches_detected = 0;
+ base::TimeDelta total_glitch_duration;
+ base::TimeDelta largest_glitch_duration;
+ };
+
+ SystemGlitchReporter(StreamType stream_type);
+
+ ~SystemGlitchReporter();
+
+ // Resets all state: both periodic and long-term stats.
+ Stats GetLongTermStatsAndReset();
+
+ // Updates statistics and metric reporting counters. Any non-zero
+ // |glitch_duration| is considered a glitch.
+ void UpdateStats(base::TimeDelta glitch_duration);
+
+ private:
+ const std::string num_glitches_detected_metric_name_;
+ const std::string total_glitch_duration_metric_name_;
+ const std::string largest_glitch_duration_metric_name_;
+ const std::string early_glitch_detected_metric_name_;
+
+ int callback_count_ = 0;
+
+ // Stats reported periodically to UMA. Resets every 1000 callbacks and on
+ // GetLongTermStatsAndReset().
+ Stats short_term_stats_;
+
+ // Stats that only reset on GetLongTermStatsAndReset().
+ Stats long_term_stats_;
+
+  // Long-term metric reported in GetLongTermStatsAndReset().
+ // Records whether any glitch occurred during the first 1000 callbacks.
+ bool early_glitch_detected_ = false;
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_SYSTEM_GLITCH_REPORTER_H_
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.cc b/chromium/media/audio/win/audio_low_latency_input_win.cc
index 8390c8113df..245a63c562e 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win.cc
@@ -251,12 +251,57 @@ bool InitializeUWPSupport() {
} // namespace
+// Counts how often an OS capture callback reports a data discontinuity and logs
+// it as a UMA histogram.
+class WASAPIAudioInputStream::DataDiscontinuityReporter {
+ public:
+ // Logs once every 10s, assuming 10ms buffers.
+ constexpr static int kCallbacksPerLogPeriod = 1000;
+
+ DataDiscontinuityReporter() {}
+
+ int GetLongTermDiscontinuityCountAndReset() {
+ int long_term_count = data_discontinuity_long_term_count_;
+ callback_count_ = 0;
+ data_discontinuity_short_term_count_ = 0;
+ data_discontinuity_long_term_count_ = 0;
+ return long_term_count;
+ }
+
+ void Log(bool observed_data_discontinuity) {
+ ++callback_count_;
+ if (observed_data_discontinuity) {
+ ++data_discontinuity_short_term_count_;
+ ++data_discontinuity_long_term_count_;
+ }
+
+ if (callback_count_ % kCallbacksPerLogPeriod)
+ return;
+
+ // TODO(https://crbug.com/825744): It can be possible to replace
+ // "Media.Audio.Capture.Glitches2" with this new (simplified) metric
+ // instead.
+ base::UmaHistogramCounts1000("Media.Audio.Capture.Win.Glitches2",
+ data_discontinuity_short_term_count_);
+
+ data_discontinuity_short_term_count_ = 0;
+ }
+
+ private:
+ int callback_count_ = 0;
+ int data_discontinuity_short_term_count_ = 0;
+ int data_discontinuity_long_term_count_ = 0;
+};
+
WASAPIAudioInputStream::WASAPIAudioInputStream(
AudioManagerWin* manager,
const AudioParameters& params,
const std::string& device_id,
AudioManager::LogCallback log_callback)
: manager_(manager),
+ glitch_reporter_(SystemGlitchReporter::StreamType::kCapture),
+ data_discontinuity_reporter_(
+ std::make_unique<DataDiscontinuityReporter>()),
device_id_(device_id),
log_callback_(std::move(log_callback)) {
DCHECK(manager_);
@@ -846,16 +891,17 @@ void WASAPIAudioInputStream::PullCaptureDataAndPushToSink() {
// The data in the packet is not correlated with the previous packet's
// device position; this is possibly due to a stream state transition or
// timing glitch. Note that, usage of this flag was added after the existing
- // glitch detection in UpdateGlitchCount() and it will be used as a
- // supplementary scheme initially.
+ // glitch detection and it will be used as a supplementary scheme initially.
// The behavior of the AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY flag is
// undefined on the application's first call to GetBuffer after Start and
// Windows 7 or later is required for support.
- if (device_position > 0 && flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) {
+ const bool observed_data_discontinuity =
+ (device_position > 0 && flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY);
+ if (observed_data_discontinuity) {
LOG(WARNING) << "WAIS::" << __func__
<< " => (WARNING: AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY)";
- ++num_data_discontinuity_warnings_;
}
+ data_discontinuity_reporter_->Log(observed_data_discontinuity);
// The time at which the device's stream position was recorded is uncertain.
// Thus, the client might be unable to accurately set a time stamp for the
@@ -881,7 +927,16 @@ void WASAPIAudioInputStream::PullCaptureDataAndPushToSink() {
// If the device position has not changed we assume this data belongs to the
// previous chunk, and only update the expected next device position.
if (device_position != last_device_position) {
- UpdateGlitchCount(device_position);
+ if (expected_next_device_position_ != 0) {
+ base::TimeDelta glitch_duration;
+ if (device_position > expected_next_device_position_) {
+ glitch_duration = AudioTimestampHelper::FramesToTime(
+ device_position - expected_next_device_position_,
+ input_format_.Format.nSamplesPerSec);
+ }
+ glitch_reporter_.UpdateStats(glitch_duration);
+ }
+
last_device_position = device_position;
expected_next_device_position_ = device_position + num_frames_to_read;
} else {
@@ -1598,10 +1653,10 @@ void WASAPIAudioInputStream::MaybeReportFormatRelatedInitError(
? converter_.get()
? FormatRelatedInitError::kUnsupportedFormatWithFormatConversion
: FormatRelatedInitError::kUnsupportedFormat
- // Otherwise |hr| == E_INVALIDARG.
- : converter_.get()
- ? FormatRelatedInitError::kInvalidArgumentWithFormatConversion
- : FormatRelatedInitError::kInvalidArgument;
+ // Otherwise |hr| == E_INVALIDARG.
+ : converter_.get()
+ ? FormatRelatedInitError::kInvalidArgumentWithFormatConversion
+ : FormatRelatedInitError::kInvalidArgument;
base::UmaHistogramEnumeration(
"Media.Audio.Capture.Win.InitError.FormatRelated", format_related_error,
FormatRelatedInitError::kCount);
@@ -1613,41 +1668,20 @@ double WASAPIAudioInputStream::ProvideInput(AudioBus* audio_bus,
return 1.0;
}
-void WASAPIAudioInputStream::UpdateGlitchCount(UINT64 device_position) {
- if (expected_next_device_position_ != 0) {
- if (device_position > expected_next_device_position_) {
- ++total_glitches_;
- auto lost_frames = device_position - expected_next_device_position_;
- total_lost_frames_ += lost_frames;
- if (lost_frames > largest_glitch_frames_)
- largest_glitch_frames_ = lost_frames;
- }
- }
-}
-
void WASAPIAudioInputStream::ReportAndResetGlitchStats() {
- UMA_HISTOGRAM_COUNTS_1M("Media.Audio.Capture.Glitches", total_glitches_);
- double lost_frames_ms =
- (total_lost_frames_ * 1000) / input_format_.Format.nSamplesPerSec;
+ SystemGlitchReporter::Stats stats =
+ glitch_reporter_.GetLongTermStatsAndReset();
SendLogMessage(
- "%s => (total glitches=[%d], total frames lost=[%llu/%.0lf ms])",
- __func__, total_glitches_, total_lost_frames_, lost_frames_ms);
- if (total_glitches_ != 0) {
- UMA_HISTOGRAM_LONG_TIMES("Media.Audio.Capture.LostFramesInMs",
- base::Milliseconds(lost_frames_ms));
- int64_t largest_glitch_ms =
- (largest_glitch_frames_ * 1000) / input_format_.Format.nSamplesPerSec;
- UMA_HISTOGRAM_CUSTOM_TIMES("Media.Audio.Capture.LargestGlitchMs",
- base::Milliseconds(largest_glitch_ms),
- base::Milliseconds(1), base::Minutes(1), 50);
- }
-
- // TODO(https://crbug.com/825744): It can be possible to replace
- // "Media.Audio.Capture.Glitches" with this new (simplified) metric instead.
- base::UmaHistogramCounts1M("Media.Audio.Capture.Win.Glitches",
- num_data_discontinuity_warnings_);
- SendLogMessage("%s => (discontinuity warnings=[%" PRIu64 "])", __func__,
- num_data_discontinuity_warnings_);
+ "%s => (num_glitches_detected=[%d], cumulative_audio_lost=[%llu ms], "
+ "largest_glitch=[%llu ms])",
+ __func__, stats.glitches_detected,
+ stats.total_glitch_duration.InMilliseconds(),
+ stats.largest_glitch_duration.InMilliseconds());
+
+ int num_data_discontinuities =
+ data_discontinuity_reporter_->GetLongTermDiscontinuityCountAndReset();
+ SendLogMessage("%s => (discontinuity warnings=[%d])", __func__,
+ num_data_discontinuities);
SendLogMessage("%s => (timstamp errors=[%" PRIu64 "])", __func__,
num_timestamp_errors_);
if (num_timestamp_errors_ > 0) {
@@ -1657,10 +1691,6 @@ void WASAPIAudioInputStream::ReportAndResetGlitchStats() {
}
expected_next_device_position_ = 0;
- total_glitches_ = 0;
- total_lost_frames_ = 0;
- largest_glitch_frames_ = 0;
- num_data_discontinuity_warnings_ = 0;
num_timestamp_errors_ = 0;
}
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.h b/chromium/media/audio/win/audio_low_latency_input_win.h
index 05f5f873f44..caef8a2e2da 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.h
+++ b/chromium/media/audio/win/audio_low_latency_input_win.h
@@ -78,6 +78,7 @@
#include "base/win/scoped_com_initializer.h"
#include "base/win/scoped_handle.h"
#include "media/audio/agc_audio_stream.h"
+#include "media/audio/system_glitch_reporter.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/base/audio_converter.h"
#include "media/base/audio_parameters.h"
@@ -147,6 +148,8 @@ class MEDIA_EXPORT WASAPIAudioInputStream
bool started() const { return started_; }
private:
+ class DataDiscontinuityReporter;
+
void SendLogMessage(const char* format, ...) PRINTF_FORMAT(2, 3);
// DelegateSimpleThread::Delegate implementation.
@@ -199,15 +202,20 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// AudioConverter::InputCallback implementation.
double ProvideInput(AudioBus* audio_bus, uint32_t frames_delayed) override;
- // Detects and counts glitches based on |device_position|.
- void UpdateGlitchCount(UINT64 device_position);
-
// Reports glitch stats and resets associated variables.
void ReportAndResetGlitchStats();
// Our creator, the audio manager needs to be notified when we close.
const raw_ptr<AudioManagerWin> manager_;
+ // Used to aggregate and report glitch metrics to UMA (periodically) and to
+ // text logs (when a stream ends).
+ SystemGlitchReporter glitch_reporter_;
+
+ // Used to track and log data discontinuity warnings from
+ // IAudioCaptureClient::GetBuffer.
+ std::unique_ptr<DataDiscontinuityReporter> data_discontinuity_reporter_;
+
// Capturing is driven by this thread (which has no message loop).
// All OnData() callbacks will be called from this thread.
std::unique_ptr<base::DelegateSimpleThread> capture_thread_;
@@ -313,12 +321,8 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// For detecting and reporting glitches.
UINT64 expected_next_device_position_ = 0;
- int total_glitches_ = 0;
- UINT64 total_lost_frames_ = 0;
- UINT64 largest_glitch_frames_ = 0;
// Tracks error messages from IAudioCaptureClient::GetBuffer.
- UINT64 num_data_discontinuity_warnings_ = 0;
UINT64 num_timestamp_errors_ = 0;
base::TimeTicks record_start_time_;
base::TimeDelta time_until_first_timestamp_error_;
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.cc b/chromium/media/audio/win/audio_low_latency_output_win.cc
index 98a490827b2..5e22cf84053 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win.cc
@@ -87,6 +87,7 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(
AudioManager::LogCallback log_callback)
: creating_thread_id_(base::PlatformThread::CurrentId()),
manager_(manager),
+ glitch_reporter_(SystemGlitchReporter::StreamType::kRender),
format_(),
opened_(false),
volume_(1.0),
@@ -490,8 +491,8 @@ void WASAPIAudioOutputStream::Run() {
// Enable MMCSS to ensure that this thread receives prioritized access to
// CPU resources.
DWORD task_index = 0;
- HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
- &task_index);
+ HANDLE mm_task =
+ avrt::AvSetMmThreadCharacteristics(L"Pro Audio", &task_index);
bool mmcss_is_ok =
(mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
if (!mmcss_is_ok) {
@@ -507,8 +508,8 @@ void WASAPIAudioOutputStream::Run() {
bool playing = true;
bool error = false;
- HANDLE wait_array[] = { stop_render_event_.Get(),
- audio_samples_render_event_.Get() };
+ HANDLE wait_array[] = {stop_render_event_.Get(),
+ audio_samples_render_event_.Get()};
UINT64 device_frequency = 0;
// The device frequency is the frequency generated by the hardware clock in
@@ -582,8 +583,7 @@ bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
// Get the padding value which represents the amount of rendering
// data that is queued up to play in the endpoint buffer.
hr = audio_client_->GetCurrentPadding(&num_queued_frames);
- num_available_frames =
- endpoint_buffer_size_frames_ - num_queued_frames;
+ num_available_frames = endpoint_buffer_size_frames_ - num_queued_frames;
if (FAILED(hr)) {
RecordAudioFailure(kRenderFailureHistogram, hr);
LOG(ERROR) << "WAOS::" << __func__
@@ -628,8 +628,7 @@ bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
for (size_t n = 0; n < num_packets; ++n) {
// Grab all available space in the rendering endpoint buffer
// into which the client can write a data packet.
- hr = audio_render_client_->GetBuffer(packet_size_frames_,
- &audio_data);
+ hr = audio_render_client_->GetBuffer(packet_size_frames_, &audio_data);
if (FAILED(hr)) {
RecordAudioFailure(kRenderFailureHistogram, hr);
LOG(ERROR) << "WAOS::" << __func__
@@ -674,17 +673,11 @@ bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
const int64_t qpc_position_diff_us =
qpc_position_us - last_qpc_position_us;
- if (qpc_position_diff_us - position_diff_us > buffer_duration_us / 2) {
- ++num_glitches_detected_;
+ const int64_t gap_duration_us = qpc_position_diff_us - position_diff_us;
- base::TimeDelta glitch_duration =
- base::Microseconds(qpc_position_diff_us - position_diff_us);
-
- if (glitch_duration > largest_glitch_)
- largest_glitch_ = glitch_duration;
-
- cumulative_audio_lost_ += glitch_duration;
- }
+ glitch_reporter_.UpdateStats(gap_duration_us > buffer_duration_us / 2
+ ? base::Microseconds(gap_duration_us)
+ : base::TimeDelta());
}
last_position_ = position;
@@ -727,8 +720,9 @@ bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
// Release the buffer space acquired in the GetBuffer() call.
// Render silence if we were not able to fill up the buffer totally.
- DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
- AUDCLNT_BUFFERFLAGS_SILENT : 0;
+ DWORD flags = (num_filled_bytes < packet_size_bytes_)
+ ? AUDCLNT_BUFFERFLAGS_SILENT
+ : 0;
audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);
num_written_frames_ += packet_size_frames_;
@@ -777,8 +771,8 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
// Calculate new aligned periodicity. Each unit of reference time
// is 100 nanoseconds.
REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
- (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec)
- + 0.5);
+ (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec) +
+ 0.5);
// It is possible to re-activate and re-initialize the audio client
// at this stage but we bail out with an error code instead and
@@ -834,25 +828,14 @@ void WASAPIAudioOutputStream::StopThread() {
}
void WASAPIAudioOutputStream::ReportAndResetStats() {
- // Even if there aren't any glitches, we want to record it to get a feel for
- // how often we get no glitches vs the alternative.
- UMA_HISTOGRAM_CUSTOM_COUNTS("Media.Audio.Render.Glitches",
- num_glitches_detected_, 1, 999999, 100);
- // Don't record these unless there actually was a glitch, though.
- if (num_glitches_detected_ != 0) {
- UMA_HISTOGRAM_COUNTS_1M("Media.Audio.Render.LostFramesInMs",
- cumulative_audio_lost_.InMilliseconds());
- UMA_HISTOGRAM_COUNTS_1M("Media.Audio.Render.LargestGlitchMs",
- largest_glitch_.InMilliseconds());
- }
+ SystemGlitchReporter::Stats stats =
+ glitch_reporter_.GetLongTermStatsAndReset();
SendLogMessage(
"%s => (num_glitches_detected=[%d], cumulative_audio_lost=[%llu ms], "
"largest_glitch=[%llu ms])",
- __func__, num_glitches_detected_, cumulative_audio_lost_.InMilliseconds(),
- largest_glitch_.InMilliseconds());
- num_glitches_detected_ = 0;
- cumulative_audio_lost_ = base::TimeDelta();
- largest_glitch_ = base::TimeDelta();
+ __func__, stats.glitches_detected,
+ stats.total_glitch_duration.InMilliseconds(),
+ stats.largest_glitch_duration.InMilliseconds());
}
void WASAPIAudioOutputStream::OnDeviceChanged() {
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.h b/chromium/media/audio/win/audio_low_latency_output_win.h
index 3e89f682dac..27c9b1ea79c 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.h
+++ b/chromium/media/audio/win/audio_low_latency_output_win.h
@@ -112,6 +112,7 @@
#include "base/win/scoped_com_initializer.h"
#include "base/win/scoped_handle.h"
#include "media/audio/audio_io.h"
+#include "media/audio/system_glitch_reporter.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/base/audio_parameters.h"
#include "media/base/media_export.h"
@@ -122,8 +123,8 @@ class AudioManagerWin;
class AudioSessionEventListener;
// AudioOutputStream implementation using Windows Core Audio APIs.
-class MEDIA_EXPORT WASAPIAudioOutputStream :
- public AudioOutputStream,
+class MEDIA_EXPORT WASAPIAudioOutputStream
+ : public AudioOutputStream,
public base::DelegateSimpleThread::Delegate {
public:
// The ctor takes all the usual parameters, plus |manager| which is the
@@ -193,6 +194,10 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// Our creator, the audio manager needs to be notified when we close.
const raw_ptr<AudioManagerWin> manager_;
+ // Used to aggregate and report glitch metrics to UMA (periodically) and to
+ // text logs (when a stream ends).
+ SystemGlitchReporter glitch_reporter_;
+
// Rendering is driven by this thread (which has no message loop).
// All OnMoreData() callbacks will be called from this thread.
std::unique_ptr<base::DelegateSimpleThread> render_thread_;
@@ -243,15 +248,6 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// The performance counter read during the last call to RenderAudioFromSource
UINT64 last_qpc_position_ = 0;
- // The number of glitches detected while this stream was active.
- int num_glitches_detected_ = 0;
-
- // The approximate amount of audio lost due to glitches.
- base::TimeDelta cumulative_audio_lost_;
-
- // The largest single glitch recorded.
- base::TimeDelta largest_glitch_;
-
// Pointer to the client that will deliver audio samples to be played out.
raw_ptr<AudioSourceCallback> source_;
diff --git a/chromium/media/audio/win/core_audio_util_win.cc b/chromium/media/audio/win/core_audio_util_win.cc
index 1299937db6b..6bd66dec2fc 100644
--- a/chromium/media/audio/win/core_audio_util_win.cc
+++ b/chromium/media/audio/win/core_audio_util_win.cc
@@ -26,6 +26,7 @@
#include "media/audio/audio_device_description.h"
#include "media/audio/audio_features.h"
#include "media/base/media_switches.h"
+#include "media/base/win/mf_helpers.h"
using Microsoft::WRL::ComPtr;
using base::win::ScopedCoMem;
@@ -101,55 +102,6 @@ void LogUMAPreferredOutputParams(UmaLogStep step, HRESULT hr) {
}
}
-// Converts Microsoft's channel configuration to ChannelLayout.
-// This mapping is not perfect but the best we can do given the current
-// ChannelLayout enumerator and the Windows-specific speaker configurations
-// defined in ksmedia.h. Don't assume that the channel ordering in
-// ChannelLayout is exactly the same as the Windows specific configuration.
-// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
-// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
-// speakers are different in these two definitions.
-ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
- switch (config) {
- case KSAUDIO_SPEAKER_MONO:
- DVLOG(2) << "KSAUDIO_SPEAKER_MONO=>CHANNEL_LAYOUT_MONO";
- return CHANNEL_LAYOUT_MONO;
- case KSAUDIO_SPEAKER_STEREO:
- DVLOG(2) << "KSAUDIO_SPEAKER_STEREO=>CHANNEL_LAYOUT_STEREO";
- return CHANNEL_LAYOUT_STEREO;
- case KSAUDIO_SPEAKER_QUAD:
- DVLOG(2) << "KSAUDIO_SPEAKER_QUAD=>CHANNEL_LAYOUT_QUAD";
- return CHANNEL_LAYOUT_QUAD;
- case KSAUDIO_SPEAKER_SURROUND:
- DVLOG(2) << "KSAUDIO_SPEAKER_SURROUND=>CHANNEL_LAYOUT_4_0";
- return CHANNEL_LAYOUT_4_0;
- case KSAUDIO_SPEAKER_5POINT1:
- DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1=>CHANNEL_LAYOUT_5_1_BACK";
- return CHANNEL_LAYOUT_5_1_BACK;
- case KSAUDIO_SPEAKER_5POINT1_SURROUND:
- DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1_SURROUND=>CHANNEL_LAYOUT_5_1";
- return CHANNEL_LAYOUT_5_1;
- case KSAUDIO_SPEAKER_7POINT1:
- DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1=>CHANNEL_LAYOUT_7_1_WIDE";
- return CHANNEL_LAYOUT_7_1_WIDE;
- case KSAUDIO_SPEAKER_7POINT1_SURROUND:
- DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1_SURROUND=>CHANNEL_LAYOUT_7_1";
- return CHANNEL_LAYOUT_7_1;
- case KSAUDIO_SPEAKER_DIRECTOUT:
- // When specifying the wave format for a direct-out stream, an application
- // should set the dwChannelMask member of the WAVEFORMATEXTENSIBLE
- // structure to the value KSAUDIO_SPEAKER_DIRECTOUT, which is zero.
- // A channel mask of zero indicates that no speaker positions are defined.
- // As always, the number of channels in the stream is specified in the
- // Format.nChannels member.
- DVLOG(2) << "KSAUDIO_SPEAKER_DIRECTOUT=>CHANNEL_LAYOUT_DISCRETE";
- return CHANNEL_LAYOUT_DISCRETE;
- default:
- DVLOG(2) << "Unsupported channel configuration: " << config;
- return CHANNEL_LAYOUT_UNSUPPORTED;
- }
-}
-
// TODO(henrika): add mapping for all types in the ChannelLayout enumerator.
ChannelConfig ChannelLayoutToChannelConfig(ChannelLayout layout) {
switch (layout) {
diff --git a/chromium/media/audio/win/core_audio_util_win.h b/chromium/media/audio/win/core_audio_util_win.h
index 6ffb4c9ecdf..0e9a55e6059 100644
--- a/chromium/media/audio/win/core_audio_util_win.h
+++ b/chromium/media/audio/win/core_audio_util_win.h
@@ -25,10 +25,7 @@
namespace media {
-// Represents audio channel configuration constants as understood by Windows.
-// E.g. KSAUDIO_SPEAKER_MONO. For a list of possible values see:
-// http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
-typedef uint32_t ChannelConfig;
+using ChannelConfig = uint32_t;
class MEDIA_EXPORT CoreAudioUtil {
public:
diff --git a/chromium/media/base/BUILD.gn b/chromium/media/base/BUILD.gn
index 619c3bf1995..68d741a7099 100644
--- a/chromium/media/base/BUILD.gn
+++ b/chromium/media/base/BUILD.gn
@@ -270,7 +270,6 @@ source_set("base") {
"routing_token_callback.h",
"sample_rates.cc",
"sample_rates.h",
- "scoped_async_trace.cc",
"scoped_async_trace.h",
"seekable_buffer.cc",
"seekable_buffer.h",
@@ -313,8 +312,6 @@ source_set("base") {
"timestamp_constants.h",
"tuneable.cc",
"tuneable.h",
- "unaligned_shared_memory.cc",
- "unaligned_shared_memory.h",
"use_after_free_checker.h",
"user_input_monitor.cc",
"user_input_monitor.h",
@@ -440,6 +437,7 @@ source_set("base") {
public_deps += [
"//media/base/win:dcomp_texture_wrapper",
"//media/base/win:mf_cdm_proxy",
+ "//media/base/win:overlay_state_observer_subscription",
]
sources += [
"win/mf_feature_checks.cc",
@@ -617,7 +615,6 @@ source_set("unit_tests") {
"text_renderer_unittest.cc",
"time_delta_interpolator_unittest.cc",
"tuneable_unittest.cc",
- "unaligned_shared_memory_unittest.cc",
"user_input_monitor_unittest.cc",
"vector_math_unittest.cc",
"video_aspect_ratio_unittest.cc",
diff --git a/chromium/media/base/android/BUILD.gn b/chromium/media/base/android/BUILD.gn
index 54375dd8305..e4a5e5f573b 100644
--- a/chromium/media/base/android/BUILD.gn
+++ b/chromium/media/base/android/BUILD.gn
@@ -164,6 +164,8 @@ if (is_android) {
":display_java",
":media_java_resources",
"//base:base_java",
+ "//base:jni_java",
+ "//build/android:build_java",
"//third_party/androidx:androidx_annotation_annotation_java",
]
annotation_processor_deps = [ "//base/android/jni_generator:jni_processor" ]
diff --git a/chromium/media/base/android/media_codec_util.cc b/chromium/media/base/android/media_codec_util.cc
index 3b53e0125d9..fe96d28fc6b 100644
--- a/chromium/media/base/android/media_codec_util.cc
+++ b/chromium/media/base/android/media_codec_util.cc
@@ -309,12 +309,8 @@ bool MediaCodecUtil::CanDecode(AudioCodec codec) {
}
// static
-bool MediaCodecUtil::IsH264EncoderAvailable(bool use_codec_list) {
- if (use_codec_list)
- return IsEncoderSupportedByDevice(kAvcMimeType);
-
- // Assume support since Chrome only supports Marshmallow+.
- return true;
+bool MediaCodecUtil::IsH264EncoderAvailable() {
+ return IsEncoderSupportedByDevice(kAvcMimeType);
}
// static
diff --git a/chromium/media/base/android/media_codec_util.h b/chromium/media/base/android/media_codec_util.h
index 1933e9c174b..4afc89f0019 100644
--- a/chromium/media/base/android/media_codec_util.h
+++ b/chromium/media/base/android/media_codec_util.h
@@ -85,10 +85,9 @@ class MEDIA_EXPORT MediaCodecUtil {
// Indicates if the h264 encoder is available on this device.
//
- // WARNING: If |use_codec_list| is true, this can't be used from the renderer
- // process since it attempts to access MediaCodecList (which requires
- // permissions).
- static bool IsH264EncoderAvailable(bool use_codec_list = true);
+ // This can't be used from the renderer process since it attempts to
+ // access MediaCodecList (which requires permissions).
+ static bool IsH264EncoderAvailable();
// Returns a vector of supported codecs profiles and levels.
//
diff --git a/chromium/media/base/android/media_player_bridge.cc b/chromium/media/base/android/media_player_bridge.cc
index 6a7eacdd39e..9ed5a3f9d37 100644
--- a/chromium/media/base/android/media_player_bridge.cc
+++ b/chromium/media/base/android/media_player_bridge.cc
@@ -169,6 +169,11 @@ void MediaPlayerBridge::SetVideoSurface(gl::ScopedJavaSurface surface) {
}
void MediaPlayerBridge::SetPlaybackRate(double playback_rate) {
+ if (!prepared_) {
+ pending_playback_rate_ = playback_rate;
+ return;
+ }
+
if (j_media_player_bridge_.is_null())
return;
@@ -481,6 +486,11 @@ void MediaPlayerBridge::OnMediaPrepared() {
StartInternal();
pending_play_ = false;
}
+
+ if (pending_playback_rate_) {
+ SetPlaybackRate(pending_playback_rate_.value());
+ pending_playback_rate_.reset();
+ }
}
ScopedJavaLocalRef<jobject> MediaPlayerBridge::GetAllowedOperations() {
diff --git a/chromium/media/base/android/media_player_bridge.h b/chromium/media/base/android/media_player_bridge.h
index 862921c2a4c..98b88b97105 100644
--- a/chromium/media/base/android/media_player_bridge.h
+++ b/chromium/media/base/android/media_player_bridge.h
@@ -282,6 +282,9 @@ class MEDIA_EXPORT MediaPlayerBridge {
// Listener object that listens to all the media player events.
std::unique_ptr<MediaPlayerListener> listener_;
+ // Pending playback rate while player is preparing.
+ absl::optional<double> pending_playback_rate_;
+
// Weak pointer passed to `listener_` for callbacks.
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<MediaPlayerBridge> weak_factory_{this};
diff --git a/chromium/media/base/audio_codecs.cc b/chromium/media/base/audio_codecs.cc
index 6b7a81802d1..7f5b6e0e80b 100644
--- a/chromium/media/base/audio_codecs.cc
+++ b/chromium/media/base/audio_codecs.cc
@@ -88,6 +88,10 @@ AudioCodec StringToAudioCodec(const std::string& codec_id) {
return AudioCodec::kOpus;
if (codec_id == "vorbis")
return AudioCodec::kVorbis;
+ if (codec_id == "dtsc")
+ return AudioCodec::kDTS;
+ if (codec_id == "dtsx")
+ return AudioCodec::kDTSXP2;
if (base::StartsWith(codec_id, "mp4a.40.", base::CompareCase::SENSITIVE))
return AudioCodec::kAAC;
return AudioCodec::kUnknown;
diff --git a/chromium/media/base/audio_latency.cc b/chromium/media/base/audio_latency.cc
index 6642e65b357..f102f238116 100644
--- a/chromium/media/base/audio_latency.cc
+++ b/chromium/media/base/audio_latency.cc
@@ -7,7 +7,9 @@
#include <stdint.h>
#include <algorithm>
+#include <cmath>
+#include "base/check_op.h"
#include "base/logging.h"
#include "base/time/time.h"
#include "build/build_config.h"
diff --git a/chromium/media/base/audio_parameters.cc b/chromium/media/base/audio_parameters.cc
index ded8e403c9b..a06134c67a7 100644
--- a/chromium/media/base/audio_parameters.cc
+++ b/chromium/media/base/audio_parameters.cc
@@ -36,6 +36,8 @@ const char* FormatToString(AudioParameters::Format format) {
return "BITSTREAM_DTS";
case AudioParameters::AUDIO_BITSTREAM_DTS_HD:
return "BITSTREAM_DTS_HD";
+ case AudioParameters::AUDIO_BITSTREAM_DTSX_P2:
+ return "BITSTREAM_DTSX_P2";
case AudioParameters::AUDIO_BITSTREAM_IEC61937:
return "BITSTREAM_IEC61937";
case AudioParameters::AUDIO_FAKE:
@@ -200,6 +202,7 @@ bool AudioParameters::IsBitstreamFormat() const {
case AUDIO_BITSTREAM_EAC3:
case AUDIO_BITSTREAM_DTS:
case AUDIO_BITSTREAM_DTS_HD:
+ case AUDIO_BITSTREAM_DTSX_P2:
case AUDIO_BITSTREAM_IEC61937:
return true;
default:
diff --git a/chromium/media/base/audio_parameters.h b/chromium/media/base/audio_parameters.h
index 4293aeeb1c7..dccb01bfe1a 100644
--- a/chromium/media/base/audio_parameters.h
+++ b/chromium/media/base/audio_parameters.h
@@ -125,9 +125,10 @@ class MEDIA_SHMEM_EXPORT AudioParameters {
AUDIO_BITSTREAM_EAC3 = 0x08, // Compressed E-AC3 bitstream.
AUDIO_BITSTREAM_DTS = 0x10, // Compressed DTS bitstream.
AUDIO_BITSTREAM_DTS_HD = 0x20, // Compressed DTS-HD bitstream.
- AUDIO_BITSTREAM_IEC61937 = 0x40, // Compressed IEC61937 bitstream.
+ AUDIO_BITSTREAM_DTSX_P2 = 0x40, // Compressed DTS-HD bitstream.
+ AUDIO_BITSTREAM_IEC61937 = 0x80, // Compressed IEC61937 bitstream.
AUDIO_FORMAT_LAST =
- AUDIO_BITSTREAM_IEC61937, // Only used for validation of format.
+ AUDIO_BITSTREAM_IEC61937, // Only used for validation of format.
};
enum {
diff --git a/chromium/media/base/audio_shifter.cc b/chromium/media/base/audio_shifter.cc
index 462fdf702fd..e6e0e6de959 100644
--- a/chromium/media/base/audio_shifter.cc
+++ b/chromium/media/base/audio_shifter.cc
@@ -91,14 +91,11 @@ AudioShifter::AudioShifter(base::TimeDelta max_buffer_size,
input_clock_smoother_(new ClockSmoother(clock_accuracy)),
output_clock_smoother_(new ClockSmoother(clock_accuracy)),
running_(false),
- position_(0),
- previous_requested_samples_(0),
resampler_(channels,
1.0,
96,
base::BindRepeating(&AudioShifter::ResamplerCallback,
- base::Unretained(this))),
- current_ratio_(1.0) {}
+ base::Unretained(this))) {}
AudioShifter::~AudioShifter() = default;
@@ -106,6 +103,8 @@ void AudioShifter::Push(std::unique_ptr<AudioBus> input,
base::TimeTicks playout_time) {
TRACE_EVENT1("audio", "AudioShifter::Push", "time (ms)",
(playout_time - base::TimeTicks()).InMillisecondsF());
+ DCHECK_EQ(input->channels(), channels_);
+ frames_pushed_for_testing_ += input->frames();
if (!queue_.empty()) {
playout_time = input_clock_smoother_->Smooth(
playout_time, base::Seconds(queue_.back().audio->frames() / rate_));
diff --git a/chromium/media/base/audio_shifter.h b/chromium/media/base/audio_shifter.h
index b04f663e660..36e6e2b4d60 100644
--- a/chromium/media/base/audio_shifter.h
+++ b/chromium/media/base/audio_shifter.h
@@ -81,7 +81,9 @@ class MEDIA_EXPORT AudioShifter {
// calculate playout_time would be now + audio pipeline delay.
void Pull(AudioBus* output, base::TimeTicks playout_time);
-private:
+ int frames_pushed_for_testing() { return frames_pushed_for_testing_; }
+
+ private:
struct AudioQueueEntry {
AudioQueueEntry(base::TimeTicks target_playout_time,
std::unique_ptr<AudioBus> audio);
@@ -112,7 +114,7 @@ private:
bool running_;
// Number of frames already consumed from |queue_|.
- size_t position_;
+ size_t position_ = 0;
// Queue of data provided to us.
base::circular_deque<AudioQueueEntry> queue_;
@@ -121,7 +123,7 @@ private:
base::TimeTicks previous_playout_time_;
// Number of frames requested in last Pull call.
- int previous_requested_samples_;
+ int previous_requested_samples_ = 0;
// Timestamp at the end of last audio bus
// consumed by resampler.
@@ -135,7 +137,9 @@ private:
MultiChannelResampler resampler_;
// Current resampler ratio.
- double current_ratio_;
+ double current_ratio_ = 1.0;
+
+ int frames_pushed_for_testing_ = 0;
};
} // namespace media
diff --git a/chromium/media/base/audio_timestamp_helper.cc b/chromium/media/base/audio_timestamp_helper.cc
index 01ec6be22ef..15f5b177d56 100644
--- a/chromium/media/base/audio_timestamp_helper.cc
+++ b/chromium/media/base/audio_timestamp_helper.cc
@@ -4,6 +4,8 @@
#include "media/base/audio_timestamp_helper.h"
+#include <cmath>
+
#include "base/check_op.h"
#include "media/base/timestamp_constants.h"
diff --git a/chromium/media/base/bitstream_buffer.cc b/chromium/media/base/bitstream_buffer.cc
index 97cb6be017c..202e974601f 100644
--- a/chromium/media/base/bitstream_buffer.cc
+++ b/chromium/media/base/bitstream_buffer.cc
@@ -10,28 +10,15 @@
namespace media {
BitstreamBuffer::BitstreamBuffer()
- : BitstreamBuffer(-1, base::subtle::PlatformSharedMemoryRegion(), 0) {}
-
-BitstreamBuffer::BitstreamBuffer(
- int32_t id,
- base::subtle::PlatformSharedMemoryRegion region,
- size_t size,
- off_t offset,
- base::TimeDelta presentation_timestamp)
- : id_(id),
- region_(std::move(region)),
- size_(size),
- offset_(offset),
- presentation_timestamp_(presentation_timestamp) {}
+ : BitstreamBuffer(-1, base::UnsafeSharedMemoryRegion(), 0) {}
BitstreamBuffer::BitstreamBuffer(int32_t id,
base::UnsafeSharedMemoryRegion region,
size_t size,
- off_t offset,
+ uint64_t offset,
base::TimeDelta presentation_timestamp)
: id_(id),
- region_(base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- std::move(region))),
+ region_(std::move(region)),
size_(size),
offset_(offset),
presentation_timestamp_(presentation_timestamp) {}
diff --git a/chromium/media/base/bitstream_buffer.h b/chromium/media/base/bitstream_buffer.h
index dfff2d3c00c..52dadb02495 100644
--- a/chromium/media/base/bitstream_buffer.h
+++ b/chromium/media/base/bitstream_buffer.h
@@ -8,7 +8,6 @@
#include <stddef.h>
#include <stdint.h>
-#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/time/time.h"
@@ -37,16 +36,9 @@ class MEDIA_EXPORT BitstreamBuffer {
// When not provided, |presentation_timestamp| will be
// |media::kNoTimestamp|.
BitstreamBuffer(int32_t id,
- base::subtle::PlatformSharedMemoryRegion region,
- size_t size,
- off_t offset = 0,
- base::TimeDelta presentation_timestamp = kNoTimestamp);
-
- // As above, creating by unwrapping a base::UnsafeSharedMemoryRegion.
- BitstreamBuffer(int32_t id,
base::UnsafeSharedMemoryRegion region,
size_t size,
- off_t offset = 0,
+ uint64_t offset = 0,
base::TimeDelta presentation_timestamp = kNoTimestamp);
// Move operations are allowed.
@@ -79,22 +71,18 @@ class MEDIA_EXPORT BitstreamBuffer {
const std::string& iv,
const std::vector<SubsampleEntry>& subsamples);
- // Taking the region invalides the one in this BitstreamBuffer.
- base::subtle::PlatformSharedMemoryRegion TakeRegion() {
- return std::move(region_);
- }
+ // Taking the region invalidates the one in this BitstreamBuffer.
+ base::UnsafeSharedMemoryRegion TakeRegion() { return std::move(region_); }
// If a region needs to be taken from a const BitstreamBuffer, it must be
// duplicated. This function makes that explicit.
// TODO(crbug.com/793446): this is probably only needed by legacy IPC, and can
// be removed once that is converted to the new shared memory API.
- base::subtle::PlatformSharedMemoryRegion DuplicateRegion() const {
+ base::UnsafeSharedMemoryRegion DuplicateRegion() const {
return region_.Duplicate();
}
- const base::subtle::PlatformSharedMemoryRegion& region() const {
- return region_;
- }
+ const base::UnsafeSharedMemoryRegion& region() const { return region_; }
int32_t id() const { return id_; }
@@ -103,17 +91,13 @@ class MEDIA_EXPORT BitstreamBuffer {
size_t size() const { return size_; }
// The offset to the start of actual bitstream data in the shared memory.
- off_t offset() const { return offset_; }
+ uint64_t offset() const { return offset_; }
// The timestamp is only valid if it's not equal to |media::kNoTimestamp|.
base::TimeDelta presentation_timestamp() const {
return presentation_timestamp_;
}
- void set_region(base::subtle::PlatformSharedMemoryRegion region) {
- region_ = std::move(region);
- }
-
// The following methods come from SetDecryptionSettings().
const std::string& key_id() const { return key_id_; }
const std::string& iv() const { return iv_; }
@@ -121,9 +105,9 @@ class MEDIA_EXPORT BitstreamBuffer {
private:
int32_t id_;
- base::subtle::PlatformSharedMemoryRegion region_;
+ base::UnsafeSharedMemoryRegion region_;
size_t size_;
- off_t offset_;
+ uint64_t offset_;
// Note: Not set by all clients.
base::TimeDelta presentation_timestamp_;
diff --git a/chromium/media/base/callback_registry.h b/chromium/media/base/callback_registry.h
index b89be2c693c..faf55049cd6 100644
--- a/chromium/media/base/callback_registry.h
+++ b/chromium/media/base/callback_registry.h
@@ -70,8 +70,8 @@ class CallbackRegistry<void(Args...)> {
void Notify(Args&&... args) {
DVLOG(1) << __func__;
base::AutoLock lock(lock_);
- for (auto const& entry : callbacks_)
- entry.second.Run(std::forward<Args>(args)...);
+ for (auto const& [key_id, callback] : callbacks_)
+ callback.Run(std::forward<Args>(args)...);
}
private:
diff --git a/chromium/media/base/decoder.cc b/chromium/media/base/decoder.cc
index 24a73f3dcb2..5b3e69f57b0 100644
--- a/chromium/media/base/decoder.cc
+++ b/chromium/media/base/decoder.cc
@@ -75,6 +75,8 @@ std::string GetDecoderName(AudioDecoderType type) {
return "Testing or Mock Audio decoder";
case AudioDecoderType::kAudioToolbox:
return "AudioToolbox";
+ case AudioDecoderType::kMediaFoundation:
+ return "MediaFoundationAudioDecoder";
}
}
diff --git a/chromium/media/base/decoder.h b/chromium/media/base/decoder.h
index f4370b12226..dcf860045d7 100644
--- a/chromium/media/base/decoder.h
+++ b/chromium/media/base/decoder.h
@@ -16,17 +16,18 @@ namespace media {
// List of known AudioDecoder implementations; recorded to UKM, always add new
// values to the end and do not reorder or delete values from this list.
enum class AudioDecoderType : int {
- kUnknown = 0, // Decoder name string is not recognized or n/a.
- kFFmpeg = 1, // FFmpegAudioDecoder
- kMojo = 2, // MojoAudioDecoder
- kDecrypting = 3, // DecryptingAudioDecoder
- kMediaCodec = 4, // MediaCodecAudioDecoder (Android)
- kBroker = 5, // AudioDecoderBroker
- kTesting = 6, // Never send this to UKM, for tests only.
- kAudioToolbox = 7, // AudioToolbox (macOS)
+ kUnknown = 0, // Decoder name string is not recognized or n/a.
+ kFFmpeg = 1, // FFmpegAudioDecoder
+ kMojo = 2, // MojoAudioDecoder
+ kDecrypting = 3, // DecryptingAudioDecoder
+ kMediaCodec = 4, // MediaCodecAudioDecoder (Android)
+ kBroker = 5, // AudioDecoderBroker
+ kTesting = 6, // Never send this to UKM, for tests only.
+ kAudioToolbox = 7, // AudioToolbox (macOS)
+ kMediaFoundation = 8, // MediaFoundationAudioDecoder
// Keep this at the end and equal to the last entry.
- kMaxValue = kAudioToolbox,
+ kMaxValue = kMediaFoundation,
};
// List of known VideoDecoder implementations; recorded to UKM, always add new
diff --git a/chromium/media/base/decoder_buffer.cc b/chromium/media/base/decoder_buffer.cc
index 4d98e2c4bd1..620fc447c10 100644
--- a/chromium/media/base/decoder_buffer.cc
+++ b/chromium/media/base/decoder_buffer.cc
@@ -46,25 +46,15 @@ DecoderBuffer::DecoderBuffer(const uint8_t* data,
}
DecoderBuffer::DecoderBuffer(std::unique_ptr<uint8_t[]> data, size_t size)
- : data_(std::move(data)),
- size_(size),
- side_data_size_(0),
- is_key_frame_(false) {}
+ : data_(std::move(data)), size_(size) {}
-DecoderBuffer::DecoderBuffer(std::unique_ptr<UnalignedSharedMemory> shm,
+DecoderBuffer::DecoderBuffer(base::ReadOnlySharedMemoryMapping mapping,
size_t size)
- : size_(size),
- side_data_size_(0),
- shm_(std::move(shm)),
- is_key_frame_(false) {}
-
-DecoderBuffer::DecoderBuffer(
- std::unique_ptr<ReadOnlyUnalignedMapping> shared_mem_mapping,
- size_t size)
- : size_(size),
- side_data_size_(0),
- shared_mem_mapping_(std::move(shared_mem_mapping)),
- is_key_frame_(false) {}
+ : size_(size), read_only_mapping_(std::move(mapping)) {}
+
+DecoderBuffer::DecoderBuffer(base::WritableSharedMemoryMapping mapping,
+ size_t size)
+ : size_(size), writable_mapping_(std::move(mapping)) {}
DecoderBuffer::~DecoderBuffer() {
data_.reset();
@@ -107,34 +97,33 @@ scoped_refptr<DecoderBuffer> DecoderBuffer::FromArray(
// static
scoped_refptr<DecoderBuffer> DecoderBuffer::FromSharedMemoryRegion(
- base::subtle::PlatformSharedMemoryRegion region,
- off_t offset,
+ base::UnsafeSharedMemoryRegion region,
+ uint64_t offset,
size_t size) {
- // TODO(crbug.com/795291): when clients have converted to using
- // base::ReadOnlySharedMemoryRegion the ugly mode check below will no longer
- // be necessary.
- auto shm = std::make_unique<UnalignedSharedMemory>(
- std::move(region), size,
- region.GetMode() ==
- base::subtle::PlatformSharedMemoryRegion::Mode::kReadOnly
- ? true
- : false);
- if (size == 0 || !shm->MapAt(offset, size))
+ if (size == 0) {
+ return nullptr;
+ }
+
+ auto mapping = region.MapAt(offset, size);
+ if (!mapping.IsValid()) {
return nullptr;
- return base::WrapRefCounted(new DecoderBuffer(std::move(shm), size));
+ }
+ return base::WrapRefCounted(new DecoderBuffer(std::move(mapping), size));
}
// static
scoped_refptr<DecoderBuffer> DecoderBuffer::FromSharedMemoryRegion(
base::ReadOnlySharedMemoryRegion region,
- off_t offset,
+ uint64_t offset,
size_t size) {
- std::unique_ptr<ReadOnlyUnalignedMapping> unaligned_mapping =
- std::make_unique<ReadOnlyUnalignedMapping>(region, size, offset);
- if (!unaligned_mapping->IsValid())
+ if (size == 0) {
return nullptr;
- return base::WrapRefCounted(
- new DecoderBuffer(std::move(unaligned_mapping), size));
+ }
+ auto mapping = region.MapAt(offset, size);
+ if (!mapping.IsValid()) {
+ return nullptr;
+ }
+ return base::WrapRefCounted(new DecoderBuffer(std::move(mapping), size));
}
// static
diff --git a/chromium/media/base/decoder_buffer.h b/chromium/media/base/decoder_buffer.h
index f44b8792d2b..28befa6c9aa 100644
--- a/chromium/media/base/decoder_buffer.h
+++ b/chromium/media/base/decoder_buffer.h
@@ -15,12 +15,13 @@
#include "base/check.h"
#include "base/memory/read_only_shared_memory_region.h"
#include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/base/decrypt_config.h"
#include "media/base/media_export.h"
#include "media/base/timestamp_constants.h"
-#include "media/base/unaligned_shared_memory.h"
namespace media {
@@ -97,8 +98,8 @@ class MEDIA_EXPORT DecoderBuffer
//
// If mapping fails, nullptr will be returned.
static scoped_refptr<DecoderBuffer> FromSharedMemoryRegion(
- base::subtle::PlatformSharedMemoryRegion region,
- off_t offset,
+ base::UnsafeSharedMemoryRegion region,
+ uint64_t offset,
size_t size);
// Create a DecoderBuffer where data() of |size| bytes resides within the
@@ -108,7 +109,7 @@ class MEDIA_EXPORT DecoderBuffer
// Ownership of |region| is transferred to the buffer.
static scoped_refptr<DecoderBuffer> FromSharedMemoryRegion(
base::ReadOnlySharedMemoryRegion region,
- off_t offset,
+ uint64_t offset,
size_t size);
// Create a DecoderBuffer indicating we've reached end of stream.
@@ -146,18 +147,18 @@ class MEDIA_EXPORT DecoderBuffer
const uint8_t* data() const {
DCHECK(!end_of_stream());
- if (shared_mem_mapping_ && shared_mem_mapping_->IsValid())
- return static_cast<const uint8_t*>(shared_mem_mapping_->memory());
- if (shm_)
- return static_cast<uint8_t*>(shm_->memory());
+ if (read_only_mapping_.IsValid())
+ return read_only_mapping_.GetMemoryAs<const uint8_t>();
+ if (writable_mapping_.IsValid())
+ return writable_mapping_.GetMemoryAs<const uint8_t>();
return data_.get();
}
// TODO(sandersd): Remove writable_data(). https://crbug.com/834088
uint8_t* writable_data() const {
DCHECK(!end_of_stream());
- DCHECK(!shm_);
- DCHECK(!shared_mem_mapping_);
+ DCHECK(!read_only_mapping_.IsValid());
+ DCHECK(!writable_mapping_.IsValid());
return data_.get();
}
@@ -199,7 +200,10 @@ class MEDIA_EXPORT DecoderBuffer
}
// If there's no data in this buffer, it represents end of stream.
- bool end_of_stream() const { return !shared_mem_mapping_ && !shm_ && !data_; }
+ bool end_of_stream() const {
+ return !read_only_mapping_.IsValid() && !writable_mapping_.IsValid() &&
+ !data_;
+ }
bool is_key_frame() const {
DCHECK(!end_of_stream());
@@ -237,10 +241,9 @@ class MEDIA_EXPORT DecoderBuffer
DecoderBuffer(std::unique_ptr<uint8_t[]> data, size_t size);
- DecoderBuffer(std::unique_ptr<UnalignedSharedMemory> shm, size_t size);
+ DecoderBuffer(base::ReadOnlySharedMemoryMapping mapping, size_t size);
- DecoderBuffer(std::unique_ptr<ReadOnlyUnalignedMapping> shared_mem_mapping,
- size_t size);
+ DecoderBuffer(base::WritableSharedMemoryMapping mapping, size_t size);
virtual ~DecoderBuffer();
@@ -254,20 +257,20 @@ class MEDIA_EXPORT DecoderBuffer
size_t size_;
// Side data. Used for alpha channel in VPx, and for text cues.
- size_t side_data_size_;
+ size_t side_data_size_ = 0;
std::unique_ptr<uint8_t[]> side_data_;
- // Encoded data, if it is stored in a shared memory mapping.
- std::unique_ptr<ReadOnlyUnalignedMapping> shared_mem_mapping_;
+ // Encoded data, if it is stored in a read-only shared memory mapping.
+ base::ReadOnlySharedMemoryMapping read_only_mapping_;
- // Encoded data, if it is stored in SHM.
- std::unique_ptr<UnalignedSharedMemory> shm_;
+ // Encoded data, if it is stored in a writable shared memory mapping.
+ base::WritableSharedMemoryMapping writable_mapping_;
// Encryption parameters for the encoded data.
std::unique_ptr<DecryptConfig> decrypt_config_;
// Whether the frame was marked as a keyframe in the container.
- bool is_key_frame_;
+ bool is_key_frame_ = false;
// Constructor helper method for memory allocations.
void Initialize();
diff --git a/chromium/media/base/decoder_buffer_unittest.cc b/chromium/media/base/decoder_buffer_unittest.cc
index 2f80d5e92d8..91a871a0ab8 100644
--- a/chromium/media/base/decoder_buffer_unittest.cc
+++ b/chromium/media/base/decoder_buffer_unittest.cc
@@ -86,10 +86,8 @@ TEST(DecoderBufferTest, FromPlatformSharedMemoryRegion) {
ASSERT_TRUE(mapping.IsValid());
memcpy(mapping.GetMemoryAs<uint8_t>(), kData, kDataSize);
- scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::FromSharedMemoryRegion(
- base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- 0, kDataSize));
+ scoped_refptr<DecoderBuffer> buffer(
+ DecoderBuffer::FromSharedMemoryRegion(std::move(region), 0, kDataSize));
ASSERT_TRUE(buffer.get());
EXPECT_EQ(buffer->data_size(), kDataSize);
EXPECT_EQ(0, memcmp(buffer->data(), kData, kDataSize));
@@ -108,9 +106,7 @@ TEST(DecoderBufferTest, FromPlatformSharedMemoryRegion_Unaligned) {
memcpy(mapping.GetMemoryAs<uint8_t>(), kData, kDataSize);
scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::FromSharedMemoryRegion(
- base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kDataOffset, kDataSize - kDataOffset));
+ std::move(region), kDataOffset, kDataSize - kDataOffset));
ASSERT_TRUE(buffer.get());
EXPECT_EQ(buffer->data_size(), kDataSize - kDataOffset);
EXPECT_EQ(
@@ -128,10 +124,8 @@ TEST(DecoderBufferTest, FromPlatformSharedMemoryRegion_ZeroSize) {
ASSERT_TRUE(mapping.IsValid());
memcpy(mapping.memory(), kData, kDataSize);
- scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::FromSharedMemoryRegion(
- base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- 0, 0));
+ scoped_refptr<DecoderBuffer> buffer(
+ DecoderBuffer::FromSharedMemoryRegion(std::move(region), 0, 0));
ASSERT_FALSE(buffer.get());
}
diff --git a/chromium/media/base/decoder_status.cc b/chromium/media/base/decoder_status.cc
index 44d0aaf1351..caba2464b35 100644
--- a/chromium/media/base/decoder_status.cc
+++ b/chromium/media/base/decoder_status.cc
@@ -30,7 +30,6 @@ const std::string GetDecodeStatusString(const DecoderStatus& status) {
STRINGIFY(DecoderStatus::Codes::kMalformedBitstream);
STRINGIFY(DecoderStatus::Codes::kFailedToGetDecoderBuffer);
STRINGIFY(DecoderStatus::Codes::kDecoderStreamInErrorState);
- STRINGIFY(DecoderStatus::Codes::kDecoderStreamReinitFailed);
STRINGIFY(DecoderStatus::Codes::kDecoderStreamDemuxerError);
STRINGIFY(DecoderStatus::Codes::kUnsupportedProfile);
STRINGIFY(DecoderStatus::Codes::kUnsupportedCodec);
@@ -40,6 +39,8 @@ const std::string GetDecodeStatusString(const DecoderStatus& status) {
STRINGIFY(DecoderStatus::Codes::kFailedToCreateDecoder);
STRINGIFY(DecoderStatus::Codes::kKeyFrameRequired);
STRINGIFY(DecoderStatus::Codes::kMissingTimestamp);
+ STRINGIFY(DecoderStatus::Codes::kTooManyDecoders);
+ STRINGIFY(DecoderStatus::Codes::kMediaFoundationNotAvailable);
}
#undef STRINGIFY
}
diff --git a/chromium/media/base/decoder_status.h b/chromium/media/base/decoder_status.h
index dd2a0c6548a..1142cb61d2d 100644
--- a/chromium/media/base/decoder_status.h
+++ b/chromium/media/base/decoder_status.h
@@ -28,7 +28,6 @@ struct DecoderStatusTraits {
kMalformedBitstream = 104,
kFailedToGetDecoderBuffer = 107,
kDecoderStreamInErrorState = 108,
- kDecoderStreamReinitFailed = 109,
kDecoderStreamDemuxerError = 110,
kKeyFrameRequired = 111,
kMissingTimestamp = 112,
@@ -40,6 +39,8 @@ struct DecoderStatusTraits {
kUnsupportedEncryptionMode = 203,
kCantChangeCodec = 204,
kFailedToCreateDecoder = 205,
+ kTooManyDecoders = 206,
+ kMediaFoundationNotAvailable = 207,
};
static constexpr StatusGroupType Group() { return "DecoderStatus"; }
static constexpr Codes DefaultEnumValue() { return Codes::kOk; }
diff --git a/chromium/media/base/eme_constants.h b/chromium/media/base/eme_constants.h
index 31918a77996..039fff44ce2 100644
--- a/chromium/media/base/eme_constants.h
+++ b/chromium/media/base/eme_constants.h
@@ -126,17 +126,6 @@ static_assert(
#endif // BUILDFLAG(ENABLE_MSE_MPEG2TS_STREAM_PARSER)
#endif // BUILDFLAG(USE_PROPRIETARY_CODECS)
-enum class EmeSessionTypeSupport {
- // Invalid default value.
- INVALID,
- // The session type is not supported.
- NOT_SUPPORTED,
- // The session type is supported if a distinctive identifier is available.
- SUPPORTED_WITH_IDENTIFIER,
- // The session type is always supported.
- SUPPORTED,
-};
-
// Used to declare support for distinctive identifier and persistent state.
// These are purposefully limited to not allow one to require the other, so that
// transitive requirements are not possible. Non-trivial refactoring would be
diff --git a/chromium/media/base/fake_audio_renderer_sink.cc b/chromium/media/base/fake_audio_renderer_sink.cc
index 114aa95dcb1..ff9af5bfd54 100644
--- a/chromium/media/base/fake_audio_renderer_sink.cc
+++ b/chromium/media/base/fake_audio_renderer_sink.cc
@@ -62,7 +62,6 @@ void FakeAudioRendererSink::Pause() {
void FakeAudioRendererSink::Play() {
DCHECK(state_ == kStarted || state_ == kPaused) << "state_ " << state_;
- DCHECK_EQ(state_, kPaused);
ChangeState(kPlaying);
}
diff --git a/chromium/media/base/key_system_properties.h b/chromium/media/base/key_system_properties.h
index f80ba87203a..1d2b81cf0de 100644
--- a/chromium/media/base/key_system_properties.h
+++ b/chromium/media/base/key_system_properties.h
@@ -65,8 +65,13 @@ class MEDIA_EXPORT KeySystemProperties {
const bool* hw_secure_requirement) const = 0;
// Returns the support this key system provides for persistent-license
- // sessions.
- virtual EmeSessionTypeSupport GetPersistentLicenseSessionSupport() const = 0;
+ // sessions. The returned `EmeConfigRule` (if supported) assumes persistence
+ // requirement, which is enforced by `KeySystemConfigSelector`. Therefore, the
+ // returned `EmeConfigRule` doesn't need to specify persistence requirement
+ // explicitly.
+ // TODO(crbug.com/1324262): Refactor `EmeConfigRule` to make it easier to
+ // express combinations of requirements.
+ virtual EmeConfigRule GetPersistentLicenseSessionSupport() const = 0;
// Returns the support this key system provides for persistent state.
virtual EmeFeatureSupport GetPersistentStateSupport() const = 0;
diff --git a/chromium/media/base/key_systems.cc b/chromium/media/base/key_systems.cc
index 744b47f7078..0d7283d97d0 100644
--- a/chromium/media/base/key_systems.cc
+++ b/chromium/media/base/key_systems.cc
@@ -177,8 +177,8 @@ class ClearKeyProperties : public KeySystemProperties {
: EmeConfigRule::NOT_SUPPORTED;
}
- EmeSessionTypeSupport GetPersistentLicenseSessionSupport() const final {
- return EmeSessionTypeSupport::NOT_SUPPORTED;
+ EmeConfigRule GetPersistentLicenseSessionSupport() const final {
+ return EmeConfigRule::NOT_SUPPORTED;
}
EmeFeatureSupport GetPersistentStateSupport() const final {
@@ -295,7 +295,7 @@ class KeySystemsImpl : public KeySystems {
EmeMediaType media_type,
const std::string& requested_robustness,
const bool* hw_secure_requirement) const override;
- EmeSessionTypeSupport GetPersistentLicenseSessionSupport(
+ EmeConfigRule GetPersistentLicenseSessionSupport(
const std::string& key_system) const override;
EmeFeatureSupport GetPersistentStateSupport(
const std::string& key_system) const override;
@@ -373,8 +373,8 @@ KeySystemsImpl* KeySystemsImpl::GetInstance() {
}
KeySystemsImpl::KeySystemsImpl() {
- for (const auto& entry : kMimeTypeToCodecsMap)
- RegisterMimeType(entry.mime_type, entry.codecs);
+ for (const auto& [mime_type, codecs] : kMimeTypeToCodecsMap)
+ RegisterMimeType(mime_type, codecs);
UpdateSupportedKeySystems();
}
@@ -485,8 +485,6 @@ void KeySystemsImpl::ProcessSupportedKeySystems(
for (auto& properties : key_systems) {
DCHECK(!properties->GetBaseKeySystemName().empty());
- DCHECK(properties->GetPersistentLicenseSessionSupport() !=
- EmeSessionTypeSupport::INVALID);
DCHECK(properties->GetPersistentStateSupport() !=
EmeFeatureSupport::INVALID);
DCHECK(properties->GetDistinctiveIdentifierSupport() !=
@@ -504,15 +502,7 @@ void KeySystemsImpl::ProcessSupportedKeySystems(
if (properties->GetPersistentStateSupport() ==
EmeFeatureSupport::NOT_SUPPORTED) {
DCHECK(properties->GetPersistentLicenseSessionSupport() ==
- EmeSessionTypeSupport::NOT_SUPPORTED);
- }
-
- // If distinctive identifiers are not supported, then no other features can
- // require them.
- if (properties->GetDistinctiveIdentifierSupport() ==
- EmeFeatureSupport::NOT_SUPPORTED) {
- DCHECK(properties->GetPersistentLicenseSessionSupport() !=
- EmeSessionTypeSupport::SUPPORTED_WITH_IDENTIFIER);
+ EmeConfigRule::NOT_SUPPORTED);
}
if (!CanBlock(*properties)) {
@@ -775,14 +765,14 @@ EmeConfigRule KeySystemsImpl::GetRobustnessConfigRule(
key_system, media_type, requested_robustness, hw_secure_requirement);
}
-EmeSessionTypeSupport KeySystemsImpl::GetPersistentLicenseSessionSupport(
+EmeConfigRule KeySystemsImpl::GetPersistentLicenseSessionSupport(
const std::string& key_system) const {
DCHECK(thread_checker_.CalledOnValidThread());
const auto* properties = GetKeySystemProperties(key_system);
if (!properties) {
NOTREACHED();
- return EmeSessionTypeSupport::INVALID;
+ return EmeConfigRule::NOT_SUPPORTED;
}
return properties->GetPersistentLicenseSessionSupport();
diff --git a/chromium/media/base/key_systems.h b/chromium/media/base/key_systems.h
index 7165a179c82..3635e8ddceb 100644
--- a/chromium/media/base/key_systems.h
+++ b/chromium/media/base/key_systems.h
@@ -82,7 +82,7 @@ class MEDIA_EXPORT KeySystems {
const bool* hw_secure_requirement) const = 0;
// Returns the support |key_system| provides for persistent-license sessions.
- virtual EmeSessionTypeSupport GetPersistentLicenseSessionSupport(
+ virtual EmeConfigRule GetPersistentLicenseSessionSupport(
const std::string& key_system) const = 0;
// Returns the support |key_system| provides for persistent state.
diff --git a/chromium/media/base/key_systems_unittest.cc b/chromium/media/base/key_systems_unittest.cc
index 7a9b977589f..b08284f7186 100644
--- a/chromium/media/base/key_systems_unittest.cc
+++ b/chromium/media/base/key_systems_unittest.cc
@@ -100,8 +100,8 @@ class AesKeySystemProperties : public TestKeySystemPropertiesBase {
: EmeConfigRule::NOT_SUPPORTED;
}
- EmeSessionTypeSupport GetPersistentLicenseSessionSupport() const override {
- return EmeSessionTypeSupport::NOT_SUPPORTED;
+ EmeConfigRule GetPersistentLicenseSessionSupport() const override {
+ return EmeConfigRule::NOT_SUPPORTED;
}
EmeFeatureSupport GetPersistentStateSupport() const override {
@@ -158,8 +158,8 @@ class ExternalKeySystemProperties : public TestKeySystemPropertiesBase {
return EmeConfigRule::NOT_SUPPORTED;
}
- EmeSessionTypeSupport GetPersistentLicenseSessionSupport() const override {
- return EmeSessionTypeSupport::SUPPORTED;
+ EmeConfigRule GetPersistentLicenseSessionSupport() const override {
+ return EmeConfigRule::SUPPORTED;
}
EmeFeatureSupport GetPersistentStateSupport() const override {
diff --git a/chromium/media/base/limits.h b/chromium/media/base/limits.h
index 80e80fcd2d8..c5cb20e36ee 100644
--- a/chromium/media/base/limits.h
+++ b/chromium/media/base/limits.h
@@ -13,78 +13,76 @@ namespace media {
namespace limits {
-enum {
- // Maximum possible dimension (width or height) for any video.
- kMaxDimension = (1 << 15) - 1, // 32767
-
- // Maximum possible canvas size (width multiplied by height) for any video.
- kMaxCanvas = (1 << (14 * 2)), // 16384 x 16384
-
- // Total number of video frames which are populating in the pipeline.
- kMaxVideoFrames = 4,
-
- // The following limits are used by AudioParameters::IsValid().
- //
- // A few notes on sample rates of common formats:
- // - AAC files are limited to 96 kHz.
- // - MP3 files are limited to 48 kHz.
- // - Vorbis used to be limited to 96 kHz, but no longer has that
- // restriction.
- // - Most PC audio hardware is limited to 192 kHz, some specialized DAC
- // devices will use 768 kHz though.
- //
- // kMaxSampleRate should be updated with
- // blink::audio_utilities::MaxAudioBufferSampleRate()
- kMaxSampleRate = 768000,
- kMinSampleRate = 3000,
- kMaxChannels = 32,
- kMaxBytesPerSample = 4,
- kMaxBitsPerSample = kMaxBytesPerSample * 8,
- kMaxSamplesPerPacket = kMaxSampleRate,
- kMaxPacketSizeInBytes =
- kMaxBytesPerSample * kMaxChannels * kMaxSamplesPerPacket,
-
- // This limit is used by ParamTraits<VideoCaptureParams>.
- kMaxFramesPerSecond = 1000,
-
- // The minimum elapsed amount of time (in seconds) for a playback to be
- // considered as having active engagement.
- kMinimumElapsedWatchTimeSecs = 7,
-
- // Maximum lengths for various EME API parameters. These are checks to
- // prevent unnecessarily large parameters from being passed around, and the
- // lengths are somewhat arbitrary as the EME spec doesn't specify any limits.
- kMinCertificateLength = 128,
- kMaxCertificateLength = 16 * 1024,
- kMaxSessionIdLength = 512,
- kMinKeyIdLength = 1,
- kMaxKeyIdLength = 512,
- kMaxKeyIds = 128,
- kMaxInitDataLength = 64 * 1024, // 64 KB
- kMaxSessionResponseLength = 64 * 1024, // 64 KB
- kMaxKeySystemLength = 256,
+// Maximum possible dimension (width or height) for any video.
+constexpr int kMaxDimension = (1 << 15) - 1; // 32767
+
+// Maximum possible canvas size (width multiplied by height) for any video.
+constexpr int kMaxCanvas = (1 << (14 * 2)); // 16384 x 16384
+
+// Total number of video frames which are populating in the pipeline.
+constexpr int kMaxVideoFrames = 4;
+
+// The following limits are used by AudioParameters::IsValid().
+//
+// A few notes on sample rates of common formats:
+// - AAC files are limited to 96 kHz.
+// - MP3 files are limited to 48 kHz.
+// - Vorbis used to be limited to 96 kHz, but no longer has that
+// restriction.
+// - Most PC audio hardware is limited to 192 kHz, some specialized DAC
+// devices will use 768 kHz though.
+//
+// kMaxSampleRate should be updated with
+// blink::audio_utilities::MaxAudioBufferSampleRate()
+constexpr int kMaxSampleRate = 768000;
+constexpr int kMinSampleRate = 3000;
+constexpr int kMaxChannels = 32;
+constexpr int kMaxBytesPerSample = 4;
+constexpr int kMaxBitsPerSample = kMaxBytesPerSample * 8;
+constexpr int kMaxSamplesPerPacket = kMaxSampleRate;
+constexpr int kMaxPacketSizeInBytes =
+ kMaxBytesPerSample * kMaxChannels * kMaxSamplesPerPacket;
+
+// This limit is used by ParamTraits<VideoCaptureParams>.
+constexpr int kMaxFramesPerSecond = 1000;
+
+// The minimum elapsed amount of time (in seconds) for a playback to be
+// considered as having active engagement.
+constexpr int kMinimumElapsedWatchTimeSecs = 7;
+
+// Maximum lengths for various EME API parameters. These are checks to
+// prevent unnecessarily large parameters from being passed around, and the
+// lengths are somewhat arbitrary as the EME spec doesn't specify any limits.
+constexpr int kMinCertificateLength = 128;
+constexpr int kMaxCertificateLength = 16 * 1024;
+constexpr int kMaxSessionIdLength = 512;
+constexpr int kMinKeyIdLength = 1;
+constexpr int kMaxKeyIdLength = 512;
+constexpr int kMaxKeyIds = 128;
+constexpr int kMaxInitDataLength = 64 * 1024; // 64 KB
+constexpr int kMaxSessionResponseLength = 64 * 1024; // 64 KB
+constexpr int kMaxKeySystemLength = 256;
// Minimum and maximum buffer sizes for certain audio platforms.
#if BUILDFLAG(IS_MAC)
- kMinAudioBufferSize = 128,
- kMaxAudioBufferSize = 4096,
+constexpr int kMinAudioBufferSize = 128;
+constexpr int kMaxAudioBufferSize = 4096;
#elif defined(USE_CRAS)
- // Though CRAS has different per-board defaults, allow explicitly requesting
- // this buffer size on any board.
- kMinAudioBufferSize = 256,
- kMaxAudioBufferSize = 8192,
+// Though CRAS has different per-board defaults, allow explicitly requesting
+// this buffer size on any board.
+constexpr int kMinAudioBufferSize = 256;
+constexpr int kMaxAudioBufferSize = 8192;
#endif
- // Maximum buffer size supported by Web Audio.
- kMaxWebAudioBufferSize = 8192,
+// Maximum buffer size supported by Web Audio.
+constexpr int kMaxWebAudioBufferSize = 8192;
- // Bounds for the number of threads used for software video decoding.
- kMinVideoDecodeThreads = 2,
- kMaxVideoDecodeThreads =
- 16, // Matches ffmpeg's MAX_AUTO_THREADS. Higher values can result in
- // immediate out of memory errors for high resolution content. See
- // https://crbug.com/893984
-};
+// Bounds for the number of threads used for software video decoding.
+constexpr int kMinVideoDecodeThreads = 2;
+constexpr int kMaxVideoDecodeThreads =
+ 16; // Matches ffmpeg's MAX_AUTO_THREADS. Higher values can result in
+ // immediate out of memory errors for high resolution content. See
+ // https://crbug.com/893984
} // namespace limits
diff --git a/chromium/media/base/mac/color_space_util_mac.mm b/chromium/media/base/mac/color_space_util_mac.mm
index b36e3a0aed5..8a8ace9a54b 100644
--- a/chromium/media/base/mac/color_space_util_mac.mm
+++ b/chromium/media/base/mac/color_space_util_mac.mm
@@ -102,25 +102,21 @@ gfx::ColorSpace::TransferID GetCoreVideoTransferFn(CFTypeRef transfer_untyped,
{kCVImageBufferTransferFunction_ITU_R_2020,
kCMFormatDescriptionTransferFunction_ITU_R_2020,
gfx::ColorSpace::TransferID::BT2020_10});
- if (@available(macos 10.12, *)) {
- supported_transfer_funcs.push_back(
- {kCVImageBufferTransferFunction_SMPTE_ST_428_1,
- kCMFormatDescriptionTransferFunction_SMPTE_ST_428_1,
- gfx::ColorSpace::TransferID::SMPTEST428_1});
- }
- if (@available(macos 10.13, *)) {
- supported_transfer_funcs.push_back(
- {kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ,
- kCMFormatDescriptionTransferFunction_SMPTE_ST_2084_PQ,
- gfx::ColorSpace::TransferID::PQ});
- supported_transfer_funcs.push_back(
- {kCVImageBufferTransferFunction_ITU_R_2100_HLG,
- kCMFormatDescriptionTransferFunction_ITU_R_2100_HLG,
- gfx::ColorSpace::TransferID::HLG});
- supported_transfer_funcs.push_back(
- {kCVImageBufferTransferFunction_sRGB, nullptr,
- gfx::ColorSpace::TransferID::SRGB});
- }
+ supported_transfer_funcs.push_back(
+ {kCVImageBufferTransferFunction_SMPTE_ST_428_1,
+ kCMFormatDescriptionTransferFunction_SMPTE_ST_428_1,
+ gfx::ColorSpace::TransferID::SMPTEST428_1});
+ supported_transfer_funcs.push_back(
+ {kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ,
+ kCMFormatDescriptionTransferFunction_SMPTE_ST_2084_PQ,
+ gfx::ColorSpace::TransferID::PQ});
+ supported_transfer_funcs.push_back(
+ {kCVImageBufferTransferFunction_ITU_R_2100_HLG,
+ kCMFormatDescriptionTransferFunction_ITU_R_2100_HLG,
+ gfx::ColorSpace::TransferID::HLG});
+ supported_transfer_funcs.push_back({kCVImageBufferTransferFunction_sRGB,
+ nullptr,
+ gfx::ColorSpace::TransferID::SRGB});
if (@available(macos 10.14, *)) {
supported_transfer_funcs.push_back(
{kCVImageBufferTransferFunction_Linear,
diff --git a/chromium/media/base/media_log.cc b/chromium/media/base/media_log.cc
index 3728433c5eb..46887da3dc2 100644
--- a/chromium/media/base/media_log.cc
+++ b/chromium/media/base/media_log.cc
@@ -49,6 +49,9 @@ std::string MediaLog::GetErrorMessageLocked() {
return "";
}
+// Default implementation.
+void MediaLog::Stop() {}
+
void MediaLog::AddMessage(MediaLogMessageLevel level, std::string message) {
std::unique_ptr<MediaLogRecord> record(
CreateRecord(MediaLogRecord::Type::kMessage));
diff --git a/chromium/media/base/media_log.h b/chromium/media/base/media_log.h
index e70a95b72f7..cce6a266327 100644
--- a/chromium/media/base/media_log.h
+++ b/chromium/media/base/media_log.h
@@ -131,6 +131,10 @@ class MEDIA_EXPORT MediaLog {
// even if this occurs, in the "won't crash" sense.
virtual std::unique_ptr<MediaLog> Clone();
+ // Can be used for stopping a MediaLog during a garbage-collected destruction
+ // sequence.
+ virtual void Stop();
+
protected:
// Ensures only subclasses and factories (e.g. Clone()) can create MediaLog.
MediaLog();
diff --git a/chromium/media/base/media_serializers.h b/chromium/media/base/media_serializers.h
index 401acac1114..a41080d969b 100644
--- a/chromium/media/base/media_serializers.h
+++ b/chromium/media/base/media_serializers.h
@@ -360,8 +360,9 @@ struct MediaSerializer<TextTrackConfig> {
base::Value result(base::Value::Type::DICTIONARY);
FIELD_SERIALIZE("kind", value.kind());
FIELD_SERIALIZE("language", value.language());
- if (value.label().length())
+ if (value.label().length()) {
FIELD_SERIALIZE("label", value.label());
+ }
return result;
}
};
diff --git a/chromium/media/base/media_serializers_base.h b/chromium/media/base/media_serializers_base.h
index 8e1359b9841..bbc6ce34f68 100644
--- a/chromium/media/base/media_serializers_base.h
+++ b/chromium/media/base/media_serializers_base.h
@@ -7,6 +7,7 @@
#include <vector>
+#include "base/strings/string_util.h"
#include "base/values.h"
#include "media/base/media_export.h"
@@ -22,6 +23,17 @@ struct MediaSerializer {
static inline base::Value Serialize(T value) { return base::Value(value); }
};
+// a special serializer for strings, because base::Value checks
+// IsStringUTF8AllowingNoncharacters.
+template <>
+struct MediaSerializer<std::string> {
+ static inline base::Value Serialize(const std::string& string) {
+ if (base::IsStringUTF8AllowingNoncharacters(string))
+ return base::Value(string);
+ return base::Value("");
+ }
+};
+
} // namespace internal
template <typename T>
diff --git a/chromium/media/base/media_switches.cc b/chromium/media/base/media_switches.cc
index ebd93cf54a2..0362b7b7068 100644
--- a/chromium/media/base/media_switches.cc
+++ b/chromium/media/base/media_switches.cc
@@ -88,6 +88,13 @@ const char kDisableAudioInput[] = "disable-audio-input";
// Present video content as overlays.
const char kUseOverlaysForVideo[] = "use-overlays-for-video";
+
+// Forces AudioManagerFuchsia to assume that the AudioCapturer implements echo
+// cancellation.
+// TODO(crbug.com/852834): Remove this once AudioManagerFuchsia is updated to
+// get this information from AudioCapturerFactory.
+const char kAudioCapturerWithEchoCancellation[] =
+ "audio-capturer-with-echo-cancellation";
#endif // BUILDFLAG(IS_FUCHSIA)
#if defined(USE_CRAS)
@@ -199,19 +206,12 @@ const char kOverrideHardwareSecureCodecsForTesting[] =
const char kEnableLiveCaptionPrefForTesting[] =
"enable-live-caption-pref-for-testing";
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC)
-// Enables playback of clear (unencrypted) HEVC content for testing purposes.
-const char kEnableClearHevcForTesting[] = "enable-clear-hevc-for-testing";
-#endif
-
#if BUILDFLAG(IS_CHROMEOS)
// These are flags passed from ash-chrome to lacros-chrome that correspond to
// buildflags for the platform we are running on. lacros-chrome only builds for
// x86/arm differences, so we unconditionally build in the below features into
// the relevant parts of lacros-chrome and then filter the functionality based
// on these command line flags.
-MEDIA_EXPORT extern const char kLacrosEnablePlatformEncryptedHevc[] =
- "lacros-enable-platform-encrypted-hevc";
MEDIA_EXPORT extern const char kLacrosEnablePlatformHevc[] =
"lacros-enable-platform-hevc";
MEDIA_EXPORT extern const char kLacrosUseChromeosProtectedMedia[] =
@@ -267,6 +267,12 @@ const base::Feature kPictureInPicture {
#endif
};
+#if BUILDFLAG(ENABLE_PLATFORM_HEVC)
+// Enables HEVC hardware accelerated decoding.
+const base::Feature kPlatformHEVCDecoderSupport{
+ "PlatformHEVCDecoderSupport", base::FEATURE_DISABLED_BY_DEFAULT};
+#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC)
+
// Only decode preload=metadata elements upon visibility.
// TODO(crbug.com/879406): Remove this after M76 ships to stable
const base::Feature kPreloadMetadataLazyLoad{"PreloadMetadataLazyLoad",
@@ -283,12 +289,6 @@ const base::Feature kResumeBackgroundVideo {
#endif
};
-// Experimental: Try to avoid destroying the media player when transferring a
-// media element to a new document. This is a work in progress, and may cause
-// security and/or stability issues.
-const base::Feature kReuseMediaPlayer{"ReuseMediaPlayer",
- base::FEATURE_DISABLED_BY_DEFAULT};
-
// When enabled, MediaCapabilities will check with GPU Video Accelerator
// Factories to determine isPowerEfficient = true/false.
const base::Feature kMediaCapabilitiesQueryGpuFactories{
@@ -312,6 +312,16 @@ const base::Feature kMediaCastOverlayButton{"MediaCastOverlayButton",
const base::Feature kUseAndroidOverlayForSecureOnly{
"UseAndroidOverlayForSecureOnly", base::FEATURE_DISABLED_BY_DEFAULT};
+// Allows usage of OS-level (platform) audio encoders.
+const base::Feature kPlatformAudioEncoder {
+ "PlatformAudioEncoder",
+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
+ base::FEATURE_ENABLED_BY_DEFAULT
+#else
+ base::FEATURE_DISABLED_BY_DEFAULT
+#endif
+};
+
// If enabled, RTCVideoDecoderAdapter will wrap a DecoderStream as a video
// decoder, rather than using MojoVideoDecoder. This causes the RTC external
// decoder to have all the decoder selection / fallback/forward logic of the
@@ -360,6 +370,23 @@ const base::FeatureParam<int> kChromeWideEchoCancellationProcessingFifoSize{
// audio capture device.
const base::FeatureParam<bool> kChromeWideEchoCancellationMinimizeResampling{
&kChromeWideEchoCancellation, "minimize_resampling", true};
+
+// Dynamically sets audio::SyncReader's timeout based off of a percentage of
+// buffer duration, in an attempt to minimize glitches.
+// The default negative value indicates that no experiment is running, and
+// we shouldn't use a mixing specific timeout value.
+// If the similar kDynamicAudioTimeout feature is enabled and this value is set,
+// this parameter will override kDynamicAudioTimeout values when we are mixing.
+const base::FeatureParam<double>
+ kChromeWideEchoCancellationDynamicMixingTimeout{
+ &kChromeWideEchoCancellation, "mixing_buffer_duration_percent", -1.0};
+
+// Allows all sample rates to be used for audio processing. If disabled, only
+// sample rates divisible by 100 are allowed; a request for a media stream with
+// enabled audio processing will fail otherwise. For context see
+// https://crbug.com/1332484.
+const base::FeatureParam<bool> kChromeWideEchoCancellationAllowAllSampleRates{
+ &kChromeWideEchoCancellation, "allow_all_sample_rates", true};
#endif
// Make MSE garbage collection algorithm more aggressive when we are under
@@ -379,6 +406,11 @@ const base::Feature kMultiPlaneVideoCaptureSharedImages {
#endif
};
+// Controls whether the Open Screen libcast SenderSession is used for
+// initializing and managing streaming sessions, or the legacy implementation.
+const base::Feature kOpenscreenCastStreamingSession{
+ "OpenscreenCastStreamingSession", base::FEATURE_DISABLED_BY_DEFAULT};
+
// Approach original pre-REC MSE object URL autorevoking behavior, though await
// actual attempt to use the object URL for attachment to perform revocation.
// This will hopefully reduce runtime memory bloat for pages that do not
@@ -467,10 +499,6 @@ const base::Feature kGlobalMediaControlsModernUI{
const base::Feature kSpecCompliantCanPlayThrough{
"SpecCompliantCanPlayThrough", base::FEATURE_ENABLED_BY_DEFAULT};
-// Controls usage of SurfaceLayer for MediaStreams.
-const base::Feature kSurfaceLayerForMediaStreams{
- "SurfaceLayerForMediaStreams", base::FEATURE_ENABLED_BY_DEFAULT};
-
// Disables the real audio output stream after silent audio has been delivered
// for too long. Should save quite a bit of power in the muted video case.
const base::Feature kSuspendMutedAudio{"SuspendMutedAudio",
@@ -497,6 +525,12 @@ const base::Feature kVaapiVideoDecodeLinux{"VaapiVideoDecoder",
const base::Feature kVaapiVideoEncodeLinux{"VaapiVideoEncoder",
base::FEATURE_DISABLED_BY_DEFAULT};
+
+// Ignore the non-intel driver blacklist for VaapiVideoDecoder implementations.
+// Intended for manual usage only in order to gague the status of newer driver
+// implementations.
+const base::Feature kVaapiIgnoreDriverChecks{"VaapiIgnoreDriverChecks",
+ base::FEATURE_DISABLED_BY_DEFAULT};
#endif // BUILDFLAG(IS_LINUX)
// Enable VA-API hardware decode acceleration for AV1.
@@ -543,14 +577,8 @@ const base::Feature kVaapiH264TemporalLayerHWEncoding{
const base::Feature kVaapiVp8TemporalLayerHWEncoding{
"VaapiVp8TemporalLayerEncoding", base::FEATURE_DISABLED_BY_DEFAULT};
// Enable VP9 k-SVC encoding with HW encoder for webrtc use case on ChromeOS.
-const base::Feature kVaapiVp9kSVCHWEncoding {
- "VaapiVp9kSVCHWEncoding",
-#if BUILDFLAG(IS_CHROMEOS_ASH)
- base::FEATURE_ENABLED_BY_DEFAULT
-#else
- base::FEATURE_DISABLED_BY_DEFAULT
-#endif // BUILDFLAG(IS_CHROMEOS_ASH)
-};
+const base::Feature kVaapiVp9kSVCHWEncoding{"VaapiVp9kSVCHWEncoding",
+ base::FEATURE_ENABLED_BY_DEFAULT};
#endif // defined(ARCH_CPU_X86_FAMILY) && BUILDFLAG(IS_CHROMEOS)
// Inform video blitter of video color space.
@@ -560,7 +588,9 @@ const base::Feature kVideoBlitColorAccuracy{"video-blit-color-accuracy",
// Enable VP9 k-SVC decoding with HW decoder for webrtc use case.
const base::Feature kVp9kSVCHWDecoding {
"Vp9kSVCHWDecoding",
-#if defined(ARCH_CPU_X86_FAMILY) && BUILDFLAG(IS_CHROMEOS_ASH)
+// TODO(crbug.com/1325698): Remove defined(ARCH_CPU_X86_FAMILY) once this is
+// enabled by default on ChromeOS ARM devices.
+#if defined(ARCH_CPU_X86_FAMILY) && BUILDFLAG(IS_CHROMEOS)
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
@@ -644,6 +674,11 @@ const base::Feature kHardwareSecureDecryptionFallback{
const base::Feature kWakeLockOptimisationHiddenMuted{
"kWakeLockOptimisationHiddenMuted", base::FEATURE_ENABLED_BY_DEFAULT};
+// If active, enable HiDPI mode that increases the display scale factor
+// while capturing a low-resolution tab.
+const base::Feature kWebContentsCaptureHiDpi{"WebContentsCaptureHiDPI",
+ base::FEATURE_DISABLED_BY_DEFAULT};
+
// Enables handling of hardware media keys for controlling media.
const base::Feature kHardwareMediaKeyHandling {
"HardwareMediaKeyHandling",
@@ -761,13 +796,17 @@ const base::Feature kUseRealColorSpaceForAndroidVideo{
const base::Feature kUseChromeOSDirectVideoDecoder{
"UseChromeOSDirectVideoDecoder", base::FEATURE_ENABLED_BY_DEFAULT};
+// Limit the number of concurrent hardware decoder instances on ChromeOS.
+const base::Feature kLimitConcurrentDecoderInstances{
+ "LimitConcurrentDecoderInstances", base::FEATURE_ENABLED_BY_DEFAULT};
+
#if defined(ARCH_CPU_ARM_FAMILY)
// Some architectures have separate image processor hardware that
// can be used by Chromium's ImageProcessor to color convert/crop/etc.
// video buffers. Sometimes it is more efficient/performant/correct
// to use libYUV instead of the hardware to do this processing.
const base::Feature kPreferLibYuvImageProcessor{
- "prefer-libyuv-image-processor", base::FEATURE_DISABLED_BY_DEFAULT};
+ "PreferLibYUVImageProcessor", base::FEATURE_DISABLED_BY_DEFAULT};
#endif // defined(ARCH_CPU_ARM_FAMILY)
#if BUILDFLAG(IS_CHROMEOS)
// ChromeOS has one of two VideoDecoder implementations active based on
@@ -781,12 +820,6 @@ const base::Feature kUseAlternateVideoDecoderImplementation{
#endif // BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
#if BUILDFLAG(IS_MAC)
-
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
-const base::Feature kVideoToolboxHEVCDecoding{
- "VideoToolboxHEVCDecoding", base::FEATURE_DISABLED_BY_DEFAULT};
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
-
// Enable binding multiple shared images to a single GpuMemoryBuffer for
// accelerated video decode using VideoToolbox.
const base::Feature kMultiPlaneVideoToolboxSharedImages{
@@ -820,6 +853,10 @@ const base::Feature MEDIA_EXPORT kMediaFoundationAV1Encoding{
const base::Feature MEDIA_EXPORT kMediaFoundationH264CbpEncoding{
"MediaFoundationH264CbpEncoding", base::FEATURE_DISABLED_BY_DEFAULT};
+// Enables VP9 encode acceleration for Windows.
+const base::Feature MEDIA_EXPORT kMediaFoundationVP9Encoding{
+ "MediaFoundationVP9Encoding", base::FEATURE_DISABLED_BY_DEFAULT};
+
// Enables MediaFoundation based video capture
const base::Feature kMediaFoundationVideoCapture{
"MediaFoundationVideoCapture", base::FEATURE_ENABLED_BY_DEFAULT};
@@ -848,14 +885,46 @@ const base::Feature kMediaFoundationClearPlayback{
const base::Feature MEDIA_EXPORT kWasapiRawAudioCapture{
"WASAPIRawAudioCapture", base::FEATURE_ENABLED_BY_DEFAULT};
-// Enables HEVC hardware accelerated decoding.
-const base::Feature kD3D11HEVCDecoding{"D3D11HEVCDecoding",
- base::FEATURE_DISABLED_BY_DEFAULT};
-
// Enable VP9 kSVC decoding with HW decoder for webrtc use case on Windows.
const base::Feature kD3D11Vp9kSVCHWDecoding{"D3D11Vp9kSVCHWDecoding",
base::FEATURE_DISABLED_BY_DEFAULT};
+// The Media Foundation Rendering Strategy determines which presentation mode
+// Media Foundation Renderer should use for presenting clear content. This
+// strategy has no impact for protected content, which must always use Direct
+// Composition.
+//
+// The strategy may be one of the following options:
+// 1.) Direct Composition: Media Foundation Renderer will use a Windowless
+// Swapchain to present directly to a Direct Composition surface.
+// 2.) Frame Server: Media Foundation Renderer will produce Video Frames that
+// may be passed through the Chromium video frame rendering pipeline.
+// 3.) Dynamic: Media Foundation Renderer may freely switch between Direct
+// Composition & Frame Server mode based on the current operating
+// conditions.
+//
+// Command line invocation:
+// --enable-features=MediaFoundationClearRendering:strategy/direct-composition
+// --enable-features=MediaFoundationClearRendering:strategy/frame-server
+// --enable-features=MediaFoundationClearRendering:strategy/dynamic
+const base::Feature kMediaFoundationClearRendering = {
+ "MediaFoundationClearRendering", base::FEATURE_ENABLED_BY_DEFAULT};
+
+constexpr base::FeatureParam<MediaFoundationClearRenderingStrategy>::Option
+ kMediaFoundationClearRenderingStrategyOptions[] = {
+ {MediaFoundationClearRenderingStrategy::kDirectComposition,
+ "direct-composition"},
+ {MediaFoundationClearRenderingStrategy::kFrameServer, "frame-server"},
+ {MediaFoundationClearRenderingStrategy::kDynamic, "dynamic"}};
+
+// TODO(crbug.com/1321817, wicarr): Media Foundation for Clear should operate in
+// dynamic mode by default. However, due to a bug with dual adapters when using
+// Frame Server mode, we currently start in Direct Composition mode.
+const base::FeatureParam<MediaFoundationClearRenderingStrategy>
+ kMediaFoundationClearRenderingStrategyParam{
+ &kMediaFoundationClearRendering, "strategy",
+ MediaFoundationClearRenderingStrategy::kDirectComposition,
+ &kMediaFoundationClearRenderingStrategyOptions};
#endif // BUILDFLAG(IS_WIN)
#if BUILDFLAG(IS_CHROMEOS)
diff --git a/chromium/media/base/media_switches.h b/chromium/media/base/media_switches.h
index c6715ac3cec..60f470b1189 100644
--- a/chromium/media/base/media_switches.h
+++ b/chromium/media/base/media_switches.h
@@ -55,6 +55,7 @@ MEDIA_EXPORT extern const char kEnableProtectedVideoBuffers[];
MEDIA_EXPORT extern const char kForceProtectedVideoOutputBuffers[];
MEDIA_EXPORT extern const char kDisableAudioInput[];
MEDIA_EXPORT extern const char kUseOverlaysForVideo[];
+MEDIA_EXPORT extern const char kAudioCapturerWithEchoCancellation[];
#endif
#if defined(USE_CRAS)
@@ -87,12 +88,7 @@ MEDIA_EXPORT extern const char kOverrideEnabledCdmInterfaceVersion[];
MEDIA_EXPORT extern const char kOverrideHardwareSecureCodecsForTesting[];
MEDIA_EXPORT extern const char kEnableLiveCaptionPrefForTesting[];
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC)
-MEDIA_EXPORT extern const char kEnableClearHevcForTesting[];
-#endif
-
#if BUILDFLAG(IS_CHROMEOS)
-MEDIA_EXPORT extern const char kLacrosEnablePlatformEncryptedHevc[];
MEDIA_EXPORT extern const char kLacrosEnablePlatformHevc[];
MEDIA_EXPORT extern const char kLacrosUseChromeosProtectedMedia[];
MEDIA_EXPORT extern const char kLacrosUseChromeosProtectedAv1[];
@@ -131,6 +127,10 @@ MEDIA_EXPORT extern const base::FeatureParam<int>
kChromeWideEchoCancellationProcessingFifoSize;
MEDIA_EXPORT extern const base::FeatureParam<bool>
kChromeWideEchoCancellationMinimizeResampling;
+MEDIA_EXPORT extern const base::FeatureParam<double>
+ kChromeWideEchoCancellationDynamicMixingTimeout;
+MEDIA_EXPORT extern const base::FeatureParam<bool>
+ kChromeWideEchoCancellationAllowAllSampleRates;
#endif
MEDIA_EXPORT extern const base::Feature kD3D11VideoDecoderUseSharedHandle;
MEDIA_EXPORT extern const base::Feature kEnableTabMuting;
@@ -170,8 +170,13 @@ MEDIA_EXPORT extern const base::Feature kMediaPowerExperiment;
MEDIA_EXPORT extern const base::Feature kMediaSessionWebRTC;
MEDIA_EXPORT extern const base::Feature kMemoryPressureBasedSourceBufferGC;
MEDIA_EXPORT extern const base::Feature kMultiPlaneVideoCaptureSharedImages;
+MEDIA_EXPORT extern const base::Feature kOpenscreenCastStreamingSession;
MEDIA_EXPORT extern const base::Feature kOverlayFullscreenVideo;
MEDIA_EXPORT extern const base::Feature kPictureInPicture;
+MEDIA_EXPORT extern const base::Feature kPlatformAudioEncoder;
+#if BUILDFLAG(ENABLE_PLATFORM_HEVC)
+MEDIA_EXPORT extern const base::Feature kPlatformHEVCDecoderSupport;
+#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC)
MEDIA_EXPORT extern const base::Feature kPlaybackSpeedButton;
MEDIA_EXPORT extern const base::Feature kPreloadMediaEngagementData;
MEDIA_EXPORT extern const base::Feature kPreloadMetadataLazyLoad;
@@ -179,7 +184,6 @@ MEDIA_EXPORT extern const base::Feature kPreloadMetadataSuspend;
MEDIA_EXPORT extern const base::Feature kRecordMediaEngagementScores;
MEDIA_EXPORT extern const base::Feature kRecordWebAudioEngagement;
MEDIA_EXPORT extern const base::Feature kResumeBackgroundVideo;
-MEDIA_EXPORT extern const base::Feature kReuseMediaPlayer;
MEDIA_EXPORT extern const base::Feature kRevokeMediaSourceObjectURLOnAttach;
MEDIA_EXPORT extern const base::Feature
kShareThisTabInsteadButtonGetDisplayMedia;
@@ -187,7 +191,6 @@ MEDIA_EXPORT extern const base::Feature
kShareThisTabInsteadButtonGetDisplayMediaAudio;
MEDIA_EXPORT extern const base::Feature kSpeakerChangeDetection;
MEDIA_EXPORT extern const base::Feature kSpecCompliantCanPlayThrough;
-MEDIA_EXPORT extern const base::Feature kSurfaceLayerForMediaStreams;
MEDIA_EXPORT extern const base::Feature kSuspendMutedAudio;
MEDIA_EXPORT extern const base::Feature kUnifiedAutoplay;
MEDIA_EXPORT extern const base::Feature kUseAndroidOverlayForSecureOnly;
@@ -198,6 +201,7 @@ MEDIA_EXPORT extern const base::Feature kUseR16Texture;
#if BUILDFLAG(IS_LINUX)
MEDIA_EXPORT extern const base::Feature kVaapiVideoDecodeLinux;
MEDIA_EXPORT extern const base::Feature kVaapiVideoEncodeLinux;
+MEDIA_EXPORT extern const base::Feature kVaapiIgnoreDriverChecks;
#endif // BUILDFLAG(IS_LINUX)
MEDIA_EXPORT extern const base::Feature kVaapiAV1Decoder;
MEDIA_EXPORT extern const base::Feature kVaapiLowPowerEncoderGen9x;
@@ -214,6 +218,7 @@ MEDIA_EXPORT extern const base::Feature kVaapiVp9kSVCHWEncoding;
MEDIA_EXPORT extern const base::Feature kVideoBlitColorAccuracy;
MEDIA_EXPORT extern const base::Feature kVp9kSVCHWDecoding;
MEDIA_EXPORT extern const base::Feature kWakeLockOptimisationHiddenMuted;
+MEDIA_EXPORT extern const base::Feature kWebContentsCaptureHiDpi;
MEDIA_EXPORT extern const base::Feature kWebrtcMediaCapabilitiesParameters;
MEDIA_EXPORT extern const base::Feature kResolutionBasedDecoderPriority;
MEDIA_EXPORT extern const base::Feature kForceHardwareVideoDecoders;
@@ -236,6 +241,7 @@ MEDIA_EXPORT extern const base::Feature kUseRealColorSpaceForAndroidVideo;
#if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
MEDIA_EXPORT extern const base::Feature kUseChromeOSDirectVideoDecoder;
+MEDIA_EXPORT extern const base::Feature kLimitConcurrentDecoderInstances;
#if defined(ARCH_CPU_ARM_FAMILY)
MEDIA_EXPORT extern const base::Feature kPreferLibYuvImageProcessor;
#endif // defined(ARCH_CPU_ARM_FAMILY)
@@ -245,9 +251,6 @@ MEDIA_EXPORT extern const base::Feature kUseAlternateVideoDecoderImplementation;
#endif // BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
#if BUILDFLAG(IS_MAC)
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
-MEDIA_EXPORT extern const base::Feature kVideoToolboxHEVCDecoding;
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
MEDIA_EXPORT extern const base::Feature kMultiPlaneVideoToolboxSharedImages;
#endif // BUILDFLAG(IS_MAC)
@@ -261,6 +264,8 @@ MEDIA_EXPORT extern const base::Feature kMediaFoundationAV1Encoding;
// please use IsMediaFoundationH264CbpEncodingEnabled() instead.
MEDIA_EXPORT extern const base::Feature kMediaFoundationH264CbpEncoding;
+MEDIA_EXPORT extern const base::Feature kMediaFoundationVP9Encoding;
+
MEDIA_EXPORT extern const base::Feature kMediaFoundationVideoCapture;
MEDIA_EXPORT extern const base::Feature kMediaFoundationVP8Decoding;
@@ -269,9 +274,30 @@ MEDIA_EXPORT extern const base::Feature kMediaFoundationVP8Decoding;
MEDIA_EXPORT extern const base::Feature kMediaFoundationD3D11VideoCapture;
MEDIA_EXPORT extern const base::Feature kMediaFoundationClearPlayback;
+MEDIA_EXPORT extern const base::Feature kAllowMediaFoundationFrameServerMode;
MEDIA_EXPORT extern const base::Feature kWasapiRawAudioCapture;
-MEDIA_EXPORT extern const base::Feature kD3D11HEVCDecoding;
MEDIA_EXPORT extern const base::Feature kD3D11Vp9kSVCHWDecoding;
+
+// Strategy affecting how Media Foundation Renderer determines its rendering
+// mode when used with clear video media. This strategy does not impact
+// protected media which must always use Direct Composition mode.
+enum class MediaFoundationClearRenderingStrategy {
+ // The renderer will operate in Direct Composition mode (e.g. windowless
+ // swapchain).
+ kDirectComposition,
+ // The renderer will operate in Frame Server mode.
+ kFrameServer,
+ // The renderer is allowed to switch between Direct Composition & Frame Server
+ // mode at its discretion.
+ kDynamic,
+};
+
+// Under this feature, a given MediaFoundationClearRenderingStrategy param is
+// used by the Media Foundation Renderer for Clear content scenarios.
+MEDIA_EXPORT extern const base::Feature kMediaFoundationClearRendering;
+MEDIA_EXPORT extern const base::FeatureParam<
+ MediaFoundationClearRenderingStrategy>
+ kMediaFoundationClearRenderingStrategyParam;
#endif // BUILDFLAG(IS_WIN)
#if BUILDFLAG(IS_CHROMEOS)
diff --git a/chromium/media/base/media_util.cc b/chromium/media/base/media_util.cc
index 66f23beea2a..0993d5cebda 100644
--- a/chromium/media/base/media_util.cc
+++ b/chromium/media/base/media_util.cc
@@ -51,4 +51,20 @@ void ReportPepperVideoDecoderOutputPictureCountSW(int height) {
GetMediaVideoHeight(height));
}
+AudioParameters::Format ConvertAudioCodecToBitstreamFormat(AudioCodec codec) {
+ switch (codec) {
+ case AudioCodec::kAC3:
+ return AudioParameters::Format::AUDIO_BITSTREAM_AC3;
+ case AudioCodec::kEAC3:
+ return AudioParameters::Format::AUDIO_BITSTREAM_EAC3;
+ case AudioCodec::kDTS:
+ return AudioParameters::Format::AUDIO_BITSTREAM_DTS;
+ // No support for DTS_HD yet as this section is related to the incoming
+ // stream type. DTS_HD support is only added for audio track output to
+ // support audiosink reporting DTS_HD support.
+ default:
+ return AudioParameters::Format::AUDIO_FAKE;
+ }
+}
+
} // namespace media
diff --git a/chromium/media/base/media_util.h b/chromium/media/base/media_util.h
index faba023318f..6c450a34617 100644
--- a/chromium/media/base/media_util.h
+++ b/chromium/media/base/media_util.h
@@ -8,6 +8,8 @@
#include <stdint.h>
#include <vector>
+#include "media/base/audio_codecs.h"
+#include "media/base/audio_parameters.h"
#include "media/base/media_export.h"
#include "media/base/media_log.h"
@@ -35,6 +37,10 @@ class MEDIA_EXPORT NullMediaLog : public media::MediaLog {
std::unique_ptr<media::MediaLogRecord> event) override {}
};
+// Converts Audio Codec Type to Bitstream Format.
+MEDIA_EXPORT AudioParameters::Format ConvertAudioCodecToBitstreamFormat(
+ media::AudioCodec codec);
+
} // namespace media
#endif // MEDIA_BASE_MEDIA_UTIL_H_
diff --git a/chromium/media/base/mime_util_internal.cc b/chromium/media/base/mime_util_internal.cc
index 5aaa7a175ac..0ba711497d4 100644
--- a/chromium/media/base/mime_util_internal.cc
+++ b/chromium/media/base/mime_util_internal.cc
@@ -130,7 +130,8 @@ static bool IsValidH264Level(uint8_t level_idc) {
(level_idc >= 20 && level_idc <= 22) ||
(level_idc >= 30 && level_idc <= 32) ||
(level_idc >= 40 && level_idc <= 42) ||
- (level_idc >= 50 && level_idc <= 52));
+ (level_idc >= 50 && level_idc <= 52) ||
+ (level_idc >= 60 && level_idc <= 62));
}
// Make a default ParsedCodecResult. Values should indicate "unspecified"
diff --git a/chromium/media/base/mock_filters.h b/chromium/media/base/mock_filters.h
index 31f3dbe4948..4d6126d9bef 100644
--- a/chromium/media/base/mock_filters.h
+++ b/chromium/media/base/mock_filters.h
@@ -57,6 +57,7 @@ class MockPipelineClient : public Pipeline::Client {
~MockPipelineClient();
MOCK_METHOD1(OnError, void(PipelineStatus));
+ MOCK_METHOD1(OnFallback, void(PipelineStatus));
MOCK_METHOD0(OnEnded, void());
MOCK_METHOD1(OnMetadata, void(const PipelineMetadata&));
MOCK_METHOD2(OnBufferingStateChange,
@@ -108,6 +109,7 @@ class MockPipeline : public Pipeline {
void(const std::vector<MediaTrack::Id>&, base::OnceClosure));
MOCK_METHOD2(OnSelectedVideoTrackChanged,
void(absl::optional<MediaTrack::Id>, base::OnceClosure));
+ MOCK_METHOD0(OnExternalVideoFrameRequest, void());
// TODO(sandersd): This should automatically return true between Start() and
// Stop(). (Or better, remove it from the interface entirely.)
@@ -403,6 +405,7 @@ class MockRendererClient : public RendererClient {
// RendererClient implementation.
MOCK_METHOD1(OnError, void(PipelineStatus));
+ MOCK_METHOD1(OnFallback, void(PipelineStatus));
MOCK_METHOD0(OnEnded, void());
MOCK_METHOD1(OnStatisticsUpdate, void(const PipelineStatistics&));
MOCK_METHOD2(OnBufferingStateChange,
diff --git a/chromium/media/base/moving_average.cc b/chromium/media/base/moving_average.cc
index 0779ab6ee3a..c3d9d6bbd98 100644
--- a/chromium/media/base/moving_average.cc
+++ b/chromium/media/base/moving_average.cc
@@ -5,6 +5,9 @@
#include "media/base/moving_average.h"
#include <algorithm>
+#include <cmath>
+
+#include "base/check_op.h"
namespace media {
diff --git a/chromium/media/base/offloading_video_encoder.cc b/chromium/media/base/offloading_video_encoder.cc
index 737927f4912..a2517981ec9 100644
--- a/chromium/media/base/offloading_video_encoder.cc
+++ b/chromium/media/base/offloading_video_encoder.cc
@@ -9,6 +9,7 @@
#include "base/task/task_traits.h"
#include "base/task/thread_pool.h"
#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/trace_event/trace_event.h"
#include "media/base/video_frame.h"
namespace media {
@@ -51,6 +52,7 @@ void OffloadingVideoEncoder::Encode(scoped_refptr<VideoFrame> frame,
bool key_frame,
EncoderStatusCB done_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ TRACE_EVENT0("media", "OffloadingVideoEncoder::Encode");
work_runner_->PostTask(
FROM_HERE,
base::BindOnce(&VideoEncoder::Encode,
diff --git a/chromium/media/base/pipeline.h b/chromium/media/base/pipeline.h
index 556d62c0e31..0dd708ea7a5 100644
--- a/chromium/media/base/pipeline.h
+++ b/chromium/media/base/pipeline.h
@@ -39,6 +39,11 @@ class MEDIA_EXPORT Pipeline {
// NOTE: The client is responsible for calling Pipeline::Stop().
virtual void OnError(PipelineStatus status) = 0;
+ // Executed whenever some fallback-enabled portion of the pipeline (Just
+ // Decoders and Renderers for now) fails in such a way that a fallback
+ // is still possible without a fatal pipeline error.
+ virtual void OnFallback(PipelineStatus status) = 0;
+
// Executed whenever the media reaches the end.
virtual void OnEnded() = 0;
@@ -151,6 +156,10 @@ class MEDIA_EXPORT Pipeline {
absl::optional<MediaTrack::Id> selected_track_id,
base::OnceClosure change_completed_cb) = 0;
+ // Signal to the pipeline that there has been a client request to access
+ // video frame data.
+ virtual void OnExternalVideoFrameRequest() = 0;
+
// Stops the pipeline. This is a blocking function.
// If the pipeline is started, it must be stopped before destroying it.
// It it permissible to call Stop() at any point during the lifetime of the
diff --git a/chromium/media/base/pipeline_impl.cc b/chromium/media/base/pipeline_impl.cc
index 93f4e72a854..c37a9568876 100644
--- a/chromium/media/base/pipeline_impl.cc
+++ b/chromium/media/base/pipeline_impl.cc
@@ -100,6 +100,8 @@ class PipelineImpl::RendererWrapper final : public DemuxerHost,
absl::optional<MediaTrack::Id> selected_track_id,
base::OnceClosure change_completed_cb);
+ void OnExternalVideoFrameRequest();
+
private:
// Contains state shared between main and media thread. On the media thread
// each member can be read without locking, but writing requires locking. On
@@ -150,6 +152,7 @@ class PipelineImpl::RendererWrapper final : public DemuxerHost,
// RendererClient implementation.
void OnError(PipelineStatus error) final;
+ void OnFallback(PipelineStatus status) final;
void OnEnded() final;
void OnStatisticsUpdate(const PipelineStatistics& stats) final;
void OnBufferingStateChange(BufferingState state,
@@ -653,6 +656,13 @@ void PipelineImpl::RendererWrapper::OnError(PipelineStatus error) {
media_task_runner_->PostTask(FROM_HERE, base::BindOnce(error_cb_, error));
}
+void PipelineImpl::RendererWrapper::OnFallback(PipelineStatus fallback) {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+ main_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&PipelineImpl::OnFallback, weak_pipeline_,
+ std::move(fallback).AddHere()));
+}
+
void PipelineImpl::RendererWrapper::OnEnded() {
DCHECK(media_task_runner_->BelongsToCurrentThread());
media_log_->AddEvent<MediaLogEvent::kEnded>();
@@ -756,6 +766,27 @@ void PipelineImpl::RendererWrapper::OnSelectedVideoTrackChanged(
std::move(change_completed_cb)));
}
+void PipelineImpl::OnExternalVideoFrameRequest() {
+ // This function is currently a no-op unless we're on a Windows build with
+ // Media Foundation for Clear running.
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!external_video_frame_request_signaled_) {
+ external_video_frame_request_signaled_ = true;
+ media_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&RendererWrapper::OnExternalVideoFrameRequest,
+ base::Unretained(renderer_wrapper_.get())));
+ }
+}
+
+void PipelineImpl::RendererWrapper::OnExternalVideoFrameRequest() {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+ if (!shared_state_.renderer) {
+ return;
+ }
+
+ shared_state_.renderer->OnExternalVideoFrameRequest();
+}
+
void PipelineImpl::RendererWrapper::OnDemuxerCompletedTrackChange(
base::OnceClosure change_completed_cb,
DemuxerStream::Type stream_type,
@@ -1242,6 +1273,7 @@ void PipelineImpl::Start(StartType start_type,
seek_cb_ = std::move(seek_cb);
last_media_time_ = base::TimeDelta();
seek_time_ = kNoTimestamp;
+ external_video_frame_request_signaled_ = false;
// By default, create a default renderer to avoid additional start-to-play
// latency caused by asynchronous Renderer creation. When |start_type| is
@@ -1335,6 +1367,7 @@ void PipelineImpl::Resume(base::TimeDelta time,
seek_cb_ = std::move(seek_cb);
seek_time_ = time;
last_media_time_ = base::TimeDelta();
+ external_video_frame_request_signaled_ = false;
// Always create a default renderer for Resume().
auto default_renderer = create_renderer_cb_.Run(absl::nullopt);
@@ -1560,6 +1593,10 @@ void PipelineImpl::OnError(PipelineStatus error) {
client_->OnError(error);
}
+void PipelineImpl::OnFallback(PipelineStatus status) {
+ client_->OnFallback(std::move(status).AddHere());
+}
+
void PipelineImpl::OnEnded() {
DVLOG(2) << __func__;
DCHECK(thread_checker_.CalledOnValidThread());
diff --git a/chromium/media/base/pipeline_impl.h b/chromium/media/base/pipeline_impl.h
index 6262fa9bbcc..37d884deb69 100644
--- a/chromium/media/base/pipeline_impl.h
+++ b/chromium/media/base/pipeline_impl.h
@@ -130,6 +130,8 @@ class MEDIA_EXPORT PipelineImpl : public Pipeline {
absl::optional<MediaTrack::Id> selected_track_id,
base::OnceClosure change_completed_cb) override;
+ void OnExternalVideoFrameRequest() override;
+
private:
friend class MediaLog;
class RendererWrapper;
@@ -157,6 +159,7 @@ class MEDIA_EXPORT PipelineImpl : public Pipeline {
// Notifications from RendererWrapper.
void OnError(PipelineStatus error);
+ void OnFallback(PipelineStatus fallback);
void OnEnded();
void OnMetadata(const PipelineMetadata& metadata);
void OnBufferingStateChange(BufferingState state,
@@ -219,6 +222,13 @@ class MEDIA_EXPORT PipelineImpl : public Pipeline {
// Cached suspension state for the RendererWrapper.
bool is_suspended_;
+ // 'external_video_frame_request_signaled_' tracks whether we've called
+ // OnExternalVideoFrameRequest on the current renderer. Calls which may
+ // create a new renderer in the RendererWrapper (Start, Resume, SetCdm) will
+ // reset this member. There is no guarantee to the client that
+ // OnExternalVideoFrameRequest will be called only once.
+ bool external_video_frame_request_signaled_ = false;
+
base::ThreadChecker thread_checker_;
base::WeakPtrFactory<PipelineImpl> weak_factory_{this};
};
diff --git a/chromium/media/base/renderer.cc b/chromium/media/base/renderer.cc
index c83cf95eef2..e2cb8d39e22 100644
--- a/chromium/media/base/renderer.cc
+++ b/chromium/media/base/renderer.cc
@@ -39,4 +39,8 @@ void Renderer::SetWasPlayedWithUserActivation(
// Not supported by most renderers.
}
+void Renderer::OnExternalVideoFrameRequest() {
+ // Default implementation of OnExternalVideoFrameRequest is to no-op.
+}
+
} // namespace media
diff --git a/chromium/media/base/renderer.h b/chromium/media/base/renderer.h
index 8b26b864f3e..b7738604fa2 100644
--- a/chromium/media/base/renderer.h
+++ b/chromium/media/base/renderer.h
@@ -92,6 +92,17 @@ class MEDIA_EXPORT Renderer {
virtual void OnEnabledAudioTracksChanged(
const std::vector<DemuxerStream*>& enabled_tracks,
base::OnceClosure change_completed_cb);
+
+ // Signal to the renderer that there has been a client request to access a
+ // VideoFrame. This signal may be used by the renderer to ensure it is
+ // operating in a mode which produces a VideoFrame usable by the client.
+ // E.g., the MediaFoundationRendererClient on Windows has two modes
+ // of operation: Frame Server & Direct Composition. Direct Composition mode
+ // does not produce a VideoFrame with an accessible 'data' buffer, so clients
+ // cannot access the underlying image data. In order for
+ // MediaFoundationRendererClient to produce a VideoFrame with 'data'
+ // accessible by the client it must switch to operate in Frame Server mode.
+ virtual void OnExternalVideoFrameRequest();
};
} // namespace media
diff --git a/chromium/media/base/renderer_client.h b/chromium/media/base/renderer_client.h
index 374f026c299..e8e3de956a8 100644
--- a/chromium/media/base/renderer_client.h
+++ b/chromium/media/base/renderer_client.h
@@ -23,6 +23,9 @@ class MEDIA_EXPORT RendererClient {
// Executed if any error was encountered after Renderer initialization.
virtual void OnError(PipelineStatus status) = 0;
+ // Executed if there is a non-fatal fallback that should be reported.
+ virtual void OnFallback(PipelineStatus status) = 0;
+
// Executed when rendering has reached the end of stream.
virtual void OnEnded() = 0;
diff --git a/chromium/media/base/scoped_async_trace.cc b/chromium/media/base/scoped_async_trace.cc
deleted file mode 100644
index c76e83f737d..00000000000
--- a/chromium/media/base/scoped_async_trace.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/scoped_async_trace.h"
-
-#include "base/memory/ptr_util.h"
-#include "base/trace_event/trace_event.h"
-
-namespace media {
-
-namespace {
-constexpr const char kCategory[] = "media";
-} // namespace
-
-// static
-std::unique_ptr<ScopedAsyncTrace> ScopedAsyncTrace::CreateIfEnabled(
- const char* name) {
- bool enabled = false;
- TRACE_EVENT_CATEGORY_GROUP_ENABLED(kCategory, &enabled);
- return enabled ? base::WrapUnique(new ScopedAsyncTrace(name)) : nullptr;
-}
-
-ScopedAsyncTrace::ScopedAsyncTrace(const char* name) : name_(name) {
- TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kCategory, name_, TRACE_ID_LOCAL(this));
-}
-
-ScopedAsyncTrace::~ScopedAsyncTrace() {
- TRACE_EVENT_NESTABLE_ASYNC_END0(kCategory, name_, TRACE_ID_LOCAL(this));
-}
-
-} // namespace media
diff --git a/chromium/media/base/scoped_async_trace.h b/chromium/media/base/scoped_async_trace.h
index 05f2b6c62a4..7a244f397a8 100644
--- a/chromium/media/base/scoped_async_trace.h
+++ b/chromium/media/base/scoped_async_trace.h
@@ -7,36 +7,70 @@
#include <memory>
+#include "base/memory/ptr_util.h"
+#include "base/trace_event/trace_event.h"
#include "media/base/media_export.h"
namespace media {
+enum class TraceCategory : uint32_t {
+ kMedia,
+ kMediaStream,
+};
+namespace {
+template <TraceCategory category>
+struct Category {};
+
+template <>
+struct Category<TraceCategory::kMedia> {
+ static constexpr const char* Name() { return "media"; }
+};
+
+template <>
+struct Category<TraceCategory::kMediaStream> {
+ static constexpr const char* Name() {
+ return TRACE_DISABLED_BY_DEFAULT("mediastream");
+ }
+};
+
+} // namespace
// Utility class that starts and stops an async trace event. The intention is
// that it it will be created somewhere to start the trace event, passed around
// such as via unique_ptr argument in a callback, and eventually freed to end
// the trace event. This guarantees that it'll be closed, even if the callback
// is destroyed without being run.
-class MEDIA_EXPORT ScopedAsyncTrace {
+template <TraceCategory category>
+class MEDIA_EXPORT TypedScopedAsyncTrace {
public:
- // Create a ScopedAsyncTrace if tracing for "media" is enabled, else return
- // nullptr. |name| provided to the trace as the name(!).
- // IMPORTANT: These strings must outlive |this|, since tracing needs it. In
- // other words, use literal strings only. See trace_event_common.h .
- static std::unique_ptr<ScopedAsyncTrace> CreateIfEnabled(const char* name);
-
- ScopedAsyncTrace(const ScopedAsyncTrace&) = delete;
- ScopedAsyncTrace& operator=(const ScopedAsyncTrace&) = delete;
-
- ~ScopedAsyncTrace();
+ // Create a TypedScopedAsyncTrace if tracing for "media" is enabled, else
+ // return nullptr. |name| provided to the trace as the name(!). IMPORTANT:
+ // These strings must outlive |this|, since tracing needs it. In other words,
+ // use literal strings only. See trace_event_common.h.
+ static std::unique_ptr<TypedScopedAsyncTrace<category>> CreateIfEnabled(
+ const char* name) {
+ bool enabled = false;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(Category<category>::Name(), &enabled);
+ return enabled ? base::WrapUnique(new TypedScopedAsyncTrace(name))
+ : nullptr;
+ }
+ ~TypedScopedAsyncTrace() {
+ TRACE_EVENT_NESTABLE_ASYNC_END0(Category<category>::Name(), name_,
+ TRACE_ID_LOCAL(this));
+ }
// TODO(liberato): Add StepInto / StepPast.
private:
- explicit ScopedAsyncTrace(const char* name);
+ explicit TypedScopedAsyncTrace(const char* name) : name_(name) {
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(Category<category>::Name(), name_,
+ TRACE_ID_LOCAL(this));
+ }
const char* name_ = nullptr;
};
+using ScopedAsyncTrace = TypedScopedAsyncTrace<TraceCategory::kMedia>;
+
} // namespace media
#endif // MEDIA_BASE_SCOPED_ASYNC_TRACE_H_
diff --git a/chromium/media/base/sinc_resampler.h b/chromium/media/base/sinc_resampler.h
index e3798c43841..6d069fb4d0a 100644
--- a/chromium/media/base/sinc_resampler.h
+++ b/chromium/media/base/sinc_resampler.h
@@ -19,22 +19,21 @@ namespace media {
// SincResampler is a high-quality single-channel sample-rate converter.
class MEDIA_EXPORT SincResampler {
public:
- enum {
- // The kernel size can be adjusted for quality (higher is better) at the
- // expense of performance. Must be a multiple of 32.
- // TODO(dalecurtis): Test performance to see if we can jack this up to 64+.
- kKernelSize = 32,
-
- // Default request size. Affects how often and for how much SincResampler
- // calls back for input. Must be greater than kKernelSize.
- kDefaultRequestSize = 512,
-
- // The kernel offset count is used for interpolation and is the number of
- // sub-sample kernel shifts. Can be adjusted for quality (higher is better)
- // at the expense of allocating more memory.
- kKernelOffsetCount = 32,
- kKernelStorageSize = kKernelSize * (kKernelOffsetCount + 1),
- };
+ // The kernel size can be adjusted for quality (higher is better) at the
+ // expense of performance. Must be a multiple of 32.
+ // TODO(dalecurtis): Test performance to see if we can jack this up to 64+.
+ static constexpr int kKernelSize = 32;
+
+ // Default request size. Affects how often and for how much SincResampler
+ // calls back for input. Must be greater than kKernelSize.
+ static constexpr int kDefaultRequestSize = 512;
+
+ // The kernel offset count is used for interpolation and is the number of
+ // sub-sample kernel shifts. Can be adjusted for quality (higher is better)
+ // at the expense of allocating more memory.
+ static constexpr int kKernelOffsetCount = 32;
+ static constexpr int kKernelStorageSize =
+ kKernelSize * (kKernelOffsetCount + 1);
// Callback type for providing more data into the resampler. Expects |frames|
// of data to be rendered into |destination|; zero padded if not enough frames
diff --git a/chromium/media/base/sinc_resampler_unittest.cc b/chromium/media/base/sinc_resampler_unittest.cc
index da56eca9afb..2faaade27a7 100644
--- a/chromium/media/base/sinc_resampler_unittest.cc
+++ b/chromium/media/base/sinc_resampler_unittest.cc
@@ -228,9 +228,7 @@ class SinusoidalLinearChirpSource {
}
private:
- enum {
- kMinFrequency = 5
- };
+ static constexpr int kMinFrequency = 5;
double sample_rate_;
int total_samples_;
diff --git a/chromium/media/base/status.h b/chromium/media/base/status.h
index abcffbc394b..345a31a7ce7 100644
--- a/chromium/media/base/status.h
+++ b/chromium/media/base/status.h
@@ -287,6 +287,28 @@ class MEDIA_EXPORT TypedStatus {
Traits::OnCreateFrom(this, data);
}
+ // Used to allow returning {TypedStatus::Codes::kValue, cause}
+ template <typename CausalStatusType>
+ TypedStatus(Codes code,
+ TypedStatus<CausalStatusType>&& cause,
+ const base::Location& location = base::Location::Current())
+ : TypedStatus(code, "", location) {
+ static_assert(!std::is_same_v<CausalStatusType, Traits>);
+ DCHECK(data_);
+ AddCause(std::move(cause));
+ }
+
+ // Used to allow returning {TypedStatus::Codes::kValue, "message", cause}
+ template <typename CausalStatusType>
+ TypedStatus(Codes code,
+ base::StringPiece message,
+ TypedStatus<CausalStatusType>&& cause,
+ const base::Location& location = base::Location::Current())
+ : TypedStatus(code, message, location) {
+ DCHECK(data_);
+ AddCause(std::move(cause));
+ }
+
// Constructor to create a new TypedStatus from a numeric code & message.
// These are immutable; if you'd like to change them, then you likely should
// create a new TypedStatus.
@@ -514,6 +536,18 @@ class MEDIA_EXPORT TypedStatus {
return value;
}
+ // Return constref of the value, if we have one.
+ // Callers should ensure that this |has_value()|.
+ const OtherType& operator->() const {
+ CHECK(value_);
+ return std::get<0>(*value_);
+ }
+
+ const OtherType& operator*() const {
+ CHECK(value_);
+ return std::get<0>(*value_);
+ }
+
typename T::Codes code() const {
DCHECK(error_ || value_);
// It is invalid to call |code()| on an |Or| with a value that
diff --git a/chromium/media/base/status.md b/chromium/media/base/status.md
index 288e9d5791d..209de759dbd 100644
--- a/chromium/media/base/status.md
+++ b/chromium/media/base/status.md
@@ -125,6 +125,39 @@ int main() {
}
```
+## Constructing a TypedStatus<T>
+There are several ways to create a typed status, depending on what data you'd
+like to encapsulate:
+
+```
+// To create a status with the default OK type, there's a helper function that
+// creates any type you want, so long as it actually has a kOk value or
+// |DefaultEnumValue| implementation.
+TypedStatus<MyType> ok = OkStatus();
+
+// A status can be implicitly created from a code
+TypedStatus<MyType> status = MyType::Codes::kMyCode;
+
+// A status can be explicitly created from a code and message, or implicitly
+// created from a brace initializer list of code and message
+TypedStatus<MyType> status(MyType::Codes::kMyCode, "MyMessage");
+TypedStatus<MyType> status = {MyType::Codes::kMyCode, "MyMessage"};
+
+// If |MyType::OnCreateFrom<T>| is implemented, then a status can be created
+// from a {code, T} pack, or a {code, message, T} pack:
+TypedStatus<MyType> status = {MyType::Codes::kMyCode, 667};
+TypedStatus<MyType> status = {MyType::Codes::kMyCode, "MyMessage", 667};
+
+// A status can be created from packs of either {code, TypedStatus<Any>} or
+// {code, message, TypedStatus<Any>} where TypedStatus<Any> will become the
+// status that causes the return. Note that in this example,
+// OtherType::Codes::kOther is itself being implicitly converted from a code
+// to a TypedStatus<OtherType>.
+TypedStatus<MyType> status = {MyType::Codes::kCode, OtherType::Codes::kOther};
+TypedStatus<MyType> status = {MyType::Codes::kCode, "M", OtherType::Codes::kOther};
+```
+
+
## TypedStatus<T>::Or<D>
diff --git a/chromium/media/base/status_unittest.cc b/chromium/media/base/status_unittest.cc
index f49bbb66c17..708f378e63f 100644
--- a/chromium/media/base/status_unittest.cc
+++ b/chromium/media/base/status_unittest.cc
@@ -214,6 +214,34 @@ TEST_F(StatusTest, DifferentModesOfConstruction) {
ASSERT_EQ(unpacked->FindIntPath("DataA"), 7);
ASSERT_EQ(unpacked->FindIntPath("DataB"), 3);
ASSERT_EQ(*unpacked->FindStringPath("DataC"), "apple pie");
+
+ NormalStatus root = NormalStatus::Codes::kFoo;
+ PackingStatus derrived = {PackingStatus::Codes::kFail, std::move(root)};
+ serialized = MediaSerialize(derrived);
+ unpacked = serialized.FindDictPath("cause");
+ ASSERT_NE(unpacked, nullptr);
+ ASSERT_EQ(unpacked->DictSize(), 5ul);
+ ASSERT_EQ(unpacked->FindIntPath("code").value_or(0),
+ static_cast<int>(NormalStatus::Codes::kFoo));
+
+ root = NormalStatus::Codes::kFoo;
+ derrived = {PackingStatus::Codes::kFail, "blah", std::move(root)};
+ serialized = MediaSerialize(derrived);
+ unpacked = serialized.FindDictPath("cause");
+ ASSERT_EQ(*serialized.FindStringPath("message"), "blah");
+ ASSERT_NE(unpacked, nullptr);
+ ASSERT_EQ(unpacked->DictSize(), 5ul);
+ ASSERT_EQ(unpacked->FindIntPath("code").value_or(0),
+ static_cast<int>(NormalStatus::Codes::kFoo));
+}
+
+TEST_F(StatusTest, DerefOpOnOrType) {
+ struct SimpleThing {
+ int CallMe() { return 77712; }
+ };
+ NormalStatus::Or<std::unique_ptr<SimpleThing>> sor =
+ std::make_unique<SimpleThing>();
+ ASSERT_EQ(sor->CallMe(), 77712);
}
TEST_F(StatusTest, StaticOKMethodGivesCorrectSerialization) {
diff --git a/chromium/media/base/stream_parser.cc b/chromium/media/base/stream_parser.cc
index 904e16fe32d..e7cb46769ed 100644
--- a/chromium/media/base/stream_parser.cc
+++ b/chromium/media/base/stream_parser.cc
@@ -125,15 +125,15 @@ bool MergeBufferQueues(const StreamParser::BufferQueueMap& buffer_queue_map,
// FrameProcessorTest.AudioVideo_Discontinuity currently depends on audio
// buffers being processed first.
std::vector<const StreamParser::BufferQueue*> buffer_queues;
- for (const auto& it : buffer_queue_map) {
- DCHECK(!it.second.empty());
- if (it.second[0]->type() == DemuxerStream::AUDIO)
- buffer_queues.push_back(&it.second);
+ for (const auto& [track_id, buffer_queue] : buffer_queue_map) {
+ DCHECK(!buffer_queue.empty());
+ if (buffer_queue[0]->type() == DemuxerStream::AUDIO)
+ buffer_queues.push_back(&buffer_queue);
}
- for (const auto& it : buffer_queue_map) {
- DCHECK(!it.second.empty());
- if (it.second[0]->type() != DemuxerStream::AUDIO)
- buffer_queues.push_back(&it.second);
+ for (const auto& [track_id, buffer_queue] : buffer_queue_map) {
+ DCHECK(!buffer_queue.empty());
+ if (buffer_queue[0]->type() != DemuxerStream::AUDIO)
+ buffer_queues.push_back(&buffer_queue);
}
// Do the merge.
diff --git a/chromium/media/base/supported_types.cc b/chromium/media/base/supported_types.cc
index 5540061ad77..c2efcdbb743 100644
--- a/chromium/media/base/supported_types.cc
+++ b/chromium/media/base/supported_types.cc
@@ -206,22 +206,36 @@ bool IsHevcProfileSupported(const VideoType& type) {
#if BUILDFLAG(ENABLE_PLATFORM_HEVC)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_CHROMEOS_LACROS)
+ // TODO(b/171813538): For Lacros, the supplemental profile cache will be
+ // asking lacros-gpu, but we will be doing decoding in ash-gpu. Until the
+ // codec detection is plumbed through to ash-gpu we can do this extra check
+ // for HEVC support.
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kLacrosEnablePlatformHevc)) {
+ return true;
+ }
+#endif // BUILDFLAG(IS_CHROMEOS_LACROS)
return GetSupplementalProfileCache()->IsProfileSupported(type.profile);
+#elif BUILDFLAG(IS_MAC)
+ if (__builtin_available(macOS 11.0, *))
+ return base::FeatureList::IsEnabled(kPlatformHEVCDecoderSupport) &&
+ (type.profile == HEVCPROFILE_MAIN ||
+ type.profile == HEVCPROFILE_MAIN10 ||
+ type.profile == HEVCPROFILE_MAIN_STILL_PICTURE ||
+ type.profile == HEVCPROFILE_REXT);
+ return false;
+#elif BUILDFLAG(IS_ANDROID)
+ // Technically android 5.0 mandates support for only HEVC main profile,
+ // however some platforms (like chromecast) have had more profiles supported
+ // so we'll see what happens if we just enable them all.
+ return base::FeatureList::IsEnabled(kPlatformHEVCDecoderSupport);
#else
return true;
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
-#elif BUILDFLAG(ENABLE_PLATFORM_ENCRYPTED_HEVC)
- // Only encrypted HEVC content is supported, and normally MSE.isTypeSupported
- // returns false for HEVC. The kEnableClearHevcForTesting flag allows it to
- // return true to enable a wider array of test scenarios to function properly.
- if (!base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableClearHevcForTesting)) {
- return false;
- }
- return type.profile == HEVCPROFILE_MAIN || type.profile == HEVCPROFILE_MAIN10;
#else
return false;
-#endif
+#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC)
}
bool IsVp9ProfileSupported(const VideoType& type) {
diff --git a/chromium/media/base/unaligned_shared_memory.cc b/chromium/media/base/unaligned_shared_memory.cc
deleted file mode 100644
index 542225fad63..00000000000
--- a/chromium/media/base/unaligned_shared_memory.cc
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/unaligned_shared_memory.h"
-
-#include <limits>
-
-#include "base/logging.h"
-#include "base/memory/read_only_shared_memory_region.h"
-#include "base/system/sys_info.h"
-#include "mojo/public/cpp/system/platform_handle.h"
-
-namespace media {
-
-namespace {
-
-bool CalculateMisalignmentAndOffset(size_t size,
- off_t offset,
- size_t* misalignment,
- off_t* adjusted_offset) {
- /* | | | | | | shm pages
- * | offset (may exceed max size_t)
- * |-----------| size
- * |-| misalignment
- * | adjusted offset
- * |-------------| requested mapping
- */
-
- // Note: result of % computation may be off_t or size_t, depending on the
- // relative ranks of those types. In any case we assume that
- // VMAllocationGranularity() fits in both types, so the final result does too.
- DCHECK_GE(offset, 0);
- *misalignment = offset % base::SysInfo::VMAllocationGranularity();
-
- // Above this |max_size|, |size| + |*misalignment| overflows.
- size_t max_size = std::numeric_limits<size_t>::max() - *misalignment;
- if (size > max_size) {
- DLOG(ERROR) << "Invalid size";
- return false;
- }
-
- *adjusted_offset = offset - static_cast<off_t>(*misalignment);
-
- return true;
-}
-
-} // namespace
-
-UnalignedSharedMemory::UnalignedSharedMemory(
- base::subtle::PlatformSharedMemoryRegion region,
- size_t size,
- bool read_only)
- : region_(std::move(region)), read_only_(read_only), size_(size) {}
-
-UnalignedSharedMemory::~UnalignedSharedMemory() = default;
-
-bool UnalignedSharedMemory::MapAt(off_t offset, size_t size) {
- if (offset < 0) {
- DLOG(ERROR) << "Invalid offset";
- return false;
- }
-
- size_t misalignment;
- off_t adjusted_offset;
-
- if (!CalculateMisalignmentAndOffset(size, offset, &misalignment,
- &adjusted_offset)) {
- return false;
- }
-
- if (read_only_) {
- auto shm =
- base::ReadOnlySharedMemoryRegion::Deserialize(std::move(region_));
- read_only_mapping_ = shm.MapAt(adjusted_offset, size + misalignment);
- if (!read_only_mapping_.IsValid()) {
- DLOG(ERROR) << "Failed to map shared memory";
- return false;
- }
- // TODO(crbug.com/849207): this ugly const cast will go away when uses of
- // UnalignedSharedMemory are converted to
- // {Writable,ReadOnly}UnalignedMapping.
- mapping_ptr_ = const_cast<uint8_t*>(
- static_cast<const uint8_t*>(read_only_mapping_.memory()));
- } else {
- auto shm = base::UnsafeSharedMemoryRegion::Deserialize(std::move(region_));
- writable_mapping_ = shm.MapAt(adjusted_offset, size + misalignment);
- if (!writable_mapping_.IsValid()) {
- DLOG(ERROR) << "Failed to map shared memory";
- return false;
- }
- mapping_ptr_ = static_cast<uint8_t*>(writable_mapping_.memory());
- }
-
- DCHECK(mapping_ptr_);
- // There should be no way for the IsValid() checks above to succeed and yet
- // |mapping_ptr_| remain null. However, since an invalid but non-null pointer
- // could be disastrous an extra-careful check is done.
- if (mapping_ptr_)
- mapping_ptr_ += misalignment;
- return true;
-}
-
-WritableUnalignedMapping::WritableUnalignedMapping(
- const base::UnsafeSharedMemoryRegion& region,
- size_t size,
- off_t offset)
- : size_(size), misalignment_(0) {
- if (!region.IsValid()) {
- DLOG(ERROR) << "Invalid region";
- return;
- }
-
- if (offset < 0) {
- DLOG(ERROR) << "Invalid offset";
- return;
- }
-
- off_t adjusted_offset;
- if (!CalculateMisalignmentAndOffset(size_, offset, &misalignment_,
- &adjusted_offset)) {
- return;
- }
-
- mapping_ = region.MapAt(adjusted_offset, size_ + misalignment_);
- if (!mapping_.IsValid()) {
- DLOG(ERROR) << "Failed to map shared memory " << adjusted_offset << "("
- << offset << ")"
- << "@" << size << "/\\" << misalignment_ << " on "
- << region.GetSize();
-
- return;
- }
-}
-
-WritableUnalignedMapping::~WritableUnalignedMapping() = default;
-
-void* WritableUnalignedMapping::memory() const {
- if (!IsValid()) {
- return nullptr;
- }
- return mapping_.GetMemoryAs<uint8_t>() + misalignment_;
-}
-
-ReadOnlyUnalignedMapping::ReadOnlyUnalignedMapping(
- const base::ReadOnlySharedMemoryRegion& region,
- size_t size,
- off_t offset)
- : size_(size), misalignment_(0) {
- if (!region.IsValid()) {
- DLOG(ERROR) << "Invalid region";
- return;
- }
-
- if (offset < 0) {
- DLOG(ERROR) << "Invalid offset";
- return;
- }
-
- off_t adjusted_offset;
- if (!CalculateMisalignmentAndOffset(size_, offset, &misalignment_,
- &adjusted_offset)) {
- return;
- }
-
- mapping_ = region.MapAt(adjusted_offset, size_ + misalignment_);
- if (!mapping_.IsValid()) {
- DLOG(ERROR) << "Failed to map shared memory " << adjusted_offset << "("
- << offset << ")"
- << "@" << size << "/\\" << misalignment_ << " on "
- << region.GetSize();
-
- return;
- }
-}
-
-ReadOnlyUnalignedMapping::~ReadOnlyUnalignedMapping() = default;
-
-const void* ReadOnlyUnalignedMapping::memory() const {
- if (!IsValid()) {
- return nullptr;
- }
- return mapping_.GetMemoryAs<uint8_t>() + misalignment_;
-}
-
-} // namespace media
diff --git a/chromium/media/base/unaligned_shared_memory.h b/chromium/media/base/unaligned_shared_memory.h
deleted file mode 100644
index 481590cd129..00000000000
--- a/chromium/media/base/unaligned_shared_memory.h
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_UNALIGNED_SHARED_MEMORY_H_
-#define MEDIA_BASE_UNALIGNED_SHARED_MEMORY_H_
-
-#include <stdint.h>
-
-#include "base/memory/platform_shared_memory_region.h"
-#include "base/memory/raw_ptr.h"
-#include "base/memory/read_only_shared_memory_region.h"
-#include "base/memory/shared_memory_mapping.h"
-#include "base/memory/unsafe_shared_memory_region.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Wrapper over base::PlatformSharedMemoryRegion that can be mapped at unaligned
-// offsets.
-// DEPRECATED! See https://crbug.com/795291.
-class MEDIA_EXPORT UnalignedSharedMemory {
- public:
- // Creates an |UnalignedSharedMemory| instance from a
- // |PlatformSharedMemoryRegion|. |size| sets the maximum size that may be
- // mapped. This instance will own the handle.
- UnalignedSharedMemory(base::subtle::PlatformSharedMemoryRegion region,
- size_t size,
- bool read_only);
-
- UnalignedSharedMemory(const UnalignedSharedMemory&) = delete;
- UnalignedSharedMemory& operator=(const UnalignedSharedMemory&) = delete;
-
- ~UnalignedSharedMemory();
-
- // Map the shared memory region. Note that the passed |size| parameter should
- // be less than or equal to |size()|.
- bool MapAt(off_t offset, size_t size);
- size_t size() const { return size_; }
- void* memory() const { return mapping_ptr_; }
-
- private:
- // Only one of the mappings is active, depending on the value of |read_only_|.
- // These variables are held to keep the shared memory mapping valid for the
- // lifetime of this instance.
- base::subtle::PlatformSharedMemoryRegion region_;
- base::WritableSharedMemoryMapping writable_mapping_;
- base::ReadOnlySharedMemoryMapping read_only_mapping_;
-
- // If the mapping should be made read-only.
- bool read_only_;
-
- // The size of the region associated with |region_|.
- size_t size_;
-
- // Pointer to the unaligned data in the shared memory mapping.
- raw_ptr<uint8_t> mapping_ptr_ = nullptr;
-};
-
-// Wrapper over base::WritableSharedMemoryMapping that is mapped at unaligned
-// offsets.
-class MEDIA_EXPORT WritableUnalignedMapping {
- public:
- // Creates an |WritableUnalignedMapping| instance from a
- // |UnsafeSharedMemoryRegion|. |size| sets the maximum size that may be mapped
- // within |region| and |offset| is the offset that will be mapped. |region| is
- // not retained and is used only in the constructor.
- WritableUnalignedMapping(const base::UnsafeSharedMemoryRegion& region,
- size_t size,
- off_t offset);
-
- WritableUnalignedMapping(const WritableUnalignedMapping&) = delete;
- WritableUnalignedMapping& operator=(const WritableUnalignedMapping&) = delete;
-
- ~WritableUnalignedMapping();
-
- size_t size() const { return size_; }
- void* memory() const;
-
- // True if the mapping backing the memory is valid.
- bool IsValid() const { return mapping_.IsValid(); }
-
- private:
- base::WritableSharedMemoryMapping mapping_;
-
- // The size of the region associated with |mapping_|.
- size_t size_;
-
- // Difference between actual offset within |mapping_| where data has been
- // mapped and requested offset; strictly less than
- // base::SysInfo::VMAllocationGranularity().
- size_t misalignment_;
-};
-
-// Wrapper over base::ReadOnlySharedMemoryMapping that is mapped at unaligned
-// offsets.
-class MEDIA_EXPORT ReadOnlyUnalignedMapping {
- public:
- // Creates an |WritableUnalignedMapping| instance from a
- // |ReadOnlySharedMemoryRegion|. |size| sets the maximum size that may be
- // mapped within |region| and |offset| is the offset that will be mapped.
- // |region| is not retained and is used only in the constructor.
- ReadOnlyUnalignedMapping(const base::ReadOnlySharedMemoryRegion& region,
- size_t size,
- off_t offset);
-
- ReadOnlyUnalignedMapping(const ReadOnlyUnalignedMapping&) = delete;
- ReadOnlyUnalignedMapping& operator=(const ReadOnlyUnalignedMapping&) = delete;
-
- ~ReadOnlyUnalignedMapping();
-
- size_t size() const { return size_; }
- const void* memory() const;
-
- // True if the mapping backing the memory is valid.
- bool IsValid() const { return mapping_.IsValid(); }
-
- private:
- base::ReadOnlySharedMemoryMapping mapping_;
-
- // The size of the region associated with |mapping_|.
- size_t size_;
-
- // Difference between actual offset within |mapping_| where data has been
- // mapped and requested offset; strictly less than
- // base::SysInfo::VMAllocationGranularity().
- size_t misalignment_;
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_UNALIGNED_SHARED_MEMORY_H_
diff --git a/chromium/media/base/unaligned_shared_memory_unittest.cc b/chromium/media/base/unaligned_shared_memory_unittest.cc
deleted file mode 100644
index 9d553cbc63f..00000000000
--- a/chromium/media/base/unaligned_shared_memory_unittest.cc
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/unaligned_shared_memory.h"
-
-#include <stdint.h>
-#include <string.h>
-
-#include <limits>
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-namespace {
-
-const uint8_t kUnalignedData[] = "XXXhello";
-const size_t kUnalignedDataSize = std::size(kUnalignedData);
-const off_t kUnalignedOffset = 3;
-
-const uint8_t kData[] = "hello";
-const size_t kDataSize = std::size(kData);
-
-base::UnsafeSharedMemoryRegion CreateRegion(const uint8_t* data, size_t size) {
- auto region = base::UnsafeSharedMemoryRegion::Create(size);
- auto mapping = region.Map();
- EXPECT_TRUE(mapping.IsValid());
- memcpy(mapping.memory(), data, size);
- return region;
-}
-
-base::ReadOnlySharedMemoryRegion CreateReadOnlyRegion(const uint8_t* data,
- size_t size) {
- auto mapped_region = base::ReadOnlySharedMemoryRegion::Create(size);
- EXPECT_TRUE(mapped_region.IsValid());
- memcpy(mapped_region.mapping.memory(), data, size);
- return std::move(mapped_region.region);
-}
-} // namespace
-
-TEST(UnalignedSharedMemoryTest, CreateAndDestroyRegion) {
- auto region = CreateRegion(kData, kDataSize);
- UnalignedSharedMemory shm(
- base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kDataSize, false);
-}
-
-TEST(UnalignedSharedMemoryTest, CreateAndDestroyReadOnlyRegion) {
- auto region = CreateReadOnlyRegion(kData, kDataSize);
- UnalignedSharedMemory shm(
- base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kDataSize, true);
-}
-
-TEST(UnalignedSharedMemoryTest, CreateAndDestroy_InvalidRegion) {
- UnalignedSharedMemory shm(base::subtle::PlatformSharedMemoryRegion(),
- kDataSize, false);
-}
-
-TEST(UnalignedSharedMemoryTest, MapRegion) {
- auto region = CreateRegion(kData, kDataSize);
- UnalignedSharedMemory shm(
- base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kDataSize, false);
- ASSERT_TRUE(shm.MapAt(0, kDataSize));
- EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
-}
-
-TEST(UnalignedSharedMemoryTest, MapReadOnlyRegion) {
- auto region = CreateReadOnlyRegion(kData, kDataSize);
- UnalignedSharedMemory shm(
- base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kDataSize, true);
- ASSERT_TRUE(shm.MapAt(0, kDataSize));
- EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
-}
-
-TEST(UnalignedSharedMemoryTest, Map_UnalignedRegion) {
- auto region = CreateRegion(kUnalignedData, kUnalignedDataSize);
- UnalignedSharedMemory shm(
- base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kUnalignedDataSize, false);
- ASSERT_TRUE(shm.MapAt(kUnalignedOffset, kDataSize));
- EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
-}
-
-TEST(UnalignedSharedMemoryTest, Map_UnalignedReadOnlyRegion) {
- auto region = CreateReadOnlyRegion(kUnalignedData, kUnalignedDataSize);
- UnalignedSharedMemory shm(
- base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kUnalignedDataSize, true);
- ASSERT_TRUE(shm.MapAt(kUnalignedOffset, kDataSize));
- EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
-}
-
-TEST(UnalignedSharedMemoryTest, Map_InvalidRegion) {
- UnalignedSharedMemory shm(base::subtle::PlatformSharedMemoryRegion(),
- kDataSize, true);
- ASSERT_FALSE(shm.MapAt(1, kDataSize));
- EXPECT_EQ(shm.memory(), nullptr);
-}
-
-TEST(UnalignedSharedMemoryTest, Map_NegativeOffsetRegion) {
- auto region = CreateRegion(kData, kDataSize);
- UnalignedSharedMemory shm(
- base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kDataSize, false);
- ASSERT_FALSE(shm.MapAt(-1, kDataSize));
-}
-
-TEST(UnalignedSharedMemoryTest, Map_NegativeOffsetReadOnlyRegion) {
- auto region = CreateReadOnlyRegion(kData, kDataSize);
- UnalignedSharedMemory shm(
- base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kDataSize, true);
- ASSERT_FALSE(shm.MapAt(-1, kDataSize));
-}
-
-TEST(UnalignedSharedMemoryTest, Map_SizeOverflowRegion) {
- auto region = CreateRegion(kData, kDataSize);
- UnalignedSharedMemory shm(
- base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kDataSize, false);
- ASSERT_FALSE(shm.MapAt(1, std::numeric_limits<size_t>::max()));
-}
-
-TEST(UnalignedSharedMemoryTest, Map_SizeOverflowReadOnlyRegion) {
- auto region = CreateReadOnlyRegion(kData, kDataSize);
- UnalignedSharedMemory shm(
- base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kDataSize, true);
- ASSERT_FALSE(shm.MapAt(1, std::numeric_limits<size_t>::max()));
-}
-
-TEST(UnalignedSharedMemoryTest, UnmappedRegionIsNullptr) {
- auto region = CreateRegion(kData, kDataSize);
- UnalignedSharedMemory shm(
- base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kDataSize, false);
- ASSERT_EQ(shm.memory(), nullptr);
-}
-
-TEST(UnalignedSharedMemoryTest, UnmappedReadOnlyRegionIsNullptr) {
- auto region = CreateReadOnlyRegion(kData, kDataSize);
- UnalignedSharedMemory shm(
- base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
- std::move(region)),
- kDataSize, true);
- ASSERT_EQ(shm.memory(), nullptr);
-}
-
-TEST(WritableUnalignedMappingTest, CreateAndDestroy) {
- auto region = CreateRegion(kData, kDataSize);
- WritableUnalignedMapping shm(region, kDataSize, 0);
- EXPECT_TRUE(shm.IsValid());
-}
-
-TEST(WritableUnalignedMappingTest, CreateAndDestroy_InvalidRegion) {
- base::UnsafeSharedMemoryRegion region;
- WritableUnalignedMapping shm(region, kDataSize, 0);
- EXPECT_FALSE(shm.IsValid());
-}
-
-TEST(WritableUnalignedMappingTest, Map) {
- auto region = CreateRegion(kData, kDataSize);
- WritableUnalignedMapping shm(region, kDataSize, 0);
- ASSERT_TRUE(shm.IsValid());
- EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
-}
-
-TEST(WritableUnalignedMappingTest, Map_Unaligned) {
- auto region = CreateRegion(kUnalignedData, kUnalignedDataSize);
- WritableUnalignedMapping shm(region, kDataSize, kUnalignedOffset);
- ASSERT_TRUE(shm.IsValid());
- EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
-}
-
-TEST(WritableUnalignedMappingTest, Map_InvalidRegion) {
- base::UnsafeSharedMemoryRegion region;
- WritableUnalignedMapping shm(region, kDataSize, 0);
- ASSERT_FALSE(shm.IsValid());
- EXPECT_EQ(shm.memory(), nullptr);
-}
-
-TEST(WritableUnalignedMappingTest, Map_NegativeOffset) {
- auto region = CreateRegion(kData, kDataSize);
- WritableUnalignedMapping shm(region, kDataSize, -1);
- ASSERT_FALSE(shm.IsValid());
-}
-
-TEST(WritableUnalignedMappingTest, Map_SizeOverflow) {
- auto region = CreateRegion(kData, kDataSize);
- WritableUnalignedMapping shm(region, std::numeric_limits<size_t>::max(), 1);
- ASSERT_FALSE(shm.IsValid());
-}
-
-TEST(ReadOnlyUnalignedMappingTest, CreateAndDestroy) {
- auto region = CreateReadOnlyRegion(kData, kDataSize);
- ReadOnlyUnalignedMapping shm(region, kDataSize, 0);
- EXPECT_TRUE(shm.IsValid());
-}
-
-TEST(ReadOnlyUnalignedMappingTest, CreateAndDestroy_InvalidRegion) {
- base::ReadOnlySharedMemoryRegion region;
- ReadOnlyUnalignedMapping shm(region, kDataSize, 0);
- EXPECT_FALSE(shm.IsValid());
-}
-
-TEST(ReadOnlyUnalignedMappingTest, Map) {
- auto region = CreateReadOnlyRegion(kData, kDataSize);
- ReadOnlyUnalignedMapping shm(region, kDataSize, 0);
- ASSERT_TRUE(shm.IsValid());
- EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
-}
-
-TEST(ReadOnlyUnalignedMappingTest, Map_Unaligned) {
- auto region = CreateReadOnlyRegion(kUnalignedData, kUnalignedDataSize);
- ReadOnlyUnalignedMapping shm(region, kDataSize, kUnalignedOffset);
- ASSERT_TRUE(shm.IsValid());
- EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
-}
-
-TEST(ReadOnlyUnalignedMappingTest, Map_InvalidRegion) {
- base::ReadOnlySharedMemoryRegion region;
- ReadOnlyUnalignedMapping shm(region, kDataSize, 0);
- ASSERT_FALSE(shm.IsValid());
- EXPECT_EQ(shm.memory(), nullptr);
-}
-
-TEST(ReadOnlyUnalignedMappingTest, Map_NegativeOffset) {
- auto region = CreateReadOnlyRegion(kData, kDataSize);
- ReadOnlyUnalignedMapping shm(region, kDataSize, -1);
- ASSERT_FALSE(shm.IsValid());
-}
-
-TEST(ReadOnlyUnalignedMappingTest, Map_SizeOverflow) {
- auto region = CreateReadOnlyRegion(kData, kDataSize);
- ReadOnlyUnalignedMapping shm(region, std::numeric_limits<size_t>::max(), 1);
- ASSERT_FALSE(shm.IsValid());
-}
-
-} // namespace media
diff --git a/chromium/media/base/video_bitrate_allocation.cc b/chromium/media/base/video_bitrate_allocation.cc
index cf01aa25da4..8c66ffb3288 100644
--- a/chromium/media/base/video_bitrate_allocation.cc
+++ b/chromium/media/base/video_bitrate_allocation.cc
@@ -98,6 +98,10 @@ const Bitrate VideoBitrateAllocation::GetSumBitrate() const {
return sum_bitrate_;
}
+Bitrate::Mode VideoBitrateAllocation::GetMode() const {
+ return sum_bitrate_.mode();
+}
+
std::string VideoBitrateAllocation::ToString() const {
size_t num_active_spatial_layers = 0;
size_t num_temporal_layers[kMaxSpatialLayers] = {};
diff --git a/chromium/media/base/video_bitrate_allocation.h b/chromium/media/base/video_bitrate_allocation.h
index 62a3cc49a47..18c3f193c4c 100644
--- a/chromium/media/base/video_bitrate_allocation.h
+++ b/chromium/media/base/video_bitrate_allocation.h
@@ -64,6 +64,9 @@ class MEDIA_EXPORT VideoBitrateAllocation {
// bps equals the sum of the layers' bitrates.
const Bitrate GetSumBitrate() const;
+ // Returns the encoding rate control mode of this allocation.
+ Bitrate::Mode GetMode() const;
+
std::string ToString() const;
bool operator==(const VideoBitrateAllocation& other) const;
diff --git a/chromium/media/base/video_bitrate_allocation_unittest.cc b/chromium/media/base/video_bitrate_allocation_unittest.cc
index 7ab8fdf07d7..7606827d195 100644
--- a/chromium/media/base/video_bitrate_allocation_unittest.cc
+++ b/chromium/media/base/video_bitrate_allocation_unittest.cc
@@ -12,19 +12,19 @@ namespace media {
TEST(VideoBitrateAllocationTest, Constructor_DefaultsModeConstant) {
VideoBitrateAllocation allocation;
- ASSERT_EQ(allocation.GetSumBitrate().mode(), Bitrate::Mode::kConstant);
+ ASSERT_EQ(allocation.GetMode(), Bitrate::Mode::kConstant);
}
TEST(VideoBitrateAllocationTest, Constructor_ConstantBitrate_CorrectMode) {
VideoBitrateAllocation allocation(Bitrate::Mode::kConstant);
- ASSERT_EQ(allocation.GetSumBitrate().mode(), Bitrate::Mode::kConstant);
+ ASSERT_EQ(allocation.GetMode(), Bitrate::Mode::kConstant);
}
TEST(VideoBitrateAllocationTest, Constructor_VariableBitrate_CorrectMode) {
VideoBitrateAllocation allocation(Bitrate::Mode::kVariable);
- ASSERT_EQ(allocation.GetSumBitrate().mode(), Bitrate::Mode::kVariable);
+ ASSERT_EQ(allocation.GetMode(), Bitrate::Mode::kVariable);
}
TEST(VideoBitrateAllocationTest,
diff --git a/chromium/media/base/video_codecs.cc b/chromium/media/base/video_codecs.cc
index 112fc655dc2..4af7ed4684b 100644
--- a/chromium/media/base/video_codecs.cc
+++ b/chromium/media/base/video_codecs.cc
@@ -77,6 +77,22 @@ std::string GetProfileName(VideoCodecProfile profile) {
return "hevc main 10";
case HEVCPROFILE_MAIN_STILL_PICTURE:
return "hevc main still-picture";
+ case HEVCPROFILE_REXT:
+ return "hevc range extensions";
+ case HEVCPROFILE_HIGH_THROUGHPUT:
+ return "hevc high throughput";
+ case HEVCPROFILE_MULTIVIEW_MAIN:
+ return "hevc multiview main";
+ case HEVCPROFILE_SCALABLE_MAIN:
+ return "hevc scalable main";
+ case HEVCPROFILE_3D_MAIN:
+ return "hevc 3d main";
+ case HEVCPROFILE_SCREEN_EXTENDED:
+ return "hevc screen extended";
+ case HEVCPROFILE_SCALABLE_REXT:
+ return "hevc scalable range extensions";
+ case HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED:
+ return "hevc high throughput screen extended";
case VP8PROFILE_ANY:
return "vp8";
case VP9PROFILE_PROFILE0:
@@ -707,20 +723,70 @@ bool ParseHEVCCodecId(const std::string& codec_id,
return false;
}
- if (profile) {
- // TODO(servolk): Handle format range extension profiles as explained in
- // HEVC standard (ISO/IEC ISO/IEC 23008-2) section A.3.5
- if (general_profile_idc == 3 || (general_profile_compatibility_flags & 4)) {
- *profile = HEVCPROFILE_MAIN_STILL_PICTURE;
- }
- if (general_profile_idc == 2 || (general_profile_compatibility_flags & 2)) {
- *profile = HEVCPROFILE_MAIN10;
- }
- if (general_profile_idc == 1 || (general_profile_compatibility_flags & 1)) {
- *profile = HEVCPROFILE_MAIN;
- }
+ VideoCodecProfile out_profile = VIDEO_CODEC_PROFILE_UNKNOWN;
+ // Spec A.3.8
+ if (general_profile_idc == 11 ||
+ (general_profile_compatibility_flags & 2048)) {
+ out_profile = HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED;
+ }
+ // Spec H.11.1.2
+ if (general_profile_idc == 10 ||
+ (general_profile_compatibility_flags & 1024)) {
+ out_profile = HEVCPROFILE_SCALABLE_REXT;
+ }
+ // Spec A.3.7
+ if (general_profile_idc == 9 || (general_profile_compatibility_flags & 512)) {
+ out_profile = HEVCPROFILE_SCREEN_EXTENDED;
+ }
+ // Spec I.11.1.1
+ if (general_profile_idc == 8 || (general_profile_compatibility_flags & 256)) {
+ out_profile = HEVCPROFILE_3D_MAIN;
+ }
+ // Spec H.11.1.1
+ if (general_profile_idc == 7 || (general_profile_compatibility_flags & 128)) {
+ out_profile = HEVCPROFILE_SCALABLE_MAIN;
+ }
+ // Spec G.11.1.1
+ if (general_profile_idc == 6 || (general_profile_compatibility_flags & 64)) {
+ out_profile = HEVCPROFILE_MULTIVIEW_MAIN;
+ }
+ // Spec A.3.6
+ if (general_profile_idc == 5 || (general_profile_compatibility_flags & 32)) {
+ out_profile = HEVCPROFILE_HIGH_THROUGHPUT;
+ }
+ // Spec A.3.5
+ if (general_profile_idc == 4 || (general_profile_compatibility_flags & 16)) {
+ out_profile = HEVCPROFILE_REXT;
+ }
+ // Spec A.3.3
+ // NOTICE: Do not change the order of below sections
+ if (general_profile_idc == 2 || (general_profile_compatibility_flags & 4)) {
+ out_profile = HEVCPROFILE_MAIN10;
+ }
+ // Spec A.3.2
+ // When general_profile_compatibility_flag[1] is equal to 1,
+ // general_profile_compatibility_flag[2] should be equal to 1 as well.
+ if (general_profile_idc == 1 || (general_profile_compatibility_flags & 2)) {
+ out_profile = HEVCPROFILE_MAIN;
+ }
+ // Spec A.3.4
+ // When general_profile_compatibility_flag[3] is equal to 1,
+ // general_profile_compatibility_flag[1] and
+ // general_profile_compatibility_flag[2] should be equal to 1 as well.
+ if (general_profile_idc == 3 || (general_profile_compatibility_flags & 8)) {
+ out_profile = HEVCPROFILE_MAIN_STILL_PICTURE;
+ }
+
+ if (out_profile == VIDEO_CODEC_PROFILE_UNKNOWN) {
+ DVLOG(1) << "Warning: unrecognized HEVC/H.265 general_profile_idc: "
+ << general_profile_idc << ", general_profile_compatibility_flags: "
+ << general_profile_compatibility_flags;
+ return false;
}
+ if (profile)
+ *profile = out_profile;
+
uint8_t general_tier_flag;
if (elem[3].size() > 0 && (elem[3][0] == 'L' || elem[3][0] == 'H')) {
general_tier_flag = (elem[3][0] == 'L') ? 0 : 1;
@@ -954,6 +1020,14 @@ VideoCodec VideoCodecProfileToVideoCodec(VideoCodecProfile profile) {
case HEVCPROFILE_MAIN:
case HEVCPROFILE_MAIN10:
case HEVCPROFILE_MAIN_STILL_PICTURE:
+ case HEVCPROFILE_REXT:
+ case HEVCPROFILE_HIGH_THROUGHPUT:
+ case HEVCPROFILE_MULTIVIEW_MAIN:
+ case HEVCPROFILE_SCALABLE_MAIN:
+ case HEVCPROFILE_3D_MAIN:
+ case HEVCPROFILE_SCREEN_EXTENDED:
+ case HEVCPROFILE_SCALABLE_REXT:
+ case HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED:
return VideoCodec::kHEVC;
case VP8PROFILE_ANY:
return VideoCodec::kVP8;
diff --git a/chromium/media/base/video_codecs.h b/chromium/media/base/video_codecs.h
index 9f881b30a94..c5222568255 100644
--- a/chromium/media/base/video_codecs.h
+++ b/chromium/media/base/video_codecs.h
@@ -92,7 +92,17 @@ enum VideoCodecProfile {
AV1PROFILE_MAX = AV1PROFILE_PROFILE_PRO,
DOLBYVISION_PROFILE8 = 27,
DOLBYVISION_PROFILE9 = 28,
- VIDEO_CODEC_PROFILE_MAX = DOLBYVISION_PROFILE9,
+ HEVCPROFILE_EXT_MIN = 29,
+ HEVCPROFILE_REXT = HEVCPROFILE_EXT_MIN,
+ HEVCPROFILE_HIGH_THROUGHPUT = 30,
+ HEVCPROFILE_MULTIVIEW_MAIN = 31,
+ HEVCPROFILE_SCALABLE_MAIN = 32,
+ HEVCPROFILE_3D_MAIN = 33,
+ HEVCPROFILE_SCREEN_EXTENDED = 34,
+ HEVCPROFILE_SCALABLE_REXT = 35,
+ HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED = 36,
+ HEVCPROFILE_EXT_MAX = HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED,
+ VIDEO_CODEC_PROFILE_MAX = HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED,
};
using VideoCodecLevel = uint32_t;
diff --git a/chromium/media/base/video_codecs_unittest.cc b/chromium/media/base/video_codecs_unittest.cc
index c5ac4c592ae..0fd5f63c739 100644
--- a/chromium/media/base/video_codecs_unittest.cc
+++ b/chromium/media/base/video_codecs_unittest.cc
@@ -513,12 +513,94 @@ TEST(ParseHEVCCodecIdTest, InvalidHEVCCodecIds) {
// decimal-encoded number (between 0 and 31)
EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.6.L93.B0", &profile, &level_idc));
EXPECT_TRUE(ParseHEVCCodecId("hvc1.31.6.L93.B0", &profile, &level_idc));
+ // Spec A.3.2
+ // When general_profile_compatibility_flag[1] is equal to 1,
+ // general_profile_compatibility_flag[2] should be equal to 1 as well.
EXPECT_TRUE(ParseHEVCCodecId("hvc1.1.6.L93.B0", &profile, &level_idc));
EXPECT_EQ(profile, HEVCPROFILE_MAIN);
- EXPECT_TRUE(ParseHEVCCodecId("hvc1.2.2.L93.B0", &profile, &level_idc));
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.1.0.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_MAIN);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.6.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_MAIN);
+ // Spec A.3.3
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.2.4.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_MAIN10);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.2.0.L93.B0", &profile, &level_idc));
EXPECT_EQ(profile, HEVCPROFILE_MAIN10);
- EXPECT_TRUE(ParseHEVCCodecId("hvc1.3.4.L93.B0", &profile, &level_idc));
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.4.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_MAIN10);
+ // Spec A.3.4
+ // When general_profile_compatibility_flag[3] is equal to 1,
+ // general_profile_compatibility_flag[1] and
+ // general_profile_compatibility_flag[2] should be equal to 1 as well.
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.3.E.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_MAIN_STILL_PICTURE);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.E.L93.B0", &profile, &level_idc));
EXPECT_EQ(profile, HEVCPROFILE_MAIN_STILL_PICTURE);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.3.0.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_MAIN_STILL_PICTURE);
+ // Spec A.3.5
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.4.10.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_REXT);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.4.0.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_REXT);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.10.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_REXT);
+ // Spec A.3.6
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.5.20.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_HIGH_THROUGHPUT);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.5.0.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_HIGH_THROUGHPUT);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.20.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_HIGH_THROUGHPUT);
+ // Spec G.11.1.1
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.6.40.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_MULTIVIEW_MAIN);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.6.0.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_MULTIVIEW_MAIN);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.40.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_MULTIVIEW_MAIN);
+ // Spec H.11.1.1
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.7.80.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_SCALABLE_MAIN);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.7.0.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_SCALABLE_MAIN);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.80.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_SCALABLE_MAIN);
+ // Spec I.11.1.1
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.8.100.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_3D_MAIN);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.8.0.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_3D_MAIN);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.100.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_3D_MAIN);
+ // Spec A.3.7
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.9.200.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_SCREEN_EXTENDED);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.9.0.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_SCREEN_EXTENDED);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.200.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_SCREEN_EXTENDED);
+ // Spec H.11.1.2
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.10.400.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_SCALABLE_REXT);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.10.0.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_SCALABLE_REXT);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.400.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_SCALABLE_REXT);
+ // Spec A.3.8
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.11.800.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.11.0.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED);
+ EXPECT_TRUE(ParseHEVCCodecId("hvc1.0.800.L93.B0", &profile, &level_idc));
+ EXPECT_EQ(profile, HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED);
+
+ // Unmatched general_profile_idc and general_profile_compatibility_flags
+ EXPECT_FALSE(ParseHEVCCodecId("hvc1.12.1000.L93.B0", &profile, &level_idc));
+ EXPECT_FALSE(ParseHEVCCodecId("hvc1.12.0.L93.B0", &profile, &level_idc));
+ EXPECT_FALSE(ParseHEVCCodecId("hvc1.0.1000.L93.B0", &profile, &level_idc));
+
EXPECT_FALSE(ParseHEVCCodecId("hvc1.-1.6.L93.B0", &profile, &level_idc));
EXPECT_FALSE(ParseHEVCCodecId("hvc1.32.6.L93.B0", &profile, &level_idc));
EXPECT_FALSE(ParseHEVCCodecId("hvc1.999.6.L93.B0", &profile, &level_idc));
diff --git a/chromium/media/base/video_encoder.cc b/chromium/media/base/video_encoder.cc
index 73ae8eadf35..dffeaa0b3a7 100644
--- a/chromium/media/base/video_encoder.cc
+++ b/chromium/media/base/video_encoder.cc
@@ -4,6 +4,7 @@
#include "media/base/video_encoder.h"
+#include "base/cxx17_backports.h"
#include "base/numerics/clamped_math.h"
#include "media/base/video_frame.h"
@@ -20,10 +21,10 @@ uint32_t GetDefaultVideoEncodeBitrate(gfx::Size frame_size,
// Scale default bitrate to the given frame size and fps
base::ClampedNumeric<uint64_t> result = kDefaultBitrateForHD30fps;
- result *= std::clamp(framerate, 1u, 300u);
- result *= std::clamp(frame_size.GetArea(), 1, kMaxArea);
+ result *= base::clamp(framerate, 1u, 300u);
+ result *= base::clamp(frame_size.GetArea(), 1, kMaxArea);
result /= kHDArea * 30u; // HD resolution, 30 fps
- return std::clamp(result.RawValue(), kMinBitrate, kMaxBitrate);
+ return base::clamp(result.RawValue(), kMinBitrate, kMaxBitrate);
}
VideoEncoderOutput::VideoEncoderOutput() = default;
@@ -41,4 +42,4 @@ VideoEncoder::PendingEncode::PendingEncode() = default;
VideoEncoder::PendingEncode::PendingEncode(PendingEncode&&) = default;
VideoEncoder::PendingEncode::~PendingEncode() = default;
-} // namespace media \ No newline at end of file
+} // namespace media
diff --git a/chromium/media/base/video_frame.h b/chromium/media/base/video_frame.h
index edf74296fd3..08a48d7d7a2 100644
--- a/chromium/media/base/video_frame.h
+++ b/chromium/media/base/video_frame.h
@@ -610,8 +610,8 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// This method is thread safe. Both blink and compositor threads can call it.
gpu::SyncToken UpdateReleaseSyncToken(SyncTokenClient* client);
- // Similar to UpdateReleaseSyncToken() but operates on the gpu::SyncToken for
- // each plane. This should only be called when a VideoFrame has a single
+ // Similar to UpdateReleaseSyncToken() but operates on the gpu::SyncToken
+ // for each plane. This should only be called when a VideoFrame has a single
// owner. I.e., before it has been vended after creation.
gpu::SyncToken UpdateMailboxHolderSyncToken(size_t plane,
SyncTokenClient* client);
diff --git a/chromium/media/base/video_frame_metadata.cc b/chromium/media/base/video_frame_metadata.cc
index 3a95860569e..6e8287326c8 100644
--- a/chromium/media/base/video_frame_metadata.cc
+++ b/chromium/media/base/video_frame_metadata.cc
@@ -37,7 +37,7 @@ void VideoFrameMetadata::MergeMetadataFrom(
MERGE_OPTIONAL_FIELD(capture_update_rect, metadata_source);
MERGE_OPTIONAL_FIELD(region_capture_rect, metadata_source);
MERGE_VALUE_FIELD(crop_version, metadata_source);
- MERGE_OPTIONAL_FIELD(copy_mode, metadata_source);
+ MERGE_OPTIONAL_FIELD(copy_required, metadata_source);
MERGE_VALUE_FIELD(end_of_stream, metadata_source);
MERGE_OPTIONAL_FIELD(frame_duration, metadata_source);
MERGE_OPTIONAL_FIELD(frame_rate, metadata_source);
diff --git a/chromium/media/base/video_frame_metadata.h b/chromium/media/base/video_frame_metadata.h
index 0f7560a4855..3f92d91cf73 100644
--- a/chromium/media/base/video_frame_metadata.h
+++ b/chromium/media/base/video_frame_metadata.h
@@ -24,25 +24,6 @@ struct MEDIA_EXPORT VideoFrameMetadata {
VideoFrameMetadata(const VideoFrameMetadata& other);
- enum CopyMode {
- // Indicates that mailbox created in one context, is also being used in a
- // different context belonging to another share group and video frames are
- // using SurfaceTexture to render frames.
- // Textures generated from SurfaceTexture can't be shared between contexts
- // of different share group and hence this frame must be copied to a new
- // texture before use, rather than being used directly.
- kCopyToNewTexture = 0,
-
- // Indicates that mailbox created in one context, is also being used in a
- // different context belonging to another share group and video frames are
- // using AImageReader to render frames.
- // AImageReader allows to render image data to AHardwareBuffer which can be
- // shared between contexts of different share group. AHB from existing
- // mailbox is wrapped into a new mailbox(AHB backed) which can then be used
- // by another context.
- kCopyMailboxesOnly = 1,
- };
-
// Merges internal values from |metadata_source|.
void MergeMetadataFrom(const VideoFrameMetadata& metadata_source);
@@ -76,6 +57,10 @@ struct MEDIA_EXPORT VideoFrameMetadata {
// If cropping was applied due to Region Capture to produce this frame,
// then this reflects where the frame's contents originate from in the
// original uncropped frame.
+ //
+ // NOTE: May also be nullopt if region capture is enabled but the capture rect
+ // is in a different coordinate space. For more info, see
+ // https://crbug.com/1327560.
absl::optional<gfx::Rect> region_capture_rect;
// Whenever cropTo() is called, Blink increments the crop_version and records
@@ -86,9 +71,13 @@ struct MEDIA_EXPORT VideoFrameMetadata {
// have this value set to zero.
uint32_t crop_version = 0;
- // If not null, it indicates how video frame mailbox should be copied to a
- // new mailbox.
- absl::optional<CopyMode> copy_mode;
+ // Indicates that mailbox created in one context, is also being used in a
+ // different context belonging to another share group and video frames are
+ // using SurfaceTexture to render frames.
+ // Textures generated from SurfaceTexture can't be shared between contexts
+ // of different share group and hence this frame must be copied to a new
+ // texture before use, rather than being used directly.
+ bool copy_required = false;
// Indicates if the current frame is the End of its current Stream.
bool end_of_stream = false;
diff --git a/chromium/media/base/video_frame_unittest.cc b/chromium/media/base/video_frame_unittest.cc
index 5db228c01b1..cc1b2107e00 100644
--- a/chromium/media/base/video_frame_unittest.cc
+++ b/chromium/media/base/video_frame_unittest.cc
@@ -62,11 +62,9 @@ media::VideoFrameMetadata GetFullVideoFrameMetadata() {
// media::VideoTransformation
metadata.transformation = media::VIDEO_ROTATION_90;
- // media::VideoFrameMetadata::CopyMode
- metadata.copy_mode = media::VideoFrameMetadata::CopyMode::kCopyToNewTexture;
-
// bools
metadata.allow_overlay = true;
+ metadata.copy_required = true;
metadata.end_of_stream = true;
metadata.texture_owner = true;
metadata.wants_promotion_hint = true;
@@ -112,7 +110,7 @@ void VerifyVideoFrameMetadataEquality(const media::VideoFrameMetadata& a,
EXPECT_EQ(a.capture_end_time, b.capture_end_time);
EXPECT_EQ(a.capture_counter, b.capture_counter);
EXPECT_EQ(a.capture_update_rect, b.capture_update_rect);
- EXPECT_EQ(a.copy_mode, b.copy_mode);
+ EXPECT_EQ(a.copy_required, b.copy_required);
EXPECT_EQ(a.end_of_stream, b.end_of_stream);
EXPECT_EQ(a.frame_duration, b.frame_duration);
EXPECT_EQ(a.frame_rate, b.frame_rate);
diff --git a/chromium/media/base/video_util.cc b/chromium/media/base/video_util.cc
index a786d60496a..60475b69d37 100644
--- a/chromium/media/base/video_util.cc
+++ b/chromium/media/base/video_util.cc
@@ -15,6 +15,7 @@
#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/raster_interface.h"
#include "media/base/limits.h"
@@ -713,6 +714,8 @@ scoped_refptr<VideoFrame> ReadbackTextureBackedFrameToMemorySync(
VideoFramePool* pool) {
DCHECK(ri);
+ TRACE_EVENT2("media", "ReadbackTextureBackedFrameToMemorySync", "timestamp",
+ txt_frame.timestamp(), "gr_ctx", !!gr_context);
VideoPixelFormat format = ReadbackFormat(txt_frame);
if (format == PIXEL_FORMAT_UNKNOWN) {
DLOG(ERROR) << "Readback is not possible for this frame: "
@@ -765,6 +768,9 @@ bool ReadbackTexturePlaneToMemorySync(const VideoFrame& src_frame,
EncoderStatus ConvertAndScaleFrame(const VideoFrame& src_frame,
VideoFrame& dst_frame,
std::vector<uint8_t>& tmp_buf) {
+ TRACE_EVENT2("media", "ConvertAndScaleFrame", "src_format",
+ VideoPixelFormatToString(src_frame.format()), "dst_format",
+ VideoPixelFormatToString(dst_frame.format()));
constexpr auto kDefaultFiltering = libyuv::kFilterBox;
if (!src_frame.IsMappable() || !dst_frame.IsMappable())
return EncoderStatus::Codes::kUnsupportedFrameFormat;
diff --git a/chromium/media/base/video_util.h b/chromium/media/base/video_util.h
index 5e09b2c0e1e..c451a57a7d4 100644
--- a/chromium/media/base/video_util.h
+++ b/chromium/media/base/video_util.h
@@ -84,6 +84,9 @@ MEDIA_EXPORT gfx::Rect ComputeLetterboxRegion(const gfx::Rect& bounds,
// have color distortions around the edges in a letterboxed video frame. Note
// that, in cases where ComputeLetterboxRegion() would return a 1x1-sized Rect,
// this function could return either a 0x0-sized Rect or a 2x2-sized Rect.
+// Note that calling this function with `bounds` that already have the aspect
+// ratio of `content` is not guaranteed to be a no-op (for context, see
+// https://crbug.com/1323367).
MEDIA_EXPORT gfx::Rect ComputeLetterboxRegionForI420(const gfx::Rect& bounds,
const gfx::Size& content);
diff --git a/chromium/media/base/win/BUILD.gn b/chromium/media/base/win/BUILD.gn
index 558334a8612..31da9d6bc60 100644
--- a/chromium/media/base/win/BUILD.gn
+++ b/chromium/media/base/win/BUILD.gn
@@ -54,6 +54,11 @@ source_set("dcomp_texture_wrapper") {
]
}
+source_set("overlay_state_observer_subscription") {
+ sources = [ "overlay_state_observer_subscription.h" ]
+ deps = [ "//base" ]
+}
+
source_set("test_support") {
testonly = true
sources = [
diff --git a/chromium/media/base/win/DEPS b/chromium/media/base/win/DEPS
new file mode 100644
index 00000000000..d2ecd2deff4
--- /dev/null
+++ b/chromium/media/base/win/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+media/base/win"
+]
diff --git a/chromium/media/base/win/dcomp_texture_wrapper.h b/chromium/media/base/win/dcomp_texture_wrapper.h
index 2974ea75cb2..7944024f900 100644
--- a/chromium/media/base/win/dcomp_texture_wrapper.h
+++ b/chromium/media/base/win/dcomp_texture_wrapper.h
@@ -14,6 +14,10 @@ class Rect;
class Size;
} // namespace gfx
+namespace gpu {
+struct Mailbox;
+} // namespace gpu
+
namespace media {
class VideoFrame;
@@ -40,7 +44,7 @@ class DCOMPTextureWrapper {
// Creates VideoFrame which will be returned in `create_video_frame_cb`.
using CreateVideoFrameCB =
- base::OnceCallback<void(scoped_refptr<VideoFrame>)>;
+ base::OnceCallback<void(scoped_refptr<VideoFrame>, const gpu::Mailbox&)>;
virtual void CreateVideoFrame(const gfx::Size& natural_size,
CreateVideoFrameCB create_video_frame_cb) = 0;
diff --git a/chromium/media/base/win/dxgi_device_manager.cc b/chromium/media/base/win/dxgi_device_manager.cc
index aefa923861c..b1bcfbd1b62 100644
--- a/chromium/media/base/win/dxgi_device_manager.cc
+++ b/chromium/media/base/win/dxgi_device_manager.cc
@@ -97,9 +97,13 @@ HRESULT DXGIDeviceManager::ResetDevice(
Microsoft::WRL::ComPtr<ID3D11Device>& d3d_device) {
constexpr uint32_t kDeviceFlags =
D3D11_CREATE_DEVICE_VIDEO_SUPPORT | D3D11_CREATE_DEVICE_BGRA_SUPPORT;
- HRESULT hr = D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr,
- kDeviceFlags, nullptr, 0, D3D11_SDK_VERSION,
- &d3d_device, nullptr, nullptr);
+ const D3D_FEATURE_LEVEL kFeatureLevels[] = {
+ D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_1,
+ D3D_FEATURE_LEVEL_10_0};
+ HRESULT hr =
+ D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr,
+ kDeviceFlags, kFeatureLevels, std::size(kFeatureLevels),
+ D3D11_SDK_VERSION, &d3d_device, nullptr, nullptr);
RETURN_ON_HR_FAILURE(hr, "D3D11 device creation failed", hr);
RETURN_ON_HR_FAILURE(
hr, media::SetDebugName(d3d_device.Get(), "Media_DXGIDeviceManager"), hr);
diff --git a/chromium/media/base/win/mf_helpers.cc b/chromium/media/base/win/mf_helpers.cc
index a27a30c748c..b5c963ed85a 100644
--- a/chromium/media/base/win/mf_helpers.cc
+++ b/chromium/media/base/win/mf_helpers.cc
@@ -5,6 +5,8 @@
#include "media/base/win/mf_helpers.h"
#include <d3d11.h>
+#include <ks.h>
+#include <ksmedia.h>
#include "base/check_op.h"
#include "base/win/windows_version.h"
@@ -94,4 +96,36 @@ HRESULT SetDebugName(IDXGIObject* dxgi_object, const char* debug_string) {
return SetDebugNameInternal(dxgi_object, debug_string);
}
+ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
+ switch (config) {
+ case KSAUDIO_SPEAKER_MONO:
+ return CHANNEL_LAYOUT_MONO;
+ case KSAUDIO_SPEAKER_STEREO:
+ return CHANNEL_LAYOUT_STEREO;
+ case KSAUDIO_SPEAKER_QUAD:
+ return CHANNEL_LAYOUT_QUAD;
+ case KSAUDIO_SPEAKER_SURROUND:
+ return CHANNEL_LAYOUT_4_0;
+ case KSAUDIO_SPEAKER_5POINT1:
+ return CHANNEL_LAYOUT_5_1_BACK;
+ case KSAUDIO_SPEAKER_5POINT1_SURROUND:
+ return CHANNEL_LAYOUT_5_1;
+ case KSAUDIO_SPEAKER_7POINT1:
+ return CHANNEL_LAYOUT_7_1_WIDE;
+ case KSAUDIO_SPEAKER_7POINT1_SURROUND:
+ return CHANNEL_LAYOUT_7_1;
+ case KSAUDIO_SPEAKER_DIRECTOUT:
+ // When specifying the wave format for a direct-out stream, an application
+ // should set the dwChannelMask member of the WAVEFORMATEXTENSIBLE
+ // structure to the value KSAUDIO_SPEAKER_DIRECTOUT, which is zero.
+ // A channel mask of zero indicates that no speaker positions are defined.
+ // As always, the number of channels in the stream is specified in the
+ // Format.nChannels member.
+ return CHANNEL_LAYOUT_DISCRETE;
+ default:
+ DVLOG(2) << "Unsupported channel configuration: " << config;
+ return CHANNEL_LAYOUT_UNSUPPORTED;
+ }
+}
+
} // namespace media
diff --git a/chromium/media/base/win/mf_helpers.h b/chromium/media/base/win/mf_helpers.h
index 70492dcb757..bba22260798 100644
--- a/chromium/media/base/win/mf_helpers.h
+++ b/chromium/media/base/win/mf_helpers.h
@@ -10,6 +10,7 @@
#include <wrl/client.h>
#include "base/logging.h"
+#include "media/base/channel_layout.h"
#include "media/base/win/mf_util_export.h"
struct ID3D11DeviceChild;
@@ -92,6 +93,21 @@ MF_UTIL_EXPORT HRESULT SetDebugName(ID3D11Device* d3d11_device,
MF_UTIL_EXPORT HRESULT SetDebugName(IDXGIObject* dxgi_object,
const char* debug_string);
+// Represents audio channel configuration constants as understood by Windows.
+// E.g. KSAUDIO_SPEAKER_MONO. For a list of possible values see:
+// http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
+using ChannelConfig = uint32_t;
+
+// Converts Microsoft's channel configuration to ChannelLayout.
+// This mapping is not perfect but the best we can do given the current
+// ChannelLayout enumerator and the Windows-specific speaker configurations
+// defined in ksmedia.h. Don't assume that the channel ordering in
+// ChannelLayout is exactly the same as the Windows specific configuration.
+// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
+// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
+// speakers are different in these two definitions.
+MF_UTIL_EXPORT ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config);
+
} // namespace media
#endif // MEDIA_BASE_WIN_MF_HELPERS_H_
diff --git a/chromium/media/base/win/overlay_state_observer_subscription.h b/chromium/media/base/win/overlay_state_observer_subscription.h
new file mode 100644
index 00000000000..16ca0870074
--- /dev/null
+++ b/chromium/media/base/win/overlay_state_observer_subscription.h
@@ -0,0 +1,42 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_WIN_OVERLAY_STATE_OBSERVER_SUBSCRIPTION_H_
+#define MEDIA_BASE_WIN_OVERLAY_STATE_OBSERVER_SUBSCRIPTION_H_
+
+#include "base/callback.h"
+
+namespace gpu {
+struct Mailbox;
+}
+
+namespace media {
+
+// OverlayStateObserverSubscription is an empty interface class which allows a
+// media component (e.g. MediaFoundationRendererClient) to manage a reference
+// to an OverlayStateObserver implementation where a reference to the concrete
+// implementation may not be allowed as a result of the dependency chain (e.g.
+// a concrete implementation in //content).
+//
+// No additional interface methods are required as creation parameters (a
+// Mailbox & notification callback) may be provided at the time the object is
+// constructed. All further interactions between the OverlayStateObserver and a
+// subscribing component flow from the OverlayStateObserver to the subscriber
+// via the notification callback using a push model.
+class OverlayStateObserverSubscription {
+ public:
+ virtual ~OverlayStateObserverSubscription() = default;
+
+ // The bool parameter indicates if the surface was promoted to an overlay.
+ using StateChangedCB = base::RepeatingCallback<void(bool)>;
+};
+
+using ObserveOverlayStateCB = base::RepeatingCallback<
+ std::unique_ptr<media::OverlayStateObserverSubscription>(
+ const gpu::Mailbox& mailbox,
+ OverlayStateObserverSubscription::StateChangedCB on_state_changed_cb)>;
+
+} // namespace media
+
+#endif // MEDIA_BASE_WIN_OVERLAY_STATE_OBSERVER_SUBSCRIPTION_H_
diff --git a/chromium/media/capabilities/BUILD.gn b/chromium/media/capabilities/BUILD.gn
index c12dc315800..794dda86d03 100644
--- a/chromium/media/capabilities/BUILD.gn
+++ b/chromium/media/capabilities/BUILD.gn
@@ -26,6 +26,8 @@ source_set("capabilities") {
"in_memory_video_decode_stats_db_impl.h",
"learning_helper.cc",
"learning_helper.h",
+ "pending_operations.cc",
+ "pending_operations.h",
"video_decode_stats_db.cc",
"video_decode_stats_db.h",
"video_decode_stats_db_impl.cc",
@@ -61,6 +63,7 @@ source_set("unit_tests") {
testonly = true
sources = [
"in_memory_video_decode_stats_db_unittest.cc",
+ "pending_operations_unittest.cc",
"video_decode_stats_db_impl_unittest.cc",
"video_decode_stats_db_unittest.cc",
"webrtc_video_stats_db_impl_unittest.cc",
diff --git a/chromium/media/capabilities/pending_operations.cc b/chromium/media/capabilities/pending_operations.cc
new file mode 100644
index 00000000000..c2060088f40
--- /dev/null
+++ b/chromium/media/capabilities/pending_operations.cc
@@ -0,0 +1,111 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capabilities/pending_operations.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace media {
+namespace {
+// Timeout threshold for DB operations. See OnOperationTimeout().
+// NOTE: Used by UmaHistogramOpTime. Change the name if you change the time.
+static constexpr base::TimeDelta kPendingOpTimeout = base::Seconds(30);
+} // namespace
+
+PendingOperations::PendingOperation::PendingOperation(
+ const std::string& uma_prefix,
+ std::string uma_str,
+ std::unique_ptr<base::CancelableOnceClosure> timeout_closure)
+ : uma_prefix_(uma_prefix),
+ uma_str_(uma_str),
+ timeout_closure_(std::move(timeout_closure)),
+ start_ticks_(base::TimeTicks::Now()) {
+ DVLOG(3) << __func__ << " Started " << uma_str_;
+}
+
+PendingOperations::PendingOperation::~PendingOperation() {
+ // Destroying a pending operation that hasn't timed out yet implies the
+ // operation has completed.
+ if (timeout_closure_ && !timeout_closure_->IsCancelled()) {
+ base::TimeDelta op_duration = base::TimeTicks::Now() - start_ticks_;
+ UmaHistogramOpTime(uma_str_, op_duration);
+ DVLOG(3) << __func__ << " Completed " << uma_str_ << " ("
+ << op_duration.InMilliseconds() << ")";
+
+ // Ensure the timeout doesn't fire. Destruction should cancel the callback
+ // implicitly, but that's not a documented contract, so just taking the safe
+ // route.
+ timeout_closure_->Cancel();
+ }
+}
+
+void PendingOperations::PendingOperation::UmaHistogramOpTime(
+ const std::string& op_name,
+ base::TimeDelta duration) {
+ base::UmaHistogramCustomMicrosecondsTimes(uma_prefix_ + op_name, duration,
+ base::Milliseconds(1),
+ kPendingOpTimeout, 50);
+}
+
+void PendingOperations::PendingOperation::OnTimeout() {
+ UmaHistogramOpTime(uma_str_, kPendingOpTimeout);
+ LOG(WARNING) << " Timeout performing " << uma_str_
+ << " operation on WebrtcVideoStatsDB";
+
+ // Cancel the closure to ensure we don't double report the task as completed
+ // in ~PendingOperation().
+ timeout_closure_->Cancel();
+}
+
+PendingOperations::PendingOperations(std::string uma_prefix)
+ : uma_prefix_(uma_prefix) {}
+
+PendingOperations::~PendingOperations() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+}
+
+PendingOperations::Id PendingOperations::Start(std::string uma_str) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ Id op_id = next_op_id_++;
+
+ auto timeout_closure = std::make_unique<base::CancelableOnceClosure>(
+ base::BindOnce(&PendingOperations::OnTimeout,
+ weak_ptr_factory_.GetWeakPtr(), op_id));
+
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, timeout_closure->callback(), kPendingOpTimeout);
+
+ pending_ops_.emplace(
+ op_id, std::make_unique<PendingOperation>(uma_prefix_, uma_str,
+ std::move(timeout_closure)));
+
+ return op_id;
+}
+
+void PendingOperations::Complete(Id op_id) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ // Destructing the PendingOperation will trigger UMA for completion timing.
+ int count = pending_ops_.erase(op_id);
+
+ // No big deal, but very unusual. Timeout is very generous, so tasks that
+ // timeout are generally assumed to be permanently hung.
+ if (!count)
+ DVLOG(2) << __func__ << " DB operation completed after timeout.";
+}
+
+void PendingOperations::OnTimeout(Id op_id) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ auto it = pending_ops_.find(op_id);
+ DCHECK(it != pending_ops_.end());
+
+ it->second->OnTimeout();
+ pending_ops_.erase(it);
+}
+
+} // namespace media \ No newline at end of file
diff --git a/chromium/media/capabilities/pending_operations.h b/chromium/media/capabilities/pending_operations.h
new file mode 100644
index 00000000000..f775335e59e
--- /dev/null
+++ b/chromium/media/capabilities/pending_operations.h
@@ -0,0 +1,92 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAPABILITIES_PENDING_OPERATIONS_H_
+#define MEDIA_CAPABILITIES_PENDING_OPERATIONS_H_
+
+#include "base/cancelable_callback.h"
+#include "base/containers/flat_map.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MEDIA_EXPORT PendingOperations {
+ public:
+ using Id = int;
+
+ // Helper to report timing information for DB operations, including when they
+ // hang indefinitely.
+ class PendingOperation {
+ public:
+ PendingOperation(
+ const std::string& uma_prefix,
+ std::string uma_str,
+ std::unique_ptr<base::CancelableOnceClosure> timeout_closure);
+ // Records task timing UMA if it hasn't already timed out.
+ virtual ~PendingOperation();
+
+ // Copies disallowed. Incompatible with move-only members and UMA logging in
+ // the destructor.
+ PendingOperation(const PendingOperation&) = delete;
+ PendingOperation& operator=(const PendingOperation&) = delete;
+
+ void UmaHistogramOpTime(const std::string& op_name,
+ base::TimeDelta duration);
+
+ // Trigger UMA recording for timeout.
+ void OnTimeout();
+
+ private:
+ friend class VideoDecodeStatsDBImplTest;
+ friend class WebrtcVideoStatsDBImplTest;
+ const std::string& uma_prefix_;
+ const std::string uma_str_;
+ std::unique_ptr<base::CancelableOnceClosure> timeout_closure_;
+ const base::TimeTicks start_ticks_;
+ };
+
+ explicit PendingOperations(std::string uma_prefix);
+ ~PendingOperations();
+
+ // Creates a PendingOperation using `uma_str` and adds it to `pending_ops_`
+ // map. Returns Id for newly started operation. Callers must later call
+ // Complete() with this id to destroy the PendingOperation and finalize timing
+ // UMA.
+ Id Start(std::string uma_str);
+
+ // Removes PendingOperation from `pending_ops_` using `op_id_` as a key. This
+ // destroys the object and triggers timing UMA.
+ void Complete(Id op_id);
+
+ // Unified handler for timeouts of pending DB operations. PendingOperation
+ // will be notified that it timed out (to trigger timing UMA) and removed from
+ // `pending_ops_`.
+ void OnTimeout(Id id);
+
+ const base::flat_map<Id, std::unique_ptr<PendingOperation>>&
+ get_pending_ops_for_test() const {
+ return pending_ops_;
+ }
+
+ private:
+ // UMA prefix that is used for pending operations histograms.
+ const std::string uma_prefix_;
+
+ // Next Id for use in `pending_ops_` map. See Start().
+ Id next_op_id_ = 0;
+
+ // Map of operation id -> outstanding PendingOperations.
+ base::flat_map<Id, std::unique_ptr<PendingOperation>> pending_ops_;
+
+ // Ensures all access to class members come on the same sequence.
+ SEQUENCE_CHECKER(sequence_checker_);
+
+ base::WeakPtrFactory<PendingOperations> weak_ptr_factory_{this};
+};
+
+} // namespace media
+
+#endif // MEDIA_CAPABILITIES_PENDING_OPERATIONS_H_ \ No newline at end of file
diff --git a/chromium/media/capabilities/pending_operations_unittest.cc b/chromium/media/capabilities/pending_operations_unittest.cc
new file mode 100644
index 00000000000..5a9ce027e15
--- /dev/null
+++ b/chromium/media/capabilities/pending_operations_unittest.cc
@@ -0,0 +1,115 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/test/metrics/histogram_tester.h"
+#include "base/test/task_environment.h"
+#include "base/time/time.h"
+#include "media/capabilities/pending_operations.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace {
+
+class PendingOperationsTest : public ::testing::Test {
+ protected:
+ base::test::TaskEnvironment task_environment_{
+ base::test::TaskEnvironment::TimeSource::MOCK_TIME};
+ base::HistogramTester histogram_;
+};
+
+// Test that histogram is created with correct data.
+TEST_F(PendingOperationsTest, OperationTiming) {
+ const std::string kUmaPrefix = "Media.PendingOperations.";
+ const std::string kOperation = "init";
+ constexpr base::TimeDelta kInitDelay = base::Seconds(2);
+ PendingOperations pending_operations(kUmaPrefix);
+ PendingOperations::Id init_id = pending_operations.Start(kOperation);
+ EXPECT_EQ(pending_operations.get_pending_ops_for_test().size(), 1u);
+ task_environment_.FastForwardBy(kInitDelay);
+ pending_operations.Complete(init_id);
+
+ // No pending operations.
+ EXPECT_TRUE(pending_operations.get_pending_ops_for_test().empty());
+ // UMA histogram emitted.
+ histogram_.ExpectUniqueSample(kUmaPrefix + kOperation,
+ kInitDelay.InMicroseconds(), 1);
+}
+
+// Test that timeout histogram works.
+TEST_F(PendingOperationsTest, OperationTimeout) {
+ const std::string kUmaPrefix = "Media.PendingOperations.";
+ const std::string kOperation = "read";
+ constexpr base::TimeDelta kLongTimeout = base::Hours(1);
+ // Current setting of the pending operation timeout.
+ constexpr base::TimeDelta kPendingOperationTimeout = base::Seconds(30);
+ PendingOperations pending_operations(kUmaPrefix);
+ pending_operations.Start(kOperation);
+ EXPECT_EQ(pending_operations.get_pending_ops_for_test().size(), 1u);
+ task_environment_.FastForwardBy(kLongTimeout);
+
+ // No pending operations.
+ EXPECT_TRUE(pending_operations.get_pending_ops_for_test().empty());
+ // UMA histogram emitted, operation reported as timeout.
+ histogram_.ExpectUniqueSample(kUmaPrefix + kOperation,
+ kPendingOperationTimeout.InMicroseconds(), 1);
+}
+
+// Nested operations.
+struct SimulatedOperation {
+ std::string name;
+ base::TimeDelta start_time;
+ base::TimeDelta stop_time;
+ PendingOperations::Id id;
+};
+
+TEST_F(PendingOperationsTest, NestedOperation) {
+ const std::string kUmaPrefix = "Media.PendingOperations.";
+ PendingOperations pending_operations(kUmaPrefix);
+ // A list of operations named after their start and stop time.
+ SimulatedOperation operations[] = {
+ {"0_10", base::Milliseconds(0), base::Milliseconds(10), 0},
+ {"5_15", base::Milliseconds(5), base::Milliseconds(15), 0},
+ {"6_30", base::Milliseconds(6), base::Milliseconds(30), 0},
+ {"10_12", base::Milliseconds(10), base::Milliseconds(12), 0},
+ {"20_27", base::Milliseconds(20), base::Milliseconds(27), 0},
+ {"5_80", base::Milliseconds(5), base::Milliseconds(80), 0},
+ {"30_60", base::Milliseconds(30), base::Milliseconds(60), 0},
+ {"25_90", base::Milliseconds(25), base::Milliseconds(90), 0},
+ };
+
+ size_t expected_pending_operations = 0;
+ // Run a loop from 0 to 100 ms.
+ base::TimeDelta tick_length = base::Milliseconds(1);
+ for (base::TimeDelta elapsed_time; elapsed_time < base::Milliseconds(100);
+ elapsed_time += tick_length) {
+ // Start/Complete each operation if the elapsed time has reached the
+ // corresponding start/stop time.
+ for (auto& operation : operations) {
+ if (operation.start_time == elapsed_time) {
+ operation.id = pending_operations.Start(operation.name);
+ ++expected_pending_operations;
+ }
+ if (operation.stop_time == elapsed_time) {
+ pending_operations.Complete(operation.id);
+ --expected_pending_operations;
+ }
+ }
+ EXPECT_EQ(pending_operations.get_pending_ops_for_test().size(),
+ expected_pending_operations);
+ task_environment_.FastForwardBy(tick_length);
+ }
+
+ for (const auto& operation : operations) {
+ histogram_.ExpectUniqueSample(
+ kUmaPrefix + operation.name,
+ (operation.stop_time - operation.start_time).InMicroseconds(), 1);
+ }
+}
+
+} // namespace
+} // namespace media
diff --git a/chromium/media/capabilities/video_decode_stats_db_impl.cc b/chromium/media/capabilities/video_decode_stats_db_impl.cc
index 7df25489c40..929fed2147d 100644
--- a/chromium/media/capabilities/video_decode_stats_db_impl.cc
+++ b/chromium/media/capabilities/video_decode_stats_db_impl.cc
@@ -18,7 +18,6 @@
#include "base/metrics/histogram_macros.h"
#include "base/sequence_checker.h"
#include "base/task/thread_pool.h"
-#include "base/threading/thread_task_runner_handle.h"
#include "base/time/default_clock.h"
#include "components/leveldb_proto/public/proto_database_provider.h"
#include "media/base/media_switches.h"
@@ -30,22 +29,12 @@ using ProtoDecodeStatsEntry = leveldb_proto::ProtoDatabase<DecodeStatsProto>;
namespace {
-// Timeout threshold for DB operations. See OnOperationTimeout().
-// NOTE: Used by UmaHistogramOpTime. Change the name if you change the time.
-static constexpr base::TimeDelta kPendingOpTimeout = base::Seconds(30);
-
const int kMaxFramesPerBufferDefault = 2500;
const int kMaxDaysToKeepStatsDefault = 30;
const bool kEnableUnweightedEntriesDefault = false;
-void UmaHistogramOpTime(const std::string& op_name, base::TimeDelta duration) {
- base::UmaHistogramCustomMicrosecondsTimes(
- "Media.VideoDecodeStatsDB.OpTiming." + op_name, duration,
- base::Milliseconds(1), kPendingOpTimeout, 50);
-}
-
} // namespace
const char VideoDecodeStatsDBImpl::kMaxFramesPerBufferParamName[] =
@@ -57,41 +46,6 @@ const char VideoDecodeStatsDBImpl::kMaxDaysToKeepStatsParamName[] =
const char VideoDecodeStatsDBImpl::kEnableUnweightedEntriesParamName[] =
"db_enable_unweighted_entries";
-VideoDecodeStatsDBImpl::PendingOperation::PendingOperation(
- std::string uma_str,
- std::unique_ptr<base::CancelableOnceClosure> timeout_closure)
- : uma_str_(uma_str),
- timeout_closure_(std::move(timeout_closure)),
- start_ticks_(base::TimeTicks::Now()) {
- DVLOG(3) << __func__ << " Started " << uma_str_;
-}
-
-VideoDecodeStatsDBImpl::PendingOperation::~PendingOperation() {
- // Destroying a pending operation that hasn't timed out yet implies the
- // operation has completed.
- if (timeout_closure_ && !timeout_closure_->IsCancelled()) {
- base::TimeDelta op_duration = base::TimeTicks::Now() - start_ticks_;
- UmaHistogramOpTime(uma_str_, op_duration);
- DVLOG(3) << __func__ << " Completed " << uma_str_ << " ("
- << op_duration.InMilliseconds() << ")";
-
- // Ensure the timeout doesn't fire. Destruction should cancel the callback
- // implicitly, but that's not a documented contract, so just taking the safe
- // route.
- timeout_closure_->Cancel();
- }
-}
-
-void VideoDecodeStatsDBImpl::PendingOperation::OnTimeout() {
- UmaHistogramOpTime(uma_str_, kPendingOpTimeout);
- LOG(WARNING) << " Timeout performing " << uma_str_
- << " operation on VideoDecodeStatsDB";
-
- // Cancel the closure to ensure we don't double report the task as completed
- // in ~PendingOperation().
- timeout_closure_->Cancel();
-}
-
// static
int VideoDecodeStatsDBImpl::GetMaxFramesPerBuffer() {
return base::GetFieldTrialParamByFeatureAsDouble(
@@ -143,7 +97,9 @@ constexpr char VideoDecodeStatsDBImpl::kDefaultWriteTime[];
VideoDecodeStatsDBImpl::VideoDecodeStatsDBImpl(
std::unique_ptr<leveldb_proto::ProtoDatabase<DecodeStatsProto>> db)
- : db_(std::move(db)), wall_clock_(base::DefaultClock::GetInstance()) {
+ : pending_operations_(/*uma_prefix=*/"Media.VideoDecodeStatsDB.OpTiming."),
+ db_(std::move(db)),
+ wall_clock_(base::DefaultClock::GetInstance()) {
bool time_parsed =
base::Time::FromString(kDefaultWriteTime, &default_write_time_);
DCHECK(time_parsed);
@@ -155,43 +111,6 @@ VideoDecodeStatsDBImpl::~VideoDecodeStatsDBImpl() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
}
-VideoDecodeStatsDBImpl::PendingOpId VideoDecodeStatsDBImpl::StartPendingOp(
- std::string uma_str) {
- PendingOpId op_id = next_op_id_++;
-
- auto timeout_closure = std::make_unique<base::CancelableOnceClosure>(
- base::BindOnce(&VideoDecodeStatsDBImpl::OnPendingOpTimeout,
- weak_ptr_factory_.GetWeakPtr(), op_id));
-
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, timeout_closure->callback(), kPendingOpTimeout);
-
- pending_ops_.emplace(op_id, std::make_unique<PendingOperation>(
- uma_str, std::move(timeout_closure)));
-
- return op_id;
-}
-
-void VideoDecodeStatsDBImpl::CompletePendingOp(PendingOpId op_id) {
- // Destructing the PendingOperation will trigger UMA for completion timing.
- int count = pending_ops_.erase(op_id);
-
- // No big deal, but very unusual. Timeout is very generous, so tasks that
- // timeout are generally assumed to be permanently hung.
- if (!count)
- DVLOG(2) << __func__ << " DB operation completed after timeout.";
-}
-
-void VideoDecodeStatsDBImpl::OnPendingOpTimeout(PendingOpId op_id) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-
- auto it = pending_ops_.find(op_id);
- DCHECK(it != pending_ops_.end());
-
- it->second->OnTimeout();
- pending_ops_.erase(it);
-}
-
void VideoDecodeStatsDBImpl::Initialize(InitializeCB init_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(init_cb);
@@ -201,19 +120,19 @@ void VideoDecodeStatsDBImpl::Initialize(InitializeCB init_cb) {
// case our whole DB will be less than 35K, so we aren't worried about
// spamming the cache.
// TODO(chcunningham): Keep an eye on the size as the table evolves.
- db_->Init(base::BindOnce(&VideoDecodeStatsDBImpl::OnInit,
- weak_ptr_factory_.GetWeakPtr(),
- StartPendingOp("Initialize"), std::move(init_cb)));
+ db_->Init(base::BindOnce(
+ &VideoDecodeStatsDBImpl::OnInit, weak_ptr_factory_.GetWeakPtr(),
+ pending_operations_.Start("Initialize"), std::move(init_cb)));
}
-void VideoDecodeStatsDBImpl::OnInit(PendingOpId op_id,
+void VideoDecodeStatsDBImpl::OnInit(PendingOperations::Id op_id,
InitializeCB init_cb,
leveldb_proto::Enums::InitStatus status) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_NE(status, leveldb_proto::Enums::InitStatus::kInvalidOperation);
bool success = status == leveldb_proto::Enums::InitStatus::kOK;
DVLOG(2) << __func__ << (success ? " succeeded" : " FAILED!");
- CompletePendingOp(op_id);
+ pending_operations_.Complete(op_id);
UMA_HISTOGRAM_BOOLEAN("Media.VideoDecodeStatsDB.OpSuccess.Initialize",
success);
@@ -241,11 +160,11 @@ void VideoDecodeStatsDBImpl::AppendDecodeStats(
DVLOG(3) << __func__ << " Reading key " << key.ToLogString()
<< " from DB with intent to update with " << entry.ToLogString();
- db_->GetEntry(
- key.Serialize(),
- base::BindOnce(&VideoDecodeStatsDBImpl::WriteUpdatedEntry,
- weak_ptr_factory_.GetWeakPtr(), StartPendingOp("Read"),
- key, entry, std::move(append_done_cb)));
+ db_->GetEntry(key.Serialize(),
+ base::BindOnce(&VideoDecodeStatsDBImpl::WriteUpdatedEntry,
+ weak_ptr_factory_.GetWeakPtr(),
+ pending_operations_.Start("Read"), key, entry,
+ std::move(append_done_cb)));
}
void VideoDecodeStatsDBImpl::GetDecodeStats(const VideoDescKey& key,
@@ -255,11 +174,11 @@ void VideoDecodeStatsDBImpl::GetDecodeStats(const VideoDescKey& key,
DVLOG(3) << __func__ << " " << key.ToLogString();
- db_->GetEntry(
- key.Serialize(),
- base::BindOnce(&VideoDecodeStatsDBImpl::OnGotDecodeStats,
- weak_ptr_factory_.GetWeakPtr(), StartPendingOp("Read"),
- std::move(get_stats_cb)));
+ db_->GetEntry(key.Serialize(),
+ base::BindOnce(&VideoDecodeStatsDBImpl::OnGotDecodeStats,
+ weak_ptr_factory_.GetWeakPtr(),
+ pending_operations_.Start("Read"),
+ std::move(get_stats_cb)));
}
bool VideoDecodeStatsDBImpl::AreStatsUsable(
@@ -311,7 +230,7 @@ bool VideoDecodeStatsDBImpl::AreStatsUsable(
}
void VideoDecodeStatsDBImpl::WriteUpdatedEntry(
- PendingOpId op_id,
+ PendingOperations::Id op_id,
const VideoDescKey& key,
const DecodeStatsEntry& new_entry,
AppendDecodeStatsCB append_done_cb,
@@ -319,7 +238,7 @@ void VideoDecodeStatsDBImpl::WriteUpdatedEntry(
std::unique_ptr<DecodeStatsProto> stats_proto) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(IsInitialized());
- CompletePendingOp(op_id);
+ pending_operations_.Complete(op_id);
// Note: outcome of "Write" operation logged in OnEntryUpdated().
UMA_HISTOGRAM_BOOLEAN("Media.VideoDecodeStatsDB.OpSuccess.Read",
@@ -457,31 +376,32 @@ void VideoDecodeStatsDBImpl::WriteUpdatedEntry(
std::unique_ptr<DBType::KeyEntryVector> entries =
std::make_unique<DBType::KeyEntryVector>();
entries->emplace_back(key.Serialize(), *stats_proto);
- db_->UpdateEntries(
- std::move(entries), std::make_unique<leveldb_proto::KeyVector>(),
- base::BindOnce(&VideoDecodeStatsDBImpl::OnEntryUpdated,
- weak_ptr_factory_.GetWeakPtr(), StartPendingOp("Write"),
- std::move(append_done_cb)));
+ db_->UpdateEntries(std::move(entries),
+ std::make_unique<leveldb_proto::KeyVector>(),
+ base::BindOnce(&VideoDecodeStatsDBImpl::OnEntryUpdated,
+ weak_ptr_factory_.GetWeakPtr(),
+ pending_operations_.Start("Write"),
+ std::move(append_done_cb)));
}
-void VideoDecodeStatsDBImpl::OnEntryUpdated(PendingOpId op_id,
+void VideoDecodeStatsDBImpl::OnEntryUpdated(PendingOperations::Id op_id,
AppendDecodeStatsCB append_done_cb,
bool success) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOG(3) << __func__ << " update " << (success ? "succeeded" : "FAILED!");
- CompletePendingOp(op_id);
+ pending_operations_.Complete(op_id);
UMA_HISTOGRAM_BOOLEAN("Media.VideoDecodeStatsDB.OpSuccess.Write", success);
std::move(append_done_cb).Run(success);
}
void VideoDecodeStatsDBImpl::OnGotDecodeStats(
- PendingOpId op_id,
+ PendingOperations::Id op_id,
GetDecodeStatsCB get_stats_cb,
bool success,
std::unique_ptr<DecodeStatsProto> stats_proto) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOG(3) << __func__ << " get " << (success ? "succeeded" : "FAILED!");
- CompletePendingOp(op_id);
+ pending_operations_.Complete(op_id);
UMA_HISTOGRAM_BOOLEAN("Media.VideoDecodeStatsDB.OpSuccess.Read", success);
std::unique_ptr<DecodeStatsEntry> entry;
@@ -546,17 +466,18 @@ void VideoDecodeStatsDBImpl::ClearStats(base::OnceClosure clear_done_cb) {
std::make_unique<ProtoDecodeStatsEntry::KeyEntryVector>(),
base::BindRepeating([](const std::string& key) { return true; }),
base::BindOnce(&VideoDecodeStatsDBImpl::OnStatsCleared,
- weak_ptr_factory_.GetWeakPtr(), StartPendingOp("Clear"),
+ weak_ptr_factory_.GetWeakPtr(),
+ pending_operations_.Start("Clear"),
std::move(clear_done_cb)));
}
-void VideoDecodeStatsDBImpl::OnStatsCleared(PendingOpId op_id,
+void VideoDecodeStatsDBImpl::OnStatsCleared(PendingOperations::Id op_id,
base::OnceClosure clear_done_cb,
bool success) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOG(2) << __func__ << (success ? " succeeded" : " FAILED!");
- CompletePendingOp(op_id);
+ pending_operations_.Complete(op_id);
UMA_HISTOGRAM_BOOLEAN("Media.VideoDecodeStatsDB.OpSuccess.Clear", success);
diff --git a/chromium/media/capabilities/video_decode_stats_db_impl.h b/chromium/media/capabilities/video_decode_stats_db_impl.h
index e7457ea5172..328b7c789f7 100644
--- a/chromium/media/capabilities/video_decode_stats_db_impl.h
+++ b/chromium/media/capabilities/video_decode_stats_db_impl.h
@@ -7,8 +7,6 @@
#include <memory>
-#include "base/cancelable_callback.h"
-#include "base/containers/flat_map.h"
#include "base/files/file_path.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/weak_ptr.h"
@@ -17,6 +15,7 @@
#include "components/leveldb_proto/public/proto_database.h"
#include "media/base/media_export.h"
#include "media/base/video_codecs.h"
+#include "media/capabilities/pending_operations.h"
#include "media/capabilities/video_decode_stats_db.h"
#include "ui/gfx/geometry/size.h"
@@ -66,8 +65,6 @@ class MEDIA_EXPORT VideoDecodeStatsDBImpl : public VideoDecodeStatsDB {
private:
friend class VideoDecodeStatsDBImplTest;
- using PendingOpId = int;
-
// Private constructor only called by tests (friends). Production code
// should always use the static Create() method.
VideoDecodeStatsDBImpl(
@@ -95,53 +92,9 @@ class MEDIA_EXPORT VideoDecodeStatsDBImpl : public VideoDecodeStatsDB {
// Returns current feature params.
static base::FieldTrialParams GetFieldTrialParams();
- // Creates a PendingOperation using |uma_str| and adds it to |pending_ops_|
- // map. Returns PendingOpId for newly started operation. Callers must later
- // call CompletePendingOp() with this id to destroy the PendingOperation and
- // finalize timing UMA.
- PendingOpId StartPendingOp(std::string uma_str);
-
- // Removes PendingOperation from |pending_ops_| using |op_id_| as a key. This
- // destroys the object and triggers timing UMA.
- void CompletePendingOp(PendingOpId op_id);
-
- // Unified handler for timeouts of pending DB operations. PendingOperation
- // will be notified that it timed out (to trigger timing UMA) and removed from
- // |penidng_ops_|.
- void OnPendingOpTimeout(PendingOpId id);
-
- // Helper to report timing information for DB operations, including when they
- // hang indefinitely.
- class PendingOperation {
- public:
- PendingOperation(
- std::string uma_str,
- std::unique_ptr<base::CancelableOnceClosure> timeout_closure);
- // Records task timing UMA if it hasn't already timed out.
- virtual ~PendingOperation();
-
- // Copies disallowed. Incompatible with move-only members and UMA logging in
- // the destructor.
- PendingOperation(const PendingOperation&) = delete;
- PendingOperation& operator=(const PendingOperation&) = delete;
-
- // Trigger UMA recording for timeout.
- void OnTimeout();
-
- private:
- friend class VideoDecodeStatsDBImplTest;
-
- std::string uma_str_;
- std::unique_ptr<base::CancelableOnceClosure> timeout_closure_;
- base::TimeTicks start_ticks_;
- };
-
- // Map of operation id -> outstanding PendingOperations.
- base::flat_map<PendingOpId, std::unique_ptr<PendingOperation>> pending_ops_;
-
// Called when the database has been initialized. Will immediately call
// |init_cb| to forward |success|.
- void OnInit(PendingOpId id,
+ void OnInit(PendingOperations::Id id,
InitializeCB init_cb,
leveldb_proto::Enums::InitStatus status);
@@ -150,7 +103,7 @@ class MEDIA_EXPORT VideoDecodeStatsDBImpl : public VideoDecodeStatsDB {
// Passed as the callback for |OnGotDecodeStats| by |AppendDecodeStats| to
// update the database once we've read the existing stats entry.
- void WriteUpdatedEntry(PendingOpId op_id,
+ void WriteUpdatedEntry(PendingOperations::Id op_id,
const VideoDescKey& key,
const DecodeStatsEntry& entry,
AppendDecodeStatsCB append_done_cb,
@@ -159,21 +112,21 @@ class MEDIA_EXPORT VideoDecodeStatsDBImpl : public VideoDecodeStatsDB {
// Called when the database has been modified after a call to
// |WriteUpdatedEntry|. Will run |append_done_cb| when done.
- void OnEntryUpdated(PendingOpId op_id,
+ void OnEntryUpdated(PendingOperations::Id op_id,
AppendDecodeStatsCB append_done_cb,
bool success);
// Called when GetDecodeStats() operation was performed. |get_stats_cb|
// will be run with |success| and a |DecodeStatsEntry| created from
// |stats_proto| or nullptr if no entry was found for the requested key.
- void OnGotDecodeStats(PendingOpId op_id,
+ void OnGotDecodeStats(PendingOperations::Id op_id,
GetDecodeStatsCB get_stats_cb,
bool success,
std::unique_ptr<DecodeStatsProto> stats_proto);
// Internal callback for OnLoadAllKeysForClearing(), initially triggered by
// ClearStats(). Method simply logs |success| and runs |clear_done_cb|.
- void OnStatsCleared(PendingOpId op_id,
+ void OnStatsCleared(PendingOperations::Id op_id,
base::OnceClosure clear_done_cb,
bool success);
@@ -189,8 +142,7 @@ class MEDIA_EXPORT VideoDecodeStatsDBImpl : public VideoDecodeStatsDB {
wall_clock_ = tick_clock;
}
- // Next PendingOpId for use in |pending_ops_| map. See StartPendingOp().
- PendingOpId next_op_id_ = 0;
+ PendingOperations pending_operations_;
// Indicates whether initialization is completed. Does not indicate whether it
// was successful. Will be reset upon calling DestroyStats(). Failed
diff --git a/chromium/media/capabilities/video_decode_stats_db_impl_unittest.cc b/chromium/media/capabilities/video_decode_stats_db_impl_unittest.cc
index 565b7438744..ab075f6fa3d 100644
--- a/chromium/media/capabilities/video_decode_stats_db_impl_unittest.cc
+++ b/chromium/media/capabilities/video_decode_stats_db_impl_unittest.cc
@@ -77,13 +77,19 @@ class VideoDecodeStatsDBImplTest : public ::testing::Test {
}
void VerifyOnePendingOp(std::string op_name) {
- EXPECT_EQ(stats_db_->pending_ops_.size(), 1u);
- VideoDecodeStatsDBImpl::PendingOperation* pending_op =
- stats_db_->pending_ops_.begin()->second.get();
+ EXPECT_EQ(stats_db_->pending_operations_.get_pending_ops_for_test().size(),
+ 1u);
+ PendingOperations::PendingOperation* pending_op =
+ stats_db_->pending_operations_.get_pending_ops_for_test()
+ .begin()
+ ->second.get();
EXPECT_EQ(pending_op->uma_str_, op_name);
}
- void VerifyNoPendingOps() { EXPECT_TRUE(stats_db_->pending_ops_.empty()); }
+ void VerifyNoPendingOps() {
+ EXPECT_TRUE(
+ stats_db_->pending_operations_.get_pending_ops_for_test().empty());
+ }
int GetMaxFramesPerBuffer() {
return VideoDecodeStatsDBImpl::GetMaxFramesPerBuffer();
diff --git a/chromium/media/capabilities/webrtc_video_stats_db_impl.cc b/chromium/media/capabilities/webrtc_video_stats_db_impl.cc
index fefbc7b7c6f..6dbf9309361 100644
--- a/chromium/media/capabilities/webrtc_video_stats_db_impl.cc
+++ b/chromium/media/capabilities/webrtc_video_stats_db_impl.cc
@@ -19,7 +19,6 @@
#include "base/sequence_checker.h"
#include "base/strings/string_util.h"
#include "base/task/thread_pool.h"
-#include "base/threading/thread_task_runner_handle.h"
#include "base/time/default_clock.h"
#include "components/leveldb_proto/public/proto_database_provider.h"
#include "media/base/media_switches.h"
@@ -30,55 +29,6 @@ namespace media {
using ProtoVideoStatsEntry =
leveldb_proto::ProtoDatabase<WebrtcVideoStatsEntryProto>;
-namespace {
-
-// Timeout threshold for DB operations. See OnOperationTimeout().
-// NOTE: Used by UmaHistogramOpTime. Change the name if you change the time.
-static constexpr base::TimeDelta kPendingOpTimeout = base::Seconds(30);
-
-void UmaHistogramOpTime(const std::string& op_name, base::TimeDelta duration) {
- base::UmaHistogramCustomMicrosecondsTimes(
- "Media.WebrtcVideoStatsDB.OpTiming." + op_name, duration,
- base::Milliseconds(1), kPendingOpTimeout, 50);
-}
-
-} // namespace
-
-WebrtcVideoStatsDBImpl::PendingOperation::PendingOperation(
- std::string uma_str,
- std::unique_ptr<base::CancelableOnceClosure> timeout_closure)
- : uma_str_(uma_str),
- timeout_closure_(std::move(timeout_closure)),
- start_ticks_(base::TimeTicks::Now()) {
- DVLOG(3) << __func__ << " Started " << uma_str_;
-}
-
-WebrtcVideoStatsDBImpl::PendingOperation::~PendingOperation() {
- // Destroying a pending operation that hasn't timed out yet implies the
- // operation has completed.
- if (timeout_closure_ && !timeout_closure_->IsCancelled()) {
- base::TimeDelta op_duration = base::TimeTicks::Now() - start_ticks_;
- UmaHistogramOpTime(uma_str_, op_duration);
- DVLOG(3) << __func__ << " Completed " << uma_str_ << " ("
- << op_duration.InMilliseconds() << ")";
-
- // Ensure the timeout doesn't fire. Destruction should cancel the callback
- // implicitly, but that's not a documented contract, so just taking the safe
- // route.
- timeout_closure_->Cancel();
- }
-}
-
-void WebrtcVideoStatsDBImpl::PendingOperation::OnTimeout() {
- UmaHistogramOpTime(uma_str_, kPendingOpTimeout);
- LOG(WARNING) << " Timeout performing " << uma_str_
- << " operation on WebrtcVideoStatsDB";
-
- // Cancel the closure to ensure we don't double report the task as completed
- // in ~PendingOperation().
- timeout_closure_->Cancel();
-}
-
// static
std::unique_ptr<WebrtcVideoStatsDBImpl> WebrtcVideoStatsDBImpl::Create(
base::FilePath db_dir,
@@ -97,7 +47,9 @@ std::unique_ptr<WebrtcVideoStatsDBImpl> WebrtcVideoStatsDBImpl::Create(
WebrtcVideoStatsDBImpl::WebrtcVideoStatsDBImpl(
std::unique_ptr<leveldb_proto::ProtoDatabase<WebrtcVideoStatsEntryProto>>
db)
- : db_(std::move(db)), wall_clock_(base::DefaultClock::GetInstance()) {
+ : pending_operations_(/*uma_prefix=*/"Media.WebrtcVideoStatsDB.OpTiming."),
+ db_(std::move(db)),
+ wall_clock_(base::DefaultClock::GetInstance()) {
DCHECK(db_);
}
@@ -105,61 +57,24 @@ WebrtcVideoStatsDBImpl::~WebrtcVideoStatsDBImpl() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
}
-WebrtcVideoStatsDBImpl::PendingOpId WebrtcVideoStatsDBImpl::StartPendingOp(
- std::string uma_str) {
- PendingOpId op_id = next_op_id_++;
-
- auto timeout_closure = std::make_unique<base::CancelableOnceClosure>(
- base::BindOnce(&WebrtcVideoStatsDBImpl::OnPendingOpTimeout,
- weak_ptr_factory_.GetWeakPtr(), op_id));
-
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, timeout_closure->callback(), kPendingOpTimeout);
-
- pending_ops_.emplace(op_id, std::make_unique<PendingOperation>(
- uma_str, std::move(timeout_closure)));
-
- return op_id;
-}
-
-void WebrtcVideoStatsDBImpl::CompletePendingOp(PendingOpId op_id) {
- // Destructing the PendingOperation will trigger UMA for completion timing.
- int count = pending_ops_.erase(op_id);
-
- // No big deal, but very unusual. Timeout is very generous, so tasks that
- // timeout are generally assumed to be permanently hung.
- if (!count)
- DVLOG(2) << __func__ << " DB operation completed after timeout.";
-}
-
-void WebrtcVideoStatsDBImpl::OnPendingOpTimeout(PendingOpId op_id) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-
- auto it = pending_ops_.find(op_id);
- DCHECK(it != pending_ops_.end());
-
- it->second->OnTimeout();
- pending_ops_.erase(it);
-}
-
void WebrtcVideoStatsDBImpl::Initialize(InitializeCB init_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(init_cb);
DCHECK(!IsInitialized());
- db_->Init(base::BindOnce(&WebrtcVideoStatsDBImpl::OnInit,
- weak_ptr_factory_.GetWeakPtr(),
- StartPendingOp("Initialize"), std::move(init_cb)));
+ db_->Init(base::BindOnce(
+ &WebrtcVideoStatsDBImpl::OnInit, weak_ptr_factory_.GetWeakPtr(),
+ pending_operations_.Start("Initialize"), std::move(init_cb)));
}
-void WebrtcVideoStatsDBImpl::OnInit(PendingOpId op_id,
+void WebrtcVideoStatsDBImpl::OnInit(PendingOperations::Id op_id,
InitializeCB init_cb,
leveldb_proto::Enums::InitStatus status) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_NE(status, leveldb_proto::Enums::InitStatus::kInvalidOperation);
bool success = status == leveldb_proto::Enums::InitStatus::kOK;
DVLOG(2) << __func__ << (success ? " succeeded" : " FAILED!");
- CompletePendingOp(op_id);
+ pending_operations_.Complete(op_id);
UMA_HISTOGRAM_BOOLEAN("Media.WebrtcVideoStatsDB.OpSuccess.Initialize",
success);
@@ -188,11 +103,11 @@ void WebrtcVideoStatsDBImpl::AppendVideoStats(
<< " from DB with intent to update with "
<< video_stats.ToLogString();
- db_->GetEntry(
- key.Serialize(),
- base::BindOnce(&WebrtcVideoStatsDBImpl::WriteUpdatedEntry,
- weak_ptr_factory_.GetWeakPtr(), StartPendingOp("Read"),
- key, video_stats, std::move(append_done_cb)));
+ db_->GetEntry(key.Serialize(),
+ base::BindOnce(&WebrtcVideoStatsDBImpl::WriteUpdatedEntry,
+ weak_ptr_factory_.GetWeakPtr(),
+ pending_operations_.Start("Read"), key,
+ video_stats, std::move(append_done_cb)));
}
void WebrtcVideoStatsDBImpl::GetVideoStats(const VideoDescKey& key,
@@ -202,11 +117,11 @@ void WebrtcVideoStatsDBImpl::GetVideoStats(const VideoDescKey& key,
DVLOG(3) << __func__ << " " << key.ToLogStringForDebug();
- db_->GetEntry(
- key.Serialize(),
- base::BindOnce(&WebrtcVideoStatsDBImpl::OnGotVideoStats,
- weak_ptr_factory_.GetWeakPtr(), StartPendingOp("Read"),
- std::move(get_stats_cb)));
+ db_->GetEntry(key.Serialize(),
+ base::BindOnce(&WebrtcVideoStatsDBImpl::OnGotVideoStats,
+ weak_ptr_factory_.GetWeakPtr(),
+ pending_operations_.Start("Read"),
+ std::move(get_stats_cb)));
}
void WebrtcVideoStatsDBImpl::GetVideoStatsCollection(
@@ -237,7 +152,8 @@ void WebrtcVideoStatsDBImpl::GetVideoStatsCollection(
db_->LoadKeysAndEntriesWhile(
key_without_pixels, key_iterator_controller,
base::BindOnce(&WebrtcVideoStatsDBImpl::OnGotVideoStatsCollection,
- weak_ptr_factory_.GetWeakPtr(), StartPendingOp("Read"),
+ weak_ptr_factory_.GetWeakPtr(),
+ pending_operations_.Start("Read"),
std::move(get_stats_cb)));
}
@@ -269,7 +185,7 @@ bool WebrtcVideoStatsDBImpl::AreStatsValid(
}
void WebrtcVideoStatsDBImpl::WriteUpdatedEntry(
- PendingOpId op_id,
+ PendingOperations::Id op_id,
const VideoDescKey& key,
const VideoStats& new_video_stats,
AppendVideoStatsCB append_done_cb,
@@ -278,7 +194,7 @@ void WebrtcVideoStatsDBImpl::WriteUpdatedEntry(
DVLOG(3) << __func__;
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(IsInitialized());
- CompletePendingOp(op_id);
+ pending_operations_.Complete(op_id);
// Note: outcome of "Write" operation logged in OnEntryUpdated().
UMA_HISTOGRAM_BOOLEAN("Media.WebrtcVideoStatsDB.OpSuccess.Read",
@@ -345,31 +261,32 @@ void WebrtcVideoStatsDBImpl::WriteUpdatedEntry(
std::unique_ptr<DBType::KeyEntryVector> entries =
std::make_unique<DBType::KeyEntryVector>();
entries->emplace_back(key.Serialize(), new_entry_proto);
- db_->UpdateEntries(
- std::move(entries), std::make_unique<leveldb_proto::KeyVector>(),
- base::BindOnce(&WebrtcVideoStatsDBImpl::OnEntryUpdated,
- weak_ptr_factory_.GetWeakPtr(), StartPendingOp("Write"),
- std::move(append_done_cb)));
+ db_->UpdateEntries(std::move(entries),
+ std::make_unique<leveldb_proto::KeyVector>(),
+ base::BindOnce(&WebrtcVideoStatsDBImpl::OnEntryUpdated,
+ weak_ptr_factory_.GetWeakPtr(),
+ pending_operations_.Start("Write"),
+ std::move(append_done_cb)));
}
-void WebrtcVideoStatsDBImpl::OnEntryUpdated(PendingOpId op_id,
+void WebrtcVideoStatsDBImpl::OnEntryUpdated(PendingOperations::Id op_id,
AppendVideoStatsCB append_done_cb,
bool success) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOG(3) << __func__ << " update " << (success ? "succeeded" : "FAILED!");
- CompletePendingOp(op_id);
+ pending_operations_.Complete(op_id);
UMA_HISTOGRAM_BOOLEAN("Media.WebrtcVideoStatsDB.OpSuccess.Write", success);
std::move(append_done_cb).Run(success);
}
void WebrtcVideoStatsDBImpl::OnGotVideoStats(
- PendingOpId op_id,
+ PendingOperations::Id op_id,
GetVideoStatsCB get_stats_cb,
bool success,
std::unique_ptr<WebrtcVideoStatsEntryProto> stats_proto) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOG(3) << __func__ << " get " << (success ? "succeeded" : "FAILED!");
- CompletePendingOp(op_id);
+ pending_operations_.Complete(op_id);
UMA_HISTOGRAM_BOOLEAN("Media.WebrtcVideoStatsDB.OpSuccess.Read", success);
// Convert from WebrtcVideoStatsEntryProto to VideoStatsEntry.
@@ -397,14 +314,14 @@ void WebrtcVideoStatsDBImpl::OnGotVideoStats(
}
void WebrtcVideoStatsDBImpl::OnGotVideoStatsCollection(
- PendingOpId op_id,
+ PendingOperations::Id op_id,
GetVideoStatsCollectionCB get_stats_cb,
bool success,
std::unique_ptr<std::map<std::string, WebrtcVideoStatsEntryProto>>
stats_proto_collection) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOG(3) << __func__ << " get " << (success ? "succeeded" : "FAILED!");
- CompletePendingOp(op_id);
+ pending_operations_.Complete(op_id);
UMA_HISTOGRAM_BOOLEAN("Media.WebrtcVideoStatsDB.OpSuccess.Read", success);
// Convert from map of WebrtcVideoStatsEntryProto to VideoStatsCollection.
absl::optional<VideoStatsCollection> collection;
@@ -413,10 +330,10 @@ void WebrtcVideoStatsDBImpl::OnGotVideoStatsCollection(
collection.emplace();
const base::TimeDelta max_time_to_keep_stats = GetMaxTimeToKeepStats();
- for (auto const& stats_proto : *stats_proto_collection) {
- if (AreStatsValid(&stats_proto.second)) {
+ for (auto const& [pixel_key, video_stats_entry] : *stats_proto_collection) {
+ if (AreStatsValid(&video_stats_entry)) {
VideoStatsEntry entry;
- for (auto const& stats : stats_proto.second.stats()) {
+ for (auto const& stats : video_stats_entry.stats()) {
if (wall_clock_->Now() - base::Time::FromJsTime(stats.timestamp()) <=
max_time_to_keep_stats) {
entry.emplace_back(stats.timestamp(), stats.frames_processed(),
@@ -427,7 +344,7 @@ void WebrtcVideoStatsDBImpl::OnGotVideoStatsCollection(
if (!entry.empty()) {
absl::optional<int> pixels =
- VideoDescKey::ParsePixelsFromKey(stats_proto.first);
+ VideoDescKey::ParsePixelsFromKey(pixel_key);
if (pixels) {
collection->insert({*pixels, std::move(entry)});
}
@@ -450,17 +367,18 @@ void WebrtcVideoStatsDBImpl::ClearStats(base::OnceClosure clear_done_cb) {
std::make_unique<ProtoVideoStatsEntry::KeyEntryVector>(),
base::BindRepeating([](const std::string& key) { return true; }),
base::BindOnce(&WebrtcVideoStatsDBImpl::OnStatsCleared,
- weak_ptr_factory_.GetWeakPtr(), StartPendingOp("Clear"),
+ weak_ptr_factory_.GetWeakPtr(),
+ pending_operations_.Start("Clear"),
std::move(clear_done_cb)));
}
-void WebrtcVideoStatsDBImpl::OnStatsCleared(PendingOpId op_id,
+void WebrtcVideoStatsDBImpl::OnStatsCleared(PendingOperations::Id op_id,
base::OnceClosure clear_done_cb,
bool success) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOG(2) << __func__ << (success ? " succeeded" : " FAILED!");
- CompletePendingOp(op_id);
+ pending_operations_.Complete(op_id);
UMA_HISTOGRAM_BOOLEAN("Media.WebrtcVideoStatsDB.OpSuccess.Clear", success);
diff --git a/chromium/media/capabilities/webrtc_video_stats_db_impl.h b/chromium/media/capabilities/webrtc_video_stats_db_impl.h
index ac9b2e1ebf0..a41284a930e 100644
--- a/chromium/media/capabilities/webrtc_video_stats_db_impl.h
+++ b/chromium/media/capabilities/webrtc_video_stats_db_impl.h
@@ -7,15 +7,15 @@
#include <memory>
-#include "base/cancelable_callback.h"
-#include "base/containers/flat_map.h"
#include "base/files/file_path.h"
+#include "base/memory/raw_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/metrics/field_trial_params.h"
#include "base/time/time.h"
#include "components/leveldb_proto/public/proto_database.h"
#include "media/base/media_export.h"
#include "media/base/video_codecs.h"
+#include "media/capabilities/pending_operations.h"
#include "media/capabilities/webrtc_video_stats_db.h"
#include "ui/gfx/geometry/size.h"
@@ -60,9 +60,9 @@ class MEDIA_EXPORT WebrtcVideoStatsDBImpl : public WebrtcVideoStatsDB {
void ClearStats(base::OnceClosure clear_done_cb) override;
private:
+ // Test classes are friends, see comment below.
friend class WebrtcVideoStatsDBImplTest;
-
- using PendingOpId = int;
+ friend class WebrtcVideoPerfLPMFuzzerHelper;
// Private constructor only called by tests (friends). Production code
// should always use the static Create() method.
@@ -70,53 +70,9 @@ class MEDIA_EXPORT WebrtcVideoStatsDBImpl : public WebrtcVideoStatsDB {
std::unique_ptr<leveldb_proto::ProtoDatabase<WebrtcVideoStatsEntryProto>>
db);
- // Creates a PendingOperation using `uma_str` and adds it to `pending_ops_`
- // map. Returns PendingOpId for newly started operation. Callers must later
- // call CompletePendingOp() with this id to destroy the PendingOperation and
- // finalize timing UMA.
- PendingOpId StartPendingOp(std::string uma_str);
-
- // Removes PendingOperation from `pending_ops_` using `op_id_` as a key. This
- // destroys the object and triggers timing UMA.
- void CompletePendingOp(PendingOpId op_id);
-
- // Unified handler for timeouts of pending DB operations. PendingOperation
- // will be notified that it timed out (to trigger timing UMA) and removed from
- // `pending_ops_`.
- void OnPendingOpTimeout(PendingOpId id);
-
- // Helper to report timing information for DB operations, including when they
- // hang indefinitely.
- class PendingOperation {
- public:
- PendingOperation(
- std::string uma_str,
- std::unique_ptr<base::CancelableOnceClosure> timeout_closure);
- // Records task timing UMA if it hasn't already timed out.
- virtual ~PendingOperation();
-
- // Copies disallowed. Incompatible with move-only members and UMA logging in
- // the destructor.
- PendingOperation(const PendingOperation&) = delete;
- PendingOperation& operator=(const PendingOperation&) = delete;
-
- // Trigger UMA recording for timeout.
- void OnTimeout();
-
- private:
- friend class WebrtcVideoStatsDBImplTest;
-
- std::string uma_str_;
- std::unique_ptr<base::CancelableOnceClosure> timeout_closure_;
- base::TimeTicks start_ticks_;
- };
-
- // Map of operation id -> outstanding PendingOperations.
- base::flat_map<PendingOpId, std::unique_ptr<PendingOperation>> pending_ops_;
-
// Called when the database has been initialized. Will immediately call
// `init_cb` to forward `success`.
- void OnInit(PendingOpId id,
+ void OnInit(PendingOperations::Id id,
InitializeCB init_cb,
leveldb_proto::Enums::InitStatus status);
@@ -126,7 +82,7 @@ class MEDIA_EXPORT WebrtcVideoStatsDBImpl : public WebrtcVideoStatsDB {
// Passed as the callback for `OnGotVideoStats` by `AppendVideoStats` to
// update the database once we've read the existing stats entry.
void WriteUpdatedEntry(
- PendingOpId op_id,
+ PendingOperations::Id op_id,
const VideoDescKey& key,
const VideoStats& new_video_stats,
AppendVideoStatsCB append_done_cb,
@@ -135,14 +91,14 @@ class MEDIA_EXPORT WebrtcVideoStatsDBImpl : public WebrtcVideoStatsDB {
// Called when the database has been modified after a call to
// `WriteUpdatedEntry`. Will run `append_done_cb` when done.
- void OnEntryUpdated(PendingOpId op_id,
+ void OnEntryUpdated(PendingOperations::Id op_id,
AppendVideoStatsCB append_done_cb,
bool success);
// Called when GetVideoStats() operation was performed. `get_stats_cb`
// will be run with `success` and a `VideoStatsEntry` created from
// `stats_proto` or nullptr if no entry was found for the requested key.
- void OnGotVideoStats(PendingOpId op_id,
+ void OnGotVideoStats(PendingOperations::Id op_id,
GetVideoStatsCB get_stats_cb,
bool success,
std::unique_ptr<WebrtcVideoStatsEntryProto> stats_proto);
@@ -152,7 +108,7 @@ class MEDIA_EXPORT WebrtcVideoStatsDBImpl : public WebrtcVideoStatsDB {
// created from the `stats_proto` map or nullptr if no entries were found for
// the filtered key.
void OnGotVideoStatsCollection(
- PendingOpId op_id,
+ PendingOperations::Id op_id,
GetVideoStatsCollectionCB get_stats_cb,
bool success,
std::unique_ptr<std::map<std::string, WebrtcVideoStatsEntryProto>>
@@ -160,7 +116,7 @@ class MEDIA_EXPORT WebrtcVideoStatsDBImpl : public WebrtcVideoStatsDB {
// Internal callback for OnLoadAllKeysForClearing(), initially triggered by
// ClearStats(). Method simply logs `success` and runs `clear_done_cb`.
- void OnStatsCleared(PendingOpId op_id,
+ void OnStatsCleared(PendingOperations::Id op_id,
base::OnceClosure clear_done_cb,
bool success);
@@ -172,8 +128,7 @@ class MEDIA_EXPORT WebrtcVideoStatsDBImpl : public WebrtcVideoStatsDB {
wall_clock_ = tick_clock;
}
- // Next PendingOpId for use in `pending_ops_` map. See StartPendingOp().
- PendingOpId next_op_id_ = 0;
+ PendingOperations pending_operations_;
// Indicates whether initialization is completed. Does not indicate whether it
// was successful. Will be reset upon calling DestroyStats(). Failed
@@ -188,7 +143,7 @@ class MEDIA_EXPORT WebrtcVideoStatsDBImpl : public WebrtcVideoStatsDB {
// For getting wall-clock time. Tests may override via
// set_wall_clock_for_test().
- const base::Clock* wall_clock_ = nullptr;
+ raw_ptr<const base::Clock> wall_clock_ = nullptr;
// Ensures all access to class members come on the same sequence. API calls
// and callbacks should occur on the same sequence used during construction.
diff --git a/chromium/media/capabilities/webrtc_video_stats_db_impl_unittest.cc b/chromium/media/capabilities/webrtc_video_stats_db_impl_unittest.cc
index aadc150586d..e1ba54696f5 100644
--- a/chromium/media/capabilities/webrtc_video_stats_db_impl_unittest.cc
+++ b/chromium/media/capabilities/webrtc_video_stats_db_impl_unittest.cc
@@ -94,13 +94,19 @@ class WebrtcVideoStatsDBImplTest : public ::testing::Test {
}
void VerifyOnePendingOp(std::string op_name) {
- EXPECT_EQ(stats_db_->pending_ops_.size(), 1u);
- WebrtcVideoStatsDBImpl::PendingOperation* pending_op =
- stats_db_->pending_ops_.begin()->second.get();
+ EXPECT_EQ(stats_db_->pending_operations_.get_pending_ops_for_test().size(),
+ 1u);
+ PendingOperations::PendingOperation* pending_op =
+ stats_db_->pending_operations_.get_pending_ops_for_test()
+ .begin()
+ ->second.get();
EXPECT_EQ(pending_op->uma_str_, op_name);
}
- void VerifyNoPendingOps() { EXPECT_TRUE(stats_db_->pending_ops_.empty()); }
+ void VerifyNoPendingOps() {
+ EXPECT_TRUE(
+ stats_db_->pending_operations_.get_pending_ops_for_test().empty());
+ }
base::TimeDelta GetMaxTimeToKeepStats() {
return WebrtcVideoStatsDBImpl::GetMaxTimeToKeepStats();
diff --git a/chromium/media/capture/content/android/BUILD.gn b/chromium/media/capture/content/android/BUILD.gn
index e73ae45c798..01c5e88c958 100644
--- a/chromium/media/capture/content/android/BUILD.gn
+++ b/chromium/media/capture/content/android/BUILD.gn
@@ -36,6 +36,8 @@ generate_jni("screen_capture_jni_headers") {
android_library("screen_capture_java") {
deps = [
"//base:base_java",
+ "//base:jni_java",
+ "//build/android:build_java",
"//third_party/androidx:androidx_annotation_annotation_java",
]
sources = [ "java/src/org/chromium/media/ScreenCapture.java" ]
diff --git a/chromium/media/capture/mojom/video_capture_buffer.mojom b/chromium/media/capture/mojom/video_capture_buffer.mojom
index fa28c2eac09..8177c36365c 100644
--- a/chromium/media/capture/mojom/video_capture_buffer.mojom
+++ b/chromium/media/capture/mojom/video_capture_buffer.mojom
@@ -52,8 +52,14 @@ struct SharedMemoryViaRawFileDescriptor {
};
union VideoBufferHandle {
- handle<shared_buffer> shared_buffer_handle;
+ // TODO(https://crbug.com/1316808): It is extremely confusing that this union
+ // has both an unsafe and a read-only shmem region subtype. This is probably
+ // a sign that this union needs better documentation or to be split apart
+ // into more distinct types.
+ mojo_base.mojom.UnsafeSharedMemoryRegion unsafe_shmem_region;
mojo_base.mojom.ReadOnlySharedMemoryRegion read_only_shmem_region;
+ // TODO(https://crbug.com/857537): remove this field; it was only needed for
+ // compatibility with older versions of Mojo on ChromeOS.
SharedMemoryViaRawFileDescriptor shared_memory_via_raw_file_descriptor;
MailboxBufferHandleSet mailbox_handles;
gfx.mojom.GpuMemoryBufferHandle gpu_memory_buffer_handle;
diff --git a/chromium/media/capture/mojom/video_capture_types.mojom b/chromium/media/capture/mojom/video_capture_types.mojom
index 74bde96f32e..d9be2ad42e3 100644
--- a/chromium/media/capture/mojom/video_capture_types.mojom
+++ b/chromium/media/capture/mojom/video_capture_types.mojom
@@ -384,7 +384,7 @@ enum CropRequestResult {
kSuccess,
kErrorGeneric,
kUnsupportedCaptureDevice,
- kErrorUnknownDeviceId,
kNotImplemented,
kNonIncreasingCropVersion,
+ kInvalidCropTarget,
};
diff --git a/chromium/media/capture/video/DEPS b/chromium/media/capture/video/DEPS
index aa779c908c9..267cdf32e8d 100644
--- a/chromium/media/capture/video/DEPS
+++ b/chromium/media/capture/video/DEPS
@@ -1,4 +1,5 @@
include_rules = [
+ "+ash/constants/ash_features.h",
"+chromeos/dbus",
"+components/device_event_log",
"+mojo/public/cpp",
diff --git a/chromium/media/capture/video/android/BUILD.gn b/chromium/media/capture/video/android/BUILD.gn
index 6ffcf8e8ae1..3ff17dbedc2 100644
--- a/chromium/media/capture/video/android/BUILD.gn
+++ b/chromium/media/capture/video/android/BUILD.gn
@@ -51,6 +51,8 @@ java_cpp_enum("media_java_enums_srcjar") {
android_library("capture_java") {
deps = [
"//base:base_java",
+ "//base:jni_java",
+ "//build/android:build_java",
"//third_party/androidx:androidx_annotation_annotation_java",
]
annotation_processor_deps = [ "//base/android/jni_generator:jni_processor" ]
diff --git a/chromium/media/capture/video/chromeos/camera_app_device_bridge_impl.cc b/chromium/media/capture/video/chromeos/camera_app_device_bridge_impl.cc
index 92e25d9b05e..25b1c31f64c 100644
--- a/chromium/media/capture/video/chromeos/camera_app_device_bridge_impl.cc
+++ b/chromium/media/capture/video/chromeos/camera_app_device_bridge_impl.cc
@@ -193,10 +193,10 @@ void CameraAppDeviceBridgeImpl::IsSupported(IsSupportedCallback callback) {
std::move(callback).Run(is_supported_);
}
-void CameraAppDeviceBridgeImpl::SetMultipleStreamsEnabled(
+void CameraAppDeviceBridgeImpl::SetVirtualDeviceEnabled(
const std::string& device_id,
bool enabled,
- SetMultipleStreamsEnabledCallback callback) {
+ SetVirtualDeviceEnabledCallback callback) {
base::AutoLock lock(virtual_device_controller_lock_);
if (!virtual_device_controller_) {
std::move(callback).Run(false);
diff --git a/chromium/media/capture/video/chromeos/camera_app_device_bridge_impl.h b/chromium/media/capture/video/chromeos/camera_app_device_bridge_impl.h
index fc3b093d5bf..42f70a79128 100644
--- a/chromium/media/capture/video/chromeos/camera_app_device_bridge_impl.h
+++ b/chromium/media/capture/video/chromeos/camera_app_device_bridge_impl.h
@@ -73,10 +73,10 @@ class CAPTURE_EXPORT CameraAppDeviceBridgeImpl
void IsSupported(IsSupportedCallback callback) override;
- void SetMultipleStreamsEnabled(
+ void SetVirtualDeviceEnabled(
const std::string& device_id,
bool enabled,
- SetMultipleStreamsEnabledCallback callback) override;
+ SetVirtualDeviceEnabledCallback callback) override;
private:
friend struct base::DefaultSingletonTraits<CameraAppDeviceBridgeImpl>;
diff --git a/chromium/media/capture/video/chromeos/camera_app_device_impl.cc b/chromium/media/capture/video/chromeos/camera_app_device_impl.cc
index c70d144bfa6..6ffc6ca9e88 100644
--- a/chromium/media/capture/video/chromeos/camera_app_device_impl.cc
+++ b/chromium/media/capture/video/chromeos/camera_app_device_impl.cc
@@ -193,6 +193,11 @@ void CameraAppDeviceImpl::MaybeDetectDocumentCorners(
rotation));
}
+bool CameraAppDeviceImpl::IsMultipleStreamsEnabled() {
+ base::AutoLock lock(multi_stream_lock_);
+ return multi_stream_enabled_;
+}
+
void CameraAppDeviceImpl::GetCameraInfo(GetCameraInfoCallback callback) {
DCHECK(mojo_task_runner_->BelongsToCurrentThread());
DCHECK(camera_info_);
@@ -351,6 +356,16 @@ void CameraAppDeviceImpl::RegisterDocumentCornersObserver(
std::move(callback).Run();
}
+void CameraAppDeviceImpl::SetMultipleStreamsEnabled(
+ bool enabled,
+ SetMultipleStreamsEnabledCallback callback) {
+ DCHECK(mojo_task_runner_->BelongsToCurrentThread());
+
+ base::AutoLock lock(multi_stream_lock_);
+ multi_stream_enabled_ = enabled;
+ std::move(callback).Run();
+}
+
// static
void CameraAppDeviceImpl::DisableEeNr(ReprocessTask* task) {
auto ee_entry =
diff --git a/chromium/media/capture/video/chromeos/camera_app_device_impl.h b/chromium/media/capture/video/chromeos/camera_app_device_impl.h
index 1fb733555b1..302d6ea211f 100644
--- a/chromium/media/capture/video/chromeos/camera_app_device_impl.h
+++ b/chromium/media/capture/video/chromeos/camera_app_device_impl.h
@@ -130,6 +130,8 @@ class CAPTURE_EXPORT CameraAppDeviceImpl : public cros::mojom::CameraAppDevice {
void MaybeDetectDocumentCorners(std::unique_ptr<gpu::GpuMemoryBufferImpl> gmb,
VideoRotation rotation);
+ bool IsMultipleStreamsEnabled();
+
// cros::mojom::CameraAppDevice implementations.
void GetCameraInfo(GetCameraInfoCallback callback) override;
void SetReprocessOptions(
@@ -157,6 +159,9 @@ class CAPTURE_EXPORT CameraAppDeviceImpl : public cros::mojom::CameraAppDevice {
void RegisterDocumentCornersObserver(
mojo::PendingRemote<cros::mojom::DocumentCornersObserver> observer,
RegisterDocumentCornersObserverCallback callback) override;
+ void SetMultipleStreamsEnabled(
+ bool enabled,
+ SetMultipleStreamsEnabledCallback callback) override;
private:
static void DisableEeNr(ReprocessTask* task);
@@ -236,6 +241,9 @@ class CAPTURE_EXPORT CameraAppDeviceImpl : public cros::mojom::CameraAppDevice {
// used/destructed on the Mojo thread.
std::unique_ptr<ash::DocumentScannerServiceClient> document_scanner_service_;
+ base::Lock multi_stream_lock_;
+ bool multi_stream_enabled_ GUARDED_BY(multi_stream_lock_) = false;
+
// The weak pointers should be dereferenced and invalidated on camera device
// ipc thread.
base::WeakPtrFactory<CameraAppDeviceImpl> weak_ptr_factory_{this};
diff --git a/chromium/media/capture/video/chromeos/camera_app_device_provider_impl.cc b/chromium/media/capture/video/chromeos/camera_app_device_provider_impl.cc
index 66eb6cc1f39..e48a537190c 100644
--- a/chromium/media/capture/video/chromeos/camera_app_device_provider_impl.cc
+++ b/chromium/media/capture/video/chromeos/camera_app_device_provider_impl.cc
@@ -54,27 +54,27 @@ void CameraAppDeviceProviderImpl::IsSupported(IsSupportedCallback callback) {
bridge_->IsSupported(std::move(callback));
}
-void CameraAppDeviceProviderImpl::SetMultipleStreamsEnabled(
+void CameraAppDeviceProviderImpl::SetVirtualDeviceEnabled(
const std::string& source_id,
bool enabled,
- SetMultipleStreamsEnabledCallback callback) {
+ SetVirtualDeviceEnabledCallback callback) {
mapping_callback_.Run(
source_id,
media::BindToCurrentLoop(base::BindOnce(
- &CameraAppDeviceProviderImpl::SetMultipleStreamsEnabledWithDeviceId,
+ &CameraAppDeviceProviderImpl::SetVirtualDeviceEnabledWithDeviceId,
weak_ptr_factory_.GetWeakPtr(), enabled, std::move(callback))));
}
-void CameraAppDeviceProviderImpl::SetMultipleStreamsEnabledWithDeviceId(
+void CameraAppDeviceProviderImpl::SetVirtualDeviceEnabledWithDeviceId(
bool enabled,
- SetMultipleStreamsEnabledCallback callback,
+ SetVirtualDeviceEnabledCallback callback,
const absl::optional<std::string>& device_id) {
if (!device_id.has_value()) {
std::move(callback).Run(false);
return;
}
- bridge_->SetMultipleStreamsEnabled(*device_id, enabled, std::move(callback));
+ bridge_->SetVirtualDeviceEnabled(*device_id, enabled, std::move(callback));
}
} // namespace media
diff --git a/chromium/media/capture/video/chromeos/camera_app_device_provider_impl.h b/chromium/media/capture/video/chromeos/camera_app_device_provider_impl.h
index a5c475e0622..e5a58c6d3a8 100644
--- a/chromium/media/capture/video/chromeos/camera_app_device_provider_impl.h
+++ b/chromium/media/capture/video/chromeos/camera_app_device_provider_impl.h
@@ -37,19 +37,19 @@ class CAPTURE_EXPORT CameraAppDeviceProviderImpl
void GetCameraAppDevice(const std::string& source_id,
GetCameraAppDeviceCallback callback) override;
void IsSupported(IsSupportedCallback callback) override;
- void SetMultipleStreamsEnabled(
+ void SetVirtualDeviceEnabled(
const std::string& device_id,
bool enabled,
- SetMultipleStreamsEnabledCallback callback) override;
+ SetVirtualDeviceEnabledCallback callback) override;
private:
void GetCameraAppDeviceWithDeviceId(
GetCameraAppDeviceCallback callback,
const absl::optional<std::string>& device_id);
- void SetMultipleStreamsEnabledWithDeviceId(
+ void SetVirtualDeviceEnabledWithDeviceId(
bool enable,
- SetMultipleStreamsEnabledCallback callback,
+ SetVirtualDeviceEnabledCallback callback,
const absl::optional<std::string>& device_id);
mojo::Remote<cros::mojom::CameraAppDeviceBridge> bridge_;
diff --git a/chromium/media/capture/video/chromeos/camera_device_delegate.cc b/chromium/media/capture/video/chromeos/camera_device_delegate.cc
index add72214efb..70c3f13743f 100644
--- a/chromium/media/capture/video/chromeos/camera_device_delegate.cc
+++ b/chromium/media/capture/video/chromeos/camera_device_delegate.cc
@@ -906,10 +906,10 @@ void CameraDeviceDelegate::ConfigureStreams(
switch (param.first) {
case ClientType::kPreviewClient:
usage = cros::mojom::GRALLOC_USAGE_HW_COMPOSER;
- // TODO(henryhsu): PreviewClient should remove HW_VIDEO_ENCODER usage
- // when multiple streams enabled.
- if (camera_app_device && camera_app_device->GetCaptureIntent() ==
- cros::mojom::CaptureIntent::VIDEO_RECORD) {
+ if (camera_app_device &&
+ camera_app_device->GetCaptureIntent() ==
+ cros::mojom::CaptureIntent::VIDEO_RECORD &&
+ !camera_app_device->IsMultipleStreamsEnabled()) {
usage |= cros::mojom::GRALLOC_USAGE_HW_VIDEO_ENCODER;
}
break;
diff --git a/chromium/media/capture/video/chromeos/camera_device_delegate_unittest.cc b/chromium/media/capture/video/chromeos/camera_device_delegate_unittest.cc
index a2bfc92e6f0..7f0bee7e2ff 100644
--- a/chromium/media/capture/video/chromeos/camera_device_delegate_unittest.cc
+++ b/chromium/media/capture/video/chromeos/camera_device_delegate_unittest.cc
@@ -342,11 +342,11 @@ class CameraDeviceDelegateTest : public ::testing::Test {
cros::mojom::Camera3NotifyMsgPtr msg = cros::mojom::Camera3NotifyMsg::New();
msg->type = cros::mojom::Camera3MsgType::CAMERA3_MSG_SHUTTER;
- msg->message = cros::mojom::Camera3NotifyMsgMessage::New();
cros::mojom::Camera3ShutterMsgPtr shutter_msg =
cros::mojom::Camera3ShutterMsg::New();
shutter_msg->timestamp = base::TimeTicks::Now().ToInternalValue();
- msg->message->set_shutter(std::move(shutter_msg));
+ msg->message = cros::mojom::Camera3NotifyMsgMessage::NewShutter(
+ std::move(shutter_msg));
callback_ops_->Notify(std::move(msg));
cros::mojom::Camera3CaptureResultPtr result =
diff --git a/chromium/media/capture/video/chromeos/camera_hal_delegate.cc b/chromium/media/capture/video/chromeos/camera_hal_delegate.cc
index ef67081ebda..fbbdf705216 100644
--- a/chromium/media/capture/video/chromeos/camera_hal_delegate.cc
+++ b/chromium/media/capture/video/chromeos/camera_hal_delegate.cc
@@ -559,7 +559,11 @@ bool CameraHalDelegate::UpdateBuiltInCameraInfo() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!ipc_task_runner_->BelongsToCurrentThread());
- camera_module_has_been_set_.Wait();
+ if (!camera_module_has_been_set_.TimedWait(kEventWaitTimeoutSecs)) {
+ LOG(ERROR) << "Camera module not set; platform camera service might not be "
+ "ready yet";
+ return false;
+ }
if (builtin_camera_info_updated_.IsSignaled()) {
return true;
}
diff --git a/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc b/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc
index ca9b47773bf..63edfa7987e 100644
--- a/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc
+++ b/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc
@@ -308,9 +308,7 @@ bool CameraHalDispatcherImpl::IsStarted() {
void CameraHalDispatcherImpl::AddActiveClientObserver(
CameraActiveClientObserver* observer) {
base::AutoLock lock(opened_camera_id_map_lock_);
- for (auto& opened_camera_id_pair : opened_camera_id_map_) {
- const auto& camera_client_type = opened_camera_id_pair.first;
- const auto& camera_id_set = opened_camera_id_pair.second;
+ for (auto& [camera_client_type, camera_id_set] : opened_camera_id_map_) {
if (!camera_id_set.empty()) {
observer->OnActiveClientChange(camera_client_type, /*is_active=*/true);
}
@@ -347,6 +345,10 @@ void CameraHalDispatcherImpl::UnregisterPluginVmToken(
token_manager_.UnregisterPluginVmToken(token);
}
+void CameraHalDispatcherImpl::DisableSensorForTesting() {
+ sensor_enabled_ = false;
+}
+
CameraHalDispatcherImpl::CameraHalDispatcherImpl()
: proxy_thread_("CameraProxyThread"),
blocking_io_thread_("CameraBlockingIOThread"),
@@ -451,6 +453,11 @@ void CameraHalDispatcherImpl::RegisterSensorClientWithToken(
RegisterSensorClientWithTokenCallback callback) {
DCHECK(proxy_task_runner_->BelongsToCurrentThread());
+ if (!sensor_enabled_) {
+ std::move(callback).Run(-EPERM);
+ return;
+ }
+
main_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(
@@ -698,9 +705,7 @@ void CameraHalDispatcherImpl::OnCameraHalServerConnectionError() {
CAMERA_LOG(EVENT) << "Camera HAL server connection lost";
camera_hal_server_.reset();
camera_hal_server_callbacks_.reset();
- for (auto& opened_camera_id_pair : opened_camera_id_map_) {
- auto camera_client_type = opened_camera_id_pair.first;
- const auto& camera_id_set = opened_camera_id_pair.second;
+ for (auto& [camera_client_type, camera_id_set] : opened_camera_id_map_) {
if (!camera_id_set.empty()) {
active_client_observers_->Notify(
FROM_HERE, &CameraActiveClientObserver::OnActiveClientChange,
diff --git a/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.h b/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.h
index a6ef60eb0f8..eb7953b7d0a 100644
--- a/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.h
+++ b/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.h
@@ -166,6 +166,9 @@ class CAPTURE_EXPORT CameraHalDispatcherImpl final
void RegisterPluginVmToken(const base::UnguessableToken& token);
void UnregisterPluginVmToken(const base::UnguessableToken& token);
+ // Used when running capture unittests to avoid running sensor related path.
+ void DisableSensorForTesting();
+
// CameraHalDispatcher implementations.
void RegisterServer(
mojo::PendingRemote<cros::mojom::CameraHalServer> server) final;
@@ -287,6 +290,8 @@ class CAPTURE_EXPORT CameraHalDispatcherImpl final
scoped_refptr<base::ObserverListThreadSafe<CameraPrivacySwitchObserver>>
privacy_switch_observers_;
+ bool sensor_enabled_ = true;
+
base::WeakPtrFactory<CameraHalDispatcherImpl> weak_factory_{this};
};
diff --git a/chromium/media/capture/video/chromeos/mojom/camera_app.mojom b/chromium/media/capture/video/chromeos/mojom/camera_app.mojom
index 17cbe28b475..2c5708c42ff 100644
--- a/chromium/media/capture/video/chromeos/mojom/camera_app.mojom
+++ b/chromium/media/capture/video/chromeos/mojom/camera_app.mojom
@@ -57,7 +57,7 @@ interface CameraAppDeviceProvider {
// Add/Remove a virtual device for recording stream according to |enabled|.
// The virtual device has the same config as |device_id| except facing
// attribute.
- SetMultipleStreamsEnabled(string device_id, bool enabled) => (bool success);
+ SetVirtualDeviceEnabled(string device_id, bool enabled) => (bool success);
};
// Inner interface that used to communicate between browser process (Remote) and
@@ -77,7 +77,7 @@ interface CameraAppDeviceBridge {
// Add/Remove a virtual device for recording stream according to |enabled|.
// The virtual device has the same config as |device_id| except facing
// attribute.
- SetMultipleStreamsEnabled(string device_id, bool enabled) => (bool success);
+ SetVirtualDeviceEnabled(string device_id, bool enabled) => (bool success);
};
// Interface for communication from the Chrome Camera App (Remote) to the camera
@@ -136,6 +136,10 @@ interface CameraAppDevice {
// Registers the document corners observer for preview.
RegisterDocumentCornersObserver(
pending_remote<DocumentCornersObserver> observer) => ();
+
+ // Enable/Disable the multiple streams feature for video recording on the
+ // device given by its |device_id|.
+ SetMultipleStreamsEnabled(bool enabled) => ();
};
// Interface for camera device to send camera metadata to Chrome Camera App.
diff --git a/chromium/media/capture/video/chromeos/request_manager.cc b/chromium/media/capture/video/chromeos/request_manager.cc
index a55165be97f..8afdb6e074a 100644
--- a/chromium/media/capture/video/chromeos/request_manager.cc
+++ b/chromium/media/capture/video/chromeos/request_manager.cc
@@ -457,10 +457,10 @@ bool RequestManager::TryPrepareReprocessRequest(
// Consume reprocess task.
ReprocessJobInfo* reprocess_job_info;
- for (auto& it : buffer_id_reprocess_job_info_map_) {
- if (processing_buffer_ids_.count(it.first) == 0) {
- *input_buffer_id = it.first;
- reprocess_job_info = &it.second;
+ for (auto& [buffer_id, job_info] : buffer_id_reprocess_job_info_map_) {
+ if (processing_buffer_ids_.count(buffer_id) == 0) {
+ *input_buffer_id = buffer_id;
+ reprocess_job_info = &job_info;
break;
}
}
diff --git a/chromium/media/capture/video/chromeos/request_manager_unittest.cc b/chromium/media/capture/video/chromeos/request_manager_unittest.cc
index eb62aafc0f7..32e0d4be17d 100644
--- a/chromium/media/capture/video/chromeos/request_manager_unittest.cc
+++ b/chromium/media/capture/video/chromeos/request_manager_unittest.cc
@@ -251,9 +251,9 @@ class RequestManagerTest : public ::testing::Test {
static_cast<uint64_t>(StreamType::kPreviewOutput);
error_msg->error_code = error_code;
auto notify_msg = cros::mojom::Camera3NotifyMsg::New();
- notify_msg->message = cros::mojom::Camera3NotifyMsgMessage::New();
notify_msg->type = cros::mojom::Camera3MsgType::CAMERA3_MSG_ERROR;
- notify_msg->message->set_error(std::move(error_msg));
+ notify_msg->message =
+ cros::mojom::Camera3NotifyMsgMessage::NewError(std::move(error_msg));
return notify_msg;
}
@@ -264,9 +264,9 @@ class RequestManagerTest : public ::testing::Test {
shutter_msg->frame_number = frame_number;
shutter_msg->timestamp = timestamp;
auto notify_msg = cros::mojom::Camera3NotifyMsg::New();
- notify_msg->message = cros::mojom::Camera3NotifyMsgMessage::New();
notify_msg->type = cros::mojom::Camera3MsgType::CAMERA3_MSG_SHUTTER;
- notify_msg->message->set_shutter(std::move(shutter_msg));
+ notify_msg->message = cros::mojom::Camera3NotifyMsgMessage::NewShutter(
+ std::move(shutter_msg));
return notify_msg;
}
diff --git a/chromium/media/capture/video/chromeos/token_manager.cc b/chromium/media/capture/video/chromeos/token_manager.cc
index 90e8f169d7a..d57d42f4c54 100644
--- a/chromium/media/capture/video/chromeos/token_manager.cc
+++ b/chromium/media/capture/video/chromeos/token_manager.cc
@@ -149,10 +149,9 @@ absl::optional<cros::mojom::CameraClientType> TokenManager::AuthenticateClient(
const base::UnguessableToken& token) {
base::AutoLock l(client_token_map_lock_);
if (type == cros::mojom::CameraClientType::UNKNOWN) {
- for (const auto& client_token_map_pair : client_token_map_) {
- const auto& token_set = client_token_map_pair.second;
+ for (const auto& [client_type, token_set] : client_token_map_) {
if (token_set.find(token) != token_set.end()) {
- return client_token_map_pair.first;
+ return client_type;
}
}
return absl::nullopt;
diff --git a/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia.cc b/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia.cc
index 958e54d0f52..b41314befe4 100644
--- a/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia.cc
+++ b/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia.cc
@@ -255,6 +255,25 @@ void VideoCaptureDeviceFactoryFuchsia::OnWatchDevicesResult(
// Watch for further updates.
WatchDevices();
+ // Notify system monitor that the list of the devices has changed, except if
+ // this is the first WatchDevices() response we've received. The first
+ // time WatchDevices() is called it responds immediately with the current list
+ // of the devices, i.e. there is no need to emit notification in that case. If
+ // the `device_watcher_` was disconnected and reconnected later then we still
+ // want to emit the notification as the list could change while
+ // `device_watcher_` was disconnected. There is no need to compare the content
+ // of the list: `MediaDeviceManager` will notify applications only if the list
+ // has actually changed.
+ if (received_initial_list_) {
+ auto* system_monitor = base::SystemMonitor::Get();
+ if (system_monitor) {
+ system_monitor->ProcessDevicesChanged(
+ base::SystemMonitor::DEVTYPE_VIDEO_CAPTURE);
+ }
+ } else {
+ received_initial_list_ = true;
+ }
+
// Calls callbacks, which may delete |this|.
MaybeResolvePendingDeviceInfoCallbacks();
}
@@ -289,14 +308,6 @@ void VideoCaptureDeviceFactoryFuchsia::
if (num_pending_device_info_requests_ > 0)
return;
- // Notify system monitor if devices have changed. This will indirectly update
- // media device manager and the web app eventually.
- auto* system_monitor = base::SystemMonitor::Get();
- if (system_monitor) {
- system_monitor->ProcessDevicesChanged(
- base::SystemMonitor::DEVTYPE_VIDEO_CAPTURE);
- }
-
std::vector<GetDevicesInfoCallback> callbacks;
callbacks.swap(pending_devices_info_requests_);
diff --git a/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia.h b/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia.h
index 49ea546bef5..72a38096721 100644
--- a/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia.h
+++ b/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia.h
@@ -48,6 +48,8 @@ class CAPTURE_EXPORT VideoCaptureDeviceFactoryFuchsia
std::vector<VideoCaptureDeviceInfo> MakeDevicesInfo();
void MaybeResolvePendingDeviceInfoCallbacks();
+ bool received_initial_list_ = false;
+
fuchsia::camera3::DeviceWatcherPtr device_watcher_;
// Current list of devices. Set to nullopt if the list hasn't been received
diff --git a/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia_test.cc b/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia_test.cc
index 37bde43a1d4..8527d732d61 100644
--- a/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia_test.cc
+++ b/chromium/media/capture/video/fuchsia/video_capture_device_factory_fuchsia_test.cc
@@ -6,6 +6,7 @@
#include "base/fuchsia/test_component_context_for_process.h"
#include "base/run_loop.h"
+#include "base/system/system_monitor.h"
#include "base/test/bind.h"
#include "base/test/task_environment.h"
#include "media/fuchsia/camera/fake_fuchsia_camera.h"
@@ -94,4 +95,41 @@ TEST_F(VideoCaptureDeviceFactoryFuchsiaTest, RemoveWhileEnumerating) {
EXPECT_TRUE(devices_info.empty());
}
+class TestDeviceChangeObserver
+ : public base::SystemMonitor::DevicesChangedObserver {
+ public:
+ TestDeviceChangeObserver() = default;
+ ~TestDeviceChangeObserver() override = default;
+
+ // DevicesChangedObserver implementation.
+ void OnDevicesChanged(base::SystemMonitor::DeviceType device_type) final {
+ EXPECT_EQ(device_type, base::SystemMonitor::DEVTYPE_VIDEO_CAPTURE);
+ ++num_events_;
+ }
+
+ size_t num_events() { return num_events_; };
+
+ private:
+ size_t num_events_ = 0;
+};
+
+TEST_F(VideoCaptureDeviceFactoryFuchsiaTest, DeviceChangeEvent) {
+ base::SystemMonitor system_monitor;
+ TestDeviceChangeObserver test_observer;
+ system_monitor.AddDevicesChangedObserver(&test_observer);
+
+ // DevicesChanged event should not be produced when the list of devices is
+ // fetched for the first time.
+ auto devices_info = GetDevicesInfo();
+ EXPECT_EQ(test_observer.num_events(), 0U);
+
+ // Remove the first camera device. The factory is expected to notify
+ // SystemMonitor about the change.
+ fake_device_watcher_.RemoveDevice(
+ fake_device_watcher_.devices().begin()->first);
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(test_observer.num_events(), 1U);
+}
+
} // namespace media
diff --git a/chromium/media/capture/video/fuchsia/video_capture_device_fuchsia_test.cc b/chromium/media/capture/video/fuchsia/video_capture_device_fuchsia_test.cc
index a0932e65cda..8cb98fa3638 100644
--- a/chromium/media/capture/video/fuchsia/video_capture_device_fuchsia_test.cc
+++ b/chromium/media/capture/video/fuchsia/video_capture_device_fuchsia_test.cc
@@ -71,11 +71,6 @@ class HeapBufferHandleProvider final
return {};
}
- mojo::ScopedSharedBufferHandle DuplicateAsMojoBuffer() override {
- NOTREACHED();
- return {};
- }
-
std::unique_ptr<VideoCaptureBufferHandle> GetHandleForInProcessAccess()
override {
return std::make_unique<HeapBufferHandle>(data_.size(), data_.data());
diff --git a/chromium/media/capture/video/linux/fake_v4l2_impl.cc b/chromium/media/capture/video/linux/fake_v4l2_impl.cc
index 40b22bbcc69..94e9af7ab0d 100644
--- a/chromium/media/capture/video/linux/fake_v4l2_impl.cc
+++ b/chromium/media/capture/video/linux/fake_v4l2_impl.cc
@@ -80,6 +80,8 @@ class FakeV4L2Impl::OpenedDevice {
timeperframe_.denominator = kDefaultFrameInternvalDenominator;
}
+ ~OpenedDevice() { DCHECK(!frame_production_thread_.IsRunning()); }
+
const std::string& device_id() const { return config_.descriptor.device_id; }
int open_flags() const { return open_flags_; }
diff --git a/chromium/media/capture/video/linux/video_capture_device_factory_linux_unittest.cc b/chromium/media/capture/video/linux/video_capture_device_factory_linux_unittest.cc
index 8fe25871bac..38b9519c28a 100644
--- a/chromium/media/capture/video/linux/video_capture_device_factory_linux_unittest.cc
+++ b/chromium/media/capture/video/linux/video_capture_device_factory_linux_unittest.cc
@@ -35,6 +35,8 @@ class VideoCaptureDeviceFactoryLinuxTest
std::move(fake_device_provider));
}
+ void TearDown() override { task_environment_.RunUntilIdle(); }
+
base::test::TaskEnvironment task_environment_;
FakeV4L2Impl* fake_v4l2_;
FakeDeviceProvider* fake_device_provider_;
diff --git a/chromium/media/capture/video/linux/video_capture_device_linux.cc b/chromium/media/capture/video/linux/video_capture_device_linux.cc
index b4dfd1489d0..3f2848ae935 100644
--- a/chromium/media/capture/video/linux/video_capture_device_linux.cc
+++ b/chromium/media/capture/video/linux/video_capture_device_linux.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/task/single_thread_task_runner.h"
+#include "base/task/thread_pool.h"
#include "build/build_config.h"
#include "media/capture/video/linux/v4l2_capture_delegate.h"
@@ -56,15 +57,16 @@ VideoCaptureDeviceLinux::VideoCaptureDeviceLinux(
const VideoCaptureDeviceDescriptor& device_descriptor)
: device_descriptor_(device_descriptor),
v4l2_(std::move(v4l2)),
- v4l2_thread_("V4L2CaptureThread"),
+ task_runner_(base::ThreadPool::CreateSingleThreadTaskRunner(
+ {base::TaskPriority::USER_BLOCKING, base::MayBlock(),
+ base::WithBaseSyncPrimitives()},
+ base::SingleThreadTaskRunnerThreadMode::DEDICATED)),
rotation_(0) {}
VideoCaptureDeviceLinux::~VideoCaptureDeviceLinux() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- // Check if the thread is running.
- // This means that the device has not been StopAndDeAllocate()d properly.
- DCHECK(!v4l2_thread_.IsRunning());
- v4l2_thread_.Stop();
+ DCHECK(!capture_impl_)
+ << "StopAndDeAllocate() must be called before destruction.";
}
void VideoCaptureDeviceLinux::AllocateAndStart(
@@ -72,97 +74,74 @@ void VideoCaptureDeviceLinux::AllocateAndStart(
std::unique_ptr<VideoCaptureDevice::Client> client) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!capture_impl_);
- if (v4l2_thread_.IsRunning())
- return; // Wrong state.
- v4l2_thread_.Start();
const int line_frequency =
TranslatePowerLineFrequencyToV4L2(GetPowerLineFrequency(params));
capture_impl_ = std::make_unique<V4L2CaptureDelegate>(
- v4l2_.get(), device_descriptor_, v4l2_thread_.task_runner(),
- line_frequency, rotation_);
+ v4l2_.get(), device_descriptor_, task_runner_, line_frequency, rotation_);
if (!capture_impl_) {
client->OnError(VideoCaptureError::
kDeviceCaptureLinuxFailedToCreateVideoCaptureDelegate,
FROM_HERE, "Failed to create VideoCaptureDelegate");
return;
}
- v4l2_thread_.task_runner()->PostTask(
+ task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&V4L2CaptureDelegate::AllocateAndStart,
capture_impl_->GetWeakPtr(),
params.requested_format.frame_size.width(),
params.requested_format.frame_size.height(),
params.requested_format.frame_rate, std::move(client)));
-
- for (auto& request : photo_requests_queue_)
- v4l2_thread_.task_runner()->PostTask(FROM_HERE, std::move(request));
- photo_requests_queue_.clear();
}
void VideoCaptureDeviceLinux::StopAndDeAllocate() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- if (!v4l2_thread_.IsRunning())
+ if (!capture_impl_)
return; // Wrong state.
- v4l2_thread_.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&V4L2CaptureDelegate::StopAndDeAllocate,
- capture_impl_->GetWeakPtr()));
- v4l2_thread_.task_runner()->DeleteSoon(FROM_HERE, capture_impl_.release());
- v4l2_thread_.Stop();
+ task_runner_->PostTask(FROM_HERE,
+ base::BindOnce(&V4L2CaptureDelegate::StopAndDeAllocate,
+ capture_impl_->GetWeakPtr()));
+ task_runner_->DeleteSoon(FROM_HERE, std::move(capture_impl_));
capture_impl_ = nullptr;
}
void VideoCaptureDeviceLinux::TakePhoto(TakePhotoCallback callback) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(capture_impl_);
- auto functor =
+ task_runner_->PostTask(
+ FROM_HERE,
base::BindOnce(&V4L2CaptureDelegate::TakePhoto,
- capture_impl_->GetWeakPtr(), std::move(callback));
- if (!v4l2_thread_.IsRunning()) {
- // We have to wait until we get the device AllocateAndStart()ed.
- photo_requests_queue_.push_back(std::move(functor));
- return;
- }
- v4l2_thread_.task_runner()->PostTask(FROM_HERE, std::move(functor));
+ capture_impl_->GetWeakPtr(), std::move(callback)));
}
void VideoCaptureDeviceLinux::GetPhotoState(GetPhotoStateCallback callback) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- auto functor =
+ DCHECK(capture_impl_);
+ task_runner_->PostTask(
+ FROM_HERE,
base::BindOnce(&V4L2CaptureDelegate::GetPhotoState,
- capture_impl_->GetWeakPtr(), std::move(callback));
- if (!v4l2_thread_.IsRunning()) {
- // We have to wait until we get the device AllocateAndStart()ed.
- photo_requests_queue_.push_back(std::move(functor));
- return;
- }
- v4l2_thread_.task_runner()->PostTask(FROM_HERE, std::move(functor));
+ capture_impl_->GetWeakPtr(), std::move(callback)));
}
void VideoCaptureDeviceLinux::SetPhotoOptions(
mojom::PhotoSettingsPtr settings,
SetPhotoOptionsCallback callback) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- auto functor = base::BindOnce(&V4L2CaptureDelegate::SetPhotoOptions,
+ DCHECK(capture_impl_);
+ task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&V4L2CaptureDelegate::SetPhotoOptions,
capture_impl_->GetWeakPtr(),
- std::move(settings), std::move(callback));
- if (!v4l2_thread_.IsRunning()) {
- // We have to wait until we get the device AllocateAndStart()ed.
- photo_requests_queue_.push_back(std::move(functor));
- return;
- }
- v4l2_thread_.task_runner()->PostTask(FROM_HERE, std::move(functor));
+ std::move(settings), std::move(callback)));
}
void VideoCaptureDeviceLinux::SetRotation(int rotation) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(capture_impl_);
rotation_ = rotation;
- if (v4l2_thread_.IsRunning()) {
- v4l2_thread_.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&V4L2CaptureDelegate::SetRotation,
- capture_impl_->GetWeakPtr(), rotation));
- }
+ task_runner_->PostTask(FROM_HERE,
+ base::BindOnce(&V4L2CaptureDelegate::SetRotation,
+ capture_impl_->GetWeakPtr(), rotation));
}
} // namespace media
diff --git a/chromium/media/capture/video/linux/video_capture_device_linux.h b/chromium/media/capture/video/linux/video_capture_device_linux.h
index 08de3f0d477..0d2d413b59e 100644
--- a/chromium/media/capture/video/linux/video_capture_device_linux.h
+++ b/chromium/media/capture/video/linux/video_capture_device_linux.h
@@ -13,11 +13,7 @@
#include <stdint.h>
#include <memory>
-#include <vector>
-#include "base/files/file_util.h"
-#include "base/files/scoped_file.h"
-#include "base/threading/thread.h"
#include "media/capture/video/linux/v4l2_capture_device_impl.h"
#include "media/capture/video/video_capture_device.h"
#include "media/capture/video_capture_types.h"
@@ -66,10 +62,7 @@ class VideoCaptureDeviceLinux : public VideoCaptureDevice {
// |v4l2_thread_|.
std::unique_ptr<V4L2CaptureDelegate> capture_impl_;
- // Photo-related requests waiting for |v4l2_thread_| to be active.
- std::vector<base::OnceClosure> photo_requests_queue_;
-
- base::Thread v4l2_thread_; // Thread used for reading data from the device.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
// SetRotation() may get called even when the device is not started. When that
// is the case we remember the value here and use it as soon as the device
diff --git a/chromium/media/capture/video/mac/video_capture_device_mac.mm b/chromium/media/capture/video/mac/video_capture_device_mac.mm
index dd1aba77ac5..1b9519b0319 100644
--- a/chromium/media/capture/video/mac/video_capture_device_mac.mm
+++ b/chromium/media/capture/video/mac/video_capture_device_mac.mm
@@ -115,7 +115,7 @@ static bool FindDeviceWithVendorAndProductIds(int vendor_id,
kern_return_t kr = IOServiceGetMatchingServices(
kIOMasterPortDefault, query_dictionary.release(), usb_iterator);
if (kr != kIOReturnSuccess) {
- DLOG(ERROR) << "No devices found with specified Vendor and Product ID.";
+ VLOG(1) << "No devices found with specified Vendor and Product ID.";
return false;
}
return true;
@@ -136,7 +136,7 @@ static bool FindDeviceInterfaceInUsbDevice(
usb_device, kIOUSBDeviceUserClientTypeID, kIOCFPlugInInterfaceID, &plugin,
&score);
if (kr != kIOReturnSuccess || !plugin) {
- DLOG(ERROR) << "IOCreatePlugInInterfaceForService";
+ VLOG(1) << "IOCreatePlugInInterfaceForService";
return false;
}
base::mac::ScopedIOPluginInterface<IOCFPlugInInterface> plugin_ref(plugin);
@@ -146,7 +146,7 @@ static bool FindDeviceInterfaceInUsbDevice(
plugin, CFUUIDGetUUIDBytes(kIOUSBDeviceInterfaceID),
reinterpret_cast<LPVOID*>(device_interface));
if (!SUCCEEDED(res) || !*device_interface) {
- DLOG(ERROR) << "QueryInterface, couldn't create interface to USB";
+ VLOG(1) << "QueryInterface, couldn't create interface to USB";
return false;
}
return true;
@@ -171,7 +171,7 @@ static bool FindVideoControlInterfaceInDeviceInterface(
->CreateInterfaceIterator(device_interface, &interface_request,
&interface_iterator);
if (kr != kIOReturnSuccess) {
- DLOG(ERROR) << "Could not create an iterator to the device's interfaces.";
+ VLOG(1) << "Could not create an iterator to the device's interfaces.";
return false;
}
base::mac::ScopedIOObject<io_iterator_t> iterator_ref(interface_iterator);
@@ -180,7 +180,7 @@ static bool FindVideoControlInterfaceInDeviceInterface(
io_service_t found_interface;
found_interface = IOIteratorNext(interface_iterator);
if (!found_interface) {
- DLOG(ERROR) << "Could not find a Video-AVControl interface in the device.";
+ VLOG(1) << "Could not find a Video-AVControl interface in the device.";
return false;
}
base::mac::ScopedIOObject<io_service_t> found_interface_ref(found_interface);
@@ -191,7 +191,7 @@ static bool FindVideoControlInterfaceInDeviceInterface(
found_interface, kIOUSBInterfaceUserClientTypeID, kIOCFPlugInInterfaceID,
video_control_interface, &score);
if (kr != kIOReturnSuccess || !*video_control_interface) {
- DLOG(ERROR) << "IOCreatePlugInInterfaceForService";
+ VLOG(1) << "IOCreatePlugInInterfaceForService";
return false;
}
return true;
@@ -211,7 +211,7 @@ static void SetAntiFlickerInVideoControlInterface(
plugin_interface, CFUUIDGetUUIDBytes(kIOUSBInterfaceInterfaceID),
reinterpret_cast<LPVOID*>(control_interface.InitializeInto()));
if (!SUCCEEDED(res) || !control_interface) {
- DLOG(ERROR) << "Couldn’t create control interface";
+ VLOG(1) << "Couldn’t create control interface";
return;
}
@@ -231,7 +231,7 @@ static void SetAntiFlickerInVideoControlInterface(
break;
}
}
- DVLOG_IF(1, real_unit_id == -1)
+ VLOG_IF(1, real_unit_id == -1)
<< "This USB device doesn't seem to have a "
<< " VC_PROCESSING_UNIT, anti-flicker not available";
if (real_unit_id == -1)
@@ -239,15 +239,15 @@ static void SetAntiFlickerInVideoControlInterface(
if ((*control_interface)->USBInterfaceOpen(control_interface) !=
kIOReturnSuccess) {
- DLOG(ERROR) << "Unable to open control interface";
+ VLOG(1) << "Unable to open control interface";
return;
}
// Create the control request and launch it to the device's control interface.
// Note how the wIndex needs the interface number OR'ed in the lowest bits.
IOUSBDevRequest command;
- command.bmRequestType =
- USBmakebmRequestType(kUSBOut, kUSBClass, kUSBInterface);
+ command.bmRequestType = USBmakebmRequestType(UInt8{kUSBOut}, UInt8{kUSBClass},
+ UInt8{kUSBInterface});
command.bRequest = kVcRequestCodeSetCur;
UInt8 interface_number;
(*control_interface)
@@ -263,10 +263,10 @@ static void SetAntiFlickerInVideoControlInterface(
IOReturn ret =
(*control_interface)->ControlRequest(control_interface, 0, &command);
- DLOG_IF(ERROR, ret != kIOReturnSuccess) << "Anti-flicker control request"
+ VLOG_IF(1, ret != kIOReturnSuccess) << "Anti-flicker control request"
<< " failed (0x" << std::hex << ret
<< "), unit id: " << real_unit_id;
- DVLOG_IF(1, ret == kIOReturnSuccess) << "Anti-flicker set to "
+ VLOG_IF(1, ret == kIOReturnSuccess) << "Anti-flicker set to "
<< static_cast<int>(frequency) << "Hz";
(*control_interface)->USBInterfaceClose(control_interface);
@@ -328,8 +328,8 @@ static IOUSBDevRequest CreateEmptyPanTiltZoomRequest(
(*control_interface)
->GetInterfaceNumber(control_interface, &interface_number);
IOUSBDevRequest command;
- command.bmRequestType =
- USBmakebmRequestType(endpoint_direction, kUSBClass, kUSBInterface);
+ command.bmRequestType = USBmakebmRequestType(
+ endpoint_direction, UInt8{kUSBClass}, UInt8{kUSBInterface});
command.bRequest = request_code;
command.wIndex = (unit_id << 8) | interface_number;
command.wValue = (control_selector << 8);
@@ -355,7 +355,7 @@ static bool SendPanTiltControlRequest(
IOReturn ret =
(*control_interface)->ControlRequest(control_interface, 0, &command);
- DLOG_IF(ERROR, ret != kIOReturnSuccess)
+ VLOG_IF(1, ret != kIOReturnSuccess)
<< "Control pan tilt request"
<< " failed (0x" << std::hex << ret << "), unit id: " << unit_id;
if (ret != kIOReturnSuccess)
@@ -382,7 +382,7 @@ static bool SendZoomControlRequest(
IOReturn ret =
(*control_interface)->ControlRequest(control_interface, 0, &command);
- DLOG_IF(ERROR, ret != kIOReturnSuccess)
+ VLOG_IF(1, ret != kIOReturnSuccess)
<< "Control zoom request"
<< " failed (0x" << std::hex << ret << "), unit id: " << unit_id;
if (ret != kIOReturnSuccess)
@@ -476,12 +476,12 @@ static void SetPanTiltInUsbDevice(
IOReturn ret =
(*control_interface)->ControlRequest(control_interface, 0, &command);
- DLOG_IF(ERROR, ret != kIOReturnSuccess)
+ VLOG_IF(1, ret != kIOReturnSuccess)
<< "Control request"
<< " failed (0x" << std::hex << ret << "), unit id: " << unit_id
<< " pan value: " << pan.value_or(pan_current)
<< " tilt value: " << tilt.value_or(tilt_current);
- DVLOG_IF(1, ret == kIOReturnSuccess)
+ VLOG_IF(1, ret == kIOReturnSuccess)
<< "Setting pan value to " << pan.value_or(pan_current)
<< " and tilt value to " << tilt.value_or(tilt_current);
}
@@ -497,11 +497,11 @@ static void SetZoomInUsbDevice(IOUSBInterfaceInterface220** control_interface,
IOReturn ret =
(*control_interface)->ControlRequest(control_interface, 0, &command);
- DLOG_IF(ERROR, ret != kIOReturnSuccess)
+ VLOG_IF(1, ret != kIOReturnSuccess)
<< "Control request"
<< " failed (0x" << std::hex << ret << "), unit id: " << unit_id
<< " zoom value: " << zoom;
- DVLOG_IF(1, ret == kIOReturnSuccess) << "Setting zoom value to " << zoom;
+ VLOG_IF(1, ret == kIOReturnSuccess) << "Setting zoom value to " << zoom;
}
// Open the pan, tilt, zoom interface in a USB webcam identified by
@@ -560,7 +560,7 @@ static ScopedIOUSBInterfaceInterface OpenPanTiltZoomControlInterface(
CFUUIDGetUUIDBytes(kIOUSBInterfaceInterfaceID220),
reinterpret_cast<LPVOID*>(control_interface.InitializeInto()));
if (!SUCCEEDED(res) || !control_interface) {
- DLOG(ERROR) << "Couldn’t get control interface";
+ VLOG(1) << "Couldn’t get control interface";
return ScopedIOUSBInterfaceInterface();
}
@@ -580,7 +580,7 @@ static ScopedIOUSBInterfaceInterface OpenPanTiltZoomControlInterface(
}
}
- DVLOG_IF(1, *unit_id == -1)
+ VLOG_IF(1, *unit_id == -1)
<< "This USB device doesn't seem to have a "
<< " VC_INPUT_TERMINAL. Pan, tilt, zoom are not available.";
if (*unit_id == -1)
@@ -588,7 +588,7 @@ static ScopedIOUSBInterfaceInterface OpenPanTiltZoomControlInterface(
if ((*control_interface)->USBInterfaceOpen(control_interface) !=
kIOReturnSuccess) {
- DLOG(ERROR) << "Unable to open control interface";
+ VLOG(1) << "Unable to open control interface";
return ScopedIOUSBInterfaceInterface();
}
@@ -841,7 +841,7 @@ void VideoCaptureDeviceMac::OnPhotoTaken(const uint8_t* image_data,
}
void VideoCaptureDeviceMac::OnPhotoError() {
- DLOG(ERROR) << __func__ << " error taking picture";
+ VLOG(1) << __func__ << " error taking picture";
photo_callback_.Reset();
}
diff --git a/chromium/media/capture/video/mock_video_capture_device_client.cc b/chromium/media/capture/video/mock_video_capture_device_client.cc
index 78e122d8bf5..9eabb5fe41a 100644
--- a/chromium/media/capture/video/mock_video_capture_device_client.cc
+++ b/chromium/media/capture/video/mock_video_capture_device_client.cc
@@ -41,11 +41,6 @@ class StubBufferHandleProvider
return {};
}
- mojo::ScopedSharedBufferHandle DuplicateAsMojoBuffer() override {
- NOTREACHED();
- return mojo::ScopedSharedBufferHandle();
- }
-
std::unique_ptr<VideoCaptureBufferHandle> GetHandleForInProcessAccess()
override {
return std::make_unique<StubBufferHandle>(mapped_size_, data_);
diff --git a/chromium/media/capture/video/video_capture_buffer_pool_util.cc b/chromium/media/capture/video/video_capture_buffer_pool_util.cc
index ed0a308993d..26363105214 100644
--- a/chromium/media/capture/video/video_capture_buffer_pool_util.cc
+++ b/chromium/media/capture/video/video_capture_buffer_pool_util.cc
@@ -8,6 +8,10 @@
#include "build/chromeos_buildflags.h"
#include "media/capture/capture_switches.h"
+#if BUILDFLAG(IS_CHROMEOS_ASH)
+#include "ash/constants/ash_features.h"
+#endif // BUILDFLAG(IS_CHROMEOS_ASH)
+
namespace media {
int DeviceVideoCaptureMaxBufferPoolSize() {
@@ -26,7 +30,16 @@ int DeviceVideoCaptureMaxBufferPoolSize() {
// here to take into account the delay caused by the consumer (e.g. display or
// video encoder).
if (switches::IsVideoCaptureUseGpuMemoryBufferEnabled()) {
- max_buffer_count = 36;
+ if (base::FeatureList::IsEnabled(
+ chromeos::features::kMoreVideoCaptureBuffers)) {
+ // Some devices might need more buffers to enable advanced features and
+ // might report pipeline depth as 8 for preview, 8 for video snapshot and
+ // 36 for recording. And some extra buffers are needed for the possible
+ // delay of display and video encoder, and also a few for spare usage.
+ max_buffer_count = 76;
+ } else {
+ max_buffer_count = 36;
+ }
}
#elif BUILDFLAG(IS_WIN)
// On Windows, for GMB backed zero-copy more buffers are needed because it's
diff --git a/chromium/media/capture/video/video_capture_device.h b/chromium/media/capture/video/video_capture_device.h
index c52c748141e..3ba84b551de 100644
--- a/chromium/media/capture/video/video_capture_device.h
+++ b/chromium/media/capture/video/video_capture_device.h
@@ -108,9 +108,6 @@ class CAPTURE_EXPORT VideoCaptureDevice
// Duplicate as an writable (unsafe) shared memory region.
virtual base::UnsafeSharedMemoryRegion DuplicateAsUnsafeRegion() = 0;
- // Duplicate as a writable (unsafe) mojo buffer.
- virtual mojo::ScopedSharedBufferHandle DuplicateAsMojoBuffer() = 0;
-
// Access a |VideoCaptureBufferHandle| for local, writable memory.
virtual std::unique_ptr<VideoCaptureBufferHandle>
GetHandleForInProcessAccess() = 0;
diff --git a/chromium/media/capture/video/video_capture_device_client.cc b/chromium/media/capture/video/video_capture_device_client.cc
index bad82eb634b..a96765c137e 100644
--- a/chromium/media/capture/video/video_capture_device_client.cc
+++ b/chromium/media/capture/video/video_capture_device_client.cc
@@ -148,9 +148,6 @@ class BufferPoolBufferHandleProvider
base::UnsafeSharedMemoryRegion DuplicateAsUnsafeRegion() override {
return buffer_pool_->DuplicateAsUnsafeRegion(buffer_id_);
}
- mojo::ScopedSharedBufferHandle DuplicateAsMojoBuffer() override {
- return buffer_pool_->DuplicateAsMojoBuffer(buffer_id_);
- }
gfx::GpuMemoryBufferHandle GetGpuMemoryBufferHandle() override {
return buffer_pool_->GetGpuMemoryBufferHandle(buffer_id_);
}
@@ -508,8 +505,8 @@ ReadyFrameInBuffer VideoCaptureDeviceClient::CreateReadyFrameFromExternalBuffer(
// Register the buffer with the receiver if it is new.
if (!base::Contains(buffer_ids_known_by_receiver_, buffer_id)) {
media::mojom::VideoBufferHandlePtr buffer_handle =
- media::mojom::VideoBufferHandle::New();
- buffer_handle->set_gpu_memory_buffer_handle(std::move(buffer.handle));
+ media::mojom::VideoBufferHandle::NewGpuMemoryBufferHandle(
+ std::move(buffer.handle));
receiver_->OnNewBuffer(buffer_id, std::move(buffer_handle));
buffer_ids_known_by_receiver_.push_back(buffer_id);
}
@@ -569,8 +566,6 @@ VideoCaptureDeviceClient::ReserveOutputBuffer(const gfx::Size& frame_size,
DCHECK_NE(VideoCaptureBufferPool::kInvalidId, buffer_id);
if (!base::Contains(buffer_ids_known_by_receiver_, buffer_id)) {
- media::mojom::VideoBufferHandlePtr buffer_handle =
- media::mojom::VideoBufferHandle::New();
VideoCaptureBufferType target_buffer_type = target_buffer_type_;
#if BUILDFLAG(IS_WIN)
// If MediaFoundationD3D11VideoCapture fails, a shared memory buffer may be
@@ -580,22 +575,25 @@ VideoCaptureDeviceClient::ReserveOutputBuffer(const gfx::Size& frame_size,
target_buffer_type = VideoCaptureBufferType::kSharedMemory;
}
#endif
+ media::mojom::VideoBufferHandlePtr buffer_handle;
switch (target_buffer_type) {
case VideoCaptureBufferType::kSharedMemory:
- buffer_handle->set_shared_buffer_handle(
- buffer_pool_->DuplicateAsMojoBuffer(buffer_id));
+ buffer_handle = media::mojom::VideoBufferHandle::NewUnsafeShmemRegion(
+ buffer_pool_->DuplicateAsUnsafeRegion(buffer_id));
break;
case VideoCaptureBufferType::kSharedMemoryViaRawFileDescriptor:
- buffer_handle->set_shared_memory_via_raw_file_descriptor(
- buffer_pool_->CreateSharedMemoryViaRawFileDescriptorStruct(
- buffer_id));
+ buffer_handle = media::mojom::VideoBufferHandle::
+ NewSharedMemoryViaRawFileDescriptor(
+ buffer_pool_->CreateSharedMemoryViaRawFileDescriptorStruct(
+ buffer_id));
break;
case VideoCaptureBufferType::kMailboxHolder:
NOTREACHED();
break;
case VideoCaptureBufferType::kGpuMemoryBuffer:
- buffer_handle->set_gpu_memory_buffer_handle(
- buffer_pool_->GetGpuMemoryBufferHandle(buffer_id));
+ buffer_handle =
+ media::mojom::VideoBufferHandle::NewGpuMemoryBufferHandle(
+ buffer_pool_->GetGpuMemoryBufferHandle(buffer_id));
break;
}
receiver_->OnNewBuffer(buffer_id, std::move(buffer_handle));
diff --git a/chromium/media/capture/video/video_capture_device_unittest.cc b/chromium/media/capture/video/video_capture_device_unittest.cc
index 61165afc29e..fb75b046383 100644
--- a/chromium/media/capture/video/video_capture_device_unittest.cc
+++ b/chromium/media/capture/video/video_capture_device_unittest.cc
@@ -236,16 +236,16 @@ class MockImageCaptureClient
mojom::PhotoStatePtr state_;
};
-base::test::SingleThreadTaskEnvironment::MainThreadType kMainThreadType =
+constexpr auto kMainThreadType =
#if BUILDFLAG(IS_MAC)
// Video capture code on MacOSX must run on a CFRunLoop enabled thread
// for interaction with AVFoundation.
- base::test::SingleThreadTaskEnvironment::MainThreadType::UI;
+ base::test::TaskEnvironment::MainThreadType::UI;
#elif BUILDFLAG(IS_FUCHSIA)
// FIDL APIs on Fuchsia requires IO thread.
- base::test::SingleThreadTaskEnvironment::MainThreadType::IO;
+ base::test::TaskEnvironment::MainThreadType::IO;
#else
- base::test::SingleThreadTaskEnvironment::MainThreadType::DEFAULT;
+ base::test::TaskEnvironment::MainThreadType::DEFAULT;
#endif
} // namespace
@@ -289,6 +289,15 @@ class VideoCaptureDeviceTest
!CameraHalDispatcherImpl::GetInstance()->IsStarted()) {
CameraHalDispatcherImpl::GetInstance()->Start(base::DoNothing(),
base::DoNothing());
+ // Since the callback is posted to the main task, it might introduce
+ // issues when destroying the main task runner while the callback hasn't
+ // been triggered. Since we don't do sensor related check in video capture
+ // tests, it should be okay to simply disable sensor code path for
+ // testing.
+ // If the sensor initialization becomes a part of the camera
+ // initialization in the future, we should include the check for sensors
+ // in the test codes instead of simply disabling it.
+ CameraHalDispatcherImpl::GetInstance()->DisableSensorForTesting();
}
#endif
video_capture_device_factory_ =
@@ -311,6 +320,7 @@ class VideoCaptureDeviceTest
}
void TearDown() override {
+ task_environment_.RunUntilIdle();
#if BUILDFLAG(IS_CHROMEOS_ASH)
chromeos::PowerManagerClient::Shutdown();
#endif
@@ -460,7 +470,7 @@ class VideoCaptureDeviceTest
#if BUILDFLAG(IS_WIN)
base::win::ScopedCOMInitializer initialize_com_;
#endif
- base::test::SingleThreadTaskEnvironment task_environment_;
+ base::test::TaskEnvironment task_environment_;
std::vector<VideoCaptureDeviceInfo> devices_info_;
std::unique_ptr<base::RunLoop> run_loop_;
scoped_refptr<base::TaskRunner> main_thread_task_runner_;
diff --git a/chromium/media/capture/video/win/video_capture_device_factory_win.cc b/chromium/media/capture/video/win/video_capture_device_factory_win.cc
index 8fb4433c9e2..ac02f906774 100644
--- a/chromium/media/capture/video/win/video_capture_device_factory_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_factory_win.cc
@@ -125,7 +125,9 @@ const char* const kModelIdsBlockedForMediaFoundation[] = {
// Acer Aspire f5-573g. See https://crbug.com/1034644.
"0bda:57f2",
// Elgato Camlink 4k
- "0fd9:0066"};
+ "0fd9:0066",
+ // ACER Aspire VN7-571G. See https://crbug.com/1327948.
+ "04f2:b469"};
// Use this list only for non-USB webcams.
const char* const kDisplayNamesBlockedForMediaFoundation[] = {
diff --git a/chromium/media/capture/video/win/video_capture_device_mf_win.cc b/chromium/media/capture/video/win/video_capture_device_mf_win.cc
index 3fbcc3bdb63..fcbc0d1d929 100644
--- a/chromium/media/capture/video/win/video_capture_device_mf_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_mf_win.cc
@@ -256,9 +256,9 @@ VideoPixelFormat MfSubTypeToSourcePixelFormat(
{MFVideoFormat_YV12, PIXEL_FORMAT_YV12},
{GUID_ContainerFormatJpeg, PIXEL_FORMAT_MJPEG}};
- for (const auto& kEntry : kPixelFormatMap) {
- if (kEntry.mf_source_media_subtype == mf_source_media_subtype) {
- return kEntry.pixel_format;
+ for (const auto& [source_media_subtype, pixel_format] : kPixelFormatMap) {
+ if (source_media_subtype == mf_source_media_subtype) {
+ return pixel_format;
}
}
return PIXEL_FORMAT_UNKNOWN;
@@ -999,12 +999,22 @@ void VideoCaptureDeviceMFWin::AllocateAndStart(
const VideoCaptureParams& params,
std::unique_ptr<VideoCaptureDevice::Client> client) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-
- base::AutoLock lock(lock_);
- return AllocateAndStartLocked(params, std::move(client));
+ bool success = false;
+ {
+ base::AutoLock lock(lock_);
+ success = AllocateAndStartLocked(params, std::move(client));
+ }
+ // Do not hold the lock while waiting.
+ if (success) {
+ HRESULT hr = WaitOnCaptureEvent(MF_CAPTURE_ENGINE_PREVIEW_STARTED);
+ if (SUCCEEDED(hr)) {
+ base::AutoLock lock(lock_);
+ is_started_ = true;
+ }
+ }
}
-void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
+bool VideoCaptureDeviceMFWin::AllocateAndStartLocked(
const VideoCaptureParams& params,
std::unique_ptr<VideoCaptureDevice::Client> client) {
params_ = params;
@@ -1014,7 +1024,7 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
if (!engine_) {
OnError(VideoCaptureError::kWinMediaFoundationEngineIsNull, FROM_HERE,
E_FAIL);
- return;
+ return false;
}
ComPtr<IMFCaptureSource> source;
@@ -1022,14 +1032,14 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
if (FAILED(hr)) {
OnError(VideoCaptureError::kWinMediaFoundationEngineGetSourceFailed,
FROM_HERE, hr);
- return;
+ return false;
}
hr = FillCapabilities(source.Get(), true, &photo_capabilities_);
if (FAILED(hr)) {
OnError(VideoCaptureError::kWinMediaFoundationFillPhotoCapabilitiesFailed,
FROM_HERE, hr);
- return;
+ return false;
}
if (!photo_capabilities_.empty()) {
@@ -1042,13 +1052,13 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
if (FAILED(hr)) {
OnError(VideoCaptureError::kWinMediaFoundationFillVideoCapabilitiesFailed,
FROM_HERE, hr);
- return;
+ return false;
}
if (video_capabilities.empty()) {
OnError(VideoCaptureError::kWinMediaFoundationNoVideoCapabilityFound,
FROM_HERE, "No video capability found");
- return;
+ return false;
}
const CapabilityWin best_match_video_capability =
@@ -1061,7 +1071,7 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
OnError(
VideoCaptureError::kWinMediaFoundationGetAvailableDeviceMediaTypeFailed,
FROM_HERE, hr);
- return;
+ return false;
}
hr = source->SetCurrentDeviceMediaType(
@@ -1070,7 +1080,7 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
OnError(
VideoCaptureError::kWinMediaFoundationSetCurrentDeviceMediaTypeFailed,
FROM_HERE, hr);
- return;
+ return false;
}
ComPtr<IMFCaptureSink> sink;
@@ -1078,7 +1088,7 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
if (FAILED(hr)) {
OnError(VideoCaptureError::kWinMediaFoundationEngineGetSinkFailed,
FROM_HERE, hr);
- return;
+ return false;
}
ComPtr<IMFCapturePreviewSink> preview_sink;
@@ -1087,14 +1097,14 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
OnError(VideoCaptureError::
kWinMediaFoundationSinkQueryCapturePreviewInterfaceFailed,
FROM_HERE, hr);
- return;
+ return false;
}
hr = preview_sink->RemoveAllStreams();
if (FAILED(hr)) {
OnError(VideoCaptureError::kWinMediaFoundationSinkRemoveAllStreamsFailed,
FROM_HERE, hr);
- return;
+ return false;
}
ComPtr<IMFMediaType> sink_video_media_type;
@@ -1103,7 +1113,7 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
OnError(
VideoCaptureError::kWinMediaFoundationCreateSinkVideoMediaTypeFailed,
FROM_HERE, hr);
- return;
+ return false;
}
hr = ConvertToVideoSinkMediaType(
@@ -1114,7 +1124,7 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
OnError(
VideoCaptureError::kWinMediaFoundationConvertToVideoSinkMediaTypeFailed,
FROM_HERE, hr);
- return;
+ return false;
}
DWORD dw_sink_stream_index = 0;
@@ -1124,7 +1134,7 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
if (FAILED(hr)) {
OnError(VideoCaptureError::kWinMediaFoundationSinkAddStreamFailed,
FROM_HERE, hr);
- return;
+ return false;
}
hr = preview_sink->SetSampleCallback(dw_sink_stream_index,
@@ -1132,7 +1142,7 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
if (FAILED(hr)) {
OnError(VideoCaptureError::kWinMediaFoundationSinkSetSampleCallbackFailed,
FROM_HERE, hr);
- return;
+ return false;
}
// Note, that it is not sufficient to wait for
@@ -1143,19 +1153,19 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
// event. For the lack of any other events indicating success, we have to wait
// for the first video frame to arrive before sending our |OnStarted| event to
// |client_|.
+ // We still need to wait for the MF_CAPTURE_ENGINE_PREVIEW_STARTED event to
+ // ensure that StopPreview is not called before the preview has started.
has_sent_on_started_to_client_ = false;
hr = engine_->StartPreview();
if (FAILED(hr)) {
OnError(VideoCaptureError::kWinMediaFoundationEngineStartPreviewFailed,
FROM_HERE, hr);
- return;
+ return false;
}
selected_video_capability_ =
std::make_unique<CapabilityWin>(best_match_video_capability);
- is_started_ = true;
-
base::UmaHistogramEnumeration(
"Media.VideoCapture.Win.Device.InternalPixelFormat",
best_match_video_capability.source_pixel_format,
@@ -1168,17 +1178,31 @@ void VideoCaptureDeviceMFWin::AllocateAndStartLocked(
"Media.VideoCapture.Win.Device.RequestedPixelFormat",
params.requested_format.pixel_format,
media::VideoPixelFormat::PIXEL_FORMAT_MAX);
+
+ return true;
}
void VideoCaptureDeviceMFWin::StopAndDeAllocate() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- base::AutoLock lock(lock_);
+ HRESULT hr = E_FAIL;
+ {
+ base::AutoLock lock(lock_);
- if (is_started_ && engine_)
- engine_->StopPreview();
- is_started_ = false;
+ if (is_started_ && engine_) {
+ hr = engine_->StopPreview();
+ }
+ }
- client_.reset();
+ // Do not hold the lock while waiting.
+ if (SUCCEEDED(hr)) {
+ WaitOnCaptureEvent(MF_CAPTURE_ENGINE_PREVIEW_STOPPED);
+ }
+
+ {
+ base::AutoLock lock(lock_);
+ is_started_ = false;
+ client_.reset();
+ }
}
void VideoCaptureDeviceMFWin::TakePhoto(TakePhotoCallback callback) {
@@ -1759,21 +1783,24 @@ void VideoCaptureDeviceMFWin::OnEvent(IMFMediaEvent* media_event) {
media_event->GetStatus(&hr);
media_event->GetExtendedType(&capture_event_guid);
- // TODO(http://crbug.com/1093521): Add cases for Start
- // MF_CAPTURE_ENGINE_PREVIEW_STARTED and MF_CAPTURE_ENGINE_PREVIEW_STOPPED
// When MF_CAPTURE_ENGINE_ERROR is returned the captureengine object is no
// longer valid.
if (capture_event_guid == MF_CAPTURE_ENGINE_ERROR || FAILED(hr)) {
+ last_error_hr_ = hr;
capture_error_.Signal();
// There should always be a valid error
hr = SUCCEEDED(hr) ? E_UNEXPECTED : hr;
- } else if (capture_event_guid == MF_CAPTURE_ENGINE_INITIALIZED) {
- capture_initialize_.Signal();
+ } else {
+ if (capture_event_guid == MF_CAPTURE_ENGINE_INITIALIZED) {
+ capture_initialize_.Signal();
+ } else if (capture_event_guid == MF_CAPTURE_ENGINE_PREVIEW_STOPPED) {
+ capture_stopped_.Signal();
+ } else if (capture_event_guid == MF_CAPTURE_ENGINE_PREVIEW_STARTED) {
+ capture_started_.Signal();
+ }
+ return;
}
- // Lock is taken after events are signalled, because if the capture
- // is being restarted, lock is currently owned by another thread running
- // OnEvent().
base::AutoLock lock(lock_);
if (hr == DXGI_ERROR_DEVICE_REMOVED && dxgi_device_manager_ != nullptr) {
@@ -1804,6 +1831,12 @@ void VideoCaptureDeviceMFWin::OnEvent(IMFMediaEvent* media_event) {
// If AllocateAndStart fails somehow, OnError() will be called
// internally. Therefore, it's safe to always override |hr| here.
hr = S_OK;
+ // Ideally we should wait for MF_CAPTURE_ENGINE_PREVIEW_STARTED.
+ // However, introducing that wait here could cause deadlocks if
+ // the same thread is used by MFCaptureEngine to signal events to
+ // the client.
+ // So we mark |is_started_| speculatively here.
+ is_started_ = true;
++num_restarts_;
} else {
LOG(ERROR) << "Failed to re-initialize.";
@@ -1845,10 +1878,12 @@ HRESULT VideoCaptureDeviceMFWin::WaitOnCaptureEvent(GUID capture_event_guid) {
HRESULT hr = S_OK;
HANDLE events[] = {nullptr, capture_error_.handle()};
- // TODO(http://crbug.com/1093521): Add cases for Start
- // MF_CAPTURE_ENGINE_PREVIEW_STARTED and MF_CAPTURE_ENGINE_PREVIEW_STOPPED
if (capture_event_guid == MF_CAPTURE_ENGINE_INITIALIZED) {
events[0] = capture_initialize_.handle();
+ } else if (capture_event_guid == MF_CAPTURE_ENGINE_PREVIEW_STOPPED) {
+ events[0] = capture_stopped_.handle();
+ } else if (capture_event_guid == MF_CAPTURE_ENGINE_PREVIEW_STARTED) {
+ events[0] = capture_started_.handle();
} else {
// no registered event handle for the event requested
hr = E_NOTIMPL;
@@ -1866,7 +1901,10 @@ HRESULT VideoCaptureDeviceMFWin::WaitOnCaptureEvent(GUID capture_event_guid) {
LogError(FROM_HERE, hr);
break;
default:
- hr = E_UNEXPECTED;
+ hr = last_error_hr_;
+ if (SUCCEEDED(hr)) {
+ hr = MF_E_UNEXPECTED;
+ }
LogError(FROM_HERE, hr);
break;
}
diff --git a/chromium/media/capture/video/win/video_capture_device_mf_win.h b/chromium/media/capture/video/win/video_capture_device_mf_win.h
index 0e7cfb4a2ce..207cae258da 100644
--- a/chromium/media/capture/video/win/video_capture_device_mf_win.h
+++ b/chromium/media/capture/video/win/video_capture_device_mf_win.h
@@ -148,7 +148,7 @@ class CAPTURE_EXPORT VideoCaptureDeviceMFWin : public VideoCaptureDevice {
base::TimeDelta timestamp,
VideoCaptureFrameDropReason& frame_drop_reason);
bool RecreateMFSource();
- void AllocateAndStartLocked(
+ bool AllocateAndStartLocked(
const VideoCaptureParams& params,
std::unique_ptr<VideoCaptureDevice::Client> client);
@@ -181,6 +181,9 @@ class CAPTURE_EXPORT VideoCaptureDeviceMFWin : public VideoCaptureDevice {
base::queue<TakePhotoCallback> video_stream_take_photo_callbacks_;
base::WaitableEvent capture_initialize_;
base::WaitableEvent capture_error_;
+ base::WaitableEvent capture_stopped_;
+ base::WaitableEvent capture_started_;
+ HRESULT last_error_hr_ = S_OK;
scoped_refptr<DXGIDeviceManager> dxgi_device_manager_;
absl::optional<int> camera_rotation_;
VideoCaptureParams params_;
diff --git a/chromium/media/capture/video/win/video_capture_device_mf_win_unittest.cc b/chromium/media/capture/video/win/video_capture_device_mf_win_unittest.cc
index 223604687da..54291373ca4 100644
--- a/chromium/media/capture/video/win/video_capture_device_mf_win_unittest.cc
+++ b/chromium/media/capture/video/win/video_capture_device_mf_win_unittest.cc
@@ -575,6 +575,7 @@ class MockMFCaptureEngine : public MockInterface<IMFCaptureEngine> {
IFACEMETHODIMP StartPreview(void) override {
OnStartPreview();
+ FireCaptureEvent(MF_CAPTURE_ENGINE_PREVIEW_STARTED, S_OK);
return S_OK;
}
@@ -582,6 +583,7 @@ class MockMFCaptureEngine : public MockInterface<IMFCaptureEngine> {
IFACEMETHODIMP StopPreview(void) override {
OnStopPreview();
+ FireCaptureEvent(MF_CAPTURE_ENGINE_PREVIEW_STOPPED, S_OK);
return S_OK;
}
@@ -994,11 +996,6 @@ class MockCaptureHandleProvider
return base::UnsafeSharedMemoryRegion();
}
- // Duplicate as a writable (unsafe) mojo buffer.
- mojo::ScopedSharedBufferHandle DuplicateAsMojoBuffer() override {
- return mojo::ScopedSharedBufferHandle();
- }
-
// Access a |VideoCaptureBufferHandle| for local, writable memory.
std::unique_ptr<VideoCaptureBufferHandle> GetHandleForInProcessAccess()
override {
@@ -1466,6 +1463,49 @@ TEST_F(VideoCaptureDeviceMFWinTest,
MF_E_VIDEO_RECORDING_DEVICE_INVALIDATED);
}
+// Sends an arbitrary event before MF_CAPTURE_ENGINE_PREVIEW_STOPPED.
+TEST_F(VideoCaptureDeviceMFWinTest,
+ SendArbitraryMFVideoCallbackBeforeOnStoppedEvent) {
+ if (ShouldSkipTest())
+ return;
+
+ VideoCaptureDeviceDescriptor descriptor = VideoCaptureDeviceDescriptor();
+ Microsoft::WRL::ComPtr<MockMFMediaSource> media_source =
+ new MockMFMediaSource();
+ Microsoft::WRL::ComPtr<MockMFCaptureEngine> engine =
+ new MockMFCaptureEngine();
+ std::unique_ptr<VideoCaptureDeviceMFWin> device =
+ std::make_unique<VideoCaptureDeviceMFWin>(
+ descriptor, media_source,
+ /*mf_dxgi_device_manager=*/nullptr, engine);
+
+ EXPECT_CALL(*(engine.Get()), OnInitEventGuid).WillOnce([]() {
+ return MF_CAPTURE_ENGINE_INITIALIZED;
+ });
+
+ EXPECT_CALL(*(engine.Get()), OnCorrectInitializeQueued());
+
+ EXPECT_TRUE(device->Init());
+
+ PrepareMFDeviceWithOneVideoStream(MFVideoFormat_I420);
+
+ EXPECT_CALL(*(engine_.Get()), OnStartPreview());
+ EXPECT_CALL(*client_, OnStarted());
+
+ VideoCaptureFormat format(gfx::Size(640, 480), 30, media::PIXEL_FORMAT_NV12);
+ VideoCaptureParams video_capture_params;
+ video_capture_params.requested_format = format;
+ device_->AllocateAndStart(video_capture_params, std::move(client_));
+
+ // Send an arbitrary event before stopping the preview.
+ EXPECT_CALL(*(engine_.Get()), OnStopPreview()).WillRepeatedly([&]() {
+ engine->FireCaptureEvent(MF_CAPTURE_ENGINE_CAMERA_STREAM_BLOCKED, S_OK);
+ });
+
+ capture_preview_sink_->sample_callback->OnSample(nullptr);
+ device_->StopAndDeAllocate();
+}
+
// Allocates device with flaky methods failing with MF_E_INVALIDREQUEST and
// expects the device to retry and start correctly
TEST_F(VideoCaptureDeviceMFWinTest, AllocateAndStartWithFlakyInvalidRequest) {
diff --git a/chromium/media/capture/video/win/video_capture_device_win.cc b/chromium/media/capture/video/win/video_capture_device_win.cc
index 81aa33a2881..df0aef940a0 100644
--- a/chromium/media/capture/video/win/video_capture_device_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_win.cc
@@ -534,8 +534,11 @@ void VideoCaptureDeviceWin::AllocateAndStart(
params.requested_format.pixel_format,
media::VideoPixelFormat::PIXEL_FORMAT_MAX);
- client_->OnStarted();
- state_ = kCapturing;
+ {
+ base::AutoLock lock(lock_);
+ client_->OnStarted();
+ state_ = kCapturing;
+ }
}
void VideoCaptureDeviceWin::StopAndDeAllocate() {
@@ -554,8 +557,11 @@ void VideoCaptureDeviceWin::StopAndDeAllocate() {
graph_builder_->Disconnect(output_capture_pin_.Get());
graph_builder_->Disconnect(input_sink_pin_.Get());
- client_.reset();
- state_ = kIdle;
+ {
+ base::AutoLock lock(lock_);
+ client_.reset();
+ state_ = kIdle;
+ }
}
void VideoCaptureDeviceWin::TakePhoto(TakePhotoCallback callback) {
@@ -860,6 +866,12 @@ void VideoCaptureDeviceWin::FrameReceived(const uint8_t* buffer,
const VideoCaptureFormat& format,
base::TimeDelta timestamp,
bool flip_y) {
+ {
+ base::AutoLock lock(lock_);
+ if (state_ != kCapturing)
+ return;
+ }
+
if (first_ref_time_.is_null())
first_ref_time_ = base::TimeTicks::Now();
diff --git a/chromium/media/capture/video/win/video_capture_device_win.h b/chromium/media/capture/video/win/video_capture_device_win.h
index ee3b5e98453..7d7b0d694e8 100644
--- a/chromium/media/capture/video/win/video_capture_device_win.h
+++ b/chromium/media/capture/video/win/video_capture_device_win.h
@@ -161,6 +161,11 @@ class VideoCaptureDeviceWin : public VideoCaptureDevice,
base::ThreadChecker thread_checker_;
+ // Guards against races when capture state is checked both from the thread
+ // bound to |thread_checker_| and from the thread that runs
+ // |SinkFilterObserver| callbacks.
+ base::Lock lock_;
+
bool enable_get_photo_state_;
absl::optional<int> camera_rotation_;
diff --git a/chromium/media/cast/BUILD.gn b/chromium/media/cast/BUILD.gn
index 83ae2a3f636..7298d8bd22f 100644
--- a/chromium/media/cast/BUILD.gn
+++ b/chromium/media/cast/BUILD.gn
@@ -17,19 +17,25 @@ proto_library("logging_proto") {
# Common code shared by all cast components.
source_set("common") {
sources = [
+ "cast_callbacks.h",
"cast_config.cc",
"cast_config.h",
"cast_environment.cc",
"cast_environment.h",
"common/clock_drift_smoother.cc",
"common/clock_drift_smoother.h",
+ "common/encoded_frame.cc",
+ "common/encoded_frame.h",
"common/expanded_value_base.h",
"common/frame_id.cc",
"common/frame_id.h",
"common/rtp_time.cc",
"common/rtp_time.h",
+ "common/sender_encoded_frame.cc",
+ "common/sender_encoded_frame.h",
"common/transport_encryption_handler.cc",
"common/transport_encryption_handler.h",
+ "common/video_frame_factory.h",
"constants.h",
"logging/encoding_event_subscriber.cc",
"logging/encoding_event_subscriber.h",
@@ -118,49 +124,31 @@ source_set("net") {
public_deps = [ ":common" ]
}
-source_set("sender") {
+source_set("encoding") {
sources = [
- "cast_sender.h",
- "cast_sender_impl.cc",
- "cast_sender_impl.h",
- "sender/audio_encoder.cc",
- "sender/audio_encoder.h",
- "sender/audio_sender.cc",
- "sender/audio_sender.h",
- "sender/congestion_control.cc",
- "sender/congestion_control.h",
- "sender/external_video_encoder.cc",
- "sender/external_video_encoder.h",
- "sender/fake_software_video_encoder.cc",
- "sender/fake_software_video_encoder.h",
- "sender/frame_sender.cc",
- "sender/frame_sender.h",
- "sender/performance_metrics_overlay.cc",
- "sender/performance_metrics_overlay.h",
- "sender/sender_encoded_frame.cc",
- "sender/sender_encoded_frame.h",
- "sender/size_adaptable_video_encoder_base.cc",
- "sender/size_adaptable_video_encoder_base.h",
- "sender/software_video_encoder.h",
- "sender/video_encoder.cc",
- "sender/video_encoder.h",
- "sender/video_encoder_impl.cc",
- "sender/video_encoder_impl.h",
- "sender/video_frame_factory.h",
- "sender/video_sender.cc",
- "sender/video_sender.h",
- "sender/vpx_encoder.cc",
- "sender/vpx_encoder.h",
- "sender/vpx_quantizer_parser.cc",
- "sender/vpx_quantizer_parser.h",
+ "encoding/audio_encoder.cc",
+ "encoding/audio_encoder.h",
+ "encoding/external_video_encoder.cc",
+ "encoding/external_video_encoder.h",
+ "encoding/fake_software_video_encoder.cc",
+ "encoding/fake_software_video_encoder.h",
+ "encoding/size_adaptable_video_encoder_base.cc",
+ "encoding/size_adaptable_video_encoder_base.h",
+ "encoding/software_video_encoder.h",
+ "encoding/video_encoder.cc",
+ "encoding/video_encoder.h",
+ "encoding/video_encoder_impl.cc",
+ "encoding/video_encoder_impl.h",
+ "encoding/vpx_encoder.cc",
+ "encoding/vpx_encoder.h",
+ "encoding/vpx_quantizer_parser.cc",
+ "encoding/vpx_quantizer_parser.h",
]
deps = [
":common",
- ":net",
"//base",
"//media",
- "//media/capture:capture_base",
"//third_party/libaom:libaom_buildflags",
"//third_party/libvpx",
"//third_party/opus",
@@ -170,8 +158,8 @@ source_set("sender") {
# iOS and OS X encoders
if (is_apple) {
sources += [
- "sender/h264_vt_encoder.cc",
- "sender/h264_vt_encoder.h",
+ "encoding/h264_vt_encoder.cc",
+ "encoding/h264_vt_encoder.h",
]
frameworks = [
@@ -185,8 +173,8 @@ source_set("sender") {
if (enable_libaom) {
sources += [
- "sender/av1_encoder.cc",
- "sender/av1_encoder.h",
+ "encoding/av1_encoder.cc",
+ "encoding/av1_encoder.h",
]
deps += [ "//third_party/libaom" ]
@@ -197,6 +185,42 @@ source_set("sender") {
}
}
+# TODO(https://crbug.com/1327074): should be split into multiple source sets
+# once the new Open Screen frame sender implementation is added.
+source_set("sender") {
+ sources = [
+ "cast_sender.h",
+ "cast_sender_impl.cc",
+ "cast_sender_impl.h",
+ "sender/audio_sender.cc",
+ "sender/audio_sender.h",
+ "sender/congestion_control.cc",
+ "sender/congestion_control.h",
+ "sender/frame_sender.cc",
+ "sender/frame_sender.h",
+ "sender/frame_sender_impl.cc",
+ "sender/frame_sender_impl.h",
+ "sender/performance_metrics_overlay.cc",
+ "sender/performance_metrics_overlay.h",
+ "sender/video_sender.cc",
+ "sender/video_sender.h",
+ ]
+
+ deps = [
+ ":common",
+ ":encoding",
+ ":net",
+ "//base",
+ "//media",
+ "//media/capture:capture_base",
+ "//ui/gfx/geometry",
+ ]
+
+ if (is_chromeos) {
+ deps += [ "//third_party/re2" ]
+ }
+}
+
source_set("test_receiver") {
testonly = true
sources = [
@@ -242,10 +266,18 @@ static_library("test_support") {
"test/loopback_transport.h",
"test/mock_cast_transport.cc",
"test/mock_cast_transport.h",
+ "test/mock_paced_packet_sender.cc",
+ "test/mock_paced_packet_sender.h",
+ "test/mock_rtp_payload_feedback.cc",
+ "test/mock_rtp_payload_feedback.h",
+ "test/rtp_packet_builder.cc",
+ "test/rtp_packet_builder.h",
"test/skewed_single_thread_task_runner.cc",
"test/skewed_single_thread_task_runner.h",
"test/skewed_tick_clock.cc",
"test/skewed_tick_clock.h",
+ "test/test_rtcp_packet_builder.cc",
+ "test/test_rtcp_packet_builder.h",
"test/utility/audio_utility.cc",
"test/utility/audio_utility.h",
"test/utility/barcode.cc",
@@ -299,39 +331,28 @@ test("cast_unittests") {
sources = [
"common/expanded_value_base_unittest.cc",
"common/rtp_time_unittest.cc",
+ "encoding/audio_encoder_unittest.cc",
+ "encoding/external_video_encoder_unittest.cc",
+ "encoding/video_encoder_unittest.cc",
+ "encoding/vpx_quantizer_parser_unittest.cc",
"logging/encoding_event_subscriber_unittest.cc",
"logging/receiver_time_offset_estimator_impl_unittest.cc",
"logging/simple_event_subscriber_unittest.cc",
"logging/stats_event_subscriber_unittest.cc",
"net/cast_transport_impl_unittest.cc",
- "net/pacing/mock_paced_packet_sender.cc",
- "net/pacing/mock_paced_packet_sender.h",
"net/pacing/paced_sender_unittest.cc",
"net/rtcp/receiver_rtcp_event_subscriber_unittest.cc",
"net/rtcp/rtcp_builder_unittest.cc",
"net/rtcp/rtcp_unittest.cc",
"net/rtcp/rtcp_utility_unittest.cc",
-
- # TODO(jophba): The following two are test utility modules. Rename/move the
- # files.
- "net/rtcp/test_rtcp_packet_builder.cc",
- "net/rtcp/test_rtcp_packet_builder.h",
- "net/rtp/mock_rtp_payload_feedback.cc",
- "net/rtp/mock_rtp_payload_feedback.h",
"net/rtp/packet_storage_unittest.cc",
- "net/rtp/rtp_packet_builder.cc",
- "net/rtp/rtp_packet_builder.h",
"net/rtp/rtp_packetizer_unittest.cc",
"net/rtp/rtp_parser_unittest.cc",
"net/udp_packet_pipe_unittest.cc",
"net/udp_transport_unittest.cc",
- "sender/audio_encoder_unittest.cc",
"sender/audio_sender_unittest.cc",
"sender/congestion_control_unittest.cc",
- "sender/external_video_encoder_unittest.cc",
- "sender/video_encoder_unittest.cc",
"sender/video_sender_unittest.cc",
- "sender/vpx_quantizer_parser_unittest.cc",
"test/end2end_unittest.cc",
"test/receiver/audio_decoder_unittest.cc",
"test/receiver/cast_message_builder_unittest.cc",
@@ -346,6 +367,7 @@ test("cast_unittests") {
deps = [
":common",
+ ":encoding",
":net",
":sender",
":test_receiver",
@@ -372,7 +394,7 @@ test("cast_unittests") {
}
if (is_apple) {
- sources += [ "sender/h264_vt_encoder_unittest.cc" ]
+ sources += [ "encoding/h264_vt_encoder_unittest.cc" ]
deps += [ "//third_party/ffmpeg" ]
}
diff --git a/chromium/media/cast/cast_callbacks.h b/chromium/media/cast/cast_callbacks.h
new file mode 100644
index 00000000000..03ce4fe2a90
--- /dev/null
+++ b/chromium/media/cast/cast_callbacks.h
@@ -0,0 +1,26 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_CAST_CALLBACKS_H_
+#define MEDIA_CAST_CAST_CALLBACKS_H_
+
+#include "base/callback_forward.h"
+#include "media/cast/constants.h"
+
+namespace media {
+namespace cast {
+
+// Callback that is run to update the client with current status. This is used
+// to allow the client to wait for asynchronous initialization to complete
+// before sending frames, and also to be notified of any runtime errors that
+// have halted the session.
+using StatusChangeCallback = base::RepeatingCallback<void(OperationalStatus)>;
+
+// The equivalent of StatusChangeCallback when only one change is expected.
+using StatusChangeOnceCallback = base::OnceCallback<void(OperationalStatus)>;
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CAST_CALLBACKS_H_
diff --git a/chromium/media/cast/cast_sender.h b/chromium/media/cast/cast_sender.h
index 7784d60f42a..902a8c6c3dd 100644
--- a/chromium/media/cast/cast_sender.h
+++ b/chromium/media/cast/cast_sender.h
@@ -18,6 +18,7 @@
#include "base/time/time.h"
#include "media/base/audio_bus.h"
#include "media/base/video_frame.h"
+#include "media/cast/cast_callbacks.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/constants.h"
@@ -76,15 +77,6 @@ class AudioFrameInput : public base::RefCountedThreadSafe<AudioFrameInput> {
friend class base::RefCountedThreadSafe<AudioFrameInput>;
};
-// Callback that is run to update the client with current status. This is used
-// to allow the client to wait for asynchronous initialization to complete
-// before sending frames, and also to be notified of any runtime errors that
-// have halted the session.
-using StatusChangeCallback = base::RepeatingCallback<void(OperationalStatus)>;
-
-// The equivalent of StatusChangeCallback when only one change is expected.
-using StatusChangeOnceCallback = base::OnceCallback<void(OperationalStatus)>;
-
// All methods of CastSender must be called on the main thread.
// Provided CastTransport will also be called on the main thread.
class CastSender {
diff --git a/chromium/media/cast/cast_sender_impl.cc b/chromium/media/cast/cast_sender_impl.cc
index 0a82c9bd92c..64fa3ac5f36 100644
--- a/chromium/media/cast/cast_sender_impl.cc
+++ b/chromium/media/cast/cast_sender_impl.cc
@@ -11,7 +11,7 @@
#include "base/callback_helpers.h"
#include "base/logging.h"
#include "media/base/video_frame.h"
-#include "media/cast/sender/video_frame_factory.h"
+#include "media/cast/common/video_frame_factory.h"
namespace media {
namespace cast {
diff --git a/chromium/media/cast/common/encoded_frame.cc b/chromium/media/cast/common/encoded_frame.cc
new file mode 100644
index 00000000000..6fc5c764576
--- /dev/null
+++ b/chromium/media/cast/common/encoded_frame.cc
@@ -0,0 +1,28 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/common/encoded_frame.h"
+
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+EncodedFrame::EncodedFrame()
+ : dependency(UNKNOWN_DEPENDENCY), new_playout_delay_ms(0) {}
+
+EncodedFrame::~EncodedFrame() = default;
+
+void EncodedFrame::CopyMetadataTo(EncodedFrame* dest) const {
+ DCHECK(dest);
+ dest->dependency = this->dependency;
+ dest->frame_id = this->frame_id;
+ dest->referenced_frame_id = this->referenced_frame_id;
+ dest->rtp_timestamp = this->rtp_timestamp;
+ dest->reference_time = this->reference_time;
+ dest->new_playout_delay_ms = this->new_playout_delay_ms;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/common/encoded_frame.h b/chromium/media/cast/common/encoded_frame.h
new file mode 100644
index 00000000000..f5897da2915
--- /dev/null
+++ b/chromium/media/cast/common/encoded_frame.h
@@ -0,0 +1,94 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_COMMON_ENCODED_FRAME_H_
+#define MEDIA_CAST_COMMON_ENCODED_FRAME_H_
+
+#include <cstdint>
+#include <string>
+
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/common/frame_id.h"
+#include "media/cast/common/rtp_time.h"
+
+namespace media {
+namespace cast {
+
+// A combination of metadata and data for one encoded frame. This can contain
+// audio data or video data or other.
+struct EncodedFrame {
+ enum Dependency {
+ // "null" value, used to indicate whether |dependency| has been set.
+ UNKNOWN_DEPENDENCY,
+
+ // Not decodable without the reference frame indicated by
+ // |referenced_frame_id|.
+ DEPENDENT,
+
+ // Independently decodable.
+ INDEPENDENT,
+
+ // Independently decodable, and no future frames will depend on any frames
+ // before this one.
+ KEY,
+
+ DEPENDENCY_LAST = KEY
+ };
+
+ EncodedFrame();
+ virtual ~EncodedFrame();
+
+ // Convenience accessors to data as an array of uint8_t elements.
+ const uint8_t* bytes() const {
+ return reinterpret_cast<const uint8_t*>(std::data(data));
+ }
+ uint8_t* mutable_bytes() {
+ return reinterpret_cast<uint8_t*>(std::data(data));
+ }
+
+ // Copies all data members except |data| to |dest|.
+ // Does not modify |dest->data|.
+ void CopyMetadataTo(EncodedFrame* dest) const;
+
+ // This frame's dependency relationship with respect to other frames.
+ Dependency dependency;
+
+ // The label associated with this frame. Implies an ordering relative to
+ // other frames in the same stream.
+ FrameId frame_id;
+
+ // The label associated with the frame upon which this frame depends. If
+ // this frame does not require any other frame in order to become decodable
+ // (e.g., key frames), |referenced_frame_id| must equal |frame_id|.
+ FrameId referenced_frame_id;
+
+ // The stream timestamp, on the timeline of the signal data. For example, RTP
+ // timestamps for audio are usually defined as the total number of audio
+ // samples encoded in all prior frames. A playback system uses this value to
+ // detect gaps in the stream, and otherwise stretch the signal to match
+ // playout targets.
+ RtpTimeTicks rtp_timestamp;
+
+ // The common reference clock timestamp for this frame. This value originates
+ // from a sender and is used to provide lip synchronization between streams in
+ // a receiver. Thus, in the sender context, this is set to the time at which
+ // the frame was captured/recorded. In the receiver context, this is set to
+ // the target playout time. Over a sequence of frames, this time value is
+ // expected to drift with respect to the elapsed time implied by the RTP
+ // timestamps; and it may not necessarily increment with precise regularity.
+ base::TimeTicks reference_time;
+
+ // Playout delay for this and all future frames. Used by the Adaptive
+ // Playout delay extension. Zero means no change.
+ uint16_t new_playout_delay_ms;
+
+ // The encoded signal data.
+ std::string data;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_COMMON_ENCODED_FRAME_H_
diff --git a/chromium/media/cast/common/expanded_value_base.h b/chromium/media/cast/common/expanded_value_base.h
index 8021a4fb365..4902e0168eb 100644
--- a/chromium/media/cast/common/expanded_value_base.h
+++ b/chromium/media/cast/common/expanded_value_base.h
@@ -108,12 +108,30 @@ class ExpandedValueBase {
}
// Comparison operators.
- bool operator==(Subclass rhs) const { return value_ == rhs.value_; }
- bool operator!=(Subclass rhs) const { return value_ != rhs.value_; }
- bool operator<(Subclass rhs) const { return value_ < rhs.value_; }
- bool operator>(Subclass rhs) const { return value_ > rhs.value_; }
- bool operator<=(Subclass rhs) const { return value_ <= rhs.value_; }
- bool operator>=(Subclass rhs) const { return value_ >= rhs.value_; }
+ bool operator==(
+ const ExpandedValueBase<FullWidthInteger, Subclass>& rhs) const {
+ return value_ == rhs.value_;
+ }
+ bool operator!=(
+ const ExpandedValueBase<FullWidthInteger, Subclass>& rhs) const {
+ return value_ != rhs.value_;
+ }
+ bool operator<(
+ const ExpandedValueBase<FullWidthInteger, Subclass>& rhs) const {
+ return value_ < rhs.value_;
+ }
+ bool operator>(
+ const ExpandedValueBase<FullWidthInteger, Subclass>& rhs) const {
+ return value_ > rhs.value_;
+ }
+ bool operator<=(
+ const ExpandedValueBase<FullWidthInteger, Subclass>& rhs) const {
+ return value_ <= rhs.value_;
+ }
+ bool operator>=(
+ const ExpandedValueBase<FullWidthInteger, Subclass>& rhs) const {
+ return value_ >= rhs.value_;
+ }
// (De)Serialize for transmission over IPC. Do not use these to subvert the
// valid set of operators allowed by this class or its Subclass.
diff --git a/chromium/media/cast/sender/sender_encoded_frame.cc b/chromium/media/cast/common/sender_encoded_frame.cc
index 4463ebdbf78..2828f7d553c 100644
--- a/chromium/media/cast/sender/sender_encoded_frame.cc
+++ b/chromium/media/cast/common/sender_encoded_frame.cc
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/sender_encoded_frame.h"
+#include "media/cast/common/sender_encoded_frame.h"
namespace media {
namespace cast {
-SenderEncodedFrame::SenderEncodedFrame()
- : EncodedFrame(), encoder_utilization(-1.0), lossy_utilization(-1.0) {}
-
+SenderEncodedFrame::SenderEncodedFrame() = default;
SenderEncodedFrame::~SenderEncodedFrame() = default;
} // namespace cast
diff --git a/chromium/media/cast/sender/sender_encoded_frame.h b/chromium/media/cast/common/sender_encoded_frame.h
index 45c4e1126ab..2f859bda07a 100644
--- a/chromium/media/cast/sender/sender_encoded_frame.h
+++ b/chromium/media/cast/common/sender_encoded_frame.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_SENDER_ENCODED_FRAME_H_
-#define MEDIA_CAST_SENDER_SENDER_ENCODED_FRAME_H_
+#ifndef MEDIA_CAST_COMMON_SENDER_ENCODED_FRAME_H_
+#define MEDIA_CAST_COMMON_SENDER_ENCODED_FRAME_H_
#include "base/time/time.h"
-#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/common/encoded_frame.h"
namespace media {
namespace cast {
@@ -26,7 +26,10 @@ struct SenderEncodedFrame final : public EncodedFrame {
// indicating the encoder utilized more resources than a maximum sustainable
// rate, based on the data volume of the input. Negative values indicate the
// field was not computed.
- double encoder_utilization;
+ double encoder_utilization = -1.0;
+
+ // The bitrate the encoder used for encoding this frame.
+ int encoder_bitrate = 0;
// The amount of "lossiness" needed to encode the frame within the targeted
// bandwidth. More-complex frame content and/or lower target encode bitrates
@@ -38,9 +41,7 @@ struct SenderEncodedFrame final : public EncodedFrame {
// very small, and values greater than 1.0 indicating the encoder cannot
// encode the frame within the target bitrate (even at its lowest quality
// setting). Negative values indicate the field was not computed.
- //
- // TODO(jophba): Rename to idealized_bitrate_utilization.
- double lossy_utilization = {};
+ double lossiness = -1.0;
// The time at which the encode of the frame completed.
base::TimeTicks encode_completion_time;
@@ -49,4 +50,4 @@ struct SenderEncodedFrame final : public EncodedFrame {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_SENDER_ENCODED_FRAME_H_
+#endif // MEDIA_CAST_COMMON_SENDER_ENCODED_FRAME_H_
diff --git a/chromium/media/cast/sender/video_frame_factory.h b/chromium/media/cast/common/video_frame_factory.h
index 1db6ebecf2f..b294581eb05 100644
--- a/chromium/media/cast/sender/video_frame_factory.h
+++ b/chromium/media/cast/common/video_frame_factory.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_VIDEO_FRAME_FACTORY_H_
-#define MEDIA_CAST_SENDER_VIDEO_FRAME_FACTORY_H_
+#ifndef MEDIA_CAST_COMMON_VIDEO_FRAME_FACTORY_H_
+#define MEDIA_CAST_COMMON_VIDEO_FRAME_FACTORY_H_
#include "base/time/time.h"
@@ -41,10 +41,11 @@ class VideoFrameFactory {
// this means that |MaybeCreateFrame| must somehow signal the encoder to
// perform whatever initialization is needed to eventually produce frames.
virtual scoped_refptr<VideoFrame> MaybeCreateFrame(
- const gfx::Size& frame_size, base::TimeDelta timestamp) = 0;
+ const gfx::Size& frame_size,
+ base::TimeDelta timestamp) = 0;
};
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_VIDEO_FRAME_FACTORY_H_
+#endif // MEDIA_CAST_COMMON_VIDEO_FRAME_FACTORY_H_
diff --git a/chromium/media/cast/sender/audio_encoder.cc b/chromium/media/cast/encoding/audio_encoder.cc
index 8f3d4da34bc..2ecf019f323 100644
--- a/chromium/media/cast/sender/audio_encoder.cc
+++ b/chromium/media/cast/encoding/audio_encoder.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/audio_encoder.h"
+#include "media/cast/encoding/audio_encoder.h"
#include <stdint.h>
@@ -22,6 +22,7 @@
#include "build/build_config.h"
#include "media/base/audio_sample_types.h"
#include "media/cast/common/rtp_time.h"
+#include "media/cast/common/sender_encoded_frame.h"
#include "media/cast/constants.h"
#if !BUILDFLAG(IS_IOS)
@@ -56,11 +57,13 @@ class AudioEncoder::ImplBase
int num_channels,
int sampling_rate,
int samples_per_frame,
+ int bitrate,
FrameEncodedCallback callback)
: cast_environment_(cast_environment),
codec_(codec),
num_channels_(num_channels),
samples_per_frame_(samples_per_frame),
+ bitrate_(bitrate),
callback_(std::move(callback)),
operational_status_(STATUS_UNINITIALIZED),
frame_duration_(base::Seconds(static_cast<double>(samples_per_frame_) /
@@ -80,18 +83,14 @@ class AudioEncoder::ImplBase
ImplBase(const ImplBase&) = delete;
ImplBase& operator=(const ImplBase&) = delete;
- OperationalStatus InitializationResult() const {
- return operational_status_;
- }
+ OperationalStatus InitializationResult() const { return operational_status_; }
- int samples_per_frame() const {
- return samples_per_frame_;
- }
+ int samples_per_frame() const { return samples_per_frame_; }
base::TimeDelta frame_duration() const { return frame_duration_; }
void EncodeAudio(std::unique_ptr<AudioBus> audio_bus,
- const base::TimeTicks& recorded_time) {
+ const base::TimeTicks recorded_time) {
DCHECK_EQ(operational_status_, STATUS_INITIALIZED);
DCHECK(!recorded_time.is_null());
@@ -117,9 +116,9 @@ class AudioEncoder::ImplBase
<< num_frames_missed * samples_per_frame_
<< " samples' worth of underrun.";
TRACE_EVENT_INSTANT2("cast.stream", "Audio Skip",
- TRACE_EVENT_SCOPE_THREAD,
- "frames missed", num_frames_missed,
- "samples dropped", samples_dropped_from_buffer_);
+ TRACE_EVENT_SCOPE_THREAD, "frames missed",
+ num_frames_missed, "samples dropped",
+ samples_dropped_from_buffer_);
}
}
frame_capture_time_ = recorded_time - buffer_fill_duration;
@@ -135,8 +134,8 @@ class AudioEncoder::ImplBase
const int num_samples_to_xfer = std::min(
samples_per_frame_ - buffer_fill_end_, audio_bus->frames() - src_pos);
DCHECK_EQ(audio_bus->channels(), num_channels_);
- TransferSamplesIntoBuffer(
- audio_bus.get(), src_pos, buffer_fill_end_, num_samples_to_xfer);
+ TransferSamplesIntoBuffer(audio_bus.get(), src_pos, buffer_fill_end_,
+ num_samples_to_xfer);
src_pos += num_samples_to_xfer;
buffer_fill_end_ += num_samples_to_xfer;
@@ -159,7 +158,7 @@ class AudioEncoder::ImplBase
// by the signal duration.
audio_frame->encoder_utilization =
(base::TimeTicks::Now() - start_time) / frame_duration_;
-
+ audio_frame->encoder_bitrate = bitrate_;
TRACE_EVENT_NESTABLE_ASYNC_END1(
"cast.stream", "Audio Encode", TRACE_ID_LOCAL(audio_frame.get()),
"encoder_utilization", audio_frame->encoder_utilization);
@@ -195,6 +194,7 @@ class AudioEncoder::ImplBase
const Codec codec_;
const int num_channels_;
const int samples_per_frame_;
+ const int bitrate_;
const FrameEncodedCallback callback_;
// Subclass' ctor is expected to set this to STATUS_INITIALIZED.
@@ -244,6 +244,7 @@ class AudioEncoder::OpusImpl final : public AudioEncoder::ImplBase {
num_channels,
sampling_rate,
sampling_rate / kDefaultFramesPerSecond, /* 10 ms frames */
+ bitrate,
std::move(callback)),
encoder_memory_(new uint8_t[opus_encoder_get_size(num_channels)]),
opus_encoder_(reinterpret_cast<OpusEncoder*>(encoder_memory_.get())),
@@ -253,9 +254,7 @@ class AudioEncoder::OpusImpl final : public AudioEncoder::ImplBase {
!IsValidFrameDuration(frame_duration_)) {
return;
}
- if (opus_encoder_init(opus_encoder_,
- sampling_rate,
- num_channels,
+ if (opus_encoder_init(opus_encoder_, sampling_rate, num_channels,
OPUS_APPLICATION_AUDIO) != OPUS_OK) {
ImplBase::operational_status_ = STATUS_INVALID_CONFIGURATION;
return;
@@ -352,6 +351,7 @@ class AudioEncoder::AppleAacImpl final : public AudioEncoder::ImplBase {
num_channels,
sampling_rate,
kAccessUnitSamples,
+ bitrate,
std::move(callback)),
input_buffer_(AudioBus::Create(num_channels, kAccessUnitSamples)),
input_bus_(AudioBus::CreateWrapper(num_channels)),
@@ -405,7 +405,8 @@ class AudioEncoder::AppleAacImpl final : public AudioEncoder::ImplBase {
in_asbd.mSampleRate = sampling_rate;
in_asbd.mFormatID = kAudioFormatLinearPCM;
in_asbd.mFormatFlags =
- kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
+ AudioFormatFlags{kAudioFormatFlagsNativeFloatPacked} |
+ kAudioFormatFlagIsNonInterleaved;
in_asbd.mChannelsPerFrame = num_channels_;
in_asbd.mBitsPerChannel = sizeof(float) * 8;
in_asbd.mFramesPerPacket = 1;
@@ -419,11 +420,8 @@ class AudioEncoder::AppleAacImpl final : public AudioEncoder::ImplBase {
out_asbd.mFormatID = kAudioFormatMPEG4AAC;
out_asbd.mChannelsPerFrame = num_channels_;
UInt32 prop_size = sizeof(out_asbd);
- if (AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
- 0,
- nullptr,
- &prop_size,
- &out_asbd) != noErr) {
+ if (AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, nullptr,
+ &prop_size, &out_asbd) != noErr) {
return false;
}
@@ -436,8 +434,7 @@ class AudioEncoder::AppleAacImpl final : public AudioEncoder::ImplBase {
prop_size = sizeof(out_asbd);
if (AudioConverterGetProperty(converter_,
kAudioConverterCurrentOutputStreamDescription,
- &prop_size,
- &out_asbd) != noErr) {
+ &prop_size, &out_asbd) != noErr) {
return false;
}
@@ -446,9 +443,8 @@ class AudioEncoder::AppleAacImpl final : public AudioEncoder::ImplBase {
// or compatible with the output sampling rate or channels).
if (bitrate > 0) {
prop_size = sizeof(int);
- if (AudioConverterSetProperty(
- converter_, kAudioConverterEncodeBitRate, prop_size, &bitrate) !=
- noErr) {
+ if (AudioConverterSetProperty(converter_, kAudioConverterEncodeBitRate,
+ prop_size, &bitrate) != noErr) {
return false;
}
}
@@ -460,10 +456,8 @@ class AudioEncoder::AppleAacImpl final : public AudioEncoder::ImplBase {
if (max_access_unit_size == 0) {
prop_size = sizeof(max_access_unit_size);
if (AudioConverterGetProperty(
- converter_,
- kAudioConverterPropertyMaximumOutputPacketSize,
- &prop_size,
- &max_access_unit_size) != noErr) {
+ converter_, kAudioConverterPropertyMaximumOutputPacketSize,
+ &prop_size, &max_access_unit_size) != noErr) {
return false;
}
}
@@ -490,34 +484,25 @@ class AudioEncoder::AppleAacImpl final : public AudioEncoder::ImplBase {
UInt32 cookie_size;
if (AudioConverterGetPropertyInfo(converter_,
kAudioConverterCompressionMagicCookie,
- &cookie_size,
- nullptr) != noErr) {
+ &cookie_size, nullptr) != noErr) {
return false;
}
std::unique_ptr<uint8_t[]> cookie_data(new uint8_t[cookie_size]);
if (AudioConverterGetProperty(converter_,
kAudioConverterCompressionMagicCookie,
- &cookie_size,
- cookie_data.get()) != noErr) {
+ &cookie_size, cookie_data.get()) != noErr) {
return false;
}
- if (AudioFileInitializeWithCallbacks(this,
- &FileReadCallback,
- &FileWriteCallback,
- &FileGetSizeCallback,
- &FileSetSizeCallback,
- kAudioFileAAC_ADTSType,
- &out_asbd,
- 0,
- &file_) != noErr) {
+ if (AudioFileInitializeWithCallbacks(
+ this, &FileReadCallback, &FileWriteCallback, &FileGetSizeCallback,
+ &FileSetSizeCallback, kAudioFileAAC_ADTSType, &out_asbd, 0,
+ &file_) != noErr) {
return false;
}
- if (AudioFileSetProperty(file_,
- kAudioFilePropertyMagicCookieData,
- cookie_size,
- cookie_data.get()) != noErr) {
+ if (AudioFileSetProperty(file_, kAudioFilePropertyMagicCookieData,
+ cookie_size, cookie_data.get()) != noErr) {
return false;
}
@@ -552,24 +537,21 @@ class AudioEncoder::AppleAacImpl final : public AudioEncoder::ImplBase {
// Copy the samples into the input buffer.
DCHECK_EQ(input_bus_->channel(0), input_buffer_->channel(0));
- audio_bus->CopyPartialFramesTo(
- source_offset, num_samples, buffer_fill_offset, input_buffer_.get());
+ audio_bus->CopyPartialFramesTo(source_offset, num_samples,
+ buffer_fill_offset, input_buffer_.get());
}
bool EncodeFromFilledBuffer(std::string* out) final {
// Reset the buffer size field to the buffer capacity.
converter_abl_.mBuffers[0].mDataByteSize = max_access_unit_size_;
- // Encode the current input buffer. This is a sychronous call.
+ // Encode the current input buffer. This is a synchronous call.
OSStatus oserr;
UInt32 io_num_packets = 1;
AudioStreamPacketDescription packet_description;
- oserr = AudioConverterFillComplexBuffer(converter_,
- &ConverterFillDataCallback,
- this,
- &io_num_packets,
- &converter_abl_,
- &packet_description);
+ oserr = AudioConverterFillComplexBuffer(
+ converter_, &ConverterFillDataCallback, this, &io_num_packets,
+ &converter_abl_, &packet_description);
if (oserr != noErr || io_num_packets == 0) {
return false;
}
@@ -580,13 +562,10 @@ class AudioEncoder::AppleAacImpl final : public AudioEncoder::ImplBase {
// Set the current output buffer and emit an ADTS-wrapped AAC access unit.
// This is a synchronous call. After it returns, reset the output buffer.
output_buffer_ = out;
- oserr = AudioFileWritePackets(file_,
- false,
- converter_abl_.mBuffers[0].mDataByteSize,
- &packet_description,
- num_access_units_,
- &io_num_packets,
- converter_abl_.mBuffers[0].mData);
+ oserr = AudioFileWritePackets(
+ file_, false, converter_abl_.mBuffers[0].mDataByteSize,
+ &packet_description, num_access_units_, &io_num_packets,
+ converter_abl_.mBuffers[0].mData);
output_buffer_ = nullptr;
if (oserr != noErr || io_num_packets == 0) {
return false;
@@ -718,6 +697,7 @@ class AudioEncoder::Pcm16Impl final : public AudioEncoder::ImplBase {
num_channels,
sampling_rate,
sampling_rate / kDefaultFramesPerSecond, /* 10 ms frames */
+ 0 /* bitrate, which is unused for the PCM16 implementation */,
std::move(callback)),
buffer_(new int16_t[num_channels * samples_per_frame_]) {
if (ImplBase::operational_status_ != STATUS_UNINITIALIZED)
@@ -765,7 +745,7 @@ AudioEncoder::AudioEncoder(
: cast_environment_(cast_environment) {
// Note: It doesn't matter which thread constructs AudioEncoder, just so long
// as all calls to InsertAudio() are by the same thread.
- insert_thread_checker_.DetachFromThread();
+ DETACH_FROM_THREAD(insert_thread_checker_);
switch (codec) {
#if !BUILDFLAG(IS_IOS)
case CODEC_AUDIO_OPUS:
@@ -792,7 +772,7 @@ AudioEncoder::AudioEncoder(
AudioEncoder::~AudioEncoder() = default;
OperationalStatus AudioEncoder::InitializationResult() const {
- DCHECK(insert_thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_THREAD(insert_thread_checker_);
if (impl_.get()) {
return impl_->InitializationResult();
}
@@ -800,7 +780,7 @@ OperationalStatus AudioEncoder::InitializationResult() const {
}
int AudioEncoder::GetSamplesPerFrame() const {
- DCHECK(insert_thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_THREAD(insert_thread_checker_);
if (InitializationResult() != STATUS_INITIALIZED) {
NOTREACHED();
return std::numeric_limits<int>::max();
@@ -809,7 +789,7 @@ int AudioEncoder::GetSamplesPerFrame() const {
}
base::TimeDelta AudioEncoder::GetFrameDuration() const {
- DCHECK(insert_thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_THREAD(insert_thread_checker_);
if (InitializationResult() != STATUS_INITIALIZED) {
NOTREACHED();
return base::TimeDelta();
@@ -818,8 +798,8 @@ base::TimeDelta AudioEncoder::GetFrameDuration() const {
}
void AudioEncoder::InsertAudio(std::unique_ptr<AudioBus> audio_bus,
- const base::TimeTicks& recorded_time) {
- DCHECK(insert_thread_checker_.CalledOnValidThread());
+ const base::TimeTicks recorded_time) {
+ DCHECK_CALLED_ON_VALID_THREAD(insert_thread_checker_);
DCHECK(audio_bus.get());
if (InitializationResult() != STATUS_INITIALIZED) {
NOTREACHED();
diff --git a/chromium/media/cast/sender/audio_encoder.h b/chromium/media/cast/encoding/audio_encoder.h
index d0160e29570..1e6f009c437 100644
--- a/chromium/media/cast/sender/audio_encoder.h
+++ b/chromium/media/cast/encoding/audio_encoder.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_AUDIO_ENCODER_H_
-#define MEDIA_CAST_SENDER_AUDIO_ENCODER_H_
+#ifndef MEDIA_CAST_ENCODING_AUDIO_ENCODER_H_
+#define MEDIA_CAST_ENCODING_AUDIO_ENCODER_H_
#include <memory>
@@ -11,8 +11,8 @@
#include "base/threading/thread_checker.h"
#include "media/base/audio_bus.h"
#include "media/cast/cast_environment.h"
+#include "media/cast/common/sender_encoded_frame.h"
#include "media/cast/constants.h"
-#include "media/cast/sender/sender_encoded_frame.h"
namespace base {
class TimeTicks;
@@ -46,7 +46,7 @@ class AudioEncoder {
base::TimeDelta GetFrameDuration() const;
void InsertAudio(std::unique_ptr<AudioBus> audio_bus,
- const base::TimeTicks& recorded_time);
+ base::TimeTicks recorded_time);
private:
class ImplBase;
@@ -58,10 +58,10 @@ class AudioEncoder {
scoped_refptr<ImplBase> impl_;
// Used to ensure only one thread invokes InsertAudio().
- base::ThreadChecker insert_thread_checker_;
+ THREAD_CHECKER(insert_thread_checker_);
};
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_AUDIO_ENCODER_H_
+#endif // MEDIA_CAST_ENCODING_AUDIO_ENCODER_H_
diff --git a/chromium/media/cast/sender/audio_encoder_unittest.cc b/chromium/media/cast/encoding/audio_encoder_unittest.cc
index 0ae5bdffacc..32802666ad8 100644
--- a/chromium/media/cast/sender/audio_encoder_unittest.cc
+++ b/chromium/media/cast/encoding/audio_encoder_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/audio_encoder.h"
+#include "media/cast/encoding/audio_encoder.h"
#include <stddef.h>
#include <stdint.h>
@@ -22,6 +22,7 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/common/rtp_time.h"
+#include "media/cast/common/sender_encoded_frame.h"
#include "media/cast/test/utility/audio_utility.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -44,8 +45,8 @@ class TestEncodedAudioFrameReceiver {
int frames_received() const { return frames_received_; }
- void SetCaptureTimeBounds(const base::TimeTicks& lower_bound,
- const base::TimeTicks& upper_bound) {
+ void SetCaptureTimeBounds(base::TimeTicks lower_bound,
+ base::TimeTicks upper_bound) {
lower_bound_ = lower_bound;
upper_bound_ = upper_bound;
}
@@ -72,7 +73,7 @@ class TestEncodedAudioFrameReceiver {
EXPECT_GT(upper_bound_, encoded_frame->reference_time);
EXPECT_LE(0.0, encoded_frame->encoder_utilization);
- EXPECT_EQ(-1.0, encoded_frame->lossy_utilization);
+ EXPECT_EQ(-1.0, encoded_frame->lossiness);
++frames_received_;
}
@@ -156,10 +157,8 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
private:
void CreateObjectsForCodec(Codec codec) {
audio_bus_factory_.reset(
- new TestAudioBusFactory(kNumChannels,
- kDefaultAudioSamplingRate,
- TestAudioBusFactory::kMiddleANoteFreq,
- 0.5f));
+ new TestAudioBusFactory(kNumChannels, kDefaultAudioSamplingRate,
+ TestAudioBusFactory::kMiddleANoteFreq, 0.5f));
receiver_.reset(new TestEncodedAudioFrameReceiver());
diff --git a/chromium/media/cast/sender/av1_encoder.cc b/chromium/media/cast/encoding/av1_encoder.cc
index fc7e5fbfa86..2d80c5c932e 100644
--- a/chromium/media/cast/sender/av1_encoder.cc
+++ b/chromium/media/cast/encoding/av1_encoder.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/av1_encoder.h"
+#include "media/cast/encoding/av1_encoder.h"
#include "base/logging.h"
#include "media/base/video_frame.h"
+#include "media/cast/common/sender_encoded_frame.h"
#include "media/cast/constants.h"
#include "third_party/libaom/source/libaom/aom/aomcx.h"
@@ -268,7 +269,6 @@ void Av1Encoder::Encode(scoped_refptr<media::VideoFrame> video_frame,
const base::TimeDelta processing_time = base::TimeTicks::Now() - start_time;
encoded_frame->encoder_utilization =
processing_time / predicted_frame_duration;
-
// Compute lossy utilization. The AV1 encoder took an estimated guess at what
// quantizer value would produce an encoded frame size as close to the target
// as possible. Now that the frame has been encoded and the number of bytes
@@ -277,6 +277,7 @@ void Av1Encoder::Encode(scoped_refptr<media::VideoFrame> video_frame,
// used as the lossy utilization.
const double actual_bitrate =
encoded_frame->data.size() * 8.0 / predicted_frame_duration.InSecondsF();
+ encoded_frame->encoder_bitrate = actual_bitrate;
const double target_bitrate = 1000.0 * config_.rc_target_bitrate;
DCHECK_GT(target_bitrate, 0.0);
const double bitrate_utilization = actual_bitrate / target_bitrate;
@@ -287,12 +288,12 @@ void Av1Encoder::Encode(scoped_refptr<media::VideoFrame> video_frame,
// Side note: If it was possible for the encoder to encode within the target
// number of bytes, the |perfect_quantizer| will be in the range [0.0,63.0].
// If it was never possible, the value will be greater than 63.0.
- encoded_frame->lossy_utilization = perfect_quantizer / 63.0;
+ encoded_frame->lossiness = perfect_quantizer / 63.0;
DVLOG(2) << "AV1 encoded frame_id " << encoded_frame->frame_id
<< ", sized: " << encoded_frame->data.size()
<< ", encoder_utilization: " << encoded_frame->encoder_utilization
- << ", lossy_utilization: " << encoded_frame->lossy_utilization
+ << ", lossiness: " << encoded_frame->lossiness
<< " (quantizer chosen by the encoder was " << quantizer << ')';
if (encoded_frame->dependency == EncodedFrame::KEY) {
diff --git a/chromium/media/cast/sender/av1_encoder.h b/chromium/media/cast/encoding/av1_encoder.h
index 4a0ae6dfffc..0b175eac15d 100644
--- a/chromium/media/cast/sender/av1_encoder.h
+++ b/chromium/media/cast/encoding/av1_encoder.h
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_AV1_ENCODER_H_
-#define MEDIA_CAST_SENDER_AV1_ENCODER_H_
+#ifndef MEDIA_CAST_ENCODING_AV1_ENCODER_H_
+#define MEDIA_CAST_ENCODING_AV1_ENCODER_H_
#include <stdint.h>
#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
#include "media/base/feedback_signal_accumulator.h"
#include "media/cast/cast_config.h"
-#include "media/cast/sender/software_video_encoder.h"
+#include "media/cast/common/frame_id.h"
+#include "media/cast/encoding/software_video_encoder.h"
#include "third_party/libaom/source/libaom/aom/aom_encoder.h"
-
-#include "base/time/time.h"
#include "ui/gfx/geometry/size.h"
namespace media {
@@ -87,4 +87,4 @@ class Av1Encoder final : public SoftwareVideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_AV1_ENCODER_H_
+#endif // MEDIA_CAST_ENCODING_AV1_ENCODER_H_
diff --git a/chromium/media/cast/sender/external_video_encoder.cc b/chromium/media/cast/encoding/external_video_encoder.cc
index 91780c98f0d..c0172b470be 100644
--- a/chromium/media/cast/sender/external_video_encoder.cc
+++ b/chromium/media/cast/encoding/external_video_encoder.cc
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/external_video_encoder.h"
+#include "media/cast/encoding/external_video_encoder.h"
#include <array>
#include <cmath>
+#include <list>
#include <sstream>
#include <utility>
-#if BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#if BUILDFLAG(IS_CHROMEOS)
#include "base/cpu.h" // nogncheck
#include "base/no_destructor.h" // nogncheck
#include "third_party/re2/src/re2/re2.h" // nogncheck
@@ -35,10 +36,11 @@
#include "media/base/video_types.h"
#include "media/base/video_util.h"
#include "media/cast/cast_config.h"
+#include "media/cast/common/encoded_frame.h"
#include "media/cast/common/rtp_time.h"
+#include "media/cast/common/sender_encoded_frame.h"
+#include "media/cast/encoding/vpx_quantizer_parser.h"
#include "media/cast/logging/logging_defines.h"
-#include "media/cast/net/cast_transport_config.h"
-#include "media/cast/sender/vpx_quantizer_parser.h"
#include "media/video/h264_parser.h"
namespace {
@@ -71,7 +73,7 @@ bool IsHardwareVP8EncodingSupported(
base::StringPiece receiver_model_name,
const std::vector<media::VideoEncodeAccelerator::SupportedProfile>&
profiles) {
-#if BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#if BUILDFLAG(IS_CHROMEOS)
// NOTE: the hardware encoder on some Chrome OS devices does not play well
// with Vizio TVs. See https://crbug.com/1238774 for more information.
// Vizio uses the TV model string for the receiver model name.
@@ -101,12 +103,12 @@ bool IsHardwareH264EncodingSupported(
profiles) {
// TODO(b/169533953): Look into chromecast fails to decode bitstreams produced
// by the AMD HW encoder.
-#if BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#if BUILDFLAG(IS_CHROMEOS)
static const base::NoDestructor<base::CPU> cpuid;
static const bool is_amd = cpuid->vendor_name() == "AuthenticAMD";
if (is_amd)
return false;
-#endif // BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#endif // BUILDFLAG(IS_CHROMEOS)
// TODO(crbug.com/1015482): Look into why H.264 hardware encoder on MacOS is
// broken.
@@ -460,11 +462,12 @@ class ExternalVideoEncoder::VEAClientImpl final
static_cast<double>(in_progress_frame_encodes_.size()) /
kBacklogRedlineThreshold;
- const double actual_bit_rate =
+ const double actual_bitrate =
encoded_frame->data.size() * 8.0 / frame_duration.InSecondsF();
+ encoded_frame->encoder_bitrate = actual_bitrate;
DCHECK_GT(request.target_bit_rate, 0);
const double bitrate_utilization =
- actual_bit_rate / request.target_bit_rate;
+ actual_bitrate / request.target_bit_rate;
double quantizer = QuantizerEstimator::NO_RESULT;
// If the quantizer can be parsed from the key frame, try to parse
// the following delta frames as well.
@@ -505,7 +508,7 @@ class ExternalVideoEncoder::VEAClientImpl final
codec_profile_ == media::VP8PROFILE_ANY
? static_cast<int>(QuantizerEstimator::MAX_VP8_QUANTIZER)
: static_cast<int>(kMaxH264Quantizer);
- encoded_frame->lossy_utilization =
+ encoded_frame->lossiness =
bitrate_utilization * (quantizer / max_quantizer);
}
} else {
@@ -925,8 +928,7 @@ double QuantizerEstimator::EstimateForKeyFrame(const VideoFrame& frame) {
// Copy the row of pixels into the buffer. This will be used when
// generating histograms for future delta frames.
- memcpy(last_frame_pixel_buffer_.get() + i * size.width(),
- row_begin,
+ memcpy(last_frame_pixel_buffer_.get() + i * size.width(), row_begin,
size.width());
}
@@ -986,8 +988,7 @@ double QuantizerEstimator::EstimateForDeltaFrame(const VideoFrame& frame) {
bool QuantizerEstimator::CanExamineFrame(const VideoFrame& frame) {
DCHECK_EQ(8, VideoFrame::PlaneHorizontalBitsPerPixel(frame.format(),
VideoFrame::kYPlane));
- return media::IsYuvPlanar(frame.format()) &&
- !frame.visible_rect().IsEmpty();
+ return media::IsYuvPlanar(frame.format()) && !frame.visible_rect().IsEmpty();
}
// static
diff --git a/chromium/media/cast/sender/external_video_encoder.h b/chromium/media/cast/encoding/external_video_encoder.h
index 9eb6973e150..4f81e16d4a4 100644
--- a/chromium/media/cast/sender/external_video_encoder.h
+++ b/chromium/media/cast/encoding/external_video_encoder.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_EXTERNAL_VIDEO_ENCODER_H_
-#define MEDIA_CAST_SENDER_EXTERNAL_VIDEO_ENCODER_H_
+#ifndef MEDIA_CAST_ENCODING_EXTERNAL_VIDEO_ENCODER_H_
+#define MEDIA_CAST_ENCODING_EXTERNAL_VIDEO_ENCODER_H_
#include <stddef.h>
#include <stdint.h>
@@ -12,8 +12,8 @@
#include "base/memory/weak_ptr.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/sender/size_adaptable_video_encoder_base.h"
-#include "media/cast/sender/video_encoder.h"
+#include "media/cast/encoding/size_adaptable_video_encoder_base.h"
+#include "media/cast/encoding/video_encoder.h"
#include "media/video/video_encode_accelerator.h"
#include "ui/gfx/geometry/size.h"
@@ -123,11 +123,9 @@ class SizeAdaptableExternalVideoEncoder final
// value is related to the complexity of the content of the frame.
class QuantizerEstimator {
public:
- enum {
- NO_RESULT = -1,
- MIN_VP8_QUANTIZER = 4,
- MAX_VP8_QUANTIZER = 63,
- };
+ static constexpr int NO_RESULT = -1;
+ static constexpr int MIN_VP8_QUANTIZER = 4;
+ static constexpr int MAX_VP8_QUANTIZER = 63;
QuantizerEstimator();
@@ -170,4 +168,4 @@ class QuantizerEstimator {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_EXTERNAL_VIDEO_ENCODER_H_
+#endif // MEDIA_CAST_ENCODING_EXTERNAL_VIDEO_ENCODER_H_
diff --git a/chromium/media/cast/sender/external_video_encoder_unittest.cc b/chromium/media/cast/encoding/external_video_encoder_unittest.cc
index 65021a07ba7..cdf5e72b102 100644
--- a/chromium/media/cast/sender/external_video_encoder_unittest.cc
+++ b/chromium/media/cast/encoding/external_video_encoder_unittest.cc
@@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/external_video_encoder.h"
+#include "media/cast/encoding/external_video_encoder.h"
#include <stdint.h>
+#include "build/build_config.h"
#include "media/base/video_frame.h"
#include "media/base/video_types.h"
#include "testing/gtest/include/gtest/gtest.h"
-#if BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#if BUILDFLAG(IS_CHROMEOS)
#include "base/cpu.h" // nogncheck
#endif
@@ -20,16 +21,12 @@ namespace {
scoped_refptr<VideoFrame> CreateFrame(const uint8_t* y_plane_data,
const gfx::Size& size) {
- scoped_refptr<VideoFrame> result = VideoFrame::CreateFrame(PIXEL_FORMAT_I420,
- size,
- gfx::Rect(size),
- size,
- base::TimeDelta());
+ scoped_refptr<VideoFrame> result = VideoFrame::CreateFrame(
+ PIXEL_FORMAT_I420, size, gfx::Rect(size), size, base::TimeDelta());
for (int y = 0, y_end = size.height(); y < y_end; ++y) {
memcpy(result->visible_data(VideoFrame::kYPlane) +
y * result->stride(VideoFrame::kYPlane),
- y_plane_data + y * size.width(),
- size.width());
+ y_plane_data + y * size.width(), size.width());
}
return result;
}
@@ -45,7 +42,7 @@ static const std::vector<media::VideoEncodeAccelerator::SupportedProfile>
constexpr std::array<const char*, 3> kFirstPartyModelNames{
{"Chromecast", "Eureka Dongle", "Chromecast Ultra"}};
-} // namespace
+} // namespace
TEST(QuantizerEstimatorTest, EstimatesForTrivialFrames) {
QuantizerEstimator qe;
@@ -104,7 +101,7 @@ TEST(ExternalVideoEncoderTest,
for (const char* model_name : kVizioTvModelNames) {
constexpr bool should_recommend =
-#if BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#if BUILDFLAG(IS_CHROMEOS)
false;
#else
true;
@@ -127,7 +124,7 @@ TEST(ExternalVideoEncoderTest, RecommendsH264HardwareEncoderProperly) {
for (const char* model_name : kFirstPartyModelNames) {
// On ChromeOS only, disable hardware encoder on AMD chipsets due to
// failure on Chromecast chipsets to decode.
-#if BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#if BUILDFLAG(IS_CHROMEOS)
if (base::CPU().vendor_name() == "AuthenticAMD") {
EXPECT_FALSE(ExternalVideoEncoder::IsRecommended(
CODEC_VIDEO_H264, std::string(model_name), kValidVeaProfiles));
diff --git a/chromium/media/cast/sender/fake_software_video_encoder.cc b/chromium/media/cast/encoding/fake_software_video_encoder.cc
index 69ecac5ec39..b300e96ac11 100644
--- a/chromium/media/cast/sender/fake_software_video_encoder.cc
+++ b/chromium/media/cast/encoding/fake_software_video_encoder.cc
@@ -2,14 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/fake_software_video_encoder.h"
+#include "media/cast/encoding/fake_software_video_encoder.h"
#include <stddef.h>
#include "base/json/json_writer.h"
#include "base/values.h"
#include "media/base/video_frame.h"
+#include "media/cast/common/encoded_frame.h"
+#include "media/cast/common/frame_id.h"
#include "media/cast/common/rtp_time.h"
+#include "media/cast/common/sender_encoded_frame.h"
#include "media/cast/constants.h"
namespace media {
@@ -63,10 +66,10 @@ void FakeSoftwareVideoEncoder::Encode(
if (encoded_frame->dependency == EncodedFrame::KEY) {
encoded_frame->encoder_utilization = 1.0;
- encoded_frame->lossy_utilization = 6.0;
+ encoded_frame->lossiness = 6.0;
} else {
encoded_frame->encoder_utilization = 0.8;
- encoded_frame->lossy_utilization = 0.8;
+ encoded_frame->lossiness = 0.8;
}
}
diff --git a/chromium/media/cast/sender/fake_software_video_encoder.h b/chromium/media/cast/encoding/fake_software_video_encoder.h
index 6b515270b67..c1c26c06bdf 100644
--- a/chromium/media/cast/sender/fake_software_video_encoder.h
+++ b/chromium/media/cast/encoding/fake_software_video_encoder.h
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
-#define MEDIA_CAST_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
+#ifndef MEDIA_CAST_ENCODING_FAKE_SOFTWARE_VIDEO_ENCODER_H_
+#define MEDIA_CAST_ENCODING_FAKE_SOFTWARE_VIDEO_ENCODER_H_
#include <stdint.h>
#include "media/cast/cast_config.h"
-#include "media/cast/sender/software_video_encoder.h"
+#include "media/cast/common/frame_id.h"
+#include "media/cast/encoding/software_video_encoder.h"
#include "ui/gfx/geometry/size.h"
namespace media {
@@ -16,7 +17,7 @@ namespace cast {
class FakeSoftwareVideoEncoder final : public SoftwareVideoEncoder {
public:
- FakeSoftwareVideoEncoder(const FrameSenderConfig& video_config);
+ explicit FakeSoftwareVideoEncoder(const FrameSenderConfig& video_config);
~FakeSoftwareVideoEncoder() final;
// SoftwareVideoEncoder implementations.
@@ -38,4 +39,4 @@ class FakeSoftwareVideoEncoder final : public SoftwareVideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
+#endif // MEDIA_CAST_ENCODING_FAKE_SOFTWARE_VIDEO_ENCODER_H_
diff --git a/chromium/media/cast/sender/h264_vt_encoder.cc b/chromium/media/cast/encoding/h264_vt_encoder.cc
index c5a5fd9b772..67177fc60b4 100644
--- a/chromium/media/cast/sender/h264_vt_encoder.cc
+++ b/chromium/media/cast/encoding/h264_vt_encoder.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/h264_vt_encoder.h"
+#include "media/cast/encoding/h264_vt_encoder.h"
#include <stddef.h>
@@ -15,14 +15,16 @@
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/power_monitor/power_monitor.h"
#include "base/synchronization/lock.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/base/mac/video_frame_mac.h"
#include "media/cast/common/rtp_time.h"
+#include "media/cast/common/sender_encoded_frame.h"
+#include "media/cast/common/video_frame_factory.h"
#include "media/cast/constants.h"
-#include "media/cast/sender/video_frame_factory.h"
namespace media {
namespace cast {
@@ -159,6 +161,8 @@ H264VideoToolboxEncoder::H264VideoToolboxEncoder(
StatusChangeCallback status_change_cb)
: cast_environment_(cast_environment),
video_config_(video_config),
+ average_bitrate_((video_config_.min_bitrate + video_config_.max_bitrate) /
+ 2),
status_change_cb_(std::move(status_change_cb)),
next_frame_id_(FrameId::first()),
encode_next_frame_as_keyframe_(false),
@@ -198,7 +202,7 @@ H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
}
void H264VideoToolboxEncoder::ResetCompressionSession() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
// Ignore reset requests while power suspended.
if (power_suspended_)
@@ -298,11 +302,8 @@ void H264VideoToolboxEncoder::ConfigureCompressionSession() {
240);
session_property_setter.Set(
kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, 240);
- // TODO(jfroy): implement better bitrate control
- // https://crbug.com/425352
- session_property_setter.Set(
- kVTCompressionPropertyKey_AverageBitRate,
- (video_config_.min_bitrate + video_config_.max_bitrate) / 2);
+ session_property_setter.Set(kVTCompressionPropertyKey_AverageBitRate,
+ average_bitrate_);
session_property_setter.Set(
kVTCompressionPropertyKey_ExpectedFrameRate,
static_cast<int>(video_config_.max_frame_rate + 0.5));
@@ -321,7 +322,7 @@ void H264VideoToolboxEncoder::ConfigureCompressionSession() {
}
void H264VideoToolboxEncoder::DestroyCompressionSession() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
// If the compression session exists, invalidate it. This blocks until all
// pending output callbacks have returned and any internal threads have
@@ -347,7 +348,7 @@ bool H264VideoToolboxEncoder::EncodeVideoFrame(
scoped_refptr<media::VideoFrame> video_frame,
base::TimeTicks reference_time,
FrameEncodedCallback frame_encoded_callback) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(!frame_encoded_callback.is_null());
// Reject empty video frames.
@@ -415,7 +416,7 @@ bool H264VideoToolboxEncoder::EncodeVideoFrame(
}
void H264VideoToolboxEncoder::UpdateFrameSize(const gfx::Size& size_needed) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
// Our video frame factory posts a task to update the frame size when its
// cache of the frame size differs from what the client requested. To avoid
@@ -441,24 +442,24 @@ void H264VideoToolboxEncoder::UpdateFrameSize(const gfx::Size& size_needed) {
}
void H264VideoToolboxEncoder::SetBitRate(int /*new_bit_rate*/) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
// VideoToolbox does not seem to support bitrate reconfiguration.
}
void H264VideoToolboxEncoder::GenerateKeyFrame() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
encode_next_frame_as_keyframe_ = true;
}
std::unique_ptr<VideoFrameFactory>
H264VideoToolboxEncoder::CreateVideoFrameFactory() {
- DCHECK(thread_checker_.CalledOnValidThread());
- return std::unique_ptr<VideoFrameFactory>(
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ return base::WrapUnique<VideoFrameFactory>(
new VideoFrameFactoryImpl::Proxy(video_frame_factory_));
}
void H264VideoToolboxEncoder::EmitFrames() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (!compression_session_)
return;
@@ -498,7 +499,7 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
auto* encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
std::unique_ptr<InProgressH264VTFrameEncode> request(
reinterpret_cast<InProgressH264VTFrameEncode*>(request_opaque));
- bool keyframe = false;
+ bool is_keyframe = false;
bool has_frame_data = false;
if (status != noErr) {
@@ -516,8 +517,8 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
// If the NotSync key is not present, it implies Sync, which indicates a
// keyframe (at least I think, VT documentation is, erm, sparse). Could
// alternatively use kCMSampleAttachmentKey_DependsOnOthers == false.
- keyframe = !CFDictionaryContainsKey(sample_attachments,
- kCMSampleAttachmentKey_NotSync);
+ is_keyframe = !CFDictionaryContainsKey(sample_attachments,
+ kCMSampleAttachmentKey_NotSync);
has_frame_data = true;
}
@@ -529,7 +530,7 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
encoded_frame->frame_id = frame_id;
encoded_frame->reference_time = request->reference_time;
encoded_frame->rtp_timestamp = request->rtp_timestamp;
- if (keyframe) {
+ if (is_keyframe) {
encoded_frame->dependency = EncodedFrame::KEY;
encoded_frame->referenced_frame_id = frame_id;
} else {
@@ -547,12 +548,13 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
}
if (has_frame_data) {
- video_toolbox::CopySampleBufferToAnnexBBuffer(sbuf, keyframe,
+ video_toolbox::CopySampleBufferToAnnexBBuffer(sbuf, is_keyframe,
&encoded_frame->data);
}
encoded_frame->encode_completion_time =
encoder->cast_environment_->Clock()->NowTicks();
+ encoded_frame->encoder_bitrate = encoder->average_bitrate_;
encoder->cast_environment_->GetTaskRunner(CastEnvironment::MAIN)
->PostTask(FROM_HERE,
base::BindOnce(std::move(request->frame_encoded_callback),
diff --git a/chromium/media/cast/sender/h264_vt_encoder.h b/chromium/media/cast/encoding/h264_vt_encoder.h
index 8573388d9af..aa05756f77f 100644
--- a/chromium/media/cast/sender/h264_vt_encoder.h
+++ b/chromium/media/cast/encoding/h264_vt_encoder.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_H264_VT_ENCODER_H_
-#define MEDIA_CAST_SENDER_H264_VT_ENCODER_H_
+#ifndef MEDIA_CAST_ENCODING_H264_VT_ENCODER_H_
+#define MEDIA_CAST_ENCODING_H264_VT_ENCODER_H_
#include <stdint.h>
@@ -11,8 +11,8 @@
#include "base/power_monitor/power_observer.h"
#include "base/threading/thread_checker.h"
#include "media/base/mac/videotoolbox_helpers.h"
-#include "media/cast/sender/size_adaptable_video_encoder_base.h"
-#include "media/cast/sender/video_encoder.h"
+#include "media/cast/encoding/size_adaptable_video_encoder_base.h"
+#include "media/cast/encoding/video_encoder.h"
namespace media {
namespace cast {
@@ -88,6 +88,10 @@ class H264VideoToolboxEncoder final : public VideoEncoder,
// invalidate compression sessions.
const FrameSenderConfig video_config_;
+ // The VideoToolbox does not support bitrate configuration, so we use a
+ // constant bitrate determined on construction.
+ const int average_bitrate_;
+
// Frame size of the current compression session. Can be changed by submitting
// a frame of a different size, which will cause a compression session reset.
gfx::Size frame_size_;
@@ -96,7 +100,7 @@ class H264VideoToolboxEncoder final : public VideoEncoder,
const StatusChangeCallback status_change_cb_;
// Thread checker to enforce that this object is used on a specific thread.
- base::ThreadChecker thread_checker_;
+ THREAD_CHECKER(thread_checker_);
// The compression session.
base::ScopedCFTypeRef<VTCompressionSessionRef> compression_session_;
@@ -120,4 +124,4 @@ class H264VideoToolboxEncoder final : public VideoEncoder,
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_H264_VT_ENCODER_H_
+#endif // MEDIA_CAST_ENCODING_H264_VT_ENCODER_H_
diff --git a/chromium/media/cast/sender/h264_vt_encoder_unittest.cc b/chromium/media/cast/encoding/h264_vt_encoder_unittest.cc
index 3af0fd96e3f..5e3fbe4c382 100644
--- a/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
+++ b/chromium/media/cast/encoding/h264_vt_encoder_unittest.cc
@@ -22,9 +22,10 @@
#include "media/base/media_switches.h"
#include "media/base/media_util.h"
#include "media/cast/common/rtp_time.h"
+#include "media/cast/common/sender_encoded_frame.h"
+#include "media/cast/common/video_frame_factory.h"
#include "media/cast/constants.h"
-#include "media/cast/sender/h264_vt_encoder.h"
-#include "media/cast/sender/video_frame_factory.h"
+#include "media/cast/encoding/h264_vt_encoder.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/video_utility.h"
#include "media/ffmpeg/ffmpeg_common.h"
@@ -81,11 +82,10 @@ class MetadataRecorder : public base::RefCountedThreadSafe<MetadataRecorder> {
void PushExpectation(FrameId expected_frame_id,
FrameId expected_last_referenced_frame_id,
RtpTimeTicks expected_rtp_timestamp,
- const base::TimeTicks& expected_reference_time) {
- expectations_.push(Expectation{expected_frame_id,
- expected_last_referenced_frame_id,
- expected_rtp_timestamp,
- expected_reference_time});
+ base::TimeTicks expected_reference_time) {
+ expectations_.push(
+ Expectation{expected_frame_id, expected_last_referenced_frame_id,
+ expected_rtp_timestamp, expected_reference_time});
}
void CompareFrameWithExpected(
@@ -185,8 +185,9 @@ void CreateFrameAndMemsetPlane(VideoFrameFactory* const video_frame_factory) {
CVPixelBufferLockBaseAddress(cv_pixel_buffer, 0);
auto* ptr = CVPixelBufferGetBaseAddressOfPlane(cv_pixel_buffer, 0);
ASSERT_TRUE(ptr);
- memset(ptr, 0xfe, CVPixelBufferGetBytesPerRowOfPlane(cv_pixel_buffer, 0) *
- CVPixelBufferGetHeightOfPlane(cv_pixel_buffer, 0));
+ memset(ptr, 0xfe,
+ CVPixelBufferGetBytesPerRowOfPlane(cv_pixel_buffer, 0) *
+ CVPixelBufferGetHeightOfPlane(cv_pixel_buffer, 0));
CVPixelBufferUnlockBaseAddress(cv_pixel_buffer, 0);
}
diff --git a/chromium/media/cast/sender/size_adaptable_video_encoder_base.cc b/chromium/media/cast/encoding/size_adaptable_video_encoder_base.cc
index 03b76b76ecd..40234d9351b 100644
--- a/chromium/media/cast/sender/size_adaptable_video_encoder_base.cc
+++ b/chromium/media/cast/encoding/size_adaptable_video_encoder_base.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/size_adaptable_video_encoder_base.h"
+#include "media/cast/encoding/size_adaptable_video_encoder_base.h"
#include <utility>
@@ -10,6 +10,7 @@
#include "base/location.h"
#include "base/logging.h"
#include "media/base/video_frame.h"
+#include "media/cast/common/sender_encoded_frame.h"
namespace media {
namespace cast {
@@ -49,7 +50,8 @@ bool SizeAdaptableVideoEncoderBase::EncodeVideoFrame(
}
if (frame_size != frame_size_ || !encoder_) {
VLOG(1) << "Dropping this frame, and future frames until a replacement "
- "encoder is spun-up to handle size " << frame_size.ToString();
+ "encoder is spun-up to handle size "
+ << frame_size.ToString();
TrySpawningReplacementEncoder(frame_size);
return false;
}
@@ -94,7 +96,7 @@ void SizeAdaptableVideoEncoderBase::EmitFrames() {
}
StatusChangeCallback
- SizeAdaptableVideoEncoderBase::CreateEncoderStatusChangeCallback() {
+SizeAdaptableVideoEncoderBase::CreateEncoderStatusChangeCallback() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
return base::BindRepeating(
&SizeAdaptableVideoEncoderBase::OnEncoderStatusChange,
@@ -136,8 +138,7 @@ void SizeAdaptableVideoEncoderBase::TrySpawningReplacementEncoder(
frames_in_encoder_ = kEncoderIsInitializing;
OnEncoderStatusChange(STATUS_CODEC_REINIT_PENDING);
VLOG(1) << "Creating replacement video encoder (for frame size change from "
- << frame_size_.ToString() << " to "
- << size_needed.ToString() << ").";
+ << frame_size_.ToString() << " to " << size_needed.ToString() << ").";
frame_size_ = size_needed;
encoder_ = CreateEncoder();
DCHECK(encoder_);
diff --git a/chromium/media/cast/sender/size_adaptable_video_encoder_base.h b/chromium/media/cast/encoding/size_adaptable_video_encoder_base.h
index 230b759322e..3e7543c1af5 100644
--- a/chromium/media/cast/sender/size_adaptable_video_encoder_base.h
+++ b/chromium/media/cast/encoding/size_adaptable_video_encoder_base.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_SIZE_ADAPTABLE_VIDEO_ENCODER_BASE_H_
-#define MEDIA_CAST_SENDER_SIZE_ADAPTABLE_VIDEO_ENCODER_BASE_H_
+#ifndef MEDIA_CAST_ENCODING_SIZE_ADAPTABLE_VIDEO_ENCODER_BASE_H_
+#define MEDIA_CAST_ENCODING_SIZE_ADAPTABLE_VIDEO_ENCODER_BASE_H_
#include <stdint.h>
@@ -14,12 +14,14 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/constants.h"
-#include "media/cast/sender/video_encoder.h"
+#include "media/cast/encoding/video_encoder.h"
#include "ui/gfx/geometry/size.h"
namespace media {
namespace cast {
+struct SenderEncodedFrame;
+
// Creates and owns a VideoEncoder instance. The owned instance is an
// implementation that does not support changing frame sizes, and so
// SizeAdaptableVideoEncoderBase acts as a proxy to automatically detect when
@@ -49,13 +51,9 @@ class SizeAdaptableVideoEncoderBase : public VideoEncoder {
protected:
// Accessors for subclasses.
- CastEnvironment* cast_environment() const {
- return cast_environment_.get();
- }
+ CastEnvironment* cast_environment() const { return cast_environment_.get(); }
const FrameSenderConfig& video_config() const { return video_config_; }
- const gfx::Size& frame_size() const {
- return frame_size_;
- }
+ const gfx::Size& frame_size() const { return frame_size_; }
FrameId next_frame_id() const { return next_frame_id_; }
// Returns a callback that calls OnEncoderStatusChange(). The callback is
@@ -117,4 +115,4 @@ class SizeAdaptableVideoEncoderBase : public VideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_SIZE_ADAPTABLE_VIDEO_ENCODER_BASE_H_
+#endif // MEDIA_CAST_ENCODING_SIZE_ADAPTABLE_VIDEO_ENCODER_BASE_H_
diff --git a/chromium/media/cast/sender/software_video_encoder.h b/chromium/media/cast/encoding/software_video_encoder.h
index 53d817f9cec..e2232b3722d 100644
--- a/chromium/media/cast/sender/software_video_encoder.h
+++ b/chromium/media/cast/encoding/software_video_encoder.h
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_SOFTWARE_VIDEO_ENCODER_H_
-#define MEDIA_CAST_SENDER_SOFTWARE_VIDEO_ENCODER_H_
+#ifndef MEDIA_CAST_ENCODING_SOFTWARE_VIDEO_ENCODER_H_
+#define MEDIA_CAST_ENCODING_SOFTWARE_VIDEO_ENCODER_H_
#include <stdint.h>
#include "base/memory/ref_counted.h"
-#include "media/cast/sender/sender_encoded_frame.h"
namespace base {
class TimeTicks;
@@ -21,6 +20,8 @@ class VideoFrame;
namespace media {
namespace cast {
+struct SenderEncodedFrame;
+
class SoftwareVideoEncoder {
public:
virtual ~SoftwareVideoEncoder() {}
@@ -44,4 +45,4 @@ class SoftwareVideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_SOFTWARE_VIDEO_ENCODER_H_
+#endif // MEDIA_CAST_ENCODING_SOFTWARE_VIDEO_ENCODER_H_
diff --git a/chromium/media/cast/sender/video_encoder.cc b/chromium/media/cast/encoding/video_encoder.cc
index a9f0ff6782b..24e2de942cf 100644
--- a/chromium/media/cast/sender/video_encoder.cc
+++ b/chromium/media/cast/encoding/video_encoder.cc
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/video_encoder.h"
+#include "media/cast/encoding/video_encoder.h"
+#include "base/memory/ptr_util.h"
#include "build/build_config.h"
-#include "media/cast/sender/external_video_encoder.h"
-#include "media/cast/sender/video_encoder_impl.h"
+#include "media/cast/encoding/external_video_encoder.h"
+#include "media/cast/encoding/video_encoder_impl.h"
#if BUILDFLAG(IS_MAC)
-#include "media/cast/sender/h264_vt_encoder.h"
+#include "media/cast/encoding/h264_vt_encoder.h"
#endif
namespace media {
@@ -25,7 +26,7 @@ std::unique_ptr<VideoEncoder> VideoEncoder::Create(
// perform optimized H.264 encoding.
#if BUILDFLAG(IS_MAC)
if (H264VideoToolboxEncoder::IsSupported(video_config)) {
- return std::unique_ptr<VideoEncoder>(new H264VideoToolboxEncoder(
+ return base::WrapUnique<VideoEncoder>(new H264VideoToolboxEncoder(
cast_environment, video_config, status_change_cb));
}
#endif // BUILDFLAG(IS_MAC)
@@ -33,14 +34,14 @@ std::unique_ptr<VideoEncoder> VideoEncoder::Create(
#if !BUILDFLAG(IS_IOS)
// If the system provides a hardware-accelerated encoder, use it.
if (ExternalVideoEncoder::IsSupported(video_config)) {
- return std::unique_ptr<VideoEncoder>(new SizeAdaptableExternalVideoEncoder(
+ return base::WrapUnique<VideoEncoder>(new SizeAdaptableExternalVideoEncoder(
cast_environment, video_config, std::move(status_change_cb),
create_vea_cb));
}
// Attempt to use the software encoder implementation.
if (VideoEncoderImpl::IsSupported(video_config)) {
- return std::unique_ptr<VideoEncoder>(
+ return base::WrapUnique<VideoEncoder>(
new VideoEncoderImpl(cast_environment, video_config, status_change_cb));
}
#endif // !BUILDFLAG(IS_IOS)
@@ -53,8 +54,7 @@ std::unique_ptr<VideoFrameFactory> VideoEncoder::CreateVideoFrameFactory() {
return nullptr;
}
-void VideoEncoder::EmitFrames() {
-}
+void VideoEncoder::EmitFrames() {}
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/sender/video_encoder.h b/chromium/media/cast/encoding/video_encoder.h
index 1bcb401f5e6..57c13c629ae 100644
--- a/chromium/media/cast/sender/video_encoder.h
+++ b/chromium/media/cast/encoding/video_encoder.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_VIDEO_ENCODER_H_
-#define MEDIA_CAST_SENDER_VIDEO_ENCODER_H_
+#ifndef MEDIA_CAST_ENCODING_VIDEO_ENCODER_H_
+#define MEDIA_CAST_ENCODING_VIDEO_ENCODER_H_
#include <memory>
@@ -11,15 +11,16 @@
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "media/base/video_frame.h"
+#include "media/cast/cast_callbacks.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/cast_sender.h"
-#include "media/cast/sender/sender_encoded_frame.h"
-#include "media/cast/sender/video_frame_factory.h"
+#include "media/cast/common/video_frame_factory.h"
namespace media {
namespace cast {
+struct SenderEncodedFrame;
+
// All these functions are called from the main cast thread.
class VideoEncoder {
public:
@@ -74,4 +75,4 @@ class VideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_VIDEO_ENCODER_H_
+#endif // MEDIA_CAST_ENCODING_VIDEO_ENCODER_H_
diff --git a/chromium/media/cast/sender/video_encoder_impl.cc b/chromium/media/cast/encoding/video_encoder_impl.cc
index 24cdf7e5c64..a93abcec508 100644
--- a/chromium/media/cast/sender/video_encoder_impl.cc
+++ b/chromium/media/cast/encoding/video_encoder_impl.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/video_encoder_impl.h"
+#include "media/cast/encoding/video_encoder_impl.h"
#include "third_party/libaom/libaom_buildflags.h"
#include <utility>
@@ -13,10 +13,11 @@
#include "base/check.h"
#include "media/base/video_frame.h"
#if BUILDFLAG(ENABLE_LIBAOM)
-#include "media/cast/sender/av1_encoder.h"
+#include "media/cast/encoding/av1_encoder.h"
#endif
-#include "media/cast/sender/fake_software_video_encoder.h"
-#include "media/cast/sender/vpx_encoder.h"
+#include "media/cast/common/sender_encoded_frame.h"
+#include "media/cast/encoding/fake_software_video_encoder.h"
+#include "media/cast/encoding/vpx_encoder.h"
namespace media {
namespace cast {
diff --git a/chromium/media/cast/sender/video_encoder_impl.h b/chromium/media/cast/encoding/video_encoder_impl.h
index 9e17e8b44fc..2e5563e154f 100644
--- a/chromium/media/cast/sender/video_encoder_impl.h
+++ b/chromium/media/cast/encoding/video_encoder_impl.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_VIDEO_ENCODER_IMPL_H_
-#define MEDIA_CAST_SENDER_VIDEO_ENCODER_IMPL_H_
+#ifndef MEDIA_CAST_ENCODING_VIDEO_ENCODER_IMPL_H_
+#define MEDIA_CAST_ENCODING_VIDEO_ENCODER_IMPL_H_
#include <memory>
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/sender/software_video_encoder.h"
-#include "media/cast/sender/video_encoder.h"
+#include "media/cast/encoding/software_video_encoder.h"
+#include "media/cast/encoding/video_encoder.h"
namespace media {
class VideoFrame;
@@ -59,4 +59,4 @@ class VideoEncoderImpl final : public VideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_VIDEO_ENCODER_IMPL_H_
+#endif // MEDIA_CAST_ENCODING_VIDEO_ENCODER_IMPL_H_
diff --git a/chromium/media/cast/sender/video_encoder_unittest.cc b/chromium/media/cast/encoding/video_encoder_unittest.cc
index 2ea529513a6..d30944d4f83 100644
--- a/chromium/media/cast/sender/video_encoder_unittest.cc
+++ b/chromium/media/cast/encoding/video_encoder_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/video_encoder.h"
+#include "media/cast/encoding/video_encoder.h"
#include <stdint.h>
@@ -22,7 +22,8 @@
#include "media/base/video_frame.h"
#include "media/cast/cast_environment.h"
#include "media/cast/common/rtp_time.h"
-#include "media/cast/sender/video_frame_factory.h"
+#include "media/cast/common/sender_encoded_frame.h"
+#include "media/cast/common/video_frame_factory.h"
#include "media/cast/test/fake_video_encode_accelerator_factory.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/video_utility.h"
@@ -30,7 +31,7 @@
#if BUILDFLAG(IS_MAC)
#include "base/threading/platform_thread.h"
-#include "media/cast/sender/h264_vt_encoder.h"
+#include "media/cast/encoding/h264_vt_encoder.h"
#endif
namespace media {
@@ -91,13 +92,11 @@ class VideoEncoderTest
ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
}
- bool is_encoder_present() const {
- return !!video_encoder_;
- }
+ bool is_encoder_present() const { return !!video_encoder_; }
bool is_testing_software_vp8_encoder() const {
return video_config_.codec == CODEC_VIDEO_VP8 &&
- !video_config_.use_external_encoder;
+ !video_config_.use_external_encoder;
}
bool is_testing_video_toolbox_encoder() const {
@@ -111,20 +110,16 @@ class VideoEncoderTest
bool is_testing_platform_encoder() const {
return video_config_.use_external_encoder ||
- is_testing_video_toolbox_encoder();
+ is_testing_video_toolbox_encoder();
}
bool encoder_has_resize_delay() const {
return is_testing_platform_encoder() && !is_testing_video_toolbox_encoder();
}
- VideoEncoder* video_encoder() const {
- return video_encoder_.get();
- }
+ VideoEncoder* video_encoder() const { return video_encoder_.get(); }
- void DestroyEncoder() {
- video_encoder_.reset();
- }
+ void DestroyEncoder() { video_encoder_.reset(); }
base::TimeTicks Now() { return testing_clock_.NowTicks(); }
@@ -318,8 +313,9 @@ TEST_P(VideoEncoderTest, EncodesVariedFrameSizes) {
if (is_testing_software_vp8_encoder()) {
ASSERT_TRUE(std::isfinite(encoded_frame->encoder_utilization));
EXPECT_LE(0.0, encoded_frame->encoder_utilization);
- ASSERT_TRUE(std::isfinite(encoded_frame->lossy_utilization));
- EXPECT_LE(0.0, encoded_frame->lossy_utilization);
+ EXPECT_LE(0, encoded_frame->encoder_bitrate);
+ ASSERT_TRUE(std::isfinite(encoded_frame->lossiness));
+ EXPECT_LE(0.0, encoded_frame->lossiness);
}
}
}
diff --git a/chromium/media/cast/sender/vpx_encoder.cc b/chromium/media/cast/encoding/vpx_encoder.cc
index f58f027d3dd..a03015c2ad8 100644
--- a/chromium/media/cast/sender/vpx_encoder.cc
+++ b/chromium/media/cast/encoding/vpx_encoder.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/vpx_encoder.h"
+#include "media/cast/encoding/vpx_encoder.h"
#include "base/logging.h"
#include "media/base/video_frame.h"
+#include "media/cast/common/sender_encoded_frame.h"
#include "media/cast/constants.h"
#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
@@ -317,6 +318,7 @@ void VpxEncoder::Encode(scoped_refptr<media::VideoFrame> video_frame,
// used as the lossy utilization.
const double actual_bitrate =
encoded_frame->data.size() * 8.0 / predicted_frame_duration.InSecondsF();
+ encoded_frame->encoder_bitrate = actual_bitrate;
const double target_bitrate = 1000.0 * config_.rc_target_bitrate;
DCHECK_GT(target_bitrate, 0.0);
const double bitrate_utilization = actual_bitrate / target_bitrate;
@@ -327,12 +329,12 @@ void VpxEncoder::Encode(scoped_refptr<media::VideoFrame> video_frame,
// Side note: If it was possible for the encoder to encode within the target
// number of bytes, the |perfect_quantizer| will be in the range [0.0,63.0].
// If it was never possible, the value will be greater than 63.0.
- encoded_frame->lossy_utilization = perfect_quantizer / 63.0;
+ encoded_frame->lossiness = perfect_quantizer / 63.0;
DVLOG(2) << "VPX encoded frame_id " << encoded_frame->frame_id
<< ", sized: " << encoded_frame->data.size()
<< ", encoder_utilization: " << encoded_frame->encoder_utilization
- << ", lossy_utilization: " << encoded_frame->lossy_utilization
+ << ", lossiness: " << encoded_frame->lossiness
<< " (quantizer chosen by the encoder was " << quantizer << ')';
if (encoded_frame->dependency == EncodedFrame::KEY) {
diff --git a/chromium/media/cast/sender/vpx_encoder.h b/chromium/media/cast/encoding/vpx_encoder.h
index de17fed94af..514c1bdd996 100644
--- a/chromium/media/cast/sender/vpx_encoder.h
+++ b/chromium/media/cast/encoding/vpx_encoder.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_VPX_ENCODER_H_
-#define MEDIA_CAST_SENDER_VPX_ENCODER_H_
+#ifndef MEDIA_CAST_ENCODING_VPX_ENCODER_H_
+#define MEDIA_CAST_ENCODING_VPX_ENCODER_H_
#include <stdint.h>
@@ -11,7 +11,8 @@
#include "base/time/time.h"
#include "media/base/feedback_signal_accumulator.h"
#include "media/cast/cast_config.h"
-#include "media/cast/sender/software_video_encoder.h"
+#include "media/cast/common/frame_id.h"
+#include "media/cast/encoding/software_video_encoder.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
#include "ui/gfx/geometry/size.h"
@@ -90,4 +91,4 @@ class VpxEncoder final : public SoftwareVideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_VPX_ENCODER_H_
+#endif // MEDIA_CAST_ENCODING_VPX_ENCODER_H_
diff --git a/chromium/media/cast/sender/vpx_quantizer_parser.cc b/chromium/media/cast/encoding/vpx_quantizer_parser.cc
index f14ca7092ed..8ace8b1a1e4 100644
--- a/chromium/media/cast/sender/vpx_quantizer_parser.cc
+++ b/chromium/media/cast/encoding/vpx_quantizer_parser.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/sender/vpx_quantizer_parser.h"
+#include "media/cast/encoding/vpx_quantizer_parser.h"
#include "base/logging.h"
namespace media {
diff --git a/chromium/media/cast/sender/vpx_quantizer_parser.h b/chromium/media/cast/encoding/vpx_quantizer_parser.h
index 4249c0b495e..901f758c98a 100644
--- a/chromium/media/cast/sender/vpx_quantizer_parser.h
+++ b/chromium/media/cast/encoding/vpx_quantizer_parser.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_SENDER_VPX_QUANTIZER_PARSER_H_
-#define MEDIA_CAST_SENDER_VPX_QUANTIZER_PARSER_H_
+#ifndef MEDIA_CAST_ENCODING_VPX_QUANTIZER_PARSER_H_
+#define MEDIA_CAST_ENCODING_VPX_QUANTIZER_PARSER_H_
#include <stddef.h>
#include <stdint.h>
@@ -20,4 +20,4 @@ int ParseVpxHeaderQuantizer(const uint8_t* data, size_t size);
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_SENDER_VPX_QUANTIZER_PARSER_H_
+#endif // MEDIA_CAST_ENCODING_VPX_QUANTIZER_PARSER_H_
diff --git a/chromium/media/cast/sender/vpx_quantizer_parser_unittest.cc b/chromium/media/cast/encoding/vpx_quantizer_parser_unittest.cc
index cf0f062f52a..b154e455b3f 100644
--- a/chromium/media/cast/sender/vpx_quantizer_parser_unittest.cc
+++ b/chromium/media/cast/encoding/vpx_quantizer_parser_unittest.cc
@@ -9,9 +9,9 @@
#include "base/time/time.h"
#include "media/cast/cast_config.h"
-#include "media/cast/sender/sender_encoded_frame.h"
-#include "media/cast/sender/vpx_encoder.h"
-#include "media/cast/sender/vpx_quantizer_parser.h"
+#include "media/cast/common/sender_encoded_frame.h"
+#include "media/cast/encoding/vpx_encoder.h"
+#include "media/cast/encoding/vpx_quantizer_parser.h"
#include "media/cast/test/receiver/video_decoder.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/video_utility.h"
diff --git a/chromium/media/cast/logging/logging_defines.h b/chromium/media/cast/logging/logging_defines.h
index c3a668cfa67..aa73cfd6bdf 100644
--- a/chromium/media/cast/logging/logging_defines.h
+++ b/chromium/media/cast/logging/logging_defines.h
@@ -84,7 +84,7 @@ struct FrameEvent {
// encoded. Only set for video FRAME_ENCODED event.
int target_bitrate;
- // Encoding performance metrics. See media/cast/sender/sender_encoded_frame.h
+ // Encoding performance metrics. See media/cast/common/sender_encoded_frame.h
// for a description of these values.
double encoder_cpu_utilization;
double idealized_bitrate_utilization;
diff --git a/chromium/media/cast/net/cast_transport.h b/chromium/media/cast/net/cast_transport.h
index 6dae0c85859..9ee9e8b7ede 100644
--- a/chromium/media/cast/net/cast_transport.h
+++ b/chromium/media/cast/net/cast_transport.h
@@ -33,13 +33,10 @@
#include "media/cast/net/rtcp/rtcp_defines.h"
#include "net/base/ip_endpoint.h"
-namespace base {
-class DictionaryValue;
-} // namespace base
-
namespace media {
namespace cast {
+struct EncodedFrame;
struct RtcpTimeData;
// Following the initialization of either audio or video an initialization
@@ -158,7 +155,7 @@ class CastTransport {
virtual void SendRtcpFromRtpReceiver() = 0;
// Set options for the PacedSender and Wifi.
- virtual void SetOptions(const base::DictionaryValue& options) = 0;
+ virtual void SetOptions(const base::Value::Dict& options) = 0;
};
} // namespace cast
diff --git a/chromium/media/cast/net/cast_transport_config.cc b/chromium/media/cast/net/cast_transport_config.cc
index c9602d0b046..6351977d47f 100644
--- a/chromium/media/cast/net/cast_transport_config.cc
+++ b/chromium/media/cast/net/cast_transport_config.cc
@@ -15,22 +15,6 @@ CastTransportRtpConfig::CastTransportRtpConfig()
CastTransportRtpConfig::~CastTransportRtpConfig() = default;
-EncodedFrame::EncodedFrame()
- : dependency(UNKNOWN_DEPENDENCY),
- new_playout_delay_ms(0) {}
-
-EncodedFrame::~EncodedFrame() = default;
-
-void EncodedFrame::CopyMetadataTo(EncodedFrame* dest) const {
- DCHECK(dest);
- dest->dependency = this->dependency;
- dest->frame_id = this->frame_id;
- dest->referenced_frame_id = this->referenced_frame_id;
- dest->rtp_timestamp = this->rtp_timestamp;
- dest->reference_time = this->reference_time;
- dest->new_playout_delay_ms = this->new_playout_delay_ms;
-}
-
RtcpSenderInfo::RtcpSenderInfo()
: ntp_seconds(0),
ntp_fraction(0),
diff --git a/chromium/media/cast/net/cast_transport_config.h b/chromium/media/cast/net/cast_transport_config.h
index 7b86cc8f991..f8907caa7d0 100644
--- a/chromium/media/cast/net/cast_transport_config.h
+++ b/chromium/media/cast/net/cast_transport_config.h
@@ -43,78 +43,6 @@ struct CastTransportRtpConfig {
std::string aes_iv_mask;
};
-// A combination of metadata and data for one encoded frame. This can contain
-// audio data or video data or other.
-struct EncodedFrame {
- enum Dependency {
- // "null" value, used to indicate whether |dependency| has been set.
- UNKNOWN_DEPENDENCY,
-
- // Not decodable without the reference frame indicated by
- // |referenced_frame_id|.
- DEPENDENT,
-
- // Independently decodable.
- INDEPENDENT,
-
- // Independently decodable, and no future frames will depend on any frames
- // before this one.
- KEY,
-
- DEPENDENCY_LAST = KEY
- };
-
- EncodedFrame();
- virtual ~EncodedFrame();
-
- // Convenience accessors to data as an array of uint8_t elements.
- const uint8_t* bytes() const {
- return reinterpret_cast<const uint8_t*>(std::data(data));
- }
- uint8_t* mutable_bytes() {
- return reinterpret_cast<uint8_t*>(std::data(data));
- }
-
- // Copies all data members except |data| to |dest|.
- // Does not modify |dest->data|.
- void CopyMetadataTo(EncodedFrame* dest) const;
-
- // This frame's dependency relationship with respect to other frames.
- Dependency dependency;
-
- // The label associated with this frame. Implies an ordering relative to
- // other frames in the same stream.
- FrameId frame_id;
-
- // The label associated with the frame upon which this frame depends. If
- // this frame does not require any other frame in order to become decodable
- // (e.g., key frames), |referenced_frame_id| must equal |frame_id|.
- FrameId referenced_frame_id;
-
- // The stream timestamp, on the timeline of the signal data. For example, RTP
- // timestamps for audio are usually defined as the total number of audio
- // samples encoded in all prior frames. A playback system uses this value to
- // detect gaps in the stream, and otherwise stretch the signal to match
- // playout targets.
- RtpTimeTicks rtp_timestamp;
-
- // The common reference clock timestamp for this frame. This value originates
- // from a sender and is used to provide lip synchronization between streams in
- // a receiver. Thus, in the sender context, this is set to the time at which
- // the frame was captured/recorded. In the receiver context, this is set to
- // the target playout time. Over a sequence of frames, this time value is
- // expected to drift with respect to the elapsed time implied by the RTP
- // timestamps; and it may not necessarily increment with precise regularity.
- base::TimeTicks reference_time;
-
- // Playout delay for this and all future frames. Used by the Adaptive
- // Playout delay extension. Zero means no change.
- uint16_t new_playout_delay_ms;
-
- // The encoded signal data.
- std::string data;
-};
-
using PacketReceiverCallback =
base::RepeatingCallback<void(std::unique_ptr<Packet> packet)>;
using PacketReceiverCallbackWithStatus =
diff --git a/chromium/media/cast/net/cast_transport_impl.cc b/chromium/media/cast/net/cast_transport_impl.cc
index e2bfc33b4a8..ef2365e63e8 100644
--- a/chromium/media/cast/net/cast_transport_impl.cc
+++ b/chromium/media/cast/net/cast_transport_impl.cc
@@ -16,6 +16,7 @@
#include "base/memory/raw_ptr.h"
#include "base/task/single_thread_task_runner.h"
#include "build/build_config.h"
+#include "media/cast/common/encoded_frame.h"
#include "media/cast/net/cast_transport_defines.h"
#include "media/cast/net/rtcp/sender_rtcp_session.h"
#include "media/cast/net/transport_util.h"
@@ -417,7 +418,7 @@ void CastTransportImpl::AddValidRtpReceiver(uint32_t rtp_sender_ssrc,
valid_rtp_receiver_ssrcs_.insert(rtp_receiver_ssrc);
}
-void CastTransportImpl::SetOptions(const base::DictionaryValue& options) {
+void CastTransportImpl::SetOptions(const base::Value::Dict& options) {
// Set PacedSender options.
int burst_size = LookupOptionWithDefault(options, kOptionPacerTargetBurstSize,
media::cast::kTargetBurstSize);
@@ -430,10 +431,10 @@ void CastTransportImpl::SetOptions(const base::DictionaryValue& options) {
// Set Wifi options.
int wifi_options = 0;
- if (options.FindKey(kOptionWifiDisableScan)) {
+ if (options.contains(kOptionWifiDisableScan)) {
wifi_options |= net::WIFI_OPTIONS_DISABLE_SCAN;
}
- if (options.FindKey(kOptionWifiMediaStreamingMode)) {
+ if (options.contains(kOptionWifiMediaStreamingMode)) {
wifi_options |= net::WIFI_OPTIONS_MEDIA_STREAMING_MODE;
}
if (wifi_options)
diff --git a/chromium/media/cast/net/cast_transport_impl.h b/chromium/media/cast/net/cast_transport_impl.h
index 4be0ac390b2..60531d837c8 100644
--- a/chromium/media/cast/net/cast_transport_impl.h
+++ b/chromium/media/cast/net/cast_transport_impl.h
@@ -93,7 +93,7 @@ class CastTransportImpl final : public CastTransport {
// "media_streaming_mode" (value ignored)
// - Turn media streaming mode on.
// Note, these options may be ignored on some platforms.
- void SetOptions(const base::DictionaryValue& options) final;
+ void SetOptions(const base::Value::Dict& options) final;
// CastTransport implementation for receiving.
void AddValidRtpReceiver(uint32_t rtp_sender_ssrc,
diff --git a/chromium/media/cast/net/cast_transport_impl_unittest.cc b/chromium/media/cast/net/cast_transport_impl_unittest.cc
index ddda62b8f46..7b02748a637 100644
--- a/chromium/media/cast/net/cast_transport_impl_unittest.cc
+++ b/chromium/media/cast/net/cast_transport_impl_unittest.cc
@@ -18,6 +18,7 @@
#include "base/test/simple_test_tick_clock.h"
#include "base/values.h"
#include "media/base/fake_single_thread_task_runner.h"
+#include "media/cast/common/encoded_frame.h"
#include "media/cast/net/cast_transport_config.h"
#include "media/cast/net/rtcp/rtcp_defines.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -164,17 +165,17 @@ void CastTransportImplTest::InitWithoutLogging() {
}
void CastTransportImplTest::InitWithOptions() {
- std::unique_ptr<base::DictionaryValue> options(new base::DictionaryValue);
- options->SetBoolean("disable_wifi_scan", true);
- options->SetBoolean("media_streaming_mode", true);
- options->SetInteger("pacer_target_burst_size", 20);
- options->SetInteger("pacer_max_burst_size", 100);
+ base::Value::Dict options;
+ options.Set("disable_wifi_scan", true);
+ options.Set("media_streaming_mode", true);
+ options.Set("pacer_target_burst_size", 20);
+ options.Set("pacer_max_burst_size", 100);
transport_ = new FakePacketSender();
transport_sender_ = std::make_unique<CastTransportImpl>(
&testing_clock_, base::TimeDelta(),
std::make_unique<TransportClient>(nullptr),
base::WrapUnique(transport_.get()), task_runner_);
- transport_sender_->SetOptions(*options);
+ transport_sender_->SetOptions(options);
task_runner_->RunTasks();
}
diff --git a/chromium/media/cast/net/pacing/mock_paced_packet_sender.cc b/chromium/media/cast/net/pacing/mock_paced_packet_sender.cc
deleted file mode 100644
index 16e396fd3df..00000000000
--- a/chromium/media/cast/net/pacing/mock_paced_packet_sender.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/net/pacing/mock_paced_packet_sender.h"
-
-namespace media {
-namespace cast {
-
-MockPacedPacketSender::MockPacedPacketSender() = default;
-
-MockPacedPacketSender::~MockPacedPacketSender() = default;
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/net/pacing/mock_paced_packet_sender.h b/chromium/media/cast/net/pacing/mock_paced_packet_sender.h
deleted file mode 100644
index 0193ce89e16..00000000000
--- a/chromium/media/cast/net/pacing/mock_paced_packet_sender.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
-#define MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
-
-#include "media/cast/net/pacing/paced_sender.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-class MockPacedPacketSender : public PacedPacketSender {
- public:
- MockPacedPacketSender();
- ~MockPacedPacketSender() override;
-
- MOCK_METHOD1(SendPackets, bool(const SendPacketVector& packets));
- MOCK_METHOD2(ResendPackets, bool(const SendPacketVector& packets,
- const DedupInfo& dedup_info));
- MOCK_METHOD2(SendRtcpPacket, bool(unsigned int ssrc, PacketRef packet));
- MOCK_METHOD1(CancelSendingPacket, void(const PacketKey& packet_key));
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
diff --git a/chromium/media/cast/net/rtcp/rtcp_builder_unittest.cc b/chromium/media/cast/net/rtcp/rtcp_builder_unittest.cc
index 51d668d00e2..3ec935cf2b3 100644
--- a/chromium/media/cast/net/rtcp/rtcp_builder_unittest.cc
+++ b/chromium/media/cast/net/rtcp/rtcp_builder_unittest.cc
@@ -16,7 +16,7 @@
#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/net/rtcp/receiver_rtcp_event_subscriber.h"
#include "media/cast/net/rtcp/rtcp_utility.h"
-#include "media/cast/net/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/test/test_rtcp_packet_builder.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
diff --git a/chromium/media/cast/net/rtcp/rtcp_utility_unittest.cc b/chromium/media/cast/net/rtcp/rtcp_utility_unittest.cc
index ca1d3edccaf..fa186a57fe7 100644
--- a/chromium/media/cast/net/rtcp/rtcp_utility_unittest.cc
+++ b/chromium/media/cast/net/rtcp/rtcp_utility_unittest.cc
@@ -12,7 +12,7 @@
#include "media/base/fake_single_thread_task_runner.h"
#include "media/cast/cast_environment.h"
#include "media/cast/net/cast_transport_defines.h"
-#include "media/cast/net/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/test/test_rtcp_packet_builder.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/cast/net/rtcp/test_rtcp_packet_builder.cc b/chromium/media/cast/net/rtcp/test_rtcp_packet_builder.cc
deleted file mode 100644
index 57a020344c2..00000000000
--- a/chromium/media/cast/net/rtcp/test_rtcp_packet_builder.cc
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/net/rtcp/test_rtcp_packet_builder.h"
-
-#include <memory>
-
-#include "base/check_op.h"
-#include "media/cast/net/rtcp/rtcp_utility.h"
-
-namespace media {
-namespace cast {
-
-TestRtcpPacketBuilder::TestRtcpPacketBuilder()
- : ptr_of_length_(nullptr),
- big_endian_writer_(reinterpret_cast<char*>(buffer_), kMaxIpPacketSize),
- big_endian_reader_(nullptr, 0) {}
-
-void TestRtcpPacketBuilder::AddSr(uint32_t remote_ssrc,
- int number_of_report_blocks) {
- AddRtcpHeader(200, number_of_report_blocks);
- big_endian_writer_.WriteU32(remote_ssrc);
- big_endian_writer_.WriteU32(kNtpHigh); // NTP timestamp.
- big_endian_writer_.WriteU32(kNtpLow);
- big_endian_writer_.WriteU32(kRtpTimestamp);
- big_endian_writer_.WriteU32(kSendPacketCount);
- big_endian_writer_.WriteU32(kSendOctetCount);
-}
-
-void TestRtcpPacketBuilder::AddSrWithNtp(uint32_t remote_ssrc,
- uint32_t ntp_high,
- uint32_t ntp_low,
- uint32_t rtp_timestamp) {
- AddRtcpHeader(200, 0);
- big_endian_writer_.WriteU32(remote_ssrc);
- big_endian_writer_.WriteU32(ntp_high);
- big_endian_writer_.WriteU32(ntp_low);
- big_endian_writer_.WriteU32(rtp_timestamp);
- big_endian_writer_.WriteU32(kSendPacketCount);
- big_endian_writer_.WriteU32(kSendOctetCount);
-}
-
-void TestRtcpPacketBuilder::AddRr(uint32_t remote_ssrc,
- int number_of_report_blocks) {
- AddRtcpHeader(201, number_of_report_blocks);
- big_endian_writer_.WriteU32(remote_ssrc);
-}
-
-void TestRtcpPacketBuilder::AddRb(uint32_t local_ssrc) {
- big_endian_writer_.WriteU32(local_ssrc);
- big_endian_writer_.WriteU32(kLoss);
- big_endian_writer_.WriteU32(kExtendedMax);
- big_endian_writer_.WriteU32(kTestJitter);
- big_endian_writer_.WriteU32(kLastSr);
- big_endian_writer_.WriteU32(kDelayLastSr);
-}
-
-void TestRtcpPacketBuilder::AddXrHeader(uint32_t remote_ssrc) {
- AddRtcpHeader(207, 0);
- big_endian_writer_.WriteU32(remote_ssrc);
-}
-
-void TestRtcpPacketBuilder::AddXrUnknownBlock() {
- big_endian_writer_.WriteU8(9); // Block type.
- big_endian_writer_.WriteU8(0); // Reserved.
- big_endian_writer_.WriteU16(4); // Block length.
- // First receiver same as sender of this report.
- big_endian_writer_.WriteU32(0);
- big_endian_writer_.WriteU32(0);
- big_endian_writer_.WriteU32(0);
- big_endian_writer_.WriteU32(0);
-}
-
-void TestRtcpPacketBuilder::AddUnknownBlock() {
- AddRtcpHeader(99, 0);
- big_endian_writer_.WriteU32(42);
- big_endian_writer_.WriteU32(42);
- big_endian_writer_.WriteU32(42);
-}
-
-void TestRtcpPacketBuilder::AddXrDlrrBlock(uint32_t remote_ssrc) {
- big_endian_writer_.WriteU8(5); // Block type.
- big_endian_writer_.WriteU8(0); // Reserved.
- big_endian_writer_.WriteU16(3); // Block length.
-
- // First receiver same as sender of this report.
- big_endian_writer_.WriteU32(remote_ssrc);
- big_endian_writer_.WriteU32(kLastRr);
- big_endian_writer_.WriteU32(kDelayLastRr);
-}
-
-void TestRtcpPacketBuilder::AddXrExtendedDlrrBlock(uint32_t remote_ssrc) {
- big_endian_writer_.WriteU8(5); // Block type.
- big_endian_writer_.WriteU8(0); // Reserved.
- big_endian_writer_.WriteU16(9); // Block length.
- big_endian_writer_.WriteU32(0xaaaaaaaa);
- big_endian_writer_.WriteU32(0xaaaaaaaa);
- big_endian_writer_.WriteU32(0xaaaaaaaa);
-
- // First receiver same as sender of this report.
- big_endian_writer_.WriteU32(remote_ssrc);
- big_endian_writer_.WriteU32(kLastRr);
- big_endian_writer_.WriteU32(kDelayLastRr);
- big_endian_writer_.WriteU32(0xbbbbbbbb);
- big_endian_writer_.WriteU32(0xbbbbbbbb);
- big_endian_writer_.WriteU32(0xbbbbbbbb);
-}
-
-void TestRtcpPacketBuilder::AddXrRrtrBlock() {
- big_endian_writer_.WriteU8(4); // Block type.
- big_endian_writer_.WriteU8(0); // Reserved.
- big_endian_writer_.WriteU16(2); // Block length.
- big_endian_writer_.WriteU32(kNtpHigh);
- big_endian_writer_.WriteU32(kNtpLow);
-}
-
-void TestRtcpPacketBuilder::AddNack(uint32_t remote_ssrc, uint32_t local_ssrc) {
- AddRtcpHeader(205, 1);
- big_endian_writer_.WriteU32(remote_ssrc);
- big_endian_writer_.WriteU32(local_ssrc);
- big_endian_writer_.WriteU16(kMissingPacket);
- big_endian_writer_.WriteU16(0);
-}
-
-void TestRtcpPacketBuilder::AddSendReportRequest(uint32_t remote_ssrc,
- uint32_t local_ssrc) {
- AddRtcpHeader(205, 5);
- big_endian_writer_.WriteU32(remote_ssrc);
- big_endian_writer_.WriteU32(local_ssrc);
-}
-
-void TestRtcpPacketBuilder::AddCast(uint32_t remote_ssrc,
- uint32_t local_ssrc,
- base::TimeDelta target_delay) {
- AddRtcpHeader(206, 15);
- big_endian_writer_.WriteU32(remote_ssrc);
- big_endian_writer_.WriteU32(local_ssrc);
- big_endian_writer_.WriteU8('C');
- big_endian_writer_.WriteU8('A');
- big_endian_writer_.WriteU8('S');
- big_endian_writer_.WriteU8('T');
- big_endian_writer_.WriteU8(kAckFrameId);
- big_endian_writer_.WriteU8(3); // Loss fields.
- big_endian_writer_.WriteU16(target_delay.InMilliseconds());
- big_endian_writer_.WriteU8(kLostFrameId);
- big_endian_writer_.WriteU16(kRtcpCastAllPacketsLost);
- big_endian_writer_.WriteU8(0); // Lost packet id mask.
- big_endian_writer_.WriteU8(kFrameIdWithLostPackets);
- big_endian_writer_.WriteU16(kLostPacketId1);
- big_endian_writer_.WriteU8(0x2); // Lost packet id mask.
- big_endian_writer_.WriteU8(kFrameIdWithLostPackets);
- big_endian_writer_.WriteU16(kLostPacketId3);
- big_endian_writer_.WriteU8(0); // Lost packet id mask.
-}
-
-void TestRtcpPacketBuilder::AddCst2(
- const std::vector<FrameId>& later_received_frames) {
- big_endian_writer_.WriteU8('C');
- big_endian_writer_.WriteU8('S');
- big_endian_writer_.WriteU8('T');
- big_endian_writer_.WriteU8('2');
- big_endian_writer_.WriteU8(kFeedbackSeq);
-
- std::vector<uint8_t> ack_bitmasks;
- for (FrameId ack_frame : later_received_frames) {
- const int64_t bit_index = ack_frame - (FrameId::first() + kAckFrameId) - 2;
- CHECK_LE(INT64_C(0), bit_index);
- const size_t index = static_cast<size_t>(bit_index) / 8;
- const size_t bit_index_within_byte = static_cast<size_t>(bit_index) % 8;
- if (index >= ack_bitmasks.size())
- ack_bitmasks.resize(index + 1);
- ack_bitmasks[index] |= 1 << bit_index_within_byte;
- }
-
- CHECK_LT(ack_bitmasks.size(), 256u);
- big_endian_writer_.WriteU8(ack_bitmasks.size());
- for (uint8_t ack_bits : ack_bitmasks)
- big_endian_writer_.WriteU8(ack_bits);
-
- // Pad to ensure the extra CST2 data chunk is 32-bit aligned.
- for (size_t num_bytes_written = 6 + ack_bitmasks.size();
- num_bytes_written % 4; ++num_bytes_written) {
- big_endian_writer_.WriteU8(0);
- }
-}
-
-void TestRtcpPacketBuilder::AddErrorCst2() {
- big_endian_writer_.WriteU8('C');
- big_endian_writer_.WriteU8('A');
- big_endian_writer_.WriteU8('S');
- big_endian_writer_.WriteU8('T');
- big_endian_writer_.WriteU8(kFeedbackSeq);
- big_endian_writer_.WriteU8(0);
- big_endian_writer_.WriteU8(0);
- big_endian_writer_.WriteU8(0);
-}
-
-void TestRtcpPacketBuilder::AddPli(uint32_t remote_ssrc, uint32_t local_ssrc) {
- AddRtcpHeader(206, 1);
- big_endian_writer_.WriteU32(remote_ssrc);
- big_endian_writer_.WriteU32(local_ssrc);
-}
-
-void TestRtcpPacketBuilder::AddReceiverLog(uint32_t remote_ssrc) {
- AddRtcpHeader(204, 2);
- big_endian_writer_.WriteU32(remote_ssrc);
- big_endian_writer_.WriteU8('C');
- big_endian_writer_.WriteU8('A');
- big_endian_writer_.WriteU8('S');
- big_endian_writer_.WriteU8('T');
-}
-
-void TestRtcpPacketBuilder::AddReceiverFrameLog(uint32_t rtp_timestamp,
- int num_events,
- uint32_t event_timesamp_base) {
- big_endian_writer_.WriteU32(rtp_timestamp);
- big_endian_writer_.WriteU8(static_cast<uint8_t>(num_events - 1));
- big_endian_writer_.WriteU8(static_cast<uint8_t>(event_timesamp_base >> 16));
- big_endian_writer_.WriteU8(static_cast<uint8_t>(event_timesamp_base >> 8));
- big_endian_writer_.WriteU8(static_cast<uint8_t>(event_timesamp_base));
-}
-
-void TestRtcpPacketBuilder::AddReceiverEventLog(uint16_t event_data,
- CastLoggingEvent event,
- uint16_t event_timesamp_delta) {
- big_endian_writer_.WriteU16(event_data);
- uint8_t event_id = ConvertEventTypeToWireFormat(event);
- uint16_t type_and_delta = static_cast<uint16_t>(event_id) << 12;
- type_and_delta += event_timesamp_delta & 0x0fff;
- big_endian_writer_.WriteU16(type_and_delta);
-}
-
-std::unique_ptr<media::cast::Packet> TestRtcpPacketBuilder::GetPacket() {
- PatchLengthField();
- return std::make_unique<media::cast::Packet>(buffer_, buffer_ + Length());
-}
-
-const uint8_t* TestRtcpPacketBuilder::Data() {
- PatchLengthField();
- return buffer_;
-}
-
-base::BigEndianReader* TestRtcpPacketBuilder::Reader() {
- big_endian_reader_ = base::BigEndianReader(Data(), Length());
- return &big_endian_reader_;
-}
-
-void TestRtcpPacketBuilder::PatchLengthField() {
- if (ptr_of_length_) {
- // Back-patch the packet length. The client must have taken
- // care of proper padding to 32-bit words.
- int this_packet_length = (big_endian_writer_.ptr() - ptr_of_length_ - 2);
- DCHECK_EQ(0, this_packet_length % 4)
- << "Packets must be a multiple of 32 bits long";
- *ptr_of_length_ = this_packet_length >> 10;
- *(ptr_of_length_ + 1) = (this_packet_length >> 2) & 0xFF;
- ptr_of_length_ = nullptr;
- }
-}
-
-// Set the 5-bit value in the 1st byte of the header
-// and the payload type. Set aside room for the length field,
-// and make provision for back-patching it.
-void TestRtcpPacketBuilder::AddRtcpHeader(int payload, int format_or_count) {
- PatchLengthField();
- big_endian_writer_.WriteU8(0x80 | (format_or_count & 0x1F));
- big_endian_writer_.WriteU8(payload);
- ptr_of_length_ = big_endian_writer_.ptr();
-
- // Initialize length to "clearly illegal".
- big_endian_writer_.WriteU16(0xDEAD);
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/net/rtcp/test_rtcp_packet_builder.h b/chromium/media/cast/net/rtcp/test_rtcp_packet_builder.h
deleted file mode 100644
index 031f341d12c..00000000000
--- a/chromium/media/cast/net/rtcp/test_rtcp_packet_builder.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// A very simple packet builder class for building RTCP packets.
-// Used for testing only.
-#ifndef MEDIA_CAST_NET_RTCP_TEST_RTCP_PACKET_BUILDER_H_
-#define MEDIA_CAST_NET_RTCP_TEST_RTCP_PACKET_BUILDER_H_
-
-#include <stdint.h>
-#include <vector>
-
-#include "base/big_endian.h"
-#include "base/memory/raw_ptr.h"
-#include "media/cast/net/cast_transport_defines.h"
-#include "media/cast/net/rtcp/rtcp_defines.h"
-
-namespace media {
-namespace cast {
-
-// These values are arbitrary only for the purpose of testing.
-
-namespace {
-// Sender report.
-static const uint32_t kNtpHigh = 0x01020304;
-static const uint32_t kNtpLow = 0x05060708;
-static const uint32_t kRtpTimestamp = 0x10203040;
-static const uint32_t kSendPacketCount = 987;
-static const uint32_t kSendOctetCount = 87654;
-
-// Report block.
-static const int kLoss = 0x01000123;
-static const int kExtendedMax = 0x15678;
-static const int kTestJitter = 0x10203;
-static const uint32_t kLastSr = 0x34561234;
-static const uint32_t kDelayLastSr = 1000;
-
-// DLRR block.
-static const int kLastRr = 0x34561234;
-static const int kDelayLastRr = 1000;
-
-// NACK.
-static const int kMissingPacket = 34567;
-
-// CAST.
-static const uint32_t kAckFrameId = 17;
-static const uint32_t kLostFrameId = 18;
-static const uint32_t kFrameIdWithLostPackets = 19;
-static const int kLostPacketId1 = 3;
-static const int kLostPacketId2 = 5;
-static const int kLostPacketId3 = 12;
-static const uint8_t kFeedbackSeq = 1;
-} // namespace
-
-class TestRtcpPacketBuilder {
- public:
- TestRtcpPacketBuilder();
-
- TestRtcpPacketBuilder(const TestRtcpPacketBuilder&) = delete;
- TestRtcpPacketBuilder& operator=(const TestRtcpPacketBuilder&) = delete;
-
- void AddSr(uint32_t remote_ssrc, int number_of_report_blocks);
- void AddSrWithNtp(uint32_t remote_ssrc,
- uint32_t ntp_high,
- uint32_t ntp_low,
- uint32_t rtp_timestamp);
- void AddRr(uint32_t remote_ssrc, int number_of_report_blocks);
- void AddRb(uint32_t rtp_ssrc);
-
- void AddXrHeader(uint32_t remote_ssrc);
- void AddXrDlrrBlock(uint32_t remote_ssrc);
- void AddXrExtendedDlrrBlock(uint32_t remote_ssrc);
- void AddXrRrtrBlock();
- void AddXrUnknownBlock();
- void AddUnknownBlock();
-
- void AddNack(uint32_t remote_ssrc, uint32_t local_ssrc);
- void AddSendReportRequest(uint32_t remote_ssrc, uint32_t local_ssrc);
-
- void AddCast(uint32_t remote_ssrc,
- uint32_t local_ssrc,
- base::TimeDelta target_delay);
- void AddCst2(const std::vector<FrameId>& later_received_frames);
- void AddErrorCst2(); // With wrong identifier.
- void AddPli(uint32_t remote_ssrc, uint32_t local_ssrc);
-
- void AddReceiverLog(uint32_t remote_ssrc);
- void AddReceiverFrameLog(uint32_t rtp_timestamp,
- int num_events,
- uint32_t event_timesamp_base);
- void AddReceiverEventLog(uint16_t event_data,
- CastLoggingEvent event,
- uint16_t event_timesamp_delta);
-
- std::unique_ptr<Packet> GetPacket();
- const uint8_t* Data();
- int Length() { return kMaxIpPacketSize - big_endian_writer_.remaining(); }
- base::BigEndianReader* Reader();
-
- private:
- void AddRtcpHeader(int payload, int format_or_count);
- void PatchLengthField();
-
- // Where the length field of the current packet is.
- // Note: 0 is not a legal value, it is used for "uninitialized".
- uint8_t buffer_[kMaxIpPacketSize];
- raw_ptr<char> ptr_of_length_;
- base::BigEndianWriter big_endian_writer_;
- base::BigEndianReader big_endian_reader_;
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_NET_RTCP_TEST_RTCP_PACKET_BUILDER_H_
diff --git a/chromium/media/cast/net/rtp/mock_rtp_feedback.h b/chromium/media/cast/net/rtp/mock_rtp_feedback.h
deleted file mode 100644
index 0bb21b8b41f..00000000000
--- a/chromium/media/cast/net/rtp/mock_rtp_feedback.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_NET_RTP_MOCK_RTP_FEEDBACK_H_
-#define MEDIA_CAST_NET_RTP_MOCK_RTP_FEEDBACK_H_
-
-#include <stdint.h>
-
-#include "media/cast/net/rtp/rtp_parser/rtp_feedback.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-class MockRtpFeedback : public RtpFeedback {
- public:
- MOCK_METHOD4(OnInitializeDecoder,
- int32_t(const int8_t payloadType,
- const int frequency,
- const uint8_t channels,
- const uint32_t rate));
-
- MOCK_METHOD1(OnPacketTimeout, void(const int32_t id));
- MOCK_METHOD2(OnReceivedPacket,
- void(const int32_t id, const RtpRtcpPacketField packet_type));
- MOCK_METHOD2(OnPeriodicDeadOrAlive,
- void(const int32_t id, const RTPAliveType alive));
- MOCK_METHOD2(OnIncomingSSRCChanged,
- void(const int32_t id, const uint32_t ssrc));
- MOCK_METHOD3(OnIncomingCSRCChanged,
- void(const int32_t id, const uint32_t csrc, const bool added));
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_NET_RTP_MOCK_RTP_FEEDBACK_H_
diff --git a/chromium/media/cast/net/rtp/mock_rtp_payload_feedback.cc b/chromium/media/cast/net/rtp/mock_rtp_payload_feedback.cc
deleted file mode 100644
index fc87b1cf402..00000000000
--- a/chromium/media/cast/net/rtp/mock_rtp_payload_feedback.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/net/rtp/mock_rtp_payload_feedback.h"
-
-namespace media {
-namespace cast {
-
-MockRtpPayloadFeedback::MockRtpPayloadFeedback() = default;
-
-MockRtpPayloadFeedback::~MockRtpPayloadFeedback() = default;
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/net/rtp/mock_rtp_payload_feedback.h b/chromium/media/cast/net/rtp/mock_rtp_payload_feedback.h
deleted file mode 100644
index 90c09437ecc..00000000000
--- a/chromium/media/cast/net/rtp/mock_rtp_payload_feedback.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_NET_RTP_MOCK_RTP_PAYLOAD_FEEDBACK_H_
-#define MEDIA_CAST_NET_RTP_MOCK_RTP_PAYLOAD_FEEDBACK_H_
-
-#include "media/cast/net/rtp/rtp_defines.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-
-class MockRtpPayloadFeedback : public RtpPayloadFeedback {
- public:
- MockRtpPayloadFeedback();
- ~MockRtpPayloadFeedback() override;
-
- MOCK_METHOD1(CastFeedback, void(const RtcpCastMessage& cast_feedback));
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_NET_RTP_MOCK_RTP_PAYLOAD_FEEDBACK_H_
diff --git a/chromium/media/cast/net/rtp/rtp_packet_builder.cc b/chromium/media/cast/net/rtp/rtp_packet_builder.cc
deleted file mode 100644
index c49c7189db7..00000000000
--- a/chromium/media/cast/net/rtp/rtp_packet_builder.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/cast/net/rtp/rtp_packet_builder.h"
-
-#include "base/big_endian.h"
-#include "base/check_op.h"
-
-namespace media {
-namespace cast {
-
-RtpPacketBuilder::RtpPacketBuilder()
- : is_key_(false),
- frame_id_(0),
- packet_id_(0),
- max_packet_id_(0),
- reference_frame_id_(0),
- timestamp_(0),
- sequence_number_(0),
- marker_(false),
- payload_type_(0),
- ssrc_(0) {}
-
-void RtpPacketBuilder::SetKeyFrame(bool is_key) { is_key_ = is_key; }
-
-void RtpPacketBuilder::SetFrameIds(uint32_t frame_id,
- uint32_t reference_frame_id) {
- frame_id_ = frame_id;
- reference_frame_id_ = reference_frame_id;
-}
-
-void RtpPacketBuilder::SetPacketId(uint16_t packet_id) {
- packet_id_ = packet_id;
-}
-
-void RtpPacketBuilder::SetMaxPacketId(uint16_t max_packet_id) {
- max_packet_id_ = max_packet_id;
-}
-
-void RtpPacketBuilder::SetTimestamp(uint32_t timestamp) {
- timestamp_ = timestamp;
-}
-
-void RtpPacketBuilder::SetSequenceNumber(uint16_t sequence_number) {
- sequence_number_ = sequence_number;
-}
-
-void RtpPacketBuilder::SetMarkerBit(bool marker) { marker_ = marker; }
-
-void RtpPacketBuilder::SetPayloadType(int payload_type) {
- payload_type_ = payload_type;
-}
-
-void RtpPacketBuilder::SetSsrc(uint32_t ssrc) {
- ssrc_ = ssrc;
-}
-
-void RtpPacketBuilder::BuildHeader(uint8_t* data, uint32_t data_length) {
- BuildCommonHeader(data, data_length);
- BuildCastHeader(data + kRtpHeaderLength, data_length - kRtpHeaderLength);
-}
-
-void RtpPacketBuilder::BuildCastHeader(uint8_t* data, uint32_t data_length) {
- // Build header.
- DCHECK_LE(kCastHeaderLength, data_length);
- // Set the first 7 bytes to 0.
- memset(data, 0, kCastHeaderLength);
- base::BigEndianWriter big_endian_writer(reinterpret_cast<char*>(data), 56);
- const bool includes_specific_frame_reference =
- (is_key_ && (reference_frame_id_ != frame_id_)) ||
- (!is_key_ && (reference_frame_id_ != (frame_id_ - 1)));
- big_endian_writer.WriteU8((is_key_ ? 0x80 : 0) |
- (includes_specific_frame_reference ? 0x40 : 0));
- big_endian_writer.WriteU8(frame_id_);
- big_endian_writer.WriteU16(packet_id_);
- big_endian_writer.WriteU16(max_packet_id_);
- if (includes_specific_frame_reference) {
- big_endian_writer.WriteU8(reference_frame_id_);
- }
-}
-
-void RtpPacketBuilder::BuildCommonHeader(uint8_t* data, uint32_t data_length) {
- DCHECK_LE(kRtpHeaderLength, data_length);
- base::BigEndianWriter big_endian_writer(reinterpret_cast<char*>(data), 96);
- big_endian_writer.WriteU8(0x80);
- big_endian_writer.WriteU8(payload_type_ | (marker_ ? kRtpMarkerBitMask : 0));
- big_endian_writer.WriteU16(sequence_number_);
- big_endian_writer.WriteU32(timestamp_);
- big_endian_writer.WriteU32(ssrc_);
-}
-
-} // namespace cast
-} // namespace media
diff --git a/chromium/media/cast/net/rtp/rtp_packet_builder.h b/chromium/media/cast/net/rtp/rtp_packet_builder.h
deleted file mode 100644
index 67821f1aa3a..00000000000
--- a/chromium/media/cast/net/rtp/rtp_packet_builder.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Test helper class that builds rtp packets.
-
-#ifndef MEDIA_CAST_NET_RTP_RTP_PACKET_BUILDER_H_
-#define MEDIA_CAST_NET_RTP_RTP_PACKET_BUILDER_H_
-
-#include <stdint.h>
-
-#include "media/cast/net/rtp/rtp_defines.h"
-
-namespace media {
-namespace cast {
-
-class RtpPacketBuilder {
- public:
- RtpPacketBuilder();
-
- RtpPacketBuilder(const RtpPacketBuilder&) = delete;
- RtpPacketBuilder& operator=(const RtpPacketBuilder&) = delete;
-
- void SetKeyFrame(bool is_key);
- void SetFrameIds(uint32_t frame_id, uint32_t reference_frame_id);
- void SetPacketId(uint16_t packet_id);
- void SetMaxPacketId(uint16_t max_packet_id);
- void SetTimestamp(uint32_t timestamp);
- void SetSequenceNumber(uint16_t sequence_number);
- void SetMarkerBit(bool marker);
- void SetPayloadType(int payload_type);
- void SetSsrc(uint32_t ssrc);
- void BuildHeader(uint8_t* data, uint32_t data_length);
-
- private:
- bool is_key_;
- uint32_t frame_id_;
- uint16_t packet_id_;
- uint16_t max_packet_id_;
- uint32_t reference_frame_id_;
- uint32_t timestamp_;
- uint16_t sequence_number_;
- bool marker_;
- int payload_type_;
- uint32_t ssrc_;
-
- void BuildCastHeader(uint8_t* data, uint32_t data_length);
- void BuildCommonHeader(uint8_t* data, uint32_t data_length);
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_NET_RTP_RTP_PACKET_BUILDER_H_
diff --git a/chromium/media/cast/net/rtp/rtp_packetizer.cc b/chromium/media/cast/net/rtp/rtp_packetizer.cc
index 40e3263d344..2f13bc0c792 100644
--- a/chromium/media/cast/net/rtp/rtp_packetizer.cc
+++ b/chromium/media/cast/net/rtp/rtp_packetizer.cc
@@ -9,6 +9,7 @@
#include "base/big_endian.h"
#include "base/check_op.h"
#include "base/logging.h"
+#include "media/cast/common/encoded_frame.h"
#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/net/rtp/rtp_defines.h"
diff --git a/chromium/media/cast/net/rtp/rtp_packetizer.h b/chromium/media/cast/net/rtp/rtp_packetizer.h
index 4480aeefad1..029442aec7a 100644
--- a/chromium/media/cast/net/rtp/rtp_packetizer.h
+++ b/chromium/media/cast/net/rtp/rtp_packetizer.h
@@ -17,6 +17,7 @@
namespace media {
namespace cast {
+struct EncodedFrame;
class PacedSender;
struct RtpPacketizerConfig {
diff --git a/chromium/media/cast/net/rtp/rtp_packetizer_unittest.cc b/chromium/media/cast/net/rtp/rtp_packetizer_unittest.cc
index 89a0bf150e0..ba947bd75b9 100644
--- a/chromium/media/cast/net/rtp/rtp_packetizer_unittest.cc
+++ b/chromium/media/cast/net/rtp/rtp_packetizer_unittest.cc
@@ -11,6 +11,7 @@
#include "base/test/simple_test_tick_clock.h"
#include "media/base/fake_single_thread_task_runner.h"
+#include "media/cast/common/encoded_frame.h"
#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/net/rtp/packet_storage.h"
#include "media/cast/net/rtp/rtp_parser.h"
diff --git a/chromium/media/cast/net/rtp/rtp_parser_unittest.cc b/chromium/media/cast/net/rtp/rtp_parser_unittest.cc
index a832e0ce545..ce3272b8aaa 100644
--- a/chromium/media/cast/net/rtp/rtp_parser_unittest.cc
+++ b/chromium/media/cast/net/rtp/rtp_parser_unittest.cc
@@ -11,7 +11,7 @@
#include "base/rand_util.h"
#include "media/cast/net/rtp/rtp_defines.h"
-#include "media/cast/net/rtp/rtp_packet_builder.h"
+#include "media/cast/test/rtp_packet_builder.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/cast/net/rtp/rtp_sender.cc b/chromium/media/cast/net/rtp/rtp_sender.cc
index 3f73c62d5fe..4a9fe1e98d5 100644
--- a/chromium/media/cast/net/rtp/rtp_sender.cc
+++ b/chromium/media/cast/net/rtp/rtp_sender.cc
@@ -9,6 +9,7 @@
#include "base/big_endian.h"
#include "base/logging.h"
#include "base/rand_util.h"
+#include "media/cast/common/encoded_frame.h"
#include "media/cast/constants.h"
namespace media {
diff --git a/chromium/media/cast/net/rtp/rtp_sender.h b/chromium/media/cast/net/rtp/rtp_sender.h
index 52a00dfab7c..65ce5e994d7 100644
--- a/chromium/media/cast/net/rtp/rtp_sender.h
+++ b/chromium/media/cast/net/rtp/rtp_sender.h
@@ -27,6 +27,8 @@
namespace media {
namespace cast {
+struct EncodedFrame;
+
// This object is only called from the main cast thread.
// This class handles splitting encoded audio and video frames into packets and
// add an RTP header to each packet. The sent packets are stored until they are
diff --git a/chromium/media/cast/net/transport_util.cc b/chromium/media/cast/net/transport_util.cc
index ecba38c243c..0662bf3915e 100644
--- a/chromium/media/cast/net/transport_util.cc
+++ b/chromium/media/cast/net/transport_util.cc
@@ -8,15 +8,10 @@ namespace media {
namespace cast {
namespace transport_util {
-int LookupOptionWithDefault(const base::DictionaryValue& options,
+int LookupOptionWithDefault(const base::Value::Dict& options,
const std::string& path,
int default_value) {
- int ret;
- if (options.GetInteger(path, &ret)) {
- return ret;
- } else {
- return default_value;
- }
+ return options.FindInt(path).value_or(default_value);
}
} // namespace transport_util
diff --git a/chromium/media/cast/net/transport_util.h b/chromium/media/cast/net/transport_util.h
index 7944386347b..b78591999f6 100644
--- a/chromium/media/cast/net/transport_util.h
+++ b/chromium/media/cast/net/transport_util.h
@@ -17,7 +17,7 @@ namespace transport_util {
const char kOptionPacerMaxBurstSize[] = "pacer_max_burst_size";
const char kOptionPacerTargetBurstSize[] = "pacer_target_burst_size";
-int LookupOptionWithDefault(const base::DictionaryValue& options,
+int LookupOptionWithDefault(const base::Value::Dict& options,
const std::string& path,
int default_value);
diff --git a/chromium/media/cast/net/udp_transport_impl.cc b/chromium/media/cast/net/udp_transport_impl.cc
index 1a1102eadcf..fdbe9b64825 100644
--- a/chromium/media/cast/net/udp_transport_impl.cc
+++ b/chromium/media/cast/net/udp_transport_impl.cc
@@ -38,7 +38,7 @@ bool IsEmpty(const net::IPEndPoint& addr) {
return (addr.address().empty() || addr.address().IsZero()) && !addr.port();
}
-int32_t GetTransportSendBufferSize(const base::DictionaryValue& options) {
+int32_t GetTransportSendBufferSize(const base::Value::Dict& options) {
// Socket send buffer size needs to be at least greater than one burst
// size.
int32_t max_burst_size =
@@ -330,15 +330,15 @@ void UdpTransportImpl::OnSent(const scoped_refptr<net::IOBuffer>& buf,
}
}
-void UdpTransportImpl::SetUdpOptions(const base::DictionaryValue& options) {
+void UdpTransportImpl::SetUdpOptions(const base::Value::Dict& options) {
SetSendBufferSize(GetTransportSendBufferSize(options));
- if (options.FindKey(kOptionDscp)) {
+ if (options.contains(kOptionDscp)) {
// The default DSCP value for cast is AF41. Which gives it a higher
// priority over other traffic.
SetDscp(net::DSCP_AF41);
}
#if BUILDFLAG(IS_WIN)
- if (!options.HasKey(kOptionDisableNonBlockingIO)) {
+ if (!options.contains(kOptionDisableNonBlockingIO)) {
UseNonBlockingIO();
}
#endif
diff --git a/chromium/media/cast/net/udp_transport_impl.h b/chromium/media/cast/net/udp_transport_impl.h
index 82f8d9b7b79..afefd29ee73 100644
--- a/chromium/media/cast/net/udp_transport_impl.h
+++ b/chromium/media/cast/net/udp_transport_impl.h
@@ -78,7 +78,7 @@ class UdpTransportImpl final : public PacketTransport, public UdpTransport {
// "disable_non_blocking_io" (value ignored)
// - Windows only. Turns off non-blocking IO for the socket.
// Note: Non-blocking IO is, by default, enabled on all platforms.
- void SetUdpOptions(const base::DictionaryValue& options);
+ void SetUdpOptions(const base::Value::Dict& options);
// This has to be called before |StartReceiving()| to change the
// |send_buffer_size_|. Calling |SetUdpOptions()| will automatically call it.
diff --git a/chromium/media/cast/sender/audio_sender.cc b/chromium/media/cast/sender/audio_sender.cc
index b4b379059a3..27bea443119 100644
--- a/chromium/media/cast/sender/audio_sender.cc
+++ b/chromium/media/cast/sender/audio_sender.cc
@@ -10,27 +10,27 @@
#include "base/check_op.h"
#include "base/notreached.h"
#include "media/cast/common/rtp_time.h"
+#include "media/cast/common/sender_encoded_frame.h"
+#include "media/cast/encoding/audio_encoder.h"
#include "media/cast/net/cast_transport_config.h"
-#include "media/cast/sender/audio_encoder.h"
-namespace media {
-namespace cast {
+namespace media::cast {
AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
const FrameSenderConfig& audio_config,
StatusChangeOnceCallback status_change_cb,
CastTransport* const transport_sender)
- : FrameSender(cast_environment,
- transport_sender,
- audio_config,
- NewFixedCongestionControl(audio_config.max_bitrate)),
- samples_in_encoder_(0) {
+ : cast_environment_(cast_environment),
+ rtp_timebase_(audio_config.rtp_timebase),
+ frame_sender_(FrameSender::Create(cast_environment,
+ audio_config,
+ transport_sender,
+ this)) {
if (!audio_config.use_external_encoder) {
audio_encoder_ = std::make_unique<AudioEncoder>(
- cast_environment, audio_config.channels, audio_config.rtp_timebase,
+ std::move(cast_environment), audio_config.channels, rtp_timebase_,
audio_config.max_bitrate, audio_config.codec,
- base::BindRepeating(&AudioSender::OnEncodedAudioFrame, AsWeakPtr(),
- audio_config.max_bitrate));
+ base::BindRepeating(&AudioSender::OnEncodedAudioFrame, AsWeakPtr()));
}
// AudioEncoder provides no operational status changes during normal use.
@@ -45,8 +45,8 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
// The number of samples per encoded audio frame depends on the codec and its
// initialization parameters. Now that we have an encoder, we can calculate
// the maximum frame rate.
- max_frame_rate_ =
- audio_config.rtp_timebase / audio_encoder_->GetSamplesPerFrame();
+ frame_sender_->SetMaxFrameRate(rtp_timebase_ /
+ audio_encoder_->GetSamplesPerFrame());
}
AudioSender::~AudioSender() = default;
@@ -61,8 +61,8 @@ void AudioSender::InsertAudio(std::unique_ptr<AudioBus> audio_bus,
}
const base::TimeDelta next_frame_duration =
- RtpTimeDelta::FromTicks(audio_bus->frames()).ToTimeDelta(rtp_timebase());
- if (ShouldDropNextFrame(next_frame_duration))
+ RtpTimeDelta::FromTicks(audio_bus->frames()).ToTimeDelta(rtp_timebase_);
+ if (frame_sender_->ShouldDropNextFrame(next_frame_duration))
return;
samples_in_encoder_ += audio_bus->frames();
@@ -70,33 +70,38 @@ void AudioSender::InsertAudio(std::unique_ptr<AudioBus> audio_bus,
audio_encoder_->InsertAudio(std::move(audio_bus), recorded_time);
}
+void AudioSender::SetTargetPlayoutDelay(
+ base::TimeDelta new_target_playout_delay) {
+ frame_sender_->SetTargetPlayoutDelay(new_target_playout_delay);
+}
+
+base::TimeDelta AudioSender::GetTargetPlayoutDelay() const {
+ return frame_sender_->GetTargetPlayoutDelay();
+}
+
base::WeakPtr<AudioSender> AudioSender::AsWeakPtr() {
return weak_factory_.GetWeakPtr();
}
int AudioSender::GetNumberOfFramesInEncoder() const {
// Note: It's possible for a partial frame to be in the encoder, but returning
- // the floor() is good enough for the "design limit" check in FrameSender.
+ // the floor() is good enough for the "design limit" check in FrameSenderImpl.
return samples_in_encoder_ / audio_encoder_->GetSamplesPerFrame();
}
-base::TimeDelta AudioSender::GetInFlightMediaDuration() const {
- const int samples_in_flight = samples_in_encoder_ +
- GetUnacknowledgedFrameCount() * audio_encoder_->GetSamplesPerFrame();
- return RtpTimeDelta::FromTicks(samples_in_flight).ToTimeDelta(rtp_timebase());
+base::TimeDelta AudioSender::GetEncoderBacklogDuration() const {
+ return RtpTimeDelta::FromTicks(samples_in_encoder_)
+ .ToTimeDelta(rtp_timebase_);
}
void AudioSender::OnEncodedAudioFrame(
- int encoder_bitrate,
std::unique_ptr<SenderEncodedFrame> encoded_frame,
int samples_skipped) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
samples_in_encoder_ -= audio_encoder_->GetSamplesPerFrame() + samples_skipped;
DCHECK_GE(samples_in_encoder_, 0);
-
- SendEncodedFrame(encoder_bitrate, std::move(encoded_frame));
+ frame_sender_->EnqueueFrame(std::move(encoded_frame));
}
-} // namespace cast
-} // namespace media
+} // namespace media::cast
diff --git a/chromium/media/cast/sender/audio_sender.h b/chromium/media/cast/sender/audio_sender.h
index 19cfbf8a203..e3f3fd4be6c 100644
--- a/chromium/media/cast/sender/audio_sender.h
+++ b/chromium/media/cast/sender/audio_sender.h
@@ -17,8 +17,7 @@
#include "media/cast/cast_sender.h"
#include "media/cast/sender/frame_sender.h"
-namespace media {
-namespace cast {
+namespace media::cast {
class AudioEncoder;
@@ -28,7 +27,7 @@ class AudioEncoder;
// RTCP packets.
// Additionally it posts a bunch of delayed tasks to the main thread for various
// timeouts.
-class AudioSender final : public FrameSender {
+class AudioSender final : public FrameSender::Client {
public:
AudioSender(scoped_refptr<CastEnvironment> cast_environment,
const FrameSenderConfig& audio_config,
@@ -46,29 +45,39 @@ class AudioSender final : public FrameSender {
void InsertAudio(std::unique_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time);
+ void SetTargetPlayoutDelay(base::TimeDelta new_target_playout_delay);
+ base::TimeDelta GetTargetPlayoutDelay() const;
+
base::WeakPtr<AudioSender> AsWeakPtr();
protected:
+ // FrameSender::Client overrides.
int GetNumberOfFramesInEncoder() const final;
- base::TimeDelta GetInFlightMediaDuration() const final;
+ base::TimeDelta GetEncoderBacklogDuration() const final;
private:
// Called by the |audio_encoder_| with the next EncodedFrame to send.
- void OnEncodedAudioFrame(int encoder_bitrate,
- std::unique_ptr<SenderEncodedFrame> encoded_frame,
+ void OnEncodedAudioFrame(std::unique_ptr<SenderEncodedFrame> encoded_frame,
int samples_skipped);
+ scoped_refptr<CastEnvironment> cast_environment_;
+
+ // The number of RTP units advanced per second;
+ const int rtp_timebase_;
+
+ // The backing frame sender implementation.
+ std::unique_ptr<FrameSender> frame_sender_;
+
// Encodes AudioBuses into EncodedFrames.
std::unique_ptr<AudioEncoder> audio_encoder_;
// The number of audio samples enqueued in |audio_encoder_|.
- int samples_in_encoder_;
+ int samples_in_encoder_ = 0;
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<AudioSender> weak_factory_{this};
};
-} // namespace cast
-} // namespace media
+} // namespace media::cast
#endif // MEDIA_CAST_SENDER_AUDIO_SENDER_H_
diff --git a/chromium/media/cast/sender/audio_sender_unittest.cc b/chromium/media/cast/sender/audio_sender_unittest.cc
index 27bc067368c..b7bd01a451f 100644
--- a/chromium/media/cast/sender/audio_sender_unittest.cc
+++ b/chromium/media/cast/sender/audio_sender_unittest.cc
@@ -26,8 +26,7 @@
#include "media/cast/test/utility/audio_utility.h"
#include "testing/gtest/include/gtest/gtest.h"
-namespace media {
-namespace cast {
+namespace media::cast {
namespace {
@@ -164,5 +163,4 @@ TEST_F(AudioSenderTest, RtcpTimer) {
EXPECT_LE(1, transport_->number_of_rtcp_packets());
}
-} // namespace cast
-} // namespace media
+} // namespace media::cast
diff --git a/chromium/media/cast/sender/frame_sender.cc b/chromium/media/cast/sender/frame_sender.cc
index 47b48cdb6a2..e40797753f2 100644
--- a/chromium/media/cast/sender/frame_sender.cc
+++ b/chromium/media/cast/sender/frame_sender.cc
@@ -1,481 +1,14 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/cast/sender/frame_sender.h"
+#include "base/feature_list.h"
+#include "media/base/media_switches.h"
-#include <algorithm>
-#include <limits>
-#include <memory>
-#include <utility>
-#include <vector>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/trace_event/trace_event.h"
-#include "media/cast/constants.h"
-#include "media/cast/sender/sender_encoded_frame.h"
-
-namespace media {
-namespace cast {
-namespace {
-
-constexpr int kNumAggressiveReportsSentAtStart = 100;
-constexpr base::TimeDelta kMinSchedulingDelay = base::Milliseconds(1);
-constexpr base::TimeDelta kReceiverProcessTime = base::Milliseconds(250);
-
-// The additional number of frames that can be in-flight when input exceeds the
-// maximum frame rate.
-constexpr int kMaxFrameBurst = 5;
-
-} // namespace
-
-// Convenience macro used in logging statements throughout this file.
-#define SENDER_SSRC (is_audio_ ? "AUDIO[" : "VIDEO[") << ssrc_ << "] "
-
-FrameSender::RtcpClient::RtcpClient(base::WeakPtr<FrameSender> frame_sender)
- : frame_sender_(frame_sender) {}
-
-FrameSender::RtcpClient::~RtcpClient() = default;
-
-void FrameSender::RtcpClient::OnReceivedCastMessage(
- const RtcpCastMessage& cast_message) {
- if (frame_sender_)
- frame_sender_->OnReceivedCastFeedback(cast_message);
-}
-
-void FrameSender::RtcpClient::OnReceivedRtt(base::TimeDelta round_trip_time) {
- if (frame_sender_)
- frame_sender_->OnMeasuredRoundTripTime(round_trip_time);
-}
-
-void FrameSender::RtcpClient::OnReceivedPli() {
- if (frame_sender_)
- frame_sender_->OnReceivedPli();
-}
-
-FrameSender::FrameSender(scoped_refptr<CastEnvironment> cast_environment,
- CastTransport* const transport_sender,
- const FrameSenderConfig& config,
- CongestionControl* congestion_control)
- : cast_environment_(cast_environment),
- transport_sender_(transport_sender),
- ssrc_(config.sender_ssrc),
- min_playout_delay_(config.min_playout_delay.is_zero()
- ? config.max_playout_delay
- : config.min_playout_delay),
- max_playout_delay_(config.max_playout_delay),
- animated_playout_delay_(config.animated_playout_delay.is_zero()
- ? config.max_playout_delay
- : config.animated_playout_delay),
- send_target_playout_delay_(false),
- max_frame_rate_(config.max_frame_rate),
- num_aggressive_rtcp_reports_sent_(0),
- duplicate_ack_counter_(0),
- congestion_control_(congestion_control),
- picture_lost_at_receiver_(false),
- rtp_timebase_(config.rtp_timebase),
- is_audio_(config.rtp_payload_type <= RtpPayloadType::AUDIO_LAST),
- max_ack_delay_(config.max_playout_delay) {
- DCHECK(transport_sender_);
- DCHECK_GT(rtp_timebase_, 0);
- DCHECK(congestion_control_);
- // We assume animated content to begin with since that is the common use
- // case today.
- VLOG(1) << SENDER_SSRC << "min latency "
- << min_playout_delay_.InMilliseconds() << "max latency "
- << max_playout_delay_.InMilliseconds() << "animated latency "
- << animated_playout_delay_.InMilliseconds();
- SetTargetPlayoutDelay(animated_playout_delay_);
-
- CastTransportRtpConfig transport_config;
- transport_config.ssrc = config.sender_ssrc;
- transport_config.feedback_ssrc = config.receiver_ssrc;
- transport_config.rtp_payload_type = config.rtp_payload_type;
- transport_config.aes_key = config.aes_key;
- transport_config.aes_iv_mask = config.aes_iv_mask;
-
- transport_sender->InitializeStream(
- transport_config,
- std::make_unique<FrameSender::RtcpClient>(weak_factory_.GetWeakPtr()));
-}
+namespace media::cast {
+FrameSender::FrameSender() = default;
FrameSender::~FrameSender() = default;
-void FrameSender::ScheduleNextRtcpReport() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- cast_environment_->PostDelayedTask(
- CastEnvironment::MAIN, FROM_HERE,
- base::BindOnce(&FrameSender::SendRtcpReport, weak_factory_.GetWeakPtr(),
- true),
- kRtcpReportInterval);
-}
-
-void FrameSender::SendRtcpReport(bool schedule_future_reports) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- // Sanity-check: We should have sent at least the first frame by this point.
- DCHECK(!last_send_time_.is_null());
-
- // Create lip-sync info for the sender report. The last sent frame's
- // reference time and RTP timestamp are used to estimate an RTP timestamp in
- // terms of "now." Note that |now| is never likely to be precise to an exact
- // frame boundary; and so the computation here will result in a
- // |now_as_rtp_timestamp| value that is rarely equal to any one emitted by the
- // encoder.
- const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- const base::TimeDelta time_delta =
- now - GetRecordedReferenceTime(last_sent_frame_id_);
- const RtpTimeDelta rtp_delta =
- RtpTimeDelta::FromTimeDelta(time_delta, rtp_timebase_);
- const RtpTimeTicks now_as_rtp_timestamp =
- GetRecordedRtpTimestamp(last_sent_frame_id_) + rtp_delta;
- transport_sender_->SendSenderReport(ssrc_, now, now_as_rtp_timestamp);
-
- if (schedule_future_reports)
- ScheduleNextRtcpReport();
-}
-
-void FrameSender::OnMeasuredRoundTripTime(base::TimeDelta round_trip_time) {
- DCHECK_GT(round_trip_time, base::TimeDelta());
- current_round_trip_time_ = round_trip_time;
- max_ack_delay_ = 2 * std::max(current_round_trip_time_, base::TimeDelta()) +
- kReceiverProcessTime;
- max_ack_delay_ = std::min(max_ack_delay_, target_playout_delay_);
-}
-
-void FrameSender::SetTargetPlayoutDelay(
- base::TimeDelta new_target_playout_delay) {
- if (send_target_playout_delay_ &&
- target_playout_delay_ == new_target_playout_delay) {
- return;
- }
- new_target_playout_delay = std::max(new_target_playout_delay,
- min_playout_delay_);
- new_target_playout_delay = std::min(new_target_playout_delay,
- max_playout_delay_);
- VLOG(2) << SENDER_SSRC << "Target playout delay changing from "
- << target_playout_delay_.InMilliseconds() << " ms to "
- << new_target_playout_delay.InMilliseconds() << " ms.";
- target_playout_delay_ = new_target_playout_delay;
- max_ack_delay_ = std::min(max_ack_delay_, target_playout_delay_);
- send_target_playout_delay_ = true;
- congestion_control_->UpdateTargetPlayoutDelay(target_playout_delay_);
-}
-
-void FrameSender::ResendCheck() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(!last_send_time_.is_null());
- const base::TimeDelta time_since_last_send =
- cast_environment_->Clock()->NowTicks() - last_send_time_;
- if (time_since_last_send > max_ack_delay_) {
- if (latest_acked_frame_id_ == last_sent_frame_id_) {
- // Last frame acked, no point in doing anything
- } else {
- VLOG(1) << SENDER_SSRC << "ACK timeout; last acked frame: "
- << latest_acked_frame_id_;
- ResendForKickstart();
- }
- }
- ScheduleNextResendCheck();
-}
-
-void FrameSender::ScheduleNextResendCheck() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(!last_send_time_.is_null());
- base::TimeDelta time_to_next =
- last_send_time_ - cast_environment_->Clock()->NowTicks() + max_ack_delay_;
- time_to_next = std::max(time_to_next, kMinSchedulingDelay);
- cast_environment_->PostDelayedTask(
- CastEnvironment::MAIN, FROM_HERE,
- base::BindOnce(&FrameSender::ResendCheck, weak_factory_.GetWeakPtr()),
- time_to_next);
-}
-
-void FrameSender::ResendForKickstart() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- DCHECK(!last_send_time_.is_null());
- VLOG(1) << SENDER_SSRC << "Resending last packet of frame "
- << last_sent_frame_id_ << " to kick-start.";
- last_send_time_ = cast_environment_->Clock()->NowTicks();
- transport_sender_->ResendFrameForKickstart(ssrc_, last_sent_frame_id_);
-}
-
-void FrameSender::RecordLatestFrameTimestamps(FrameId frame_id,
- base::TimeTicks reference_time,
- RtpTimeTicks rtp_timestamp) {
- DCHECK(!reference_time.is_null());
- frame_reference_times_[frame_id.lower_8_bits()] = reference_time;
- frame_rtp_timestamps_[frame_id.lower_8_bits()] = rtp_timestamp;
-}
-
-base::TimeTicks FrameSender::GetRecordedReferenceTime(FrameId frame_id) const {
- return frame_reference_times_[frame_id.lower_8_bits()];
-}
-
-RtpTimeTicks FrameSender::GetRecordedRtpTimestamp(FrameId frame_id) const {
- return frame_rtp_timestamps_[frame_id.lower_8_bits()];
-}
-
-int FrameSender::GetUnacknowledgedFrameCount() const {
- if (last_send_time_.is_null())
- return 0;
- const int count = last_sent_frame_id_ - latest_acked_frame_id_;
- DCHECK_GE(count, 0);
- return count;
-}
-
-base::TimeDelta FrameSender::GetAllowedInFlightMediaDuration() const {
- // The total amount allowed in-flight media should equal the amount that fits
- // within the entire playout delay window, plus the amount of time it takes to
- // receive an ACK from the receiver.
- return target_playout_delay_ + (current_round_trip_time_ / 2);
-}
-
-void FrameSender::SendEncodedFrame(
- int requested_bitrate_before_encode,
- std::unique_ptr<SenderEncodedFrame> encoded_frame) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- VLOG(2) << SENDER_SSRC << "About to send another frame: last_sent="
- << last_sent_frame_id_ << ", latest_acked=" << latest_acked_frame_id_;
-
- const FrameId frame_id = encoded_frame->frame_id;
- const bool is_first_frame_to_be_sent = last_send_time_.is_null();
-
- if (picture_lost_at_receiver_ &&
- (encoded_frame->dependency == EncodedFrame::KEY)) {
- picture_lost_at_receiver_ = false;
- DCHECK(frame_id > latest_acked_frame_id_);
- // Cancel sending remaining frames.
- std::vector<FrameId> cancel_sending_frames;
- for (FrameId id = latest_acked_frame_id_ + 1; id < frame_id; ++id) {
- cancel_sending_frames.push_back(id);
- }
- transport_sender_->CancelSendingFrames(ssrc_, cancel_sending_frames);
- OnCancelSendingFrames();
- }
-
- last_send_time_ = cast_environment_->Clock()->NowTicks();
- last_sent_frame_id_ = frame_id;
- // If this is the first frame about to be sent, fake the value of
- // |latest_acked_frame_id_| to indicate the receiver starts out all caught up.
- // Also, schedule the periodic frame re-send checks.
- if (is_first_frame_to_be_sent) {
- latest_acked_frame_id_ = frame_id - 1;
- ScheduleNextResendCheck();
- }
-
- VLOG_IF(1, !is_audio_ && encoded_frame->dependency == EncodedFrame::KEY)
- << SENDER_SSRC << "Sending encoded key frame, id=" << frame_id;
-
- std::unique_ptr<FrameEvent> encode_event(new FrameEvent());
- encode_event->timestamp = encoded_frame->encode_completion_time;
- encode_event->type = FRAME_ENCODED;
- encode_event->media_type = is_audio_ ? AUDIO_EVENT : VIDEO_EVENT;
- encode_event->rtp_timestamp = encoded_frame->rtp_timestamp;
- encode_event->frame_id = frame_id;
- encode_event->size = base::checked_cast<uint32_t>(encoded_frame->data.size());
- encode_event->key_frame = encoded_frame->dependency == EncodedFrame::KEY;
- encode_event->target_bitrate = requested_bitrate_before_encode;
- encode_event->encoder_cpu_utilization = encoded_frame->encoder_utilization;
- encode_event->idealized_bitrate_utilization =
- encoded_frame->lossy_utilization;
- cast_environment_->logger()->DispatchFrameEvent(std::move(encode_event));
-
- RecordLatestFrameTimestamps(frame_id,
- encoded_frame->reference_time,
- encoded_frame->rtp_timestamp);
-
- if (!is_audio_) {
- // Used by chrome/browser/media/cast_mirroring_performance_browsertest.cc
- TRACE_EVENT_INSTANT1(
- "cast_perf_test", "VideoFrameEncoded",
- TRACE_EVENT_SCOPE_THREAD,
- "rtp_timestamp", encoded_frame->rtp_timestamp.lower_32_bits());
- }
-
- // At the start of the session, it's important to send reports before each
- // frame so that the receiver can properly compute playout times. The reason
- // more than one report is sent is because transmission is not guaranteed,
- // only best effort, so send enough that one should almost certainly get
- // through.
- if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
- // SendRtcpReport() will schedule future reports to be made if this is the
- // last "aggressive report."
- ++num_aggressive_rtcp_reports_sent_;
- const bool is_last_aggressive_report =
- (num_aggressive_rtcp_reports_sent_ == kNumAggressiveReportsSentAtStart);
- VLOG_IF(1, is_last_aggressive_report)
- << SENDER_SSRC << "Sending last aggressive report.";
- SendRtcpReport(is_last_aggressive_report);
- }
-
- congestion_control_->SendFrameToTransport(
- frame_id, encoded_frame->data.size() * 8, last_send_time_);
-
- if (send_target_playout_delay_) {
- encoded_frame->new_playout_delay_ms =
- target_playout_delay_.InMilliseconds();
- }
-
- const char* name = is_audio_ ? "Audio Transport" : "Video Transport";
- TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(
- "cast.stream", name, TRACE_ID_WITH_SCOPE(name, frame_id.lower_32_bits()),
- "rtp_timestamp", encoded_frame->rtp_timestamp.lower_32_bits());
- transport_sender_->InsertFrame(ssrc_, *encoded_frame);
-}
-
-void FrameSender::OnCancelSendingFrames() {}
-
-void FrameSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- const bool have_valid_rtt = current_round_trip_time_.is_positive();
- if (have_valid_rtt) {
- congestion_control_->UpdateRtt(current_round_trip_time_);
-
- // Having the RTT value implies the receiver sent back a receiver report
- // based on it having received a report from here. Therefore, ensure this
- // sender stops aggressively sending reports.
- if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
- VLOG(1) << SENDER_SSRC
- << "No longer a need to send reports aggressively (sent "
- << num_aggressive_rtcp_reports_sent_ << ").";
- num_aggressive_rtcp_reports_sent_ = kNumAggressiveReportsSentAtStart;
- ScheduleNextRtcpReport();
- }
- }
-
- if (last_send_time_.is_null())
- return; // Cannot get an ACK without having first sent a frame.
-
- if (cast_feedback.missing_frames_and_packets.empty() &&
- cast_feedback.received_later_frames.empty()) {
- if (latest_acked_frame_id_ == cast_feedback.ack_frame_id) {
- VLOG(1) << SENDER_SSRC << "Received duplicate ACK for frame "
- << latest_acked_frame_id_;
- TRACE_EVENT_INSTANT2(
- "cast.stream", "Duplicate ACK", TRACE_EVENT_SCOPE_THREAD,
- "ack_frame_id", cast_feedback.ack_frame_id.lower_32_bits(),
- "last_sent_frame_id", last_sent_frame_id_.lower_32_bits());
- }
- // We only count duplicate ACKs when we have sent newer frames.
- if (latest_acked_frame_id_ == cast_feedback.ack_frame_id &&
- latest_acked_frame_id_ != last_sent_frame_id_) {
- duplicate_ack_counter_++;
- } else {
- duplicate_ack_counter_ = 0;
- }
- if (duplicate_ack_counter_ >= 2 && duplicate_ack_counter_ % 3 == 2) {
- ResendForKickstart();
- }
- } else {
- // Only count duplicated ACKs if there is no NACK request in between.
- // This is to avoid aggresive resend.
- duplicate_ack_counter_ = 0;
- }
-
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- congestion_control_->AckFrame(cast_feedback.ack_frame_id, now);
- if (!cast_feedback.received_later_frames.empty()) {
- // Ack the received frames.
- congestion_control_->AckLaterFrames(cast_feedback.received_later_frames,
- now);
- }
-
- std::unique_ptr<FrameEvent> ack_event(new FrameEvent());
- ack_event->timestamp = now;
- ack_event->type = FRAME_ACK_RECEIVED;
- ack_event->media_type = is_audio_ ? AUDIO_EVENT : VIDEO_EVENT;
- ack_event->rtp_timestamp =
- GetRecordedRtpTimestamp(cast_feedback.ack_frame_id);
- ack_event->frame_id = cast_feedback.ack_frame_id;
- cast_environment_->logger()->DispatchFrameEvent(std::move(ack_event));
-
- const bool is_acked_out_of_order =
- cast_feedback.ack_frame_id < latest_acked_frame_id_;
- VLOG(2) << SENDER_SSRC
- << "Received ACK" << (is_acked_out_of_order ? " out-of-order" : "")
- << " for frame " << cast_feedback.ack_frame_id;
- if (is_acked_out_of_order) {
- TRACE_EVENT_INSTANT2(
- "cast.stream", "ACK out of order", TRACE_EVENT_SCOPE_THREAD,
- "ack_frame_id", cast_feedback.ack_frame_id.lower_32_bits(),
- "latest_acked_frame_id", latest_acked_frame_id_.lower_32_bits());
- } else if (latest_acked_frame_id_ < cast_feedback.ack_frame_id) {
- // Cancel resends of acked frames.
- std::vector<FrameId> frames_to_cancel;
- frames_to_cancel.reserve(cast_feedback.ack_frame_id -
- latest_acked_frame_id_);
- do {
- ++latest_acked_frame_id_;
- frames_to_cancel.push_back(latest_acked_frame_id_);
- // This is a good place to match the trace for frame ids
- // since this ensures we not only track frame ids that are
- // implicitly ACKed, but also handles duplicate ACKs
- const char* name = is_audio_ ? "Audio Transport" : "Video Transport";
- TRACE_EVENT_NESTABLE_ASYNC_END1(
- "cast.stream", name,
- TRACE_ID_WITH_SCOPE(name, latest_acked_frame_id_.lower_32_bits()),
- "RTT_usecs", current_round_trip_time_.InMicroseconds());
- } while (latest_acked_frame_id_ < cast_feedback.ack_frame_id);
- transport_sender_->CancelSendingFrames(ssrc_, frames_to_cancel);
- OnCancelSendingFrames();
- }
-}
-
-void FrameSender::OnReceivedPli() {
- picture_lost_at_receiver_ = true;
-}
-
-bool FrameSender::ShouldDropNextFrame(base::TimeDelta frame_duration) const {
- // Check that accepting the next frame won't cause more frames to become
- // in-flight than the system's design limit.
- const int count_frames_in_flight =
- GetUnacknowledgedFrameCount() + GetNumberOfFramesInEncoder();
- if (count_frames_in_flight >= kMaxUnackedFrames) {
- VLOG(1) << SENDER_SSRC << "Dropping: Too many frames would be in-flight.";
- return true;
- }
-
- // Check that accepting the next frame won't exceed the configured maximum
- // frame rate, allowing for short-term bursts.
- base::TimeDelta duration_in_flight = GetInFlightMediaDuration();
- const double max_frames_in_flight =
- max_frame_rate_ * duration_in_flight.InSecondsF();
- if (count_frames_in_flight >= max_frames_in_flight + kMaxFrameBurst) {
- VLOG(1) << SENDER_SSRC << "Dropping: Burst threshold would be exceeded.";
- return true;
- }
-
- // Check that accepting the next frame won't exceed the allowed in-flight
- // media duration.
- const base::TimeDelta duration_would_be_in_flight =
- duration_in_flight + frame_duration;
- const base::TimeDelta allowed_in_flight = GetAllowedInFlightMediaDuration();
- if (VLOG_IS_ON(1)) {
- const int64_t percent =
- allowed_in_flight.is_positive()
- ? base::ClampRound<int64_t>(duration_would_be_in_flight /
- allowed_in_flight * 100)
- : std::numeric_limits<int64_t>::max();
- VLOG_IF(1, percent > 50)
- << SENDER_SSRC
- << duration_in_flight.InMicroseconds() << " usec in-flight + "
- << frame_duration.InMicroseconds() << " usec for next frame --> "
- << percent << "% of allowed in-flight.";
- }
- if (duration_would_be_in_flight > allowed_in_flight) {
- VLOG(1) << SENDER_SSRC << "Dropping: In-flight duration would be too high.";
- return true;
- }
-
- // Next frame is accepted.
- return false;
-}
-
-} // namespace cast
-} // namespace media
+} // namespace media::cast
diff --git a/chromium/media/cast/sender/frame_sender.h b/chromium/media/cast/sender/frame_sender.h
index 7e0b1fc8c21..02221e29bd4 100644
--- a/chromium/media/cast/sender/frame_sender.h
+++ b/chromium/media/cast/sender/frame_sender.h
@@ -1,8 +1,6 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// This is the base class for an object that send frames to a receiver.
#ifndef MEDIA_CAST_SENDER_FRAME_SENDER_H_
#define MEDIA_CAST_SENDER_FRAME_SENDER_H_
@@ -19,196 +17,107 @@
#include "media/cast/net/rtcp/rtcp_defines.h"
#include "media/cast/sender/congestion_control.h"
-namespace media {
-namespace cast {
+namespace media::cast {
struct SenderEncodedFrame;
+class CastEnvironment;
+class CastTransport;
+// This is the pure virtual interface for an object that sends encoded frames
+// to a receiver.
class FrameSender {
public:
- FrameSender(scoped_refptr<CastEnvironment> cast_environment,
- CastTransport* const transport_sender,
- const FrameSenderConfig& config,
- CongestionControl* congestion_control);
-
- FrameSender(const FrameSender&) = delete;
- FrameSender& operator=(const FrameSender&) = delete;
-
- virtual ~FrameSender();
+ // The client is responsible for implementing some encoder-specific methods
+ // as well as having the option to subscribe to frame cancellation events.
+ class Client {
+ public:
+ virtual ~Client();
- int rtp_timebase() const { return rtp_timebase_; }
+ // Returns the number of frames in the encoder's backlog.
+ virtual int GetNumberOfFramesInEncoder() const = 0;
- // Calling this function is only valid if the receiver supports the
- // "extra_playout_delay", rtp extension.
- void SetTargetPlayoutDelay(base::TimeDelta new_target_playout_delay);
+ // Should return the amount of playback time that is in the encoder's
+ // backlog. Assuming that the encoder emits frames consecutively, this is
+ // the same as the difference between the smallest and largest presentation
+ // timestamps in the backlog.
+ virtual base::TimeDelta GetEncoderBacklogDuration() const = 0;
- base::TimeDelta GetTargetPlayoutDelay() const {
- return target_playout_delay_;
- }
+ // The frame associated with |frame_id| was canceled and not sent.
+ virtual void OnFrameCanceled(FrameId frame_id) {}
+ };
- // Called by the encoder with the next EncodeFrame to send.
- void SendEncodedFrame(int requested_bitrate_before_encode,
- std::unique_ptr<SenderEncodedFrame> encoded_frame);
+ static std::unique_ptr<FrameSender> Create(
+ scoped_refptr<CastEnvironment> cast_environment,
+ const FrameSenderConfig& config,
+ CastTransport* const transport_sender,
+ Client* client);
- protected:
- // Returns the number of frames in the encoder's backlog.
- virtual int GetNumberOfFramesInEncoder() const = 0;
+ FrameSender();
+ FrameSender(FrameSender&&) = delete;
+ FrameSender(const FrameSender&) = delete;
+ FrameSender& operator=(const FrameSender&) = delete;
+ FrameSender& operator=(FrameSender&&) = delete;
+ virtual ~FrameSender();
- // Returns the duration of the data in the encoder's backlog plus the duration
- // of sent, unacknowledged frames.
- virtual base::TimeDelta GetInFlightMediaDuration() const = 0;
+ // Setting of the target playout delay. It should be communicated to the
+ // receiver on the next encoded frame.
+ // NOTE: Calling this function is only valid if the receiver supports the
+ // "extra_playout_delay", rtp extension.
+ virtual void SetTargetPlayoutDelay(
+ base::TimeDelta new_target_playout_delay) = 0;
+ virtual base::TimeDelta GetTargetPlayoutDelay() const = 0;
- // One or more frames were canceled.
- virtual void OnCancelSendingFrames();
+ // Whether a key frame is needed, typically caused by a picture loss
+ // indication event.
+ virtual bool NeedsKeyFrame() const = 0;
- protected:
- class RtcpClient : public RtcpObserver {
- public:
- explicit RtcpClient(base::WeakPtr<FrameSender> frame_sender);
- ~RtcpClient() override;
+ // Called by the encoder with the next encoded frame to send.
+ virtual void EnqueueFrame(
+ std::unique_ptr<SenderEncodedFrame> encoded_frame) = 0;
- void OnReceivedCastMessage(const RtcpCastMessage& cast_message) override;
- void OnReceivedRtt(base::TimeDelta round_trip_time) override;
- void OnReceivedPli() override;
+ // Returns true if too many frames would be in-flight by encoding and sending
+ // the next frame having the given |frame_duration|.
+ //
+ // Callers are recommended to compute the frame duration based on the
+ // difference between the next and last frames' reference times, or the period
+ // between frames of the configured max frame rate if the reference times are
+ // unavailable.
+ virtual bool ShouldDropNextFrame(base::TimeDelta frame_duration) const = 0;
- private:
- const base::WeakPtr<FrameSender> frame_sender_;
- };
- // Schedule and execute periodic sending of RTCP report.
- void ScheduleNextRtcpReport();
- void SendRtcpReport(bool schedule_future_reports);
+ // Returns the RTP timestamp on the frame associated with |frame_id|.
+ virtual RtpTimeTicks GetRecordedRtpTimestamp(FrameId frame_id) const = 0;
- // Protected for testability.
- void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback);
+ // Returns the number of frames that were sent but not yet acknowledged.
+ virtual int GetUnacknowledgedFrameCount() const = 0;
- // Called when a Pli message is received.
- void OnReceivedPli();
+ // Returns the suggested bitrate the next frame should be encoded at.
+ virtual int GetSuggestedBitrate(base::TimeTicks playout_time,
+ base::TimeDelta playout_delay) = 0;
- void OnMeasuredRoundTripTime(base::TimeDelta rtt);
+ // Configuration specific methods.
- const scoped_refptr<CastEnvironment> cast_environment_;
+ // The maximum frame rate.
+ virtual double MaxFrameRate() const = 0;
+ virtual void SetMaxFrameRate(double max_frame_rate) = 0;
- // Sends encoded frames over the configured transport (e.g., UDP). In
- // Chromium, this could be a proxy that first sends the frames from a renderer
- // process to the browser process over IPC, with the browser process being
- // responsible for "packetizing" the frames and pushing packets into the
- // network layer.
- const raw_ptr<CastTransport> transport_sender_;
+ // The current target playout delay.
+ virtual base::TimeDelta TargetPlayoutDelay() const = 0;
- const uint32_t ssrc_;
+ // The current, estimated round trip time.
+ virtual base::TimeDelta CurrentRoundTripTime() const = 0;
- protected:
- // Schedule and execute periodic checks for re-sending packets. If no
- // acknowledgements have been received for "too long," FrameSender will
- // speculatively re-send certain packets of an unacked frame to kick-start
- // re-transmission. This is a last resort tactic to prevent the session from
- // getting stuck after a long outage.
- void ScheduleNextResendCheck();
- void ResendCheck();
- void ResendForKickstart();
+ // When the last frame was sent.
+ virtual base::TimeTicks LastSendTime() const = 0;
- // Returns true if too many frames would be in-flight by encoding and sending
- // the next frame having the given |frame_duration|.
- bool ShouldDropNextFrame(base::TimeDelta frame_duration) const;
-
- // Record or retrieve a recent history of each frame's timestamps.
- // Warning: If a frame ID too far in the past is requested, the getters will
- // silently succeed but return incorrect values. Be sure to respect
- // media::cast::kMaxUnackedFrames.
- void RecordLatestFrameTimestamps(FrameId frame_id,
- base::TimeTicks reference_time,
- RtpTimeTicks rtp_timestamp);
- base::TimeTicks GetRecordedReferenceTime(FrameId frame_id) const;
- RtpTimeTicks GetRecordedRtpTimestamp(FrameId frame_id) const;
+ // The latest acknowledged frame ID.
+ virtual FrameId LatestAckedFrameId() const = 0;
- // Returns the number of frames that were sent but not yet acknowledged.
- int GetUnacknowledgedFrameCount() const;
-
- // Playout delay represents total amount of time between a frame's
- // capture/recording on the sender and its playback on the receiver
- // (i.e., shown to a user). This should be a value large enough to
- // give the system sufficient time to encode, transmit/retransmit,
- // receive, decode, and render; given its run-time environment
- // (sender/receiver hardware performance, network conditions,etc.).
-
- // The |target_playout delay_| is the current delay that is adaptively
- // adjusted based on feedback from video capture engine and the congestion
- // control. In case of interactive content, the target is adjusted to start
- // at |min_playout_delay_| and in case of animated content, it starts out at
- // |animated_playout_delay_| and then adaptively adjust based on feedback
- // from congestion control.
- base::TimeDelta target_playout_delay_;
- const base::TimeDelta min_playout_delay_;
- const base::TimeDelta max_playout_delay_;
- // Starting playout delay for animated content.
- const base::TimeDelta animated_playout_delay_;
-
- // If true, we transmit the target playout delay to the receiver.
- bool send_target_playout_delay_;
-
- // Max encoded frames generated per second.
- double max_frame_rate_;
-
- // Counts how many RTCP reports are being "aggressively" sent (i.e., one per
- // frame) at the start of the session. Once a threshold is reached, RTCP
- // reports are instead sent at the configured interval + random drift.
- int num_aggressive_rtcp_reports_sent_;
-
- // This is "null" until the first frame is sent. Thereafter, this tracks the
- // last time any frame was sent or re-sent.
- base::TimeTicks last_send_time_;
-
- // The ID of the last frame sent. This member is invalid until
- // |!last_send_time_.is_null()|.
- FrameId last_sent_frame_id_;
-
- // The ID of the latest (not necessarily the last) frame that has been
- // acknowledged. This member is invalid until |!last_send_time_.is_null()|.
- FrameId latest_acked_frame_id_;
-
- // Counts the number of duplicate ACK that are being received. When this
- // number reaches a threshold, the sender will take this as a sign that the
- // receiver hasn't yet received the first packet of the next frame. In this
- // case, FrameSender will trigger a re-send of the next frame.
- int duplicate_ack_counter_;
-
- // This object controls how we change the bitrate to make sure the
- // buffer doesn't overflow.
- std::unique_ptr<CongestionControl> congestion_control_;
-
- // The most recently measured round trip time.
- base::TimeDelta current_round_trip_time_;
-
- // This flag is set true when a Pli message is received. It is cleared once
- // the FrameSender scheduled an encoded key frame to be sent.
- bool picture_lost_at_receiver_;
-
- private:
- // Returns the maximum media duration currently allowed in-flight. This
- // fluctuates in response to the currently-measured network latency.
- base::TimeDelta GetAllowedInFlightMediaDuration() const;
-
- // RTP timestamp increment representing one second.
- const int rtp_timebase_;
-
- const bool is_audio_;
-
- // This is the maximum delay that the sender should get ack from receiver.
- // Otherwise, sender will call ResendForKickstart().
- base::TimeDelta max_ack_delay_;
-
- // Ring buffers to keep track of recent frame timestamps (both in terms of
- // local reference time and RTP media time). These should only be accessed
- // through the Record/GetXXX() methods. The index into this ring
- // buffer is the lower 8 bits of the FrameId.
- base::TimeTicks frame_reference_times_[256];
- RtpTimeTicks frame_rtp_timestamps_[256];
-
- // NOTE: Weak pointers must be invalidated before all other member variables.
- base::WeakPtrFactory<FrameSender> weak_factory_{this};
+ // RTCP client-specific methods.
+ virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) = 0;
+ virtual void OnReceivedPli() = 0;
+ virtual void OnMeasuredRoundTripTime(base::TimeDelta rtt) = 0;
};
-} // namespace cast
-} // namespace media
+} // namespace media::cast
#endif // MEDIA_CAST_SENDER_FRAME_SENDER_H_
diff --git a/chromium/media/cast/sender/frame_sender_impl.cc b/chromium/media/cast/sender/frame_sender_impl.cc
new file mode 100644
index 00000000000..77b969cfdcb
--- /dev/null
+++ b/chromium/media/cast/sender/frame_sender_impl.cc
@@ -0,0 +1,559 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/sender/frame_sender_impl.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/feature_list.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/trace_event/trace_event.h"
+#include "media/base/media_switches.h"
+#include "media/cast/common/sender_encoded_frame.h"
+#include "media/cast/constants.h"
+
+namespace media::cast {
+namespace {
+
+constexpr int kNumAggressiveReportsSentAtStart = 100;
+constexpr base::TimeDelta kMinSchedulingDelay = base::Milliseconds(1);
+constexpr base::TimeDelta kReceiverProcessTime = base::Milliseconds(250);
+
+// The additional number of frames that can be in-flight when input exceeds the
+// maximum frame rate.
+constexpr int kMaxFrameBurst = 5;
+
+} // namespace
+
+// static
+std::unique_ptr<FrameSender> FrameSender::Create(
+ scoped_refptr<CastEnvironment> cast_environment,
+ const FrameSenderConfig& config,
+ CastTransport* const transport_sender,
+ Client* client) {
+ // TODO(https://crbug.com/1212803): return a new OpenscreenSender if the
+ // Open Screen cast streaming session flag is enabled.
+ if (base::FeatureList::IsEnabled(kOpenscreenCastStreamingSession)) {
+ NOTIMPLEMENTED()
+ << "Enabled the OpenscreenCastStreamingFlag, but no FrameSenderImpl "
+ "implementation yet.";
+ return nullptr;
+ }
+
+ return std::make_unique<FrameSenderImpl>(cast_environment, config,
+ transport_sender, client);
+}
+
+// Convenience macro used in logging statements throughout this file.
+#define SENDER_SSRC \
+ (is_audio_ ? "AUDIO[" : "VIDEO[") << config_.sender_ssrc << "] "
+
+FrameSenderImpl::Client::~Client() = default;
+
+FrameSenderImpl::RtcpClient::RtcpClient(
+ base::WeakPtr<FrameSenderImpl> frame_sender)
+ : frame_sender_(frame_sender) {}
+
+FrameSenderImpl::RtcpClient::~RtcpClient() = default;
+
+void FrameSenderImpl::RtcpClient::OnReceivedCastMessage(
+ const RtcpCastMessage& cast_message) {
+ if (frame_sender_)
+ frame_sender_->OnReceivedCastFeedback(cast_message);
+}
+
+void FrameSenderImpl::RtcpClient::OnReceivedRtt(
+ base::TimeDelta round_trip_time) {
+ if (frame_sender_)
+ frame_sender_->OnMeasuredRoundTripTime(round_trip_time);
+}
+
+void FrameSenderImpl::RtcpClient::OnReceivedPli() {
+ if (frame_sender_)
+ frame_sender_->OnReceivedPli();
+}
+
+FrameSenderImpl::FrameSenderImpl(
+ scoped_refptr<CastEnvironment> cast_environment,
+ const FrameSenderConfig& config,
+ CastTransport* const transport_sender,
+ Client* client)
+ : cast_environment_(cast_environment),
+ config_(config),
+ target_playout_delay_(config.max_playout_delay),
+ max_frame_rate_(config.max_frame_rate),
+ transport_sender_(transport_sender),
+ client_(client),
+ is_audio_(config.rtp_payload_type <= RtpPayloadType::AUDIO_LAST),
+ // We only use the adaptive control for software video encoding.
+ congestion_control_(
+ (!config.use_external_encoder && !is_audio_)
+ ? NewAdaptiveCongestionControl(cast_environment->Clock(),
+ config.max_bitrate,
+ config.min_bitrate,
+ max_frame_rate_)
+ : NewFixedCongestionControl(
+ (config.min_bitrate + config.max_bitrate) / 2)
+
+ ),
+ max_ack_delay_(config_.max_playout_delay) {
+ DCHECK(transport_sender_);
+ DCHECK_GT(config_.rtp_timebase, 0);
+ DCHECK(congestion_control_);
+ // We assume animated content to begin with since that is the common use
+ // case today.
+ VLOG(1) << SENDER_SSRC << "min latency "
+ << config_.min_playout_delay.InMilliseconds() << "max latency "
+ << config_.max_playout_delay.InMilliseconds() << "animated latency "
+ << config_.animated_playout_delay.InMilliseconds();
+ SetTargetPlayoutDelay(config_.animated_playout_delay);
+
+ CastTransportRtpConfig transport_config;
+ transport_config.ssrc = config.sender_ssrc;
+ transport_config.feedback_ssrc = config.receiver_ssrc;
+ transport_config.rtp_payload_type = config.rtp_payload_type;
+ transport_config.aes_key = config.aes_key;
+ transport_config.aes_iv_mask = config.aes_iv_mask;
+ transport_sender_->InitializeStream(
+ transport_config, std::make_unique<FrameSenderImpl::RtcpClient>(
+ weak_factory_.GetWeakPtr()));
+}
+
+FrameSenderImpl::~FrameSenderImpl() = default;
+
+bool FrameSenderImpl::NeedsKeyFrame() const {
+ return picture_lost_at_receiver_;
+}
+
+base::TimeTicks FrameSenderImpl::GetRecordedReferenceTime(
+ FrameId frame_id) const {
+ return frame_reference_times_[frame_id.lower_8_bits()];
+}
+
+void FrameSenderImpl::ScheduleNextRtcpReport() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::BindOnce(&FrameSenderImpl::SendRtcpReport,
+ weak_factory_.GetWeakPtr(), true),
+ kRtcpReportInterval);
+}
+
+void FrameSenderImpl::SendRtcpReport(bool schedule_future_reports) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ // Sanity-check: We should have sent at least the first frame by this point.
+ DCHECK(!last_send_time_.is_null());
+
+ // Create lip-sync info for the sender report. The last sent frame's
+ // reference time and RTP timestamp are used to estimate an RTP timestamp in
+ // terms of "now." Note that |now| is never likely to be precise to an exact
+ // frame boundary; and so the computation here will result in a
+ // |now_as_rtp_timestamp| value that is rarely equal to any one emitted by the
+ // encoder.
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ const base::TimeDelta time_delta =
+ now - GetRecordedReferenceTime(last_sent_frame_id_);
+ const RtpTimeDelta rtp_delta =
+ RtpTimeDelta::FromTimeDelta(time_delta, config_.rtp_timebase);
+ const RtpTimeTicks now_as_rtp_timestamp =
+ GetRecordedRtpTimestamp(last_sent_frame_id_) + rtp_delta;
+ transport_sender_->SendSenderReport(config_.sender_ssrc, now,
+ now_as_rtp_timestamp);
+
+ if (schedule_future_reports)
+ ScheduleNextRtcpReport();
+}
+
+void FrameSenderImpl::OnMeasuredRoundTripTime(base::TimeDelta round_trip_time) {
+ DCHECK_GT(round_trip_time, base::TimeDelta());
+ current_round_trip_time_ = round_trip_time;
+ max_ack_delay_ = 2 * std::max(current_round_trip_time_, base::TimeDelta()) +
+ kReceiverProcessTime;
+ max_ack_delay_ = std::min(max_ack_delay_, target_playout_delay_);
+}
+
+void FrameSenderImpl::SetTargetPlayoutDelay(
+ base::TimeDelta new_target_playout_delay) {
+ if (send_target_playout_delay_ &&
+ target_playout_delay_ == new_target_playout_delay) {
+ return;
+ }
+ new_target_playout_delay =
+ std::max(new_target_playout_delay, config_.min_playout_delay);
+ new_target_playout_delay =
+ std::min(new_target_playout_delay, config_.max_playout_delay);
+ VLOG(2) << SENDER_SSRC << "Target playout delay changing from "
+ << target_playout_delay_.InMilliseconds() << " ms to "
+ << new_target_playout_delay.InMilliseconds() << " ms.";
+ target_playout_delay_ = new_target_playout_delay;
+ max_ack_delay_ = std::min(max_ack_delay_, target_playout_delay_);
+ send_target_playout_delay_ = true;
+ congestion_control_->UpdateTargetPlayoutDelay(target_playout_delay_);
+}
+
+base::TimeDelta FrameSenderImpl::GetTargetPlayoutDelay() const {
+ return target_playout_delay_;
+}
+
+void FrameSenderImpl::ResendCheck() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(!last_send_time_.is_null());
+ const base::TimeDelta time_since_last_send =
+ cast_environment_->Clock()->NowTicks() - last_send_time_;
+ if (time_since_last_send > max_ack_delay_) {
+ if (latest_acked_frame_id_ == last_sent_frame_id_) {
+ // Last frame acked, no point in doing anything
+ } else {
+ VLOG(1) << SENDER_SSRC
+ << "ACK timeout; last acked frame: " << latest_acked_frame_id_;
+ ResendForKickstart();
+ }
+ }
+ ScheduleNextResendCheck();
+}
+
+void FrameSenderImpl::ScheduleNextResendCheck() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(!last_send_time_.is_null());
+ base::TimeDelta time_to_next =
+ last_send_time_ - cast_environment_->Clock()->NowTicks() + max_ack_delay_;
+ time_to_next = std::max(time_to_next, kMinSchedulingDelay);
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::BindOnce(&FrameSenderImpl::ResendCheck, weak_factory_.GetWeakPtr()),
+ time_to_next);
+}
+
+void FrameSenderImpl::ResendForKickstart() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(!last_send_time_.is_null());
+ VLOG(1) << SENDER_SSRC << "Resending last packet of frame "
+ << last_sent_frame_id_ << " to kick-start.";
+ last_send_time_ = cast_environment_->Clock()->NowTicks();
+ transport_sender_->ResendFrameForKickstart(config_.sender_ssrc,
+ last_sent_frame_id_);
+}
+
+void FrameSenderImpl::RecordLatestFrameTimestamps(
+ FrameId frame_id,
+ base::TimeTicks reference_time,
+ RtpTimeTicks rtp_timestamp) {
+ DCHECK(!reference_time.is_null());
+ frame_reference_times_[frame_id.lower_8_bits()] = reference_time;
+ frame_rtp_timestamps_[frame_id.lower_8_bits()] = rtp_timestamp;
+}
+
+base::TimeDelta FrameSenderImpl::GetInFlightMediaDuration() const {
+ const base::TimeDelta encoder_duration = client_->GetEncoderBacklogDuration();
+ // No frames are in flight, so only look at the encoder duration.
+ if (last_sent_frame_id_ == latest_acked_frame_id_) {
+ return encoder_duration;
+ }
+
+ const RtpTimeTicks oldest_acked_timestamp =
+ GetRecordedRtpTimestamp(latest_acked_frame_id_);
+ const RtpTimeTicks newest_acked_timestamp =
+ GetRecordedRtpTimestamp(last_sent_frame_id_);
+ return (newest_acked_timestamp - oldest_acked_timestamp)
+ .ToTimeDelta(config_.rtp_timebase) +
+ encoder_duration;
+}
+
+RtpTimeTicks FrameSenderImpl::GetRecordedRtpTimestamp(FrameId frame_id) const {
+ return frame_rtp_timestamps_[frame_id.lower_8_bits()];
+}
+
+int FrameSenderImpl::GetUnacknowledgedFrameCount() const {
+ if (last_send_time_.is_null())
+ return 0;
+ const int count = last_sent_frame_id_ - latest_acked_frame_id_;
+ DCHECK_GE(count, 0);
+ return count;
+}
+
+int FrameSenderImpl::GetSuggestedBitrate(base::TimeTicks playout_time,
+ base::TimeDelta playout_delay) {
+ return congestion_control_->GetBitrate(playout_time, playout_delay);
+}
+
+double FrameSenderImpl::MaxFrameRate() const {
+ return max_frame_rate_;
+}
+
+void FrameSenderImpl::SetMaxFrameRate(double max_frame_rate) {
+ max_frame_rate_ = max_frame_rate;
+}
+
+base::TimeDelta FrameSenderImpl::TargetPlayoutDelay() const {
+ return target_playout_delay_;
+}
+base::TimeDelta FrameSenderImpl::CurrentRoundTripTime() const {
+ return current_round_trip_time_;
+}
+base::TimeTicks FrameSenderImpl::LastSendTime() const {
+ return last_send_time_;
+}
+FrameId FrameSenderImpl::LatestAckedFrameId() const {
+ return latest_acked_frame_id_;
+}
+
+base::TimeDelta FrameSenderImpl::GetAllowedInFlightMediaDuration() const {
+ // The total amount allowed in-flight media should equal the amount that fits
+ // within the entire playout delay window, plus the amount of time it takes to
+ // receive an ACK from the receiver.
+ return target_playout_delay_ + (current_round_trip_time_ / 2);
+}
+
+void FrameSenderImpl::EnqueueFrame(
+ std::unique_ptr<SenderEncodedFrame> encoded_frame) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ VLOG(2) << SENDER_SSRC
+ << "About to send another frame: last_sent=" << last_sent_frame_id_
+ << ", latest_acked=" << latest_acked_frame_id_;
+
+ const FrameId frame_id = encoded_frame->frame_id;
+ const bool is_first_frame_to_be_sent = last_send_time_.is_null();
+
+ if (picture_lost_at_receiver_ &&
+ (encoded_frame->dependency == EncodedFrame::KEY)) {
+ picture_lost_at_receiver_ = false;
+ DCHECK(frame_id > latest_acked_frame_id_);
+ // Cancel sending remaining frames.
+ std::vector<FrameId> cancel_sending_frames;
+ for (FrameId id = latest_acked_frame_id_ + 1; id < frame_id; ++id) {
+ cancel_sending_frames.push_back(id);
+ client_->OnFrameCanceled(id);
+ }
+ transport_sender_->CancelSendingFrames(config_.sender_ssrc,
+ cancel_sending_frames);
+ }
+
+ last_send_time_ = cast_environment_->Clock()->NowTicks();
+
+ DCHECK(frame_id > last_sent_frame_id_) << "enqueued frames out of order.";
+ last_sent_frame_id_ = frame_id;
+ // If this is the first frame about to be sent, fake the value of
+ // |latest_acked_frame_id_| to indicate the receiver starts out all
+ // caught up. Also, schedule the periodic frame re-send checks.
+ if (is_first_frame_to_be_sent) {
+ latest_acked_frame_id_ = frame_id - 1;
+ ScheduleNextResendCheck();
+ }
+
+ VLOG_IF(1, !is_audio_ && encoded_frame->dependency == EncodedFrame::KEY)
+ << SENDER_SSRC << "Sending encoded key frame, id=" << frame_id;
+
+ std::unique_ptr<FrameEvent> encode_event(new FrameEvent());
+ encode_event->timestamp = encoded_frame->encode_completion_time;
+ encode_event->type = FRAME_ENCODED;
+ encode_event->media_type = is_audio_ ? AUDIO_EVENT : VIDEO_EVENT;
+ encode_event->rtp_timestamp = encoded_frame->rtp_timestamp;
+ encode_event->frame_id = frame_id;
+ encode_event->size = base::checked_cast<uint32_t>(encoded_frame->data.size());
+ encode_event->key_frame = encoded_frame->dependency == EncodedFrame::KEY;
+ encode_event->target_bitrate = encoded_frame->encoder_bitrate;
+ encode_event->encoder_cpu_utilization = encoded_frame->encoder_utilization;
+ encode_event->idealized_bitrate_utilization = encoded_frame->lossiness;
+ cast_environment_->logger()->DispatchFrameEvent(std::move(encode_event));
+
+ RecordLatestFrameTimestamps(frame_id, encoded_frame->reference_time,
+ encoded_frame->rtp_timestamp);
+
+ if (!is_audio_) {
+ // Used by chrome/browser/media/cast_mirroring_performance_browsertest.cc
+ TRACE_EVENT_INSTANT1("cast_perf_test", "VideoFrameEncoded",
+ TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp",
+ encoded_frame->rtp_timestamp.lower_32_bits());
+ }
+
+ // At the start of the session, it's important to send reports before each
+ // frame so that the receiver can properly compute playout times. The reason
+ // more than one report is sent is because transmission is not guaranteed,
+ // only best effort, so send enough that one should almost certainly get
+ // through.
+ if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
+ // SendRtcpReport() will schedule future reports to be made if this is the
+ // last "aggressive report."
+ ++num_aggressive_rtcp_reports_sent_;
+ const bool is_last_aggressive_report =
+ (num_aggressive_rtcp_reports_sent_ == kNumAggressiveReportsSentAtStart);
+ VLOG_IF(1, is_last_aggressive_report)
+ << SENDER_SSRC << "Sending last aggressive report.";
+ SendRtcpReport(is_last_aggressive_report);
+ }
+
+ congestion_control_->SendFrameToTransport(
+ frame_id, encoded_frame->data.size() * 8, last_send_time_);
+
+ if (send_target_playout_delay_) {
+ encoded_frame->new_playout_delay_ms =
+ target_playout_delay_.InMilliseconds();
+ }
+
+ const char* name = is_audio_ ? "Audio Transport" : "Video Transport";
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(
+ "cast.stream", name, TRACE_ID_WITH_SCOPE(name, frame_id.lower_32_bits()),
+ "rtp_timestamp", encoded_frame->rtp_timestamp.lower_32_bits());
+ transport_sender_->InsertFrame(config_.sender_ssrc, *encoded_frame);
+}
+
+void FrameSenderImpl::OnReceivedCastFeedback(
+ const RtcpCastMessage& cast_feedback) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ const bool have_valid_rtt = current_round_trip_time_.is_positive();
+ if (have_valid_rtt) {
+ congestion_control_->UpdateRtt(current_round_trip_time_);
+
+ // Having the RTT value implies the receiver sent back a receiver report
+ // based on it having received a report from here. Therefore, ensure this
+ // sender stops aggressively sending reports.
+ if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
+ VLOG(1) << SENDER_SSRC
+ << "No longer a need to send reports aggressively (sent "
+ << num_aggressive_rtcp_reports_sent_ << ").";
+ num_aggressive_rtcp_reports_sent_ = kNumAggressiveReportsSentAtStart;
+ ScheduleNextRtcpReport();
+ }
+ }
+
+ if (last_send_time_.is_null())
+ return; // Cannot get an ACK without having first sent a frame.
+
+ if (cast_feedback.missing_frames_and_packets.empty() &&
+ cast_feedback.received_later_frames.empty()) {
+ if (latest_acked_frame_id_ == cast_feedback.ack_frame_id) {
+ VLOG(1) << SENDER_SSRC << "Received duplicate ACK for frame "
+ << latest_acked_frame_id_;
+ TRACE_EVENT_INSTANT2(
+ "cast.stream", "Duplicate ACK", TRACE_EVENT_SCOPE_THREAD,
+ "ack_frame_id", cast_feedback.ack_frame_id.lower_32_bits(),
+ "last_sent_frame_id", last_sent_frame_id_.lower_32_bits());
+ }
+ // We only count duplicate ACKs when we have sent newer frames.
+ if (latest_acked_frame_id_ == cast_feedback.ack_frame_id &&
+ latest_acked_frame_id_ != last_sent_frame_id_) {
+ duplicate_ack_counter_++;
+ } else {
+ duplicate_ack_counter_ = 0;
+ }
+ if (duplicate_ack_counter_ >= 2 && duplicate_ack_counter_ % 3 == 2) {
+ ResendForKickstart();
+ }
+ } else {
+ // Only count duplicated ACKs if there is no NACK request in between.
+ // This is to avoid aggressive resend.
+ duplicate_ack_counter_ = 0;
+ }
+
+ base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ congestion_control_->AckFrame(cast_feedback.ack_frame_id, now);
+ if (!cast_feedback.received_later_frames.empty()) {
+ // Ack the received frames.
+ congestion_control_->AckLaterFrames(cast_feedback.received_later_frames,
+ now);
+ }
+
+ std::unique_ptr<FrameEvent> ack_event(new FrameEvent());
+ ack_event->timestamp = now;
+ ack_event->type = FRAME_ACK_RECEIVED;
+ ack_event->media_type = is_audio_ ? AUDIO_EVENT : VIDEO_EVENT;
+ ack_event->rtp_timestamp =
+ GetRecordedRtpTimestamp(cast_feedback.ack_frame_id);
+ ack_event->frame_id = cast_feedback.ack_frame_id;
+ cast_environment_->logger()->DispatchFrameEvent(std::move(ack_event));
+
+ const bool is_acked_out_of_order =
+ cast_feedback.ack_frame_id < latest_acked_frame_id_;
+ VLOG(2) << SENDER_SSRC << "Received ACK"
+ << (is_acked_out_of_order ? " out-of-order" : "") << " for frame "
+ << cast_feedback.ack_frame_id;
+ if (is_acked_out_of_order) {
+ TRACE_EVENT_INSTANT2(
+ "cast.stream", "ACK out of order", TRACE_EVENT_SCOPE_THREAD,
+ "ack_frame_id", cast_feedback.ack_frame_id.lower_32_bits(),
+ "latest_acked_frame_id", latest_acked_frame_id_.lower_32_bits());
+ } else if (latest_acked_frame_id_ < cast_feedback.ack_frame_id) {
+ // Cancel resends of acked frames.
+ std::vector<FrameId> frames_to_cancel;
+ frames_to_cancel.reserve(cast_feedback.ack_frame_id -
+ latest_acked_frame_id_);
+ do {
+ ++latest_acked_frame_id_;
+ frames_to_cancel.push_back(latest_acked_frame_id_);
+ client_->OnFrameCanceled(latest_acked_frame_id_);
+ // This is a good place to match the trace for frame ids
+ // since this ensures we not only track frame ids that are
+ // implicitly ACKed, but also handles duplicate ACKs
+ const char* name = is_audio_ ? "Audio Transport" : "Video Transport";
+ TRACE_EVENT_NESTABLE_ASYNC_END1(
+ "cast.stream", name,
+ TRACE_ID_WITH_SCOPE(name, latest_acked_frame_id_.lower_32_bits()),
+ "RTT_usecs", current_round_trip_time_.InMicroseconds());
+ } while (latest_acked_frame_id_ < cast_feedback.ack_frame_id);
+ transport_sender_->CancelSendingFrames(config_.sender_ssrc,
+ frames_to_cancel);
+ }
+}
+
+void FrameSenderImpl::OnReceivedPli() {
+ picture_lost_at_receiver_ = true;
+}
+
+bool FrameSenderImpl::ShouldDropNextFrame(
+ base::TimeDelta frame_duration) const {
+ // Check that accepting the next frame won't cause more frames to become
+ // in-flight than the system's design limit.
+ const int count_frames_in_flight =
+ GetUnacknowledgedFrameCount() + client_->GetNumberOfFramesInEncoder();
+ if (count_frames_in_flight >= kMaxUnackedFrames) {
+ VLOG(1) << SENDER_SSRC << "Dropping: Too many frames would be in-flight.";
+ return true;
+ }
+
+ // Check that accepting the next frame won't exceed the configured maximum
+ // frame rate, allowing for short-term bursts.
+ const base::TimeDelta duration_in_flight = GetInFlightMediaDuration();
+ const double max_frames_in_flight =
+ max_frame_rate_ * duration_in_flight.InSecondsF();
+ if (count_frames_in_flight >= max_frames_in_flight + kMaxFrameBurst) {
+ VLOG(1) << SENDER_SSRC << "Dropping: Burst threshold would be exceeded.";
+ return true;
+ }
+
+ // Check that accepting the next frame won't exceed the allowed in-flight
+ // media duration.
+ const base::TimeDelta duration_would_be_in_flight =
+ duration_in_flight + frame_duration;
+ const base::TimeDelta allowed_in_flight = GetAllowedInFlightMediaDuration();
+ if (VLOG_IS_ON(1)) {
+ const int64_t percent =
+ allowed_in_flight.is_positive()
+ ? base::ClampRound<int64_t>(duration_would_be_in_flight /
+ allowed_in_flight * 100)
+ : std::numeric_limits<int64_t>::max();
+ VLOG_IF(1, percent > 50)
+ << SENDER_SSRC << duration_in_flight.InMicroseconds()
+ << " usec in-flight + " << frame_duration.InMicroseconds()
+ << " usec for next frame --> " << percent << "% of allowed in-flight.";
+ }
+ if (duration_would_be_in_flight > allowed_in_flight) {
+ VLOG(1) << SENDER_SSRC << "Dropping: In-flight duration would be too high.";
+ return true;
+ }
+
+ // Next frame is accepted.
+ return false;
+}
+
+} // namespace media::cast
diff --git a/chromium/media/cast/sender/frame_sender_impl.h b/chromium/media/cast/sender/frame_sender_impl.h
new file mode 100644
index 00000000000..900083791f8
--- /dev/null
+++ b/chromium/media/cast/sender/frame_sender_impl.h
@@ -0,0 +1,182 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This is the base class for an object that send frames to a receiver.
+
+#ifndef MEDIA_CAST_SENDER_FRAME_SENDER_IMPL_H_
+#define MEDIA_CAST_SENDER_FRAME_SENDER_IMPL_H_
+
+#include <stdint.h>
+
+#include "base/memory/raw_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/cast_transport.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
+#include "media/cast/sender/congestion_control.h"
+#include "media/cast/sender/frame_sender.h"
+
+namespace media::cast {
+
+struct SenderEncodedFrame;
+
+class FrameSenderImpl : public FrameSender {
+ public:
+ FrameSenderImpl(scoped_refptr<CastEnvironment> cast_environment,
+ const FrameSenderConfig& config,
+ CastTransport* const transport_sender,
+ Client* client);
+ ~FrameSenderImpl() override;
+
+ // FrameSender overrides.
+ void SetTargetPlayoutDelay(base::TimeDelta new_target_playout_delay) override;
+ base::TimeDelta GetTargetPlayoutDelay() const override;
+ bool NeedsKeyFrame() const override;
+ void EnqueueFrame(std::unique_ptr<SenderEncodedFrame> encoded_frame) override;
+ bool ShouldDropNextFrame(base::TimeDelta frame_duration) const override;
+ RtpTimeTicks GetRecordedRtpTimestamp(FrameId frame_id) const override;
+ int GetUnacknowledgedFrameCount() const override;
+ int GetSuggestedBitrate(base::TimeTicks playout_time,
+ base::TimeDelta playout_delay) override;
+ double MaxFrameRate() const override;
+ void SetMaxFrameRate(double max_frame_rate) override;
+ base::TimeDelta TargetPlayoutDelay() const override;
+ base::TimeDelta CurrentRoundTripTime() const override;
+ base::TimeTicks LastSendTime() const override;
+ FrameId LatestAckedFrameId() const override;
+ void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) override;
+ void OnReceivedPli() override;
+ void OnMeasuredRoundTripTime(base::TimeDelta rtt) override;
+
+ private:
+ // Helper for getting the reference time recorded on the frame associated
+ // with |frame_id|.
+ base::TimeTicks GetRecordedReferenceTime(FrameId frame_id) const;
+
+ // Schedule and execute periodic checks for re-sending packets. If no
+ // acknowledgements have been received for "too long," FrameSenderImpl will
+ // speculatively re-send certain packets of an unacked frame to kick-start
+ // re-transmission. This is a last resort tactic to prevent the session from
+ // getting stuck after a long outage.
+ void ScheduleNextResendCheck();
+ void ResendCheck();
+ void ResendForKickstart();
+
+ // Schedule and execute periodic sending of RTCP report.
+ void ScheduleNextRtcpReport();
+ void SendRtcpReport(bool schedule_future_reports);
+
+ // Record or retrieve a recent history of each frame's timestamps.
+ // Warning: If a frame ID too far in the past is requested, the getters will
+ // silently succeed but return incorrect values. Be sure to respect
+ // media::cast::kMaxUnackedFrames.
+ void RecordLatestFrameTimestamps(FrameId frame_id,
+ base::TimeTicks reference_time,
+ RtpTimeTicks rtp_timestamp);
+
+ base::TimeDelta GetInFlightMediaDuration() const;
+
+ private:
+ class RtcpClient : public RtcpObserver {
+ public:
+ explicit RtcpClient(base::WeakPtr<FrameSenderImpl> frame_sender);
+ ~RtcpClient() override;
+
+ void OnReceivedCastMessage(const RtcpCastMessage& cast_message) override;
+ void OnReceivedRtt(base::TimeDelta round_trip_time) override;
+ void OnReceivedPli() override;
+
+ private:
+ const base::WeakPtr<FrameSenderImpl> frame_sender_;
+ };
+
+ // The cast environment.
+ const scoped_refptr<CastEnvironment> cast_environment_;
+
+ // The configuration provided upon initialization.
+ const FrameSenderConfig config_;
+
+ // The target playout delay, may fluctuate between the min and max delays
+ // stored in |config_|.
+ base::TimeDelta target_playout_delay_;
+
+ // Max encoded frames generated per second.
+ double max_frame_rate_;
+
+ // Sends encoded frames over the configured transport (e.g., UDP). In
+ // Chromium, this could be a proxy that first sends the frames from a renderer
+ // process to the browser process over IPC, with the browser process being
+ // responsible for "packetizing" the frames and pushing packets into the
+ // network layer.
+ const raw_ptr<CastTransport> transport_sender_;
+
+ // The frame sender client.
+ raw_ptr<Client> client_ = nullptr;
+
+ // Whether this is an audio or video frame sender.
+ const bool is_audio_;
+
+ // The congestion control manages frame statistics and helps make decisions
+ // about what bitrate we encode the next frame at.
+ std::unique_ptr<CongestionControl> congestion_control_;
+
+ // This is the maximum delay that the sender should get ack from receiver.
+ // Otherwise, sender will call ResendForKickstart().
+ base::TimeDelta max_ack_delay_;
+
+ // This is "null" until the first frame is sent. Thereafter, this tracks the
+ // last time any frame was sent or re-sent.
+ base::TimeTicks last_send_time_;
+
+ // The ID of the last frame sent. This member is invalid until
+ // |!last_send_time_.is_null()|.
+ FrameId last_sent_frame_id_;
+
+ // The ID of the latest (not necessarily the last) frame that has been
+ // acknowledged. This member is invalid until |!last_send_time_.is_null()|.
+ FrameId latest_acked_frame_id_;
+
+ // The most recently measured round trip time.
+ base::TimeDelta current_round_trip_time_;
+
+ // This is the maximum delay that the sender should get ack from receiver.
+ // Counts how many RTCP reports are being "aggressively" sent (i.e., one per
+ // frame) at the start of the session. Once a threshold is reached, RTCP
+ // reports are instead sent at the configured interval + random drift.
+ int num_aggressive_rtcp_reports_sent_ = 0;
+
+ // Counts the number of duplicate ACK that are being received. When this
+ // number reaches a threshold, the sender will take this as a sign that the
+ // receiver hasn't yet received the first packet of the next frame. In this
+ // case, FrameSenderImpl will trigger a re-send of the next frame.
+ int duplicate_ack_counter_ = 0;
+
+ // This flag is set true when a Pli message is received. It is cleared once
+ // the FrameSenderImpl scheduled an encoded key frame to be sent.
+ bool picture_lost_at_receiver_ = false;
+
+ // Should send the target playout delay with the next frame.
+ bool send_target_playout_delay_ = false;
+
+ // Returns the maximum media duration currently allowed in-flight. This
+ // fluctuates in response to the currently-measured network latency.
+ base::TimeDelta GetAllowedInFlightMediaDuration() const;
+
+ // Ring buffers to keep track of recent frame timestamps (both in terms of
+ // local reference time and RTP media time). These should only be accessed
+ // through the Record/GetXXX() methods. The index into this ring
+ // buffer is the lower 8 bits of the FrameId.
+ base::TimeTicks frame_reference_times_[256];
+ RtpTimeTicks frame_rtp_timestamps_[256];
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<FrameSenderImpl> weak_factory_{this};
+};
+
+} // namespace media::cast
+
+#endif // MEDIA_CAST_SENDER_FRAME_SENDER_IMPL_H_
diff --git a/chromium/media/cast/sender/performance_metrics_overlay.cc b/chromium/media/cast/sender/performance_metrics_overlay.cc
index 04619ac6540..b242cb12d78 100644
--- a/chromium/media/cast/sender/performance_metrics_overlay.cc
+++ b/chromium/media/cast/sender/performance_metrics_overlay.cc
@@ -221,7 +221,7 @@ scoped_refptr<VideoFrame> MaybeRenderPerformanceMetricsOverlay(
int target_bitrate,
int frames_ago,
double encoder_utilization,
- double lossy_utilization,
+ double lossiness,
scoped_refptr<VideoFrame> source) {
if (!VLOG_IS_ON(1))
return source;
@@ -334,8 +334,7 @@ scoped_refptr<VideoFrame> MaybeRenderPerformanceMetricsOverlay(
// Line 1: Recent utilization metrics.
const int encoder_pct =
base::saturated_cast<int>(encoder_utilization * 100.0 + 0.5);
- const int lossy_pct =
- base::saturated_cast<int>(lossy_utilization * 100.0 + 0.5);
+ const int lossy_pct = base::saturated_cast<int>(lossiness * 100.0 + 0.5);
RenderLineOfText(base::StringPrintf("%d %3.1d%% %3.1d%%", frames_ago,
encoder_pct, lossy_pct),
top, frame.get());
diff --git a/chromium/media/cast/sender/performance_metrics_overlay.h b/chromium/media/cast/sender/performance_metrics_overlay.h
index 774effe19da..f770a97d72c 100644
--- a/chromium/media/cast/sender/performance_metrics_overlay.h
+++ b/chromium/media/cast/sender/performance_metrics_overlay.h
@@ -71,7 +71,7 @@ scoped_refptr<VideoFrame> MaybeRenderPerformanceMetricsOverlay(
int target_bitrate,
int frames_ago,
double encoder_utilization,
- double lossy_utilization,
+ double lossiness,
scoped_refptr<VideoFrame> source);
} // namespace cast
diff --git a/chromium/media/cast/sender/video_sender.cc b/chromium/media/cast/sender/video_sender.cc
index f1976eb06f1..187051d19f2 100644
--- a/chromium/media/cast/sender/video_sender.cc
+++ b/chromium/media/cast/sender/video_sender.cc
@@ -13,12 +13,12 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/trace_event/trace_event.h"
+#include "media/cast/common/sender_encoded_frame.h"
+#include "media/cast/encoding/video_encoder.h"
#include "media/cast/net/cast_transport_config.h"
#include "media/cast/sender/performance_metrics_overlay.h"
-#include "media/cast/sender/video_encoder.h"
-namespace media {
-namespace cast {
+namespace media::cast {
namespace {
@@ -27,24 +27,27 @@ namespace {
// a combination of cast_benchmark runs and manual testing.
//
// This is how many round trips we think we need on the network.
-const int kRoundTripsNeeded = 4;
+constexpr int kRoundTripsNeeded = 4;
// This is an estimate of all the the constant time needed independent of
// network quality (e.g., additional time that accounts for encode and decode
// time).
-const int kConstantTimeMs = 75;
+constexpr int kConstantTimeMs = 75;
// The target maximum utilization of the encoder and network resources. This is
// used to attenuate the actual measured utilization values in order to provide
// "breathing room" (i.e., to ensure there will be sufficient CPU and bandwidth
// available to handle the occasional more-complex frames).
-const int kTargetUtilizationPercentage = 75;
+constexpr int kTargetUtilizationPercentage = 75;
-// This is the minimum duration in milliseconds that the sender sends key frame
-// request to the encoder on receiving Pli messages. This is used to prevent
-// sending multiple requests while the sender is waiting for an encoded key
-// frame or receiving multiple Pli messages in a short period.
-const int64_t kMinKeyFrameRequestOnPliIntervalMs = 500;
+// This is the minimum duration that the sender sends key frame to the encoder
+// on receiving Pli messages. This is used to prevent sending multiple requests
+// while the sender is waiting for an encoded key frame or receiving multiple
+// Pli messages in a short period.
+constexpr base::TimeDelta kMinKeyFrameRequestInterval = base::Milliseconds(500);
+
+// This is the minimum amount of frames between issuing key frame requests.
+constexpr int kMinKeyFrameRequestFrameInterval = 6;
// Extract capture begin/end timestamps from |video_frame|'s metadata and log
// it.
@@ -93,24 +96,16 @@ VideoSender::VideoSender(
CastTransport* const transport_sender,
PlayoutDelayChangeCB playout_delay_change_cb,
media::VideoCaptureFeedbackCB feedback_callback)
- : FrameSender(
- cast_environment,
- transport_sender,
- video_config,
- video_config.use_external_encoder
- ? NewFixedCongestionControl(
- (video_config.min_bitrate + video_config.max_bitrate) / 2)
- : NewAdaptiveCongestionControl(cast_environment->Clock(),
- video_config.max_bitrate,
- video_config.min_bitrate,
- video_config.max_frame_rate)),
- frames_in_encoder_(0),
- last_bitrate_(0),
+ : frame_sender_(FrameSender::Create(cast_environment,
+ video_config,
+ transport_sender,
+ this)),
+ cast_environment_(cast_environment),
+ min_playout_delay_(video_config.min_playout_delay),
+ max_playout_delay_(video_config.max_playout_delay),
+ animated_playout_delay_(video_config.animated_playout_delay),
playout_delay_change_cb_(std::move(playout_delay_change_cb)),
- feedback_cb_(feedback_callback),
- low_latency_mode_(false),
- last_reported_encoder_utilization_(-1.0),
- last_reported_lossy_utilization_(-1.0) {
+ feedback_cb_(feedback_callback) {
video_encoder_ = VideoEncoder::Create(cast_environment_, video_config,
status_change_cb, create_vea_cb);
if (!video_encoder_) {
@@ -171,13 +166,14 @@ void VideoSender::InsertRawVideoFrame(
// Request a key frame when a Pli message was received, and it has been passed
// long enough from the last time sending key frame request on receiving a Pli
// message.
- if (picture_lost_at_receiver_) {
- const int64_t min_attemp_interval_ms =
- std::max(kMinKeyFrameRequestOnPliIntervalMs,
- 6 * target_playout_delay_.InMilliseconds());
+ if (frame_sender_->NeedsKeyFrame()) {
+ const base::TimeDelta min_attempt_interval = std::max(
+ kMinKeyFrameRequestInterval,
+ kMinKeyFrameRequestFrameInterval * frame_sender_->TargetPlayoutDelay());
+
if (last_time_attempted_to_resolve_pli_.is_null() ||
- ((reference_time - last_time_attempted_to_resolve_pli_)
- .InMilliseconds() > min_attemp_interval_ms)) {
+ ((reference_time - last_time_attempted_to_resolve_pli_) >
+ min_attempt_interval)) {
video_encoder_->GenerateKeyFrame();
last_time_attempted_to_resolve_pli_ = reference_time;
}
@@ -185,22 +181,23 @@ void VideoSender::InsertRawVideoFrame(
// Two video frames are needed to compute the exact media duration added by
// the next frame. If there are no frames in the encoder, compute a guess
- // based on the configured |max_frame_rate_|. Any error introduced by this
+ // based on the configured max frame rate. Any error introduced by this
// guess will be eliminated when |duration_in_encoder_| is updated in
// OnEncodedVideoFrame().
const base::TimeDelta duration_added_by_next_frame =
frames_in_encoder_ > 0
? reference_time - last_enqueued_frame_reference_time_
- : base::Seconds(1.0 / max_frame_rate_);
+ : base::Seconds(1.0 / frame_sender_->MaxFrameRate());
- if (ShouldDropNextFrame(duration_added_by_next_frame)) {
+ if (frame_sender_->ShouldDropNextFrame(duration_added_by_next_frame)) {
base::TimeDelta new_target_delay =
- std::min(current_round_trip_time_ * kRoundTripsNeeded +
+ std::min(frame_sender_->CurrentRoundTripTime() * kRoundTripsNeeded +
base::Milliseconds(kConstantTimeMs),
max_playout_delay_);
// In case of low latency mode, we prefer frame drops over increasing
// playout time.
- if (!low_latency_mode_ && new_target_delay > target_playout_delay_) {
+ if (!low_latency_mode_ &&
+ new_target_delay > frame_sender_->TargetPlayoutDelay()) {
// In case we detect user is no longer in a low latency mode and there is
// a need to drop a frame, we ensure the playout delay is at-least the
// the starting value for playing animated content.
@@ -233,8 +230,9 @@ void VideoSender::InsertRawVideoFrame(
return;
}
- const int bitrate = congestion_control_->GetBitrate(
- reference_time + target_playout_delay_, target_playout_delay_);
+ const int bitrate = frame_sender_->GetSuggestedBitrate(
+ reference_time + frame_sender_->TargetPlayoutDelay(),
+ frame_sender_->TargetPlayoutDelay());
if (bitrate != last_bitrate_) {
video_encoder_->SetBitRate(bitrate);
last_bitrate_ = bitrate;
@@ -244,13 +242,13 @@ void VideoSender::InsertRawVideoFrame(
const scoped_refptr<VideoFrame> frame_to_encode =
MaybeRenderPerformanceMetricsOverlay(
- GetTargetPlayoutDelay(), low_latency_mode_, bitrate,
+ frame_sender_->GetTargetPlayoutDelay(), low_latency_mode_, bitrate,
frames_in_encoder_ + 1, last_reported_encoder_utilization_,
- last_reported_lossy_utilization_, std::move(video_frame));
+ last_reported_lossiness_, std::move(video_frame));
if (video_encoder_->EncodeVideoFrame(
frame_to_encode, reference_time,
base::BindOnce(&VideoSender::OnEncodedVideoFrame, AsWeakPtr(),
- frame_to_encode, bitrate))) {
+ frame_to_encode))) {
TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(
"cast.stream", "Video Encode", TRACE_ID_LOCAL(frame_to_encode.get()),
"rtp_timestamp", rtp_timestamp.lower_32_bits());
@@ -270,6 +268,15 @@ std::unique_ptr<VideoFrameFactory> VideoSender::CreateVideoFrameFactory() {
return video_encoder_ ? video_encoder_->CreateVideoFrameFactory() : nullptr;
}
+void VideoSender::SetTargetPlayoutDelay(
+ base::TimeDelta new_target_playout_delay) {
+ frame_sender_->SetTargetPlayoutDelay(new_target_playout_delay);
+}
+
+base::TimeDelta VideoSender::GetTargetPlayoutDelay() const {
+ return frame_sender_->GetTargetPlayoutDelay();
+}
+
base::WeakPtr<VideoSender> VideoSender::AsWeakPtr() {
return weak_factory_.GetWeakPtr();
}
@@ -278,19 +285,12 @@ int VideoSender::GetNumberOfFramesInEncoder() const {
return frames_in_encoder_;
}
-base::TimeDelta VideoSender::GetInFlightMediaDuration() const {
- if (GetUnacknowledgedFrameCount() > 0) {
- const FrameId oldest_unacked_frame_id = latest_acked_frame_id_ + 1;
- return last_enqueued_frame_reference_time_ -
- GetRecordedReferenceTime(oldest_unacked_frame_id);
- } else {
- return duration_in_encoder_;
- }
+base::TimeDelta VideoSender::GetEncoderBacklogDuration() const {
+ return duration_in_encoder_;
}
void VideoSender::OnEncodedVideoFrame(
scoped_refptr<media::VideoFrame> video_frame,
- int encoder_bitrate,
std::unique_ptr<SenderEncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
@@ -305,19 +305,18 @@ void VideoSender::OnEncodedVideoFrame(
last_enqueued_frame_reference_time_ - encoded_frame->reference_time;
last_reported_encoder_utilization_ = encoded_frame->encoder_utilization;
- last_reported_lossy_utilization_ = encoded_frame->lossy_utilization;
+ last_reported_lossiness_ = encoded_frame->lossiness;
TRACE_EVENT_NESTABLE_ASYNC_END2(
"cast.stream", "Video Encode", TRACE_ID_LOCAL(video_frame.get()),
- "encoder_utilization", last_reported_encoder_utilization_,
- "lossy_utilization", last_reported_lossy_utilization_);
+ "encoder_utilization", last_reported_encoder_utilization_, "lossiness",
+ last_reported_lossiness_);
// Report the resource utilization for processing this frame. Take the
// greater of the two utilization values and attenuate them such that the
// target utilization is reported as the maximum sustainable amount.
const double attenuated_utilization =
- std::max(last_reported_encoder_utilization_,
- last_reported_lossy_utilization_) /
+ std::max(last_reported_encoder_utilization_, last_reported_lossiness_) /
(kTargetUtilizationPercentage / 100.0);
if (attenuated_utilization >= 0.0) {
// Key frames are artificially capped to 1.0 because their actual
@@ -332,8 +331,7 @@ void VideoSender::OnEncodedVideoFrame(
feedback_cb_.Run(feedback);
}
- SendEncodedFrame(encoder_bitrate, std::move(encoded_frame));
+ frame_sender_->EnqueueFrame(std::move(encoded_frame));
}
-} // namespace cast
-} // namespace media
+} // namespace media::cast
diff --git a/chromium/media/cast/sender/video_sender.h b/chromium/media/cast/sender/video_sender.h
index ff9db1fcfa7..c36962012f8 100644
--- a/chromium/media/cast/sender/video_sender.h
+++ b/chromium/media/cast/sender/video_sender.h
@@ -16,14 +16,13 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_sender.h"
#include "media/cast/common/rtp_time.h"
-#include "media/cast/sender/congestion_control.h"
#include "media/cast/sender/frame_sender.h"
namespace media {
-
class VideoFrame;
+}
-namespace cast {
+namespace media::cast {
class CastTransport;
class VideoEncoder;
@@ -37,7 +36,7 @@ using PlayoutDelayChangeCB = base::RepeatingCallback<void(base::TimeDelta)>;
// RTCP packets.
// Additionally it posts a bunch of delayed tasks to the main thread for various
// timeouts.
-class VideoSender : public FrameSender {
+class VideoSender : public FrameSender::Client {
public:
VideoSender(scoped_refptr<CastEnvironment> cast_environment,
const FrameSenderConfig& video_config,
@@ -63,25 +62,36 @@ class VideoSender : public FrameSender {
// the encoder does not have any such capability, returns null.
std::unique_ptr<VideoFrameFactory> CreateVideoFrameFactory();
+ void SetTargetPlayoutDelay(base::TimeDelta new_target_playout_delay);
+ base::TimeDelta GetTargetPlayoutDelay() const;
+
base::WeakPtr<VideoSender> AsWeakPtr();
protected:
+ // FrameSender::Client overrides.
int GetNumberOfFramesInEncoder() const final;
- base::TimeDelta GetInFlightMediaDuration() const final;
+ base::TimeDelta GetEncoderBacklogDuration() const final;
+
+ // Exposed as protected for testing.
+ FrameSender* frame_sender_for_testing() { return frame_sender_.get(); }
private:
// Called by the |video_encoder_| with the next EncodedFrame to send.
void OnEncodedVideoFrame(scoped_refptr<media::VideoFrame> video_frame,
- int encoder_bitrate,
std::unique_ptr<SenderEncodedFrame> encoded_frame);
+ // The backing frame sender implementation.
+ std::unique_ptr<FrameSender> frame_sender_;
+
// Encodes media::VideoFrame images into EncodedFrames. Per configuration,
// this will point to either the internal software-based encoder or a proxy to
// a hardware-based encoder.
std::unique_ptr<VideoEncoder> video_encoder_;
+ scoped_refptr<CastEnvironment> cast_environment_;
+
// The number of frames queued for encoding, but not yet sent.
- int frames_in_encoder_;
+ int frames_in_encoder_ = 0;
// The duration of video queued for encoding, but not yet sent.
base::TimeDelta duration_in_encoder_;
@@ -92,7 +102,15 @@ class VideoSender : public FrameSender {
// Remember what we set the bitrate to before, no need to set it again if
// we get the same value.
- int last_bitrate_;
+ int last_bitrate_ = 0;
+
+ // The total amount of time between a frame's capture/recording on the sender
+ // and its playback on the receiver (i.e., shown to a user).
+ base::TimeDelta min_playout_delay_;
+ base::TimeDelta max_playout_delay_;
+
+ // Starting playout delay when streaming animated content.
+ base::TimeDelta animated_playout_delay_;
PlayoutDelayChangeCB playout_delay_change_cb_;
@@ -101,13 +119,13 @@ class VideoSender : public FrameSender {
// Indicates we are operating in a mode where the target playout latency is
// low for best user experience. When operating in low latency mode, we
// prefer dropping frames over increasing target playout time.
- bool low_latency_mode_;
+ bool low_latency_mode_ = false;
// The video encoder's performance metrics as of the last call to
// OnEncodedVideoFrame(). See header file comments for SenderEncodedFrame for
// an explanation of these values.
- double last_reported_encoder_utilization_;
- double last_reported_lossy_utilization_;
+ double last_reported_encoder_utilization_ = -1.0;
+ double last_reported_lossiness_ = -1.0;
// This tracks the time when the request was sent to encoder to encode a key
// frame on receiving a Pli message. It is used to limit the sender not
@@ -118,7 +136,6 @@ class VideoSender : public FrameSender {
base::WeakPtrFactory<VideoSender> weak_factory_{this};
};
-} // namespace cast
-} // namespace media
+} // namespace media::cast
#endif // MEDIA_CAST_SENDER_VIDEO_SENDER_H_
diff --git a/chromium/media/cast/sender/video_sender_unittest.cc b/chromium/media/cast/sender/video_sender_unittest.cc
index 0aa95a9de1f..d515925c669 100644
--- a/chromium/media/cast/sender/video_sender_unittest.cc
+++ b/chromium/media/cast/sender/video_sender_unittest.cc
@@ -21,12 +21,12 @@
#include "media/base/fake_single_thread_task_runner.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_environment.h"
+#include "media/cast/common/video_frame_factory.h"
#include "media/cast/constants.h"
#include "media/cast/logging/simple_event_subscriber.h"
#include "media/cast/net/cast_transport_config.h"
#include "media/cast/net/cast_transport_impl.h"
#include "media/cast/net/pacing/paced_sender.h"
-#include "media/cast/sender/video_frame_factory.h"
#include "media/cast/test/fake_video_encode_accelerator_factory.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/video_utility.h"
@@ -34,8 +34,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-namespace media {
-namespace cast {
+namespace media::cast {
namespace {
static const uint8_t kPixelValue = 123;
@@ -126,8 +125,11 @@ class PeerVideoSender : public VideoSender {
base::BindRepeating(&PeerVideoSender::ProcessFeedback,
base::Unretained(this))) {}
- using VideoSender::OnReceivedCastFeedback;
- using VideoSender::OnReceivedPli;
+ void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
+ frame_sender_for_testing()->OnReceivedCastFeedback(cast_feedback);
+ }
+
+ void OnReceivedPli() { frame_sender_for_testing()->OnReceivedPli(); }
void ProcessFeedback(const media::VideoCaptureFeedback& feedback) {
feedback_ = feedback;
@@ -640,5 +642,4 @@ TEST_F(VideoSenderTest, CancelSendingOnReceivingPli) {
EXPECT_EQ(2, transport_->number_of_rtp_packets());
}
-} // namespace cast
-} // namespace media
+} // namespace media::cast
diff --git a/chromium/media/cdm/BUILD.gn b/chromium/media/cdm/BUILD.gn
index 0fa04f87006..4a6a61a8aa5 100644
--- a/chromium/media/cdm/BUILD.gn
+++ b/chromium/media/cdm/BUILD.gn
@@ -137,6 +137,8 @@ source_set("cdm") {
"win/media_foundation_cdm_module.h",
"win/media_foundation_cdm_session.cc",
"win/media_foundation_cdm_session.h",
+ "win/media_foundation_cdm_util.cc",
+ "win/media_foundation_cdm_util.h",
]
libs = [ "Propsys.lib" ]
diff --git a/chromium/media/cdm/aes_decryptor.cc b/chromium/media/cdm/aes_decryptor.cc
index 665d1aeb6d4..428875fdfa8 100644
--- a/chromium/media/cdm/aes_decryptor.cc
+++ b/chromium/media/cdm/aes_decryptor.cc
@@ -545,11 +545,10 @@ std::string AesDecryptor::GetSessionStateAsJWK(const std::string& session_id) {
KeyIdAndKeyPairs keys;
{
base::AutoLock auto_lock(key_map_lock_);
- for (const auto& item : key_map_) {
- if (item.second->Contains(session_id)) {
- std::string key_id = item.first;
+ for (const auto& [key_id, session_id_map] : key_map_) {
+ if (session_id_map->Contains(session_id)) {
// |key| is the value used to create the decryption key.
- std::string key = item.second->LatestDecryptionKey()->secret();
+ std::string key = session_id_map->LatestDecryptionKey()->secret();
keys.push_back(std::make_pair(key_id, key));
}
}
diff --git a/chromium/media/cdm/api/content_decryption_module.h b/chromium/media/cdm/api/content_decryption_module.h
index 0e7301230b9..77dc7f0e1f3 100644
--- a/chromium/media/cdm/api/content_decryption_module.h
+++ b/chromium/media/cdm/api/content_decryption_module.h
@@ -479,12 +479,13 @@ class CDM_CLASS_API DecryptedBlock {
virtual ~DecryptedBlock() {}
};
-enum VideoPlane : uint32_t {
- kYPlane = 0,
- kUPlane = 1,
- kVPlane = 2,
- kMaxPlanes = 3,
-};
+// This intentionally avoids using an enum, since it will be used to do math
+// with other enums, which is deprecated in C++20.
+using VideoPlane = uint32_t;
+constexpr VideoPlane kYPlane = 0;
+constexpr VideoPlane kUPlane = 1;
+constexpr VideoPlane kVPlane = 2;
+constexpr VideoPlane kMaxPlanes = 3;
CHECK_TYPE(VideoPlane, 4, 4);
class CDM_CLASS_API VideoFrame {
diff --git a/chromium/media/cdm/external_clear_key_test_helper.h b/chromium/media/cdm/external_clear_key_test_helper.h
index 87feb5229ed..cc5c7a218db 100644
--- a/chromium/media/cdm/external_clear_key_test_helper.h
+++ b/chromium/media/cdm/external_clear_key_test_helper.h
@@ -25,7 +25,7 @@ class ExternalClearKeyTestHelper {
~ExternalClearKeyTestHelper();
- CdmConfig CdmConfig() {
+ media::CdmConfig CdmConfig() {
return {"org.chromium.externalclearkey", false, false, false};
}
diff --git a/chromium/media/cdm/win/media_foundation_cdm_factory.cc b/chromium/media/cdm/win/media_foundation_cdm_factory.cc
index 4829ef079bb..a9a53d749e4 100644
--- a/chromium/media/cdm/win/media_foundation_cdm_factory.cc
+++ b/chromium/media/cdm/win/media_foundation_cdm_factory.cc
@@ -5,26 +5,21 @@
#include "media/cdm/win/media_foundation_cdm_factory.h"
#include <combaseapi.h>
-#include <initguid.h> // Needed for DEFINE_PROPERTYKEY to work properly.
#include <mferror.h>
#include <mfmediaengine.h>
-#include <propkeydef.h> // Needed for DEFINE_PROPERTYKEY.
-#include <propvarutil.h>
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/files/file_util.h"
#include "base/metrics/histogram_functions.h"
#include "base/strings/utf_string_conversions.h"
#include "base/task/thread_pool.h"
-#include "base/win/scoped_propvariant.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/cdm_config.h"
#include "media/base/key_systems.h"
#include "media/base/win/mf_helpers.h"
-#include "media/cdm/cdm_paths.h"
#include "media/cdm/win/media_foundation_cdm.h"
#include "media/cdm/win/media_foundation_cdm_module.h"
+#include "media/cdm/win/media_foundation_cdm_util.h"
namespace media {
@@ -34,142 +29,6 @@ using Microsoft::WRL::ComPtr;
const char kMediaFoundationCdmUmaPrefix[] = "Media.EME.MediaFoundationCdm.";
-// Key to the CDM Origin ID to be passed to the CDM for privacy purposes. The
-// same value is also used in MediaFoundation CDMs. Do NOT change this value!
-DEFINE_PROPERTYKEY(EME_CONTENTDECRYPTIONMODULE_ORIGIN_ID,
- 0x1218a3e2,
- 0xcfb0,
- 0x4c98,
- 0x90,
- 0xe5,
- 0x5f,
- 0x58,
- 0x18,
- 0xd4,
- 0xb6,
- 0x7e,
- PID_FIRST_USABLE);
-
-void SetBSTR(const wchar_t* str, PROPVARIANT* propvariant) {
- propvariant->vt = VT_BSTR;
- propvariant->bstrVal = SysAllocString(str);
-}
-
-// Returns a property store similar to EME MediaKeySystemMediaCapability.
-HRESULT CreateVideoCapability(const CdmConfig& cdm_config,
- ComPtr<IPropertyStore>& video_capability) {
- ComPtr<IPropertyStore> temp_video_capability;
- RETURN_IF_FAILED(
- PSCreateMemoryPropertyStore(IID_PPV_ARGS(&temp_video_capability)));
-
- base::win::ScopedPropVariant robustness;
- if (cdm_config.use_hw_secure_codecs) {
- // TODO(xhwang): Provide a way to support other robustness strings.
- SetBSTR(L"HW_SECURE_ALL", robustness.Receive());
- RETURN_IF_FAILED(
- temp_video_capability->SetValue(MF_EME_ROBUSTNESS, robustness.get()));
- }
- video_capability = temp_video_capability;
- return S_OK;
-}
-
-// Returns a property store similar to EME MediaKeySystemConfigurations.
-// What really matters here are video robustness, persistent state and
-// distinctive identifier.
-HRESULT BuildCdmAccessConfigurations(const CdmConfig& cdm_config,
- ComPtr<IPropertyStore>& configurations) {
- ComPtr<IPropertyStore> temp_configurations;
-
- RETURN_IF_FAILED(
- PSCreateMemoryPropertyStore(IID_PPV_ARGS(&temp_configurations)));
-
- // Add an empty audio capability.
- base::win::ScopedPropVariant audio_capabilities;
- PROPVARIANT* var_to_set = audio_capabilities.Receive();
- var_to_set->vt = VT_VARIANT | VT_VECTOR;
- var_to_set->capropvar.cElems = 0;
- RETURN_IF_FAILED(temp_configurations->SetValue(MF_EME_AUDIOCAPABILITIES,
- audio_capabilities.get()));
-
- // Add a video capability so we can pass the correct robustness level.
- ComPtr<IPropertyStore> video_capability;
- RETURN_IF_FAILED(CreateVideoCapability(cdm_config, video_capability));
-
- base::win::ScopedPropVariant video_config;
- auto* video_config_ptr = video_config.Receive();
- video_config_ptr->vt = VT_UNKNOWN;
- video_config_ptr->punkVal = video_capability.Detach();
-
- base::win::ScopedPropVariant video_capabilities;
- var_to_set = video_capabilities.Receive();
- var_to_set->vt = VT_VARIANT | VT_VECTOR;
- var_to_set->capropvar.cElems = 1;
- var_to_set->capropvar.pElems =
- reinterpret_cast<PROPVARIANT*>(CoTaskMemAlloc(sizeof(PROPVARIANT)));
- PropVariantCopy(var_to_set->capropvar.pElems, video_config.ptr());
- RETURN_IF_FAILED(temp_configurations->SetValue(MF_EME_VIDEOCAPABILITIES,
- video_capabilities.get()));
-
- // Add persistent state.
- DCHECK(cdm_config.allow_persistent_state);
- base::win::ScopedPropVariant persisted_state;
- RETURN_IF_FAILED(InitPropVariantFromUInt32(MF_MEDIAKEYS_REQUIREMENT_REQUIRED,
- persisted_state.Receive()));
- RETURN_IF_FAILED(temp_configurations->SetValue(MF_EME_PERSISTEDSTATE,
- persisted_state.get()));
-
- // Add distinctive identifier.
- DCHECK(cdm_config.allow_distinctive_identifier);
- base::win::ScopedPropVariant distinctive_identifier;
- RETURN_IF_FAILED(InitPropVariantFromUInt32(MF_MEDIAKEYS_REQUIREMENT_REQUIRED,
- distinctive_identifier.Receive()));
- RETURN_IF_FAILED(temp_configurations->SetValue(MF_EME_DISTINCTIVEID,
- distinctive_identifier.get()));
-
- configurations = temp_configurations;
- return S_OK;
-}
-
-HRESULT BuildCdmProperties(
- const base::UnguessableToken& origin_id,
- const absl::optional<std::vector<uint8_t>>& client_token,
- const base::FilePath& store_path,
- ComPtr<IPropertyStore>& properties) {
- DCHECK(!origin_id.is_empty());
-
- ComPtr<IPropertyStore> temp_properties;
- RETURN_IF_FAILED(PSCreateMemoryPropertyStore(IID_PPV_ARGS(&temp_properties)));
-
- base::win::ScopedPropVariant origin_id_var;
- RETURN_IF_FAILED(InitPropVariantFromString(
- base::UTF8ToWide(origin_id.ToString()).c_str(), origin_id_var.Receive()));
- RETURN_IF_FAILED(temp_properties->SetValue(
- EME_CONTENTDECRYPTIONMODULE_ORIGIN_ID, origin_id_var.get()));
-
- if (client_token) {
- base::win::ScopedPropVariant client_token_var;
- PROPVARIANT* client_token_propvar = client_token_var.Receive();
- client_token_propvar->vt = VT_VECTOR | VT_UI1;
- client_token_propvar->caub.cElems = client_token->size();
- client_token_propvar->caub.pElems = reinterpret_cast<unsigned char*>(
- CoTaskMemAlloc(client_token->size() * sizeof(char)));
- memcpy(client_token_propvar->caub.pElems, client_token->data(),
- client_token->size());
-
- RETURN_IF_FAILED(temp_properties->SetValue(
- EME_CONTENTDECRYPTIONMODULE_CLIENT_TOKEN, client_token_var.get()));
- }
-
- base::win::ScopedPropVariant store_path_var;
- RETURN_IF_FAILED(InitPropVariantFromString(store_path.value().c_str(),
- store_path_var.Receive()));
- RETURN_IF_FAILED(temp_properties->SetValue(
- MF_CONTENTDECRYPTIONMODULE_STOREPATH, store_path_var.get()));
-
- properties = temp_properties;
- return S_OK;
-}
-
bool IsTypeSupportedInternal(
ComPtr<IMFContentDecryptionModuleFactory> cdm_factory,
const std::string& key_system,
@@ -334,56 +193,6 @@ void MediaFoundationCdmFactory::OnCdmEvent(CdmEvent event) {
helper_->OnCdmEvent(event);
}
-HRESULT MediaFoundationCdmFactory::CreateMfCdmInternal(
- const CdmConfig& cdm_config,
- const base::UnguessableToken& cdm_origin_id,
- const absl::optional<std::vector<uint8_t>>& cdm_client_token,
- const base::FilePath& cdm_store_path_root,
- ComPtr<IMFContentDecryptionModule>& mf_cdm) {
- const auto key_system = cdm_config.key_system;
- ComPtr<IMFContentDecryptionModuleFactory> cdm_factory;
- RETURN_IF_FAILED(GetCdmFactory(key_system, cdm_factory));
-
- DCHECK(!cdm_origin_id.is_empty());
-
- auto key_system_str = base::UTF8ToWide(key_system);
- if (!cdm_factory->IsTypeSupported(key_system_str.c_str(), nullptr)) {
- DLOG(ERROR) << key_system << " not supported by MF CdmFactory";
- return MF_NOT_SUPPORTED_ERR;
- }
-
- ComPtr<IPropertyStore> property_store;
- RETURN_IF_FAILED(BuildCdmAccessConfigurations(cdm_config, property_store));
-
- IPropertyStore* configurations[] = {property_store.Get()};
- ComPtr<IMFContentDecryptionModuleAccess> cdm_access;
- RETURN_IF_FAILED(cdm_factory->CreateContentDecryptionModuleAccess(
- key_system_str.c_str(), configurations, ARRAYSIZE(configurations),
- &cdm_access));
-
- // Provide a per-user, per-arch, per-origin and per-key-system path.
- auto store_path =
- GetCdmStorePath(cdm_store_path_root, cdm_origin_id, key_system);
- DVLOG(1) << "store_path=" << store_path;
-
- // Ensure the path exists. If it already exists, this call will do nothing.
- base::File::Error file_error;
- if (!base::CreateDirectoryAndGetError(store_path, &file_error)) {
- DLOG(ERROR) << "Create CDM store path failed with " << file_error;
- return MF_INVALID_ACCESS_ERR;
- }
-
- ComPtr<IPropertyStore> cdm_properties;
- ComPtr<IMFContentDecryptionModule> cdm;
- RETURN_IF_FAILED(BuildCdmProperties(cdm_origin_id, cdm_client_token,
- store_path, cdm_properties));
- RETURN_IF_FAILED(
- cdm_access->CreateContentDecryptionModule(cdm_properties.Get(), &cdm));
-
- mf_cdm.Swap(cdm);
- return S_OK;
-}
-
void MediaFoundationCdmFactory::CreateMfCdm(
const CdmConfig& cdm_config,
const base::UnguessableToken& cdm_origin_id,
@@ -391,8 +200,16 @@ void MediaFoundationCdmFactory::CreateMfCdm(
const base::FilePath& cdm_store_path_root,
HRESULT& hresult,
Microsoft::WRL::ComPtr<IMFContentDecryptionModule>& mf_cdm) {
- hresult = CreateMfCdmInternal(cdm_config, cdm_origin_id, cdm_client_token,
- cdm_store_path_root, mf_cdm);
+ ComPtr<IMFContentDecryptionModuleFactory> cdm_factory;
+ hresult = GetCdmFactory(cdm_config.key_system, cdm_factory);
+ if (FAILED(hresult)) {
+ DLOG(ERROR) << "Failed to GetCdmFactory. hr=" << hresult;
+ return;
+ }
+
+ hresult =
+ CreateMediaFoundationCdm(cdm_factory, cdm_config, cdm_origin_id,
+ cdm_client_token, cdm_store_path_root, mf_cdm);
}
} // namespace media
diff --git a/chromium/media/cdm/win/media_foundation_cdm_factory.h b/chromium/media/cdm/win/media_foundation_cdm_factory.h
index 7df1df8edfd..0fdf38b3ae3 100644
--- a/chromium/media/cdm/win/media_foundation_cdm_factory.h
+++ b/chromium/media/cdm/win/media_foundation_cdm_factory.h
@@ -72,15 +72,9 @@ class MEDIA_EXPORT MediaFoundationCdmFactory final : public CdmFactory {
void StoreClientToken(const std::vector<uint8_t>& client_token);
void OnCdmEvent(CdmEvent event);
- HRESULT CreateMfCdmInternal(
- const CdmConfig& cdm_config,
- const base::UnguessableToken& cdm_origin_id,
- const absl::optional<std::vector<uint8_t>>& cdm_client_token,
- const base::FilePath& cdm_store_path_root,
- Microsoft::WRL::ComPtr<IMFContentDecryptionModule>& mf_cdm);
-
- // Same as `CreateMfCdmInternal()`, but returns the HRESULT in out parameter
- // so we can bind it to a repeating callback using weak pointer.
+ // Creates `mf_cdm` based on the input parameters. Same as
+ // CreateMediaFoundationCdm() but returns the HRESULT in out parameter so we
+ // can bind it to a repeating callback using weak pointer.
void CreateMfCdm(const CdmConfig& cdm_config,
const base::UnguessableToken& cdm_origin_id,
const absl::optional<std::vector<uint8_t>>& cdm_client_token,
diff --git a/chromium/media/cdm/win/media_foundation_cdm_util.cc b/chromium/media/cdm/win/media_foundation_cdm_util.cc
new file mode 100644
index 00000000000..6ab78f60e32
--- /dev/null
+++ b/chromium/media/cdm/win/media_foundation_cdm_util.cc
@@ -0,0 +1,216 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cdm/win/media_foundation_cdm_util.h"
+
+#include <combaseapi.h>
+#include <initguid.h> // Needed for DEFINE_PROPERTYKEY to work properly.
+#include <mferror.h>
+#include <propkeydef.h> // Needed for DEFINE_PROPERTYKEY.
+
+#include "base/files/file_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/propvarutil.h"
+#include "base/win/scoped_propvariant.h"
+#include "media/base/win/mf_helpers.h"
+#include "media/cdm/cdm_paths.h"
+#include "media/cdm/win/media_foundation_cdm.h"
+
+namespace media {
+
+namespace {
+
+using Microsoft::WRL::ComPtr;
+
+// Key to the CDM Origin ID to be passed to the CDM for privacy purposes. The
+// same value is also used in MediaFoundation CDMs. Do NOT change this value!
+DEFINE_PROPERTYKEY(EME_CONTENTDECRYPTIONMODULE_ORIGIN_ID,
+ 0x1218a3e2,
+ 0xcfb0,
+ 0x4c98,
+ 0x90,
+ 0xe5,
+ 0x5f,
+ 0x58,
+ 0x18,
+ 0xd4,
+ 0xb6,
+ 0x7e,
+ PID_FIRST_USABLE);
+
+void SetBSTR(const wchar_t* str, PROPVARIANT* propvariant) {
+ propvariant->vt = VT_BSTR;
+ propvariant->bstrVal = SysAllocString(str);
+}
+
+// Returns a property store similar to EME MediaKeySystemMediaCapability.
+HRESULT CreateVideoCapability(const CdmConfig& cdm_config,
+ ComPtr<IPropertyStore>& video_capability) {
+ ComPtr<IPropertyStore> temp_video_capability;
+ RETURN_IF_FAILED(
+ PSCreateMemoryPropertyStore(IID_PPV_ARGS(&temp_video_capability)));
+
+ base::win::ScopedPropVariant robustness;
+ if (cdm_config.use_hw_secure_codecs) {
+ // TODO(xhwang): Provide a way to support other robustness strings.
+ SetBSTR(L"HW_SECURE_ALL", robustness.Receive());
+ RETURN_IF_FAILED(
+ temp_video_capability->SetValue(MF_EME_ROBUSTNESS, robustness.get()));
+ }
+ video_capability = temp_video_capability;
+ return S_OK;
+}
+
+// Returns a property store similar to EME MediaKeySystemConfigurations.
+// What really matters here are video robustness, persistent state and
+// distinctive identifier.
+HRESULT BuildCdmAccessConfigurations(const CdmConfig& cdm_config,
+ ComPtr<IPropertyStore>& configurations) {
+ ComPtr<IPropertyStore> temp_configurations;
+
+ RETURN_IF_FAILED(
+ PSCreateMemoryPropertyStore(IID_PPV_ARGS(&temp_configurations)));
+
+ // Add an empty audio capability.
+ base::win::ScopedPropVariant audio_capabilities;
+ PROPVARIANT* var_to_set = audio_capabilities.Receive();
+ var_to_set->vt = VT_VARIANT | VT_VECTOR;
+ var_to_set->capropvar.cElems = 0;
+ RETURN_IF_FAILED(temp_configurations->SetValue(MF_EME_AUDIOCAPABILITIES,
+ audio_capabilities.get()));
+
+ // Add a video capability so we can pass the correct robustness level.
+ ComPtr<IPropertyStore> video_capability;
+ RETURN_IF_FAILED(CreateVideoCapability(cdm_config, video_capability));
+
+ base::win::ScopedPropVariant video_config;
+ auto* video_config_ptr = video_config.Receive();
+ video_config_ptr->vt = VT_UNKNOWN;
+ video_config_ptr->punkVal = video_capability.Detach();
+
+ base::win::ScopedPropVariant video_capabilities;
+ var_to_set = video_capabilities.Receive();
+ var_to_set->vt = VT_VARIANT | VT_VECTOR;
+ var_to_set->capropvar.cElems = 1;
+ var_to_set->capropvar.pElems =
+ reinterpret_cast<PROPVARIANT*>(CoTaskMemAlloc(sizeof(PROPVARIANT)));
+ PropVariantCopy(var_to_set->capropvar.pElems, video_config.ptr());
+ RETURN_IF_FAILED(temp_configurations->SetValue(MF_EME_VIDEOCAPABILITIES,
+ video_capabilities.get()));
+
+ // Add persistent state.
+ DCHECK(cdm_config.allow_persistent_state);
+ base::win::ScopedPropVariant persisted_state;
+ RETURN_IF_FAILED(InitPropVariantFromUInt32(MF_MEDIAKEYS_REQUIREMENT_REQUIRED,
+ persisted_state.Receive()));
+ RETURN_IF_FAILED(temp_configurations->SetValue(MF_EME_PERSISTEDSTATE,
+ persisted_state.get()));
+
+ // Add distinctive identifier.
+ DCHECK(cdm_config.allow_distinctive_identifier);
+ base::win::ScopedPropVariant distinctive_identifier;
+ RETURN_IF_FAILED(InitPropVariantFromUInt32(MF_MEDIAKEYS_REQUIREMENT_REQUIRED,
+ distinctive_identifier.Receive()));
+ RETURN_IF_FAILED(temp_configurations->SetValue(MF_EME_DISTINCTIVEID,
+ distinctive_identifier.get()));
+
+ configurations = temp_configurations;
+ return S_OK;
+}
+
+HRESULT BuildCdmProperties(
+ const base::UnguessableToken& origin_id,
+ const absl::optional<std::vector<uint8_t>>& client_token,
+ const base::FilePath& store_path,
+ ComPtr<IPropertyStore>& properties) {
+ DCHECK(!origin_id.is_empty());
+
+ ComPtr<IPropertyStore> temp_properties;
+ RETURN_IF_FAILED(PSCreateMemoryPropertyStore(IID_PPV_ARGS(&temp_properties)));
+
+ base::win::ScopedPropVariant origin_id_var;
+ RETURN_IF_FAILED(InitPropVariantFromString(
+ base::UTF8ToWide(origin_id.ToString()).c_str(), origin_id_var.Receive()));
+ RETURN_IF_FAILED(temp_properties->SetValue(
+ EME_CONTENTDECRYPTIONMODULE_ORIGIN_ID, origin_id_var.get()));
+
+ if (client_token) {
+ base::win::ScopedPropVariant client_token_var;
+ PROPVARIANT* client_token_propvar = client_token_var.Receive();
+ client_token_propvar->vt = VT_VECTOR | VT_UI1;
+ client_token_propvar->caub.cElems = client_token->size();
+ client_token_propvar->caub.pElems = reinterpret_cast<unsigned char*>(
+ CoTaskMemAlloc(client_token->size() * sizeof(char)));
+ memcpy(client_token_propvar->caub.pElems, client_token->data(),
+ client_token->size());
+
+ RETURN_IF_FAILED(temp_properties->SetValue(
+ EME_CONTENTDECRYPTIONMODULE_CLIENT_TOKEN, client_token_var.get()));
+ }
+
+ base::win::ScopedPropVariant store_path_var;
+ RETURN_IF_FAILED(InitPropVariantFromString(store_path.value().c_str(),
+ store_path_var.Receive()));
+ RETURN_IF_FAILED(temp_properties->SetValue(
+ MF_CONTENTDECRYPTIONMODULE_STOREPATH, store_path_var.get()));
+
+ properties = temp_properties;
+ return S_OK;
+}
+
+} // namespace
+
+HRESULT CreateMediaFoundationCdm(
+ ComPtr<IMFContentDecryptionModuleFactory> cdm_factory,
+ const CdmConfig& cdm_config,
+ const base::UnguessableToken& cdm_origin_id,
+ const absl::optional<std::vector<uint8_t>>& cdm_client_token,
+ const base::FilePath& cdm_store_path_root,
+ ComPtr<IMFContentDecryptionModule>& mf_cdm) {
+ DVLOG(1) << __func__ << ": cdm_config=" << cdm_config
+ << ", cdm_origin_id=" << cdm_origin_id.ToString()
+ << ", cdm_store_path_root=" << cdm_store_path_root;
+
+ DCHECK(!cdm_origin_id.is_empty());
+
+ const auto key_system = cdm_config.key_system;
+ auto key_system_str = base::UTF8ToWide(key_system);
+ if (!cdm_factory->IsTypeSupported(key_system_str.c_str(), nullptr)) {
+ DLOG(ERROR) << key_system << " not supported by MF CdmFactory";
+ return MF_NOT_SUPPORTED_ERR;
+ }
+
+ ComPtr<IPropertyStore> property_store;
+ RETURN_IF_FAILED(BuildCdmAccessConfigurations(cdm_config, property_store));
+
+ IPropertyStore* configurations[] = {property_store.Get()};
+ ComPtr<IMFContentDecryptionModuleAccess> cdm_access;
+ RETURN_IF_FAILED(cdm_factory->CreateContentDecryptionModuleAccess(
+ key_system_str.c_str(), configurations, ARRAYSIZE(configurations),
+ &cdm_access));
+
+ // Provide a per-user, per-arch, per-origin and per-key-system path.
+ auto store_path =
+ GetCdmStorePath(cdm_store_path_root, cdm_origin_id, key_system);
+ DVLOG(1) << "store_path=" << store_path;
+
+ // Ensure the path exists. If it already exists, this call will do nothing.
+ base::File::Error file_error;
+ if (!base::CreateDirectoryAndGetError(store_path, &file_error)) {
+ DLOG(ERROR) << "Create CDM store path failed with " << file_error;
+ return MF_INVALID_ACCESS_ERR;
+ }
+
+ ComPtr<IPropertyStore> cdm_properties;
+ ComPtr<IMFContentDecryptionModule> cdm;
+ RETURN_IF_FAILED(BuildCdmProperties(cdm_origin_id, cdm_client_token,
+ store_path, cdm_properties));
+ RETURN_IF_FAILED(
+ cdm_access->CreateContentDecryptionModule(cdm_properties.Get(), &cdm));
+
+ mf_cdm.Swap(cdm);
+ return S_OK;
+}
+
+} // namespace media
diff --git a/chromium/media/cdm/win/media_foundation_cdm_util.h b/chromium/media/cdm/win/media_foundation_cdm_util.h
new file mode 100644
index 00000000000..f37c1849d5d
--- /dev/null
+++ b/chromium/media/cdm/win/media_foundation_cdm_util.h
@@ -0,0 +1,33 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CDM_WIN_MEDIA_FOUNDATION_CDM_UTIL_H_
+#define MEDIA_CDM_WIN_MEDIA_FOUNDATION_CDM_UTIL_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include <mfcontentdecryptionmodule.h>
+#include <wrl/client.h>
+
+#include "base/files/file_path.h"
+#include "base/unguessable_token.h"
+#include "media/base/cdm_config.h"
+#include "media/base/media_export.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+namespace media {
+
+MEDIA_EXPORT HRESULT CreateMediaFoundationCdm(
+ Microsoft::WRL::ComPtr<IMFContentDecryptionModuleFactory> cdm_factory,
+ const CdmConfig& cdm_config,
+ const base::UnguessableToken& cdm_origin_id,
+ const absl::optional<std::vector<uint8_t>>& cdm_client_token,
+ const base::FilePath& cdm_store_path_root,
+ Microsoft::WRL::ComPtr<IMFContentDecryptionModule>& mf_cdm);
+
+} // namespace media
+
+#endif // MEDIA_CDM_WIN_MEDIA_FOUNDATION_CDM_UTIL_H_
diff --git a/chromium/media/device_monitors/device_monitor_udev.cc b/chromium/media/device_monitors/device_monitor_udev.cc
index e9151c2fbdc..94d938004fe 100644
--- a/chromium/media/device_monitors/device_monitor_udev.cc
+++ b/chromium/media/device_monitors/device_monitor_udev.cc
@@ -74,8 +74,8 @@ DeviceMonitorLinux::BlockingTaskRunnerHelper::BlockingTaskRunnerHelper() {
void DeviceMonitorLinux::BlockingTaskRunnerHelper::Initialize() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
std::vector<device::UdevWatcher::Filter> filters;
- for (const SubsystemMap& entry : kSubsystemMap) {
- filters.emplace_back(entry.subsystem, entry.devtype);
+ for (const auto& [device_type, subsys, devtype] : kSubsystemMap) {
+ filters.emplace_back(subsys, devtype);
}
udev_watcher_ = device::UdevWatcher::StartWatching(this, filters);
}
@@ -99,20 +99,19 @@ void DeviceMonitorLinux::BlockingTaskRunnerHelper::OnDevicesChanged(
device::ScopedUdevDevicePtr device) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- base::SystemMonitor::DeviceType device_type =
- base::SystemMonitor::DEVTYPE_UNKNOWN;
+ base::SystemMonitor::DeviceType type = base::SystemMonitor::DEVTYPE_UNKNOWN;
const std::string subsystem(device::udev_device_get_subsystem(device.get()));
- for (const SubsystemMap& entry : kSubsystemMap) {
- if (subsystem == entry.subsystem) {
- device_type = entry.device_type;
+ for (const auto& [device_type, subsys, devtype] : kSubsystemMap) {
+ if (subsystem == subsys) {
+ type = device_type;
break;
}
}
- DCHECK_NE(device_type, base::SystemMonitor::DEVTYPE_UNKNOWN);
+ DCHECK_NE(type, base::SystemMonitor::DEVTYPE_UNKNOWN);
// base::SystemMonitor takes care of notifying each observer in their own task
// runner via base::ObserverListThreadSafe.
- base::SystemMonitor::Get()->ProcessDevicesChanged(device_type);
+ base::SystemMonitor::Get()->ProcessDevicesChanged(type);
}
DeviceMonitorLinux::DeviceMonitorLinux()
diff --git a/chromium/media/ffmpeg/ffmpeg_common.cc b/chromium/media/ffmpeg/ffmpeg_common.cc
index 76f03d6608e..9500983595f 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common.cc
@@ -351,7 +351,6 @@ bool AVCodecContextToAudioDecoderConfig(const AVCodecContext* codec_context,
codec_context->ch_layout.u.mask,
codec_context->ch_layout.nb_channels);
- int sample_rate = codec_context->sample_rate;
switch (codec) {
// For AC3/EAC3 we enable only demuxing, but not decoding, so FFmpeg does
// not fill |sample_fmt|.
@@ -398,7 +397,7 @@ bool AVCodecContextToAudioDecoderConfig(const AVCodecContext* codec_context,
codec_context->extradata + codec_context->extradata_size);
}
- config->Initialize(codec, sample_format, channel_layout, sample_rate,
+ config->Initialize(codec, sample_format, channel_layout, codec_context->sample_rate,
extra_data, encryption_scheme, seek_preroll,
codec_context->delay);
if (channel_layout == CHANNEL_LAYOUT_DISCRETE)
@@ -527,6 +526,14 @@ bool AVStreamToVideoDecoderConfig(const AVStream* stream,
// TODO(chcunningham): We need real profiles for all of the codecs below to
// actually handle capabilities requests correctly. http://crbug.com/784610
VideoCodecProfile profile = VIDEO_CODEC_PROFILE_UNKNOWN;
+
+  // Prefer the color space found by libavcodec if available.
+ VideoColorSpace color_space =
+ VideoColorSpace(codec_context->color_primaries, codec_context->color_trc,
+ codec_context->colorspace,
+ codec_context->color_range == AVCOL_RANGE_JPEG
+ ? gfx::ColorSpace::RangeID::FULL
+ : gfx::ColorSpace::RangeID::LIMITED);
switch (codec) {
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
case VideoCodec::kH264: {
@@ -548,28 +555,62 @@ bool AVStreamToVideoDecoderConfig(const AVStream* stream,
}
#if BUILDFLAG(ENABLE_PLATFORM_HEVC)
case VideoCodec::kHEVC: {
- int hevc_profile = FF_PROFILE_UNKNOWN;
- if ((codec_context->profile < FF_PROFILE_HEVC_MAIN ||
- codec_context->profile > FF_PROFILE_HEVC_REXT) &&
- codec_context->extradata && codec_context->extradata_size) {
+ int hevc_profile = -1;
+      // We need to parse extradata each time, because we won't add the
+      // ffmpeg hevc decoder & parser to chromium, and codec_context->profile
+      // should always be FF_PROFILE_UNKNOWN (-99) here.
+ if (codec_context->extradata && codec_context->extradata_size) {
mp4::HEVCDecoderConfigurationRecord hevc_config;
if (hevc_config.Parse(codec_context->extradata,
codec_context->extradata_size)) {
hevc_profile = hevc_config.general_profile_idc;
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
+ if (!color_space.IsSpecified()) {
+            // We should try to parse the color space from the SPS if
+            // the result from libavcodec is unspecified, in case
+            // the encoder did not write the color space info to
+            // the container.
+ color_space = hevc_config.GetColorSpace();
+ }
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
}
- } else {
- hevc_profile = codec_context->profile;
}
+ // The values of general_profile_idc are taken from the HEVC standard, see
+ // the latest https://www.itu.int/rec/T-REC-H.265/en
switch (hevc_profile) {
- case FF_PROFILE_HEVC_MAIN:
+ case 1:
profile = HEVCPROFILE_MAIN;
break;
- case FF_PROFILE_HEVC_MAIN_10:
+ case 2:
profile = HEVCPROFILE_MAIN10;
break;
- case FF_PROFILE_HEVC_MAIN_STILL_PICTURE:
+ case 3:
profile = HEVCPROFILE_MAIN_STILL_PICTURE;
break;
+ case 4:
+ profile = HEVCPROFILE_REXT;
+ break;
+ case 5:
+ profile = HEVCPROFILE_HIGH_THROUGHPUT;
+ break;
+ case 6:
+ profile = HEVCPROFILE_MULTIVIEW_MAIN;
+ break;
+ case 7:
+ profile = HEVCPROFILE_SCALABLE_MAIN;
+ break;
+ case 8:
+ profile = HEVCPROFILE_3D_MAIN;
+ break;
+ case 9:
+ profile = HEVCPROFILE_SCREEN_EXTENDED;
+ break;
+ case 10:
+ profile = HEVCPROFILE_SCALABLE_REXT;
+ break;
+ case 11:
+ profile = HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED;
+ break;
default:
// Always assign a default if all heuristics fail.
profile = HEVCPROFILE_MAIN;
@@ -623,13 +664,6 @@ bool AVStreamToVideoDecoderConfig(const AVStream* stream,
static_cast<int32_t*>(display_matrix));
}
- // Prefer the color space found by libavcodec if available.
- VideoColorSpace color_space =
- VideoColorSpace(codec_context->color_primaries, codec_context->color_trc,
- codec_context->colorspace,
- codec_context->color_range == AVCOL_RANGE_JPEG
- ? gfx::ColorSpace::RangeID::FULL
- : gfx::ColorSpace::RangeID::LIMITED);
if (!color_space.IsSpecified()) {
// VP9 frames may have color information, but that information cannot
// express new color spaces, like HDR. For that reason, color space
diff --git a/chromium/media/filters/BUILD.gn b/chromium/media/filters/BUILD.gn
index e9e4fa9b5b5..a4cd06a7698 100644
--- a/chromium/media/filters/BUILD.gn
+++ b/chromium/media/filters/BUILD.gn
@@ -203,33 +203,38 @@ source_set("filters") {
sources += [
"mac/audio_toolbox_audio_decoder.cc",
"mac/audio_toolbox_audio_decoder.h",
+ "mac/audio_toolbox_audio_encoder.cc",
+ "mac/audio_toolbox_audio_encoder.h",
]
}
-
- if (use_vaapi) {
+ if (is_win) {
sources += [
- "h264_bitstream_buffer.cc",
- "h264_bitstream_buffer.h",
+ "win/media_foundation_utils.cc",
+ "win/media_foundation_utils.h",
]
+ deps += [ "//media/base/win:media_foundation_util" ]
+ if (enable_platform_dts_audio) {
+ sources += [
+ "win/media_foundation_audio_decoder.cc",
+ "win/media_foundation_audio_decoder.h",
+ ]
+ ldflags = [
+ "/DELAYLOAD:packages/Microsoft.VCRTForwarders.140.1.0.6/runtimes/win10-x64/native/release/concrt140_app.dll",
+ "/DELAYLOAD:packages/Microsoft.VCRTForwarders.140.1.0.6/runtimes/win10-x64/native/release/msvcp140_1_app.dll",
+ "/DELAYLOAD:packages/Microsoft.VCRTForwarders.140.1.0.6/runtimes/win10-x64/native/release/msvcp140_2_app.dll",
+ "/DELAYLOAD:packages/Microsoft.VCRTForwarders.140.1.0.6/runtimes/win10-x64/native/release/msvcp140_app.dll",
+ "/DELAYLOAD:packages/Microsoft.VCRTForwarders.140.1.0.6/runtimes/win10-x64/native/release/vcamp140_app.dll",
+ "/DELAYLOAD:packages/Microsoft.VCRTForwarders.140.1.0.6/runtimes/win10-x64/native/release/vccorlib140_app.dll",
+ "/DELAYLOAD:packages/Microsoft.VCRTForwarders.140.1.0.6/runtimes/win10-x64/native/release/vcomp140_app.dll",
+ "/DELAYLOAD:packages/Microsoft.VCRTForwarders.140.1.0.6/runtimes/win10-x64/native/release/vcruntime140_app.dll",
+ ]
+ }
}
- if (is_fuchsia) {
+ if (use_vaapi) {
sources += [
- "fuchsia/fuchsia_video_decoder.cc",
- "fuchsia/fuchsia_video_decoder.h",
- ]
- public_deps = [ "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media" ]
- deps += [
- "//components/viz/common",
- "//gpu/command_buffer/client",
- "//gpu/command_buffer/common",
- "//gpu/ipc/common",
- "//media/fuchsia/cdm",
- "//media/fuchsia/common",
- "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.mediacodec",
- "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sysmem",
- "//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
- "//ui/ozone",
+ "h264_bitstream_buffer.cc",
+ "h264_bitstream_buffer.h",
]
}
}
@@ -320,18 +325,6 @@ source_set("unit_tests") {
deps += [ "//ui/gl" ]
}
- if (is_fuchsia) {
- sources += [ "fuchsia/fuchsia_video_decoder_unittest.cc" ]
- deps += [
- "//components/viz/common",
- "//components/viz/test:test_support",
- "//gpu/command_buffer/client",
- "//gpu/config",
- "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sysmem",
- "//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
- ]
- }
-
# libvpx for running vpx test on chromecast doesn't support high bit depth.
# This may cause some unit tests failure.
if (is_chromecast) {
diff --git a/chromium/media/filters/audio_video_metadata_extractor.cc b/chromium/media/filters/audio_video_metadata_extractor.cc
index 69ff508c221..af9e84f50c0 100644
--- a/chromium/media/filters/audio_video_metadata_extractor.cc
+++ b/chromium/media/filters/audio_video_metadata_extractor.cc
@@ -24,7 +24,7 @@ void OnError(bool* succeeded) {
bool ExtractString(AVDictionaryEntry* tag,
const char* expected_key,
std::string* destination) {
- if (!base::LowerCaseEqualsASCII(std::string(tag->key), expected_key))
+ if (!base::EqualsCaseInsensitiveASCII(std::string(tag->key), expected_key))
return false;
if (destination->empty())
@@ -37,7 +37,7 @@ bool ExtractString(AVDictionaryEntry* tag,
bool ExtractInt(AVDictionaryEntry* tag,
const char* expected_key,
int* destination) {
- if (!base::LowerCaseEqualsASCII(std::string(tag->key), expected_key))
+ if (!base::EqualsCaseInsensitiveASCII(std::string(tag->key), expected_key))
return false;
int temporary = -1;
diff --git a/chromium/media/filters/chunk_demuxer.cc b/chromium/media/filters/chunk_demuxer.cc
index e8040f344a9..a2df9fec53c 100644
--- a/chromium/media/filters/chunk_demuxer.cc
+++ b/chromium/media/filters/chunk_demuxer.cc
@@ -885,9 +885,9 @@ void ChunkDemuxer::OnMemoryPressure(
return;
}
base::AutoLock auto_lock(lock_);
- for (const auto& itr : source_state_map_) {
- itr.second->OnMemoryPressure(currentMediaTime, memory_pressure_level,
- force_instant_gc);
+ for (const auto& [source, state] : source_state_map_) {
+ state->OnMemoryPressure(currentMediaTime, memory_pressure_level,
+ force_instant_gc);
}
}
diff --git a/chromium/media/filters/decoder_selector.cc b/chromium/media/filters/decoder_selector.cc
index d13c5264faf..23a28d21322 100644
--- a/chromium/media/filters/decoder_selector.cc
+++ b/chromium/media/filters/decoder_selector.cc
@@ -152,7 +152,7 @@ DecoderSelector<StreamType>::~DecoderSelector() {
DVLOG(2) << __func__;
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (select_decoder_cb_)
- ReturnNullDecoder();
+ ReturnSelectionError(DecoderStatus::Codes::kFailed);
}
template <DemuxerStream::Type StreamType>
@@ -174,9 +174,10 @@ void DecoderSelector<StreamType>::Initialize(StreamTraits* traits,
}
template <DemuxerStream::Type StreamType>
-void DecoderSelector<StreamType>::SelectDecoder(
+void DecoderSelector<StreamType>::SelectDecoderInternal(
SelectDecoderCB select_decoder_cb,
- typename Decoder::OutputCB output_cb) {
+ typename Decoder::OutputCB output_cb,
+ bool needs_new_decoders) {
DVLOG(2) << __func__;
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(select_decoder_cb);
@@ -191,19 +192,37 @@ void DecoderSelector<StreamType>::SelectDecoder(
if (!config_.IsValidConfig()) {
DLOG(ERROR) << "Invalid stream config";
- ReturnNullDecoder();
+ ReturnSelectionError(DecoderStatus::Codes::kUnsupportedConfig);
return;
}
- // If this is the first selection (ever or since FinalizeDecoderSelection()),
- // start selection with the full list of potential decoders.
- if (!is_selecting_decoders_) {
- is_selecting_decoders_ = true;
+ if (needs_new_decoders) {
decoder_selection_start_ = base::TimeTicks::Now();
+ decode_failure_reinit_cause_ = absl::nullopt;
CreateDecoders();
}
- InitializeDecoder();
+ GetAndInitializeNextDecoder();
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderSelector<StreamType>::BeginDecoderSelection(
+ SelectDecoderCB select_decoder_cb,
+ typename Decoder::OutputCB output_cb) {
+ SelectDecoderInternal(std::move(select_decoder_cb), std::move(output_cb),
+ /*needs_new_decoders = */ true);
+}
+
+template <DemuxerStream::Type StreamType>
+void DecoderSelector<StreamType>::ResumeDecoderSelection(
+ SelectDecoderCB select_decoder_cb,
+ typename Decoder::OutputCB output_cb,
+ DecoderStatus&& reinit_cause) {
+ DVLOG(2) << __func__;
+ if (!decode_failure_reinit_cause_.has_value())
+ decode_failure_reinit_cause_ = std::move(reinit_cause);
+ SelectDecoderInternal(std::move(select_decoder_cb), std::move(output_cb),
+ /*needs_new_decoders = */ false);
}
template <DemuxerStream::Type StreamType>
@@ -211,7 +230,6 @@ void DecoderSelector<StreamType>::FinalizeDecoderSelection() {
DVLOG(2) << __func__;
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!select_decoder_cb_);
- is_selecting_decoders_ = false;
const std::string decoder_type = is_platform_decoder_ ? "HW" : "SW";
const std::string stream_type =
@@ -263,9 +281,7 @@ void DecoderSelector<StreamType>::PrependDecoder(
// Decoders inserted directly should be given priority over those returned by
// |create_decoders_cb_|.
decoders_.insert(decoders_.begin(), std::move(decoder));
-
- if (is_selecting_decoders_)
- FilterAndSortAvailableDecoders();
+ FilterAndSortAvailableDecoders();
}
template <DemuxerStream::Type StreamType>
@@ -287,7 +303,7 @@ void DecoderSelector<StreamType>::CreateDecoders() {
}
template <DemuxerStream::Type StreamType>
-void DecoderSelector<StreamType>::InitializeDecoder() {
+void DecoderSelector<StreamType>::GetAndInitializeNextDecoder() {
DVLOG(2) << __func__;
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!decoder_);
@@ -300,7 +316,11 @@ void DecoderSelector<StreamType>::InitializeDecoder() {
return;
}
- ReturnNullDecoder();
+ if (decode_failure_reinit_cause_.has_value()) {
+ ReturnSelectionError(std::move(*decode_failure_reinit_cause_));
+ } else {
+ ReturnSelectionError(DecoderStatus::Codes::kUnsupportedConfig);
+ }
return;
}
@@ -323,35 +343,35 @@ void DecoderSelector<StreamType>::InitializeDecoder() {
template <DemuxerStream::Type StreamType>
void DecoderSelector<StreamType>::OnDecoderInitializeDone(
DecoderStatus status) {
+ DCHECK(decoder_);
DVLOG(2) << __func__ << ": " << decoder_->GetDecoderType()
<< " success=" << static_cast<int>(status.code());
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (!status.is_ok()) {
- // TODO(tmathmeyer) this was too noisy in media log. Batch all the logs
- // together and then send them as an informational notice instead of
- // using NotifyError.
+ // Note: Don't track this decode status, as it is the result of
+ // initialization failure.
MEDIA_LOG(INFO, media_log_)
<< "Failed to initialize " << decoder_->GetDecoderType();
// Try the next decoder on the list.
- decoder_.reset();
- InitializeDecoder();
+ decoder_ = nullptr;
+ GetAndInitializeNextDecoder();
return;
}
- RunSelectDecoderCB();
+ RunSelectDecoderCB(std::move(decoder_));
}
template <DemuxerStream::Type StreamType>
-void DecoderSelector<StreamType>::ReturnNullDecoder() {
+void DecoderSelector<StreamType>::ReturnSelectionError(DecoderStatus error) {
DVLOG(1) << __func__ << ": No decoder selected";
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(!error.is_ok());
decrypting_demuxer_stream_.reset();
- decoder_.reset();
decoders_.clear();
- RunSelectDecoderCB();
+ RunSelectDecoderCB(std::move(error));
}
template <DemuxerStream::Type StreamType>
@@ -380,7 +400,8 @@ void DecoderSelector<StreamType>::OnDecryptingDemuxerStreamInitializeDone(
if (status != PIPELINE_OK) {
// Since we already tried every potential decoder without DDS, give up.
- ReturnNullDecoder();
+ ReturnSelectionError(
+ {DecoderStatus::Codes::kUnsupportedEncryptionMode, std::move(status)});
return;
}
@@ -395,24 +416,26 @@ void DecoderSelector<StreamType>::OnDecryptingDemuxerStreamInitializeDone(
// Try decoder selection again now that DDS is being used.
CreateDecoders();
- InitializeDecoder();
+ GetAndInitializeNextDecoder();
}
template <DemuxerStream::Type StreamType>
-void DecoderSelector<StreamType>::RunSelectDecoderCB() {
+void DecoderSelector<StreamType>::RunSelectDecoderCB(
+ DecoderOrError decoder_or_error) {
DCHECK(select_decoder_cb_);
TRACE_EVENT_ASYNC_END2(
"media", kSelectDecoderTrace, this, "type",
DemuxerStream::GetTypeName(StreamType), "decoder",
base::StringPrintf(
"%s (%s)",
- decoder_ ? GetDecoderName(decoder_->GetDecoderType()).c_str()
- : "null",
+ decoder_or_error.has_value()
+ ? GetDecoderName(decoder_or_error->GetDecoderType()).c_str()
+ : "null",
decrypting_demuxer_stream_ ? "encrypted" : "unencrypted"));
task_runner_->PostTask(
FROM_HERE,
- base::BindOnce(std::move(select_decoder_cb_), std::move(decoder_),
+ base::BindOnce(std::move(select_decoder_cb_), std::move(decoder_or_error),
std::move(decrypting_demuxer_stream_)));
}
diff --git a/chromium/media/filters/decoder_selector.h b/chromium/media/filters/decoder_selector.h
index 152785b8c38..de604995f5d 100644
--- a/chromium/media/filters/decoder_selector.h
+++ b/chromium/media/filters/decoder_selector.h
@@ -54,6 +54,7 @@ class MEDIA_EXPORT DecoderSelector {
typedef DecoderStreamTraits<StreamType> StreamTraits;
typedef typename StreamTraits::DecoderType Decoder;
typedef typename StreamTraits::DecoderConfigType DecoderConfig;
+ using DecoderOrError = DecoderStatus::Or<std::unique_ptr<Decoder>>;
// Callback to create a list of decoders to select from.
// TODO(xhwang): Use a DecoderFactory to create decoders one by one as needed,
@@ -76,7 +77,7 @@ class MEDIA_EXPORT DecoderSelector {
// The caller should call DecryptingDemuxerStream::Reset() before
// calling Decoder::Reset() to release any pending decryption or read.
using SelectDecoderCB =
- base::OnceCallback<void(std::unique_ptr<Decoder>,
+ base::OnceCallback<void(DecoderOrError,
std::unique_ptr<DecryptingDemuxerStream>)>;
DecoderSelector() = delete;
@@ -98,17 +99,27 @@ class MEDIA_EXPORT DecoderSelector {
WaitingCB waiting_cb);
// Selects and initializes a decoder, which will be returned via
- // |select_decoder_cb| posted to |task_runner|. Subsequent calls to
- // SelectDecoder() will return different decoder instances, until all
- // potential decoders have been exhausted.
+ // |select_decoder_cb| posted to |task_runner|. In the event that a selected
+ // decoder fails to decode, |ResumeDecoderSelection| may be used to get
+ // another one.
//
// When the caller determines that decoder selection has succeeded (eg.
// because the decoder decoded a frame successfully), it should call
// FinalizeDecoderSelection().
//
+ // |SelectDecoderCB| may be called with an error if no decoders are available.
+ //
// Must not be called while another selection is pending.
- void SelectDecoder(SelectDecoderCB select_decoder_cb,
- typename Decoder::OutputCB output_cb);
+ void BeginDecoderSelection(SelectDecoderCB select_decoder_cb,
+ typename Decoder::OutputCB output_cb);
+
+ // When a client was provided with a decoder that fails to decode after
+ // being successfully initialized, it should request a new decoder via
+ // this method rather than |SelectDecoder|. This allows the pipeline to
+ // report the root cause of decoder failure.
+ void ResumeDecoderSelection(SelectDecoderCB select_decoder_cb,
+ typename Decoder::OutputCB output_cb,
+ DecoderStatus&& reinit_cause);
// Signals that decoder selection has been completed (successfully). Future
// calls to SelectDecoder() will select from the full list of decoders.
@@ -131,13 +142,16 @@ class MEDIA_EXPORT DecoderSelector {
private:
void CreateDecoders();
- void InitializeDecoder();
+ void GetAndInitializeNextDecoder();
void OnDecoderInitializeDone(DecoderStatus status);
- void ReturnNullDecoder();
+ void ReturnSelectionError(DecoderStatus error);
void InitializeDecryptingDemuxerStream();
void OnDecryptingDemuxerStreamInitializeDone(PipelineStatus status);
- void RunSelectDecoderCB();
+ void RunSelectDecoderCB(DecoderOrError decoder_or_error);
void FilterAndSortAvailableDecoders();
+ void SelectDecoderInternal(SelectDecoderCB select_decoder_cb,
+ typename Decoder::OutputCB output_cb,
+ bool needs_new_decoders);
scoped_refptr<base::SequencedTaskRunner> task_runner_;
SEQUENCE_CHECKER(sequence_checker_);
@@ -153,10 +167,9 @@ class MEDIA_EXPORT DecoderSelector {
// Overall decoder selection state.
DecoderConfig config_;
- bool is_selecting_decoders_ = false;
std::vector<std::unique_ptr<Decoder>> decoders_;
- // State for a single SelectDecoder() invocation.
+ // State for a single GetAndInitializeNextDecoder() invocation.
SelectDecoderCB select_decoder_cb_;
typename Decoder::OutputCB output_cb_;
std::unique_ptr<Decoder> decoder_;
@@ -169,6 +182,11 @@ class MEDIA_EXPORT DecoderSelector {
base::TimeTicks decoder_selection_start_;
base::TimeTicks codec_change_start_;
+ // Used to keep track of the original failure-to-decode reason so that if
+ // playback fails entirely, we have a root cause to point to, rather than
+ // failing due to running out of more acceptable decoders.
+ absl::optional<DecoderStatus> decode_failure_reinit_cause_ = absl::nullopt;
+
base::WeakPtrFactory<DecoderSelector> weak_this_factory_{this};
};
diff --git a/chromium/media/filters/decoder_selector_unittest.cc b/chromium/media/filters/decoder_selector_unittest.cc
index 4951e02a1af..4310170f674 100644
--- a/chromium/media/filters/decoder_selector_unittest.cc
+++ b/chromium/media/filters/decoder_selector_unittest.cc
@@ -305,14 +305,16 @@ class DecoderSelectorTest : public ::testing::Test {
void(std::unique_ptr<DecryptingDemuxerStream>));
void OnDecoderSelectedThunk(
- std::unique_ptr<Decoder> decoder,
+ typename Selector::DecoderOrError decoder,
std::unique_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream) {
// Report only the type or id of the decoder, since that's what the tests
// care about. The decoder will be destructed immediately.
- if (decoder && decoder->GetDecoderType() == DecoderType::kTesting) {
+ if (decoder.has_value() &&
+ decoder->GetDecoderType() == DecoderType::kTesting) {
OnDecoderSelected(
- static_cast<MockDecoder*>(decoder.get())->GetDecoderId());
- } else if (decoder) {
+ static_cast<MockDecoder*>(std::move(decoder).value().get())
+ ->GetDecoderId());
+ } else if (decoder.has_value()) {
OnDecoderSelected(decoder->GetDecoderType());
} else {
NoDecoderSelected();
@@ -429,13 +431,26 @@ class DecoderSelectorTest : public ::testing::Test {
TypeParam::UseHighQualityEncryptedDecoderConfig(demuxer_stream_);
}
- void SelectDecoder() {
- decoder_selector_->SelectDecoder(
- base::BindOnce(&Self::OnDecoderSelectedThunk, base::Unretained(this)),
- base::BindRepeating(&Self::OnOutput, base::Unretained(this)));
+ void SelectNextDecoder() {
+ if (is_selecting_) {
+ decoder_selector_->ResumeDecoderSelection(
+ base::BindOnce(&Self::OnDecoderSelectedThunk, base::Unretained(this)),
+ base::BindRepeating(&Self::OnOutput, base::Unretained(this)),
+ DecoderStatus::Codes::kFailed);
+ } else {
+ decoder_selector_->BeginDecoderSelection(
+ base::BindOnce(&Self::OnDecoderSelectedThunk, base::Unretained(this)),
+ base::BindRepeating(&Self::OnOutput, base::Unretained(this)));
+ }
+ is_selecting_ = true;
RunUntilIdle();
}
+ void FinalizeDecoderSelection() {
+ decoder_selector_->FinalizeDecoderSelection();
+ is_selecting_ = false;
+ }
+
void RunUntilIdle() { task_environment_.RunUntilIdle(); }
base::test::TaskEnvironment task_environment_;
@@ -449,6 +464,7 @@ class DecoderSelectorTest : public ::testing::Test {
std::unique_ptr<Selector> decoder_selector_;
bool use_decrypting_decoder_ = false;
+ bool is_selecting_ = false;
std::vector<MockDecoderArgs> mock_decoders_to_create_;
};
@@ -468,7 +484,7 @@ TYPED_TEST(DecoderSelectorTest, ClearStream_NoDecoders) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, ClearStream_NoClearDecoder) {
@@ -477,7 +493,7 @@ TYPED_TEST(DecoderSelectorTest, ClearStream_NoClearDecoder) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, ClearStream_OneClearDecoder) {
@@ -486,7 +502,7 @@ TYPED_TEST(DecoderSelectorTest, ClearStream_OneClearDecoder) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, ClearStream_InternalFallback) {
@@ -496,7 +512,7 @@ TYPED_TEST(DecoderSelectorTest, ClearStream_InternalFallback) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, ClearStream_ExternalFallback) {
@@ -506,13 +522,13 @@ TYPED_TEST(DecoderSelectorTest, ClearStream_ExternalFallback) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, ClearStream_FinalizeDecoderSelection) {
@@ -522,12 +538,12 @@ TYPED_TEST(DecoderSelectorTest, ClearStream_FinalizeDecoderSelection) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
- this->decoder_selector_->FinalizeDecoderSelection();
+ this->FinalizeDecoderSelection();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests that platform decoders are prioritized for
@@ -544,16 +560,16 @@ TYPED_TEST(DecoderSelectorTest, ClearStream_PrioritizePlatformDecoders) {
base::BindRepeating(TypeParam::MockDecoderPriorityCB));
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder4));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests that non-platform decoders are prioritized for
@@ -570,16 +586,16 @@ TYPED_TEST(DecoderSelectorTest, ClearStream_DeprioritizePlatformDecoders) {
base::BindRepeating(TypeParam::MockDecoderPriorityCB));
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder4));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests that platform and non-platform decoders remain in the order they are
@@ -597,16 +613,16 @@ TYPED_TEST(DecoderSelectorTest,
base::BindRepeating(TypeParam::NormalDecoderPriorityCB));
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder4));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, ClearStream_SkipAllDecoders) {
@@ -621,7 +637,7 @@ TYPED_TEST(DecoderSelectorTest, ClearStream_SkipAllDecoders) {
base::BindRepeating(TypeParam::SkipDecoderPriorityCB));
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, ClearStream_ForceHardwareDecoders) {
@@ -637,11 +653,11 @@ TYPED_TEST(DecoderSelectorTest, ClearStream_ForceHardwareDecoders) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests the production predicate for `DecoderSelector<DemuxerStream::VIDEO>`
@@ -661,15 +677,15 @@ TEST_F(VideoDecoderSelectorTest, ClearStream_PrioritizeSoftwareDecoders) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder4));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests the production predicate for `DecoderSelector<DemuxerStream::VIDEO>`
@@ -689,15 +705,15 @@ TEST_F(VideoDecoderSelectorTest, ClearStream_PrioritizePlatformDecoders) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder4));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests for encrypted streams.
@@ -721,7 +737,7 @@ TYPED_TEST(DecoderSelectorTest,
this->CreateDecoderSelector();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests that for an encrypted stream, platform decoders are prioritized for
@@ -738,16 +754,16 @@ TYPED_TEST(DecoderSelectorTest, EncryptedStream_PrioritizePlatformDecoders) {
base::BindRepeating(TypeParam::MockDecoderPriorityCB));
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder4));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests that for an encrypted stream, non-platform decoders are prioritized for
@@ -764,16 +780,16 @@ TYPED_TEST(DecoderSelectorTest, EncryptedStream_DeprioritizePlatformDecoders) {
base::BindRepeating(TypeParam::MockDecoderPriorityCB));
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder4));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests that platform and non-platform decoders remain in the order they are
@@ -791,16 +807,16 @@ TYPED_TEST(DecoderSelectorTest,
base::BindRepeating(TypeParam::NormalDecoderPriorityCB));
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder4));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, EncryptedStream_SkipAllDecoders) {
@@ -815,7 +831,7 @@ TYPED_TEST(DecoderSelectorTest, EncryptedStream_SkipAllDecoders) {
base::BindRepeating(TypeParam::SkipDecoderPriorityCB));
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, EncryptedStream_ForceHardwareDecoders) {
@@ -831,9 +847,9 @@ TYPED_TEST(DecoderSelectorTest, EncryptedStream_ForceHardwareDecoders) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, EncryptedStream_NoDecryptor_OneClearDecoder) {
@@ -843,7 +859,7 @@ TYPED_TEST(DecoderSelectorTest, EncryptedStream_NoDecryptor_OneClearDecoder) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, EncryptedStream_NoDecryptor_InternalFallback) {
@@ -854,7 +870,7 @@ TYPED_TEST(DecoderSelectorTest, EncryptedStream_NoDecryptor_InternalFallback) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, EncryptedStream_NoDecryptor_ExternalFallback) {
@@ -865,10 +881,10 @@ TYPED_TEST(DecoderSelectorTest, EncryptedStream_NoDecryptor_ExternalFallback) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest,
@@ -880,12 +896,12 @@ TYPED_TEST(DecoderSelectorTest,
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
- this->decoder_selector_->FinalizeDecoderSelection();
+ this->FinalizeDecoderSelection();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, EncryptedStream_DecryptOnly_NoDecoder) {
@@ -894,7 +910,7 @@ TYPED_TEST(DecoderSelectorTest, EncryptedStream_DecryptOnly_NoDecoder) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, EncryptedStream_DecryptOnly_OneClearDecoder) {
@@ -905,7 +921,7 @@ TYPED_TEST(DecoderSelectorTest, EncryptedStream_DecryptOnly_OneClearDecoder) {
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
EXPECT_CALL(*this, OnDemuxerStreamSelected(NotNull()));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, EncryptedStream_DecryptOnly_InternalFallback) {
@@ -918,7 +934,7 @@ TYPED_TEST(DecoderSelectorTest, EncryptedStream_DecryptOnly_InternalFallback) {
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
EXPECT_CALL(*this, OnDemuxerStreamSelected(NotNull()));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest,
@@ -936,13 +952,13 @@ TYPED_TEST(DecoderSelectorTest,
saved_dds = std::move(dds);
});
- this->SelectDecoder();
+ this->SelectNextDecoder();
- this->decoder_selector_->FinalizeDecoderSelection();
+ this->FinalizeDecoderSelection();
// DDS is reused.
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, EncryptedStream_DecryptAndDecode) {
@@ -964,7 +980,7 @@ TYPED_TEST(DecoderSelectorTest, EncryptedStream_DecryptAndDecode) {
EXPECT_CALL(*this, OnDemuxerStreamSelected(NotNull()));
#endif // !BUILDFLAG(IS_ANDROID)
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest,
@@ -979,7 +995,7 @@ TYPED_TEST(DecoderSelectorTest,
#if !BUILDFLAG(IS_ANDROID)
// DecryptingDecoder is selected immediately.
EXPECT_CALL(*this, OnDecoderSelected(TestFixture::DecoderType::kDecrypting));
- this->SelectDecoder();
+ this->SelectNextDecoder();
#endif // !BUILDFLAG(IS_ANDROID)
// On fallback, a DecryptingDemuxerStream will be created.
@@ -989,11 +1005,11 @@ TYPED_TEST(DecoderSelectorTest,
.WillOnce([&](std::unique_ptr<DecryptingDemuxerStream> dds) {
saved_dds = std::move(dds);
});
- this->SelectDecoder();
+ this->SelectNextDecoder();
// The DecryptingDemuxerStream should be reused.
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
TYPED_TEST(DecoderSelectorTest, ClearToEncryptedStream_DecryptOnly) {
@@ -1003,14 +1019,14 @@ TYPED_TEST(DecoderSelectorTest, ClearToEncryptedStream_DecryptOnly) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
- this->decoder_selector_->FinalizeDecoderSelection();
+ this->FinalizeDecoderSelection();
this->UseEncryptedDecoderConfig();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
EXPECT_CALL(*this, OnDemuxerStreamSelected(NotNull()));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests the production predicate for `DecoderSelector<DemuxerStream::VIDEO>`
@@ -1030,11 +1046,11 @@ TEST_F(VideoDecoderSelectorTest, EncryptedStream_PrioritizeSoftwareDecoders) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder4));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests the production predicate for `DecoderSelector<DemuxerStream::VIDEO>`
@@ -1054,11 +1070,11 @@ TEST_F(VideoDecoderSelectorTest, EncryptedStream_PrioritizePlatformDecoders) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder3));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder4));
- this->SelectDecoder();
+ this->SelectNextDecoder();
EXPECT_CALL(*this, NoDecoderSelected());
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Tests we always use resolution-based rules for RTC.
@@ -1077,7 +1093,7 @@ TEST_F(VideoDecoderSelectorTest, RTC_UseResolutionRuleWithoutSwitch) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder2));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Non-platform decoders should be used for RTC unless enabled by a switch.
@@ -1094,7 +1110,7 @@ TEST_F(VideoDecoderSelectorTest, RTC_SkipNonPlatformDecodersWithoutSwitch) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1)).Times(0);
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Platform decoders should be allowed for RTC without the sw switch.
@@ -1111,7 +1127,7 @@ TEST_F(VideoDecoderSelectorTest, RTC_AllowPlatformDecodersWithoutSwitch) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
// Non-platform decoders should be allowed for RTC if enabled by a switch.
@@ -1128,7 +1144,7 @@ TEST_F(VideoDecoderSelectorTest, RTC_AllowNonPlatformDecodersWithSwitch) {
this->CreateDecoderSelector();
EXPECT_CALL(*this, OnDecoderSelected(kDecoder1));
- this->SelectDecoder();
+ this->SelectNextDecoder();
}
} // namespace media
diff --git a/chromium/media/filters/decoder_stream.cc b/chromium/media/filters/decoder_stream.cc
index b67fb512897..d530e3e95e7 100644
--- a/chromium/media/filters/decoder_stream.cc
+++ b/chromium/media/filters/decoder_stream.cc
@@ -169,7 +169,7 @@ void DecoderStream<StreamType>::Initialize(DemuxerStream* stream,
std::move(waiting_cb));
state_ = STATE_INITIALIZING;
- SelectDecoder();
+ BeginDecoderSelection();
}
template <DemuxerStream::Type StreamType>
@@ -186,8 +186,8 @@ void DecoderStream<StreamType>::Read(ReadCB read_cb) {
TRACE_EVENT_ASYNC_BEGIN0("media", GetReadTraceString<StreamType>(), this);
if (state_ == STATE_ERROR) {
read_cb_ = BindToCurrentLoop(std::move(read_cb));
- // TODO(crbug.com/1129662): Consider attaching a caused-by of the original
- // error as well.
+ // OnDecodeDone, OnBufferReady, and CompleteDecoderReinitialization all set
+ // STATE_ERROR and call SatisfyRead, passing the error back to a ReadCB.
SatisfyRead(DecoderStatus::Codes::kDecoderStreamInErrorState);
return;
}
@@ -337,8 +337,8 @@ void DecoderStream<StreamType>::SkipPrepareUntil(
}
template <DemuxerStream::Type StreamType>
-void DecoderStream<StreamType>::SelectDecoder() {
- decoder_selector_.SelectDecoder(
+void DecoderStream<StreamType>::BeginDecoderSelection() {
+ decoder_selector_.BeginDecoderSelection(
base::BindOnce(&DecoderStream<StreamType>::OnDecoderSelected,
weak_factory_.GetWeakPtr()),
base::BindRepeating(&DecoderStream<StreamType>::OnDecodeOutputReady,
@@ -346,12 +346,23 @@ void DecoderStream<StreamType>::SelectDecoder() {
}
template <DemuxerStream::Type StreamType>
+void DecoderStream<StreamType>::ResumeDecoderSelection(
+ DecoderStatus&& reinit_cause) {
+ decoder_selector_.ResumeDecoderSelection(
+ base::BindOnce(&DecoderStream<StreamType>::OnDecoderSelected,
+ weak_factory_.GetWeakPtr()),
+ base::BindRepeating(&DecoderStream<StreamType>::OnDecodeOutputReady,
+ fallback_weak_factory_.GetWeakPtr()),
+ std::move(reinit_cause));
+}
+
+template <DemuxerStream::Type StreamType>
void DecoderStream<StreamType>::OnDecoderSelected(
- std::unique_ptr<Decoder> selected_decoder,
+ DecoderStatus::Or<std::unique_ptr<Decoder>> decoder_or_error,
std::unique_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream) {
FUNCTION_DVLOG(1) << ": "
- << (selected_decoder
- ? GetDecoderName(selected_decoder->GetDecoderType())
+ << (decoder_or_error.has_value()
+ ? GetDecoderName(decoder_or_error->GetDecoderType())
: "No decoder selected.");
DCHECK(task_runner_->RunsTasksInCurrentSequence());
DCHECK(state_ == STATE_INITIALIZING || state_ == STATE_REINITIALIZING_DECODER)
@@ -377,13 +388,14 @@ void DecoderStream<StreamType>::OnDecoderSelected(
cdm_context_ = nullptr;
}
- decoder_ = std::move(selected_decoder);
- if (decoder_change_observer_cb_)
- decoder_change_observer_cb_.Run(decoder_.get());
+ if (decoder_change_observer_cb_) {
+ decoder_change_observer_cb_.Run(
+ decoder_or_error.has_value() ? (*decoder_or_error).get() : nullptr);
+ }
// TODO(tguilbert): crbug.com/603713 support config changes on decoder reinit.
if (received_config_change_during_reinit_) {
- CompleteDecoderReinitialization(false);
+ CompleteDecoderReinitialization(DecoderStatus::Codes::kInterrupted);
return;
}
@@ -391,18 +403,24 @@ void DecoderStream<StreamType>::OnDecoderSelected(
// never successfully outputed a frame).
fallback_buffers_ = pending_buffers_;
- if (!decoder_) {
+ if (decoder_or_error.has_error()) {
if (state_ == STATE_INITIALIZING) {
state_ = STATE_UNINITIALIZED;
MEDIA_LOG(ERROR, media_log_)
<< GetStreamTypeString() << " decoder initialization failed";
std::move(init_cb_).Run(false);
+      // Note that |decoder_or_error| is not actually lost in this case, as
+ // DecoderSelector is keeping track of it to use in case there are no
+ // successfully initialized decoders.
} else {
- CompleteDecoderReinitialization(false);
+ CompleteDecoderReinitialization(std::move(decoder_or_error).error());
}
return;
}
+ DCHECK(decoder_or_error.has_value());
+ decoder_ = std::move(decoder_or_error).value();
+
// Send logs and statistics updates including the decoder name.
traits_->SetIsPlatformDecoder(decoder_->IsPlatformDecoder());
traits_->SetIsDecryptingDemuxerStream(!!decrypting_demuxer_stream_);
@@ -428,7 +446,7 @@ void DecoderStream<StreamType>::OnDecoderSelected(
<< traits_->GetDecoderConfig(stream_).AsHumanReadableString();
if (state_ == STATE_REINITIALIZING_DECODER) {
- CompleteDecoderReinitialization(true);
+ CompleteDecoderReinitialization(OkStatus());
return;
}
@@ -453,22 +471,35 @@ void DecoderStream<StreamType>::Decode(scoped_refptr<DecoderBuffer> buffer) {
// We don't know if the decoder will error out on first decode yet. Save the
// buffer to feed it to the fallback decoder later if needed.
- if (!decoder_produced_a_frame_)
+ if (!decoder_produced_a_frame_) {
pending_buffers_.push_back(buffer);
+ }
// It's possible for a buffer to arrive from the demuxer right after the
// fallback decoder successfully completed its initialization. At this point
// |pending_buffers_| has already been copied to |fallback_buffers_| and we
// need to append it ourselves.
- if (!fallback_buffers_.empty()) {
- fallback_buffers_.push_back(buffer);
+ if (!fallback_buffers_.empty() || fallback_buffers_being_decoded_ > 0) {
+ fallback_buffers_.push_back(std::exchange(buffer, nullptr));
+
+ // There may already be a pending buffer being decoded after decoder
+ // change. Since decoders can have different max decode requests, we need to
+ // make sure we can actually decode more buffers here.
+ if (!CanDecodeMore()) {
+ return;
+ }
+ }
- scoped_refptr<DecoderBuffer> temp = std::move(fallback_buffers_.front());
+ // TODO(https://crbug.com/1324732): We should DCHECK(CanDecodeMore()) here,
+ // but this breaks a number of tests.
+
+ if (!fallback_buffers_.empty()) {
+ buffer = std::move(fallback_buffers_.front());
fallback_buffers_.pop_front();
- DecodeInternal(std::move(temp));
- } else {
- DecodeInternal(std::move(buffer));
+ ++fallback_buffers_being_decoded_;
}
+
+ DecodeInternal(std::move(buffer));
}
template <DemuxerStream::Type StreamType>
@@ -562,6 +593,10 @@ void DecoderStream<StreamType>::OnDecodeDone(
if (buffer_size > 0)
traits_->ReportStatistics(statistics_cb_, buffer_size);
+ if (fallback_buffers_being_decoded_ > 0) {
+ --fallback_buffers_being_decoded_;
+ }
+
if (state_ == STATE_NORMAL) {
if (end_of_stream) {
state_ = STATE_END_OF_STREAM;
@@ -592,7 +627,13 @@ void DecoderStream<StreamType>::OnDecodeDone(
pending_decode_requests_ = 0;
decoding_eos_ = false;
state_ = STATE_REINITIALIZING_DECODER;
- SelectDecoder();
+ if (fallback_cb_) {
+ DecoderStatus copy = status;
+ PipelineStatus fallback_status = {
+ PipelineStatus::Codes::PIPELINE_ERROR_DECODE, std::move(copy)};
+ fallback_cb_.Run(fallback_status);
+ }
+ ResumeDecoderSelection(std::move(status));
} else {
media_log_->NotifyError(status);
MEDIA_LOG(ERROR, media_log_)
@@ -671,6 +712,7 @@ void DecoderStream<StreamType>::ReadFromDemuxerStream() {
if (!fallback_buffers_.empty()) {
scoped_refptr<DecoderBuffer> buffer = std::move(fallback_buffers_.front());
fallback_buffers_.pop_front();
+ ++fallback_buffers_being_decoded_;
// Decode the buffer without re-appending it to |pending_buffers_|.
DecodeInternal(std::move(buffer));
@@ -714,7 +756,7 @@ void DecoderStream<StreamType>::OnBufferReady(
switch (status) {
case DemuxerStream::kOk:
// Save valid buffers to be consumed by the new decoder.
- // |pending_buffers_| is copied to |fallback_buffers| in
+ // |pending_buffers_| is copied to |fallback_buffers_| in
// OnDecoderSelected().
pending_buffers_.push_back(std::move(buffer));
break;
@@ -742,6 +784,8 @@ void DecoderStream<StreamType>::OnBufferReady(
<< GetStreamTypeString() << " demuxer stream read error!";
pending_buffers_.clear();
ClearOutputs();
+ // TODO(crbug.com/c/1326324): Convert |status| into a typed status so that
+ // it can be set as a cause here.
if (read_cb_)
SatisfyRead(DecoderStatus::Codes::kDecoderStreamDemuxerError);
}
@@ -849,16 +893,17 @@ void DecoderStream<StreamType>::ReinitializeDecoder() {
state_ = STATE_REINITIALIZING_DECODER;
decoder_selector_.PrependDecoder(std::move(decoder_));
- SelectDecoder();
+ BeginDecoderSelection();
}
template <DemuxerStream::Type StreamType>
-void DecoderStream<StreamType>::CompleteDecoderReinitialization(bool success) {
+void DecoderStream<StreamType>::CompleteDecoderReinitialization(
+ DecoderStatus status) {
FUNCTION_DVLOG(2);
DCHECK(task_runner_->RunsTasksInCurrentSequence());
DCHECK_EQ(state_, STATE_REINITIALIZING_DECODER);
- state_ = success ? STATE_NORMAL : STATE_ERROR;
+ state_ = status.is_ok() ? STATE_NORMAL : STATE_ERROR;
if (reset_cb_) {
std::move(reset_cb_).Run();
@@ -871,7 +916,7 @@ void DecoderStream<StreamType>::CompleteDecoderReinitialization(bool success) {
if (state_ == STATE_ERROR) {
MEDIA_LOG(ERROR, media_log_)
<< GetStreamTypeString() << " decoder reinitialization failed";
- SatisfyRead(DecoderStatus::Codes::kDecoderStreamReinitFailed);
+ SatisfyRead(std::move(status));
return;
}
@@ -918,6 +963,7 @@ void DecoderStream<StreamType>::OnDecoderReset() {
// Make sure we read directly from the demuxer after a reset.
fallback_buffers_.clear();
pending_buffers_.clear();
+ fallback_buffers_being_decoded_ = 0;
if (state_ != STATE_FLUSHING_DECODER) {
state_ = STATE_NORMAL;
diff --git a/chromium/media/filters/decoder_stream.h b/chromium/media/filters/decoder_stream.h
index 8a392103385..ec7491da834 100644
--- a/chromium/media/filters/decoder_stream.h
+++ b/chromium/media/filters/decoder_stream.h
@@ -135,6 +135,10 @@ class MEDIA_EXPORT DecoderStream {
decoder_change_observer_cb_ = std::move(decoder_change_observer_cb);
}
+ void set_fallback_observer(PipelineStatusCB fallback_cb) {
+ fallback_cb_ = std::move(fallback_cb);
+ }
+
int get_pending_buffers_size_for_testing() const {
return pending_buffers_.size();
}
@@ -173,13 +177,14 @@ class MEDIA_EXPORT DecoderStream {
// Returns true if one more decode request can be submitted to the decoder.
bool CanDecodeMore() const;
- void SelectDecoder();
+ void BeginDecoderSelection();
+ void ResumeDecoderSelection(DecoderStatus&& reinit_cause);
// Called when |decoder_selector| selected the |selected_decoder|.
// |decrypting_demuxer_stream| was also populated if a DecryptingDemuxerStream
// is created to help decrypt the encrypted stream.
void OnDecoderSelected(
- std::unique_ptr<Decoder> selected_decoder,
+ DecoderStatus::Or<std::unique_ptr<Decoder>> decoder_or_error,
std::unique_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream);
// Satisfy pending |read_cb_| with |result|.
@@ -214,7 +219,7 @@ class MEDIA_EXPORT DecoderStream {
void ReinitializeDecoder();
- void CompleteDecoderReinitialization(bool success);
+ void CompleteDecoderReinitialization(DecoderStatus status);
void ResetDecoder();
void OnDecoderReset();
@@ -236,6 +241,7 @@ class MEDIA_EXPORT DecoderStream {
StatisticsCB statistics_cb_;
InitCB init_cb_;
WaitingCB waiting_cb_;
+ PipelineStatusCB fallback_cb_;
ReadCB read_cb_;
base::OnceClosure reset_cb_;
@@ -301,6 +307,8 @@ class MEDIA_EXPORT DecoderStream {
bool encryption_type_reported_ = false;
+ int fallback_buffers_being_decoded_ = 0;
+
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<DecoderStream<StreamType>> weak_factory_{this};
diff --git a/chromium/media/filters/fuchsia/DIR_METADATA b/chromium/media/filters/fuchsia/DIR_METADATA
deleted file mode 100644
index 5b3985ecc8b..00000000000
--- a/chromium/media/filters/fuchsia/DIR_METADATA
+++ /dev/null
@@ -1,10 +0,0 @@
-# Metadata information for this directory.
-#
-# For more information on DIR_METADATA files, see:
-# https://source.chromium.org/chromium/infra/infra/+/main:go/src/infra/tools/dirmd/README.md
-#
-# For the schema of this file, see Metadata message:
-# https://source.chromium.org/chromium/infra/infra/+/main:go/src/infra/tools/dirmd/proto/dir_metadata.proto
-
-mixins: "//build/fuchsia/COMMON_METADATA"
-os: FUCHSIA \ No newline at end of file
diff --git a/chromium/media/filters/mac/audio_toolbox_audio_encoder.cc b/chromium/media/filters/mac/audio_toolbox_audio_encoder.cc
new file mode 100644
index 00000000000..8c3d6e96e98
--- /dev/null
+++ b/chromium/media/filters/mac/audio_toolbox_audio_encoder.cc
@@ -0,0 +1,322 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/mac/audio_toolbox_audio_encoder.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "base/memory/raw_ptr.h"
+#include "base/task/single_thread_task_runner.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/encoder_status.h"
+#include "media/base/timestamp_constants.h"
+#include "media/formats/mp4/es_descriptor.h"
+
+namespace media {
+
+namespace {
+
+struct InputData {
+ raw_ptr<const AudioBus> bus = nullptr;
+ AudioStreamPacketDescription packet = {};
+ bool flushing = false;
+};
+
+// Special error code we use to differentiate real errors from end of buffer.
+constexpr OSStatus kNoMoreDataError = -12345;
+
+// Callback used to provide input data to the AudioConverter.
+OSStatus ProvideInputCallback(AudioConverterRef decoder,
+ UInt32* num_packets,
+ AudioBufferList* buffer_list,
+ AudioStreamPacketDescription** packets,
+ void* user_data) {
+ auto* input_data = reinterpret_cast<InputData*>(user_data);
+ if (!input_data->bus) {
+ *num_packets = 0;
+ return input_data->flushing ? noErr : kNoMoreDataError;
+ }
+
+ DCHECK(!input_data->flushing);
+
+ const AudioBus* bus = input_data->bus;
+ buffer_list->mNumberBuffers = bus->channels();
+ for (int i = 0; i < bus->channels(); ++i) {
+ buffer_list->mBuffers[i].mNumberChannels = 1;
+ buffer_list->mBuffers[i].mDataByteSize = bus->frames() * sizeof(float);
+
+ // A non-const version of channel(i) exists, but the compiler doesn't select
+ // it for some reason.
+ buffer_list->mBuffers[i].mData = const_cast<float*>(bus->channel(i));
+ }
+
+ *num_packets = bus->frames();
+
+ // This ensures that if this callback is called again, we'll exit via the
+ // kNoMoreDataError path above.
+ input_data->bus = nullptr;
+ return noErr;
+}
+
+void GenerateOutputFormat(const AudioEncoder::Options& options,
+ AudioStreamBasicDescription& output_format) {
+ DCHECK(options.codec == AudioCodec::kAAC);
+
+ // Output is AAC-LC. Documentation:
+ // https://developer.apple.com/documentation/coreaudiotypes/coreaudiotype_constants/mpeg-4_audio_object_type_constants
+ // TODO(crbug.com/1317402): Implement support for other AAC profiles.
+ output_format.mFormatID = kAudioFormatMPEG4AAC;
+ output_format.mFormatFlags = kMPEG4Object_AAC_LC;
+}
+
+bool GenerateCodecDescription(AudioCodec codec,
+ AudioConverterRef encoder,
+ std::vector<uint8_t>& codec_desc) {
+ DCHECK(codec == AudioCodec::kAAC);
+
+ // AAC should always have a codec description available.
+ UInt32 magic_cookie_size = 0;
+ auto result = AudioConverterGetPropertyInfo(
+ encoder, kAudioConverterCompressionMagicCookie, &magic_cookie_size,
+ nullptr);
+ if (result != noErr || !magic_cookie_size) {
+ OSSTATUS_DLOG(ERROR, result) << "Failed to get magic cookie info";
+ return false;
+ }
+
+ std::vector<uint8_t> magic_cookie(magic_cookie_size, 0);
+ result =
+ AudioConverterGetProperty(encoder, kAudioConverterCompressionMagicCookie,
+ &magic_cookie_size, magic_cookie.data());
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result) << "Failed to get magic cookie";
+ return false;
+ }
+
+ // The magic cookie is an ISO-BMFF ESDS box. Use our mp4 tools to extract just
+ // the plain AAC extradata that we need.
+ mp4::ESDescriptor esds;
+ if (!esds.Parse(magic_cookie)) {
+ OSSTATUS_DLOG(ERROR, result) << "Failed to parse magic cookie";
+ return false;
+ }
+
+ if (!mp4::ESDescriptor::IsAAC(esds.object_type())) {
+ OSSTATUS_DLOG(ERROR, result) << "Expected AAC audio object type";
+ return false;
+ }
+
+ codec_desc = esds.decoder_specific_info();
+ return true;
+}
+
+} // namespace
+
+AudioToolboxAudioEncoder::AudioToolboxAudioEncoder() = default;
+
+AudioToolboxAudioEncoder::~AudioToolboxAudioEncoder() {
+ if (!encoder_)
+ return;
+
+ const auto result = AudioConverterDispose(encoder_);
+ OSSTATUS_DLOG_IF(WARNING, result != noErr, result)
+ << "AudioConverterDispose() failed";
+}
+
+void AudioToolboxAudioEncoder::Initialize(const Options& options,
+ OutputCB output_cb,
+ EncoderStatusCB done_cb) {
+ if (output_cb_) {
+ std::move(done_cb).Run(EncoderStatus::Codes::kEncoderInitializeTwice);
+ return;
+ }
+
+ if (options.codec != AudioCodec::kAAC) {
+ DLOG(WARNING) << "Only AAC encoding is supported by this encoder.";
+ std::move(done_cb).Run(EncoderStatus::Codes::kEncoderUnsupportedCodec);
+ return;
+ }
+
+ AudioStreamBasicDescription output_format = {};
+ sample_rate_ = output_format.mSampleRate = options.sample_rate;
+ channel_count_ = output_format.mChannelsPerFrame = options.channels;
+ GenerateOutputFormat(options, output_format);
+
+ if (!CreateEncoder(options, output_format)) {
+ std::move(done_cb).Run(EncoderStatus::Codes::kEncoderInitializationError);
+ return;
+ }
+
+ DCHECK(encoder_);
+
+ if (!GenerateCodecDescription(options.codec, encoder_, codec_desc_)) {
+ std::move(done_cb).Run(EncoderStatus::Codes::kEncoderInitializationError);
+ return;
+ }
+
+ timestamp_helper_ = std::make_unique<AudioTimestampHelper>(sample_rate_);
+ output_cb_ = output_cb;
+ std::move(done_cb).Run(EncoderStatus::Codes::kOk);
+}
+
+void AudioToolboxAudioEncoder::Encode(std::unique_ptr<AudioBus> input_bus,
+ base::TimeTicks capture_time,
+ EncoderStatusCB done_cb) {
+ if (!encoder_) {
+ std::move(done_cb).Run(
+ EncoderStatus::Codes::kEncoderInitializeNeverCompleted);
+ return;
+ }
+
+ if (timestamp_helper_->base_timestamp() == kNoTimestamp)
+ timestamp_helper_->SetBaseTimestamp(capture_time - base::TimeTicks());
+
+ if (input_bus) {
+ DVLOG(1) << __func__ << ": Encoding " << capture_time << ": "
+ << timestamp_helper_->GetFrameDuration(input_bus->frames());
+ } else {
+ DVLOG(1) << __func__ << ": Encoding end-of-stream.";
+ }
+
+ InputData input_data;
+ input_data.bus = input_bus.get();
+ input_data.flushing = !input_bus;
+
+ do {
+ // Note: This doesn't zero initialize the buffer.
+ // FIXME: This greedily allocates, we should preserve the buffer for the
+ // next call if we don't fill it.
+ std::unique_ptr<uint8_t[]> packet_buffer(new uint8_t[max_packet_size_]);
+
+ AudioBufferList output_buffer_list = {};
+ output_buffer_list.mNumberBuffers = 1;
+ output_buffer_list.mBuffers[0].mNumberChannels = channel_count_;
+ output_buffer_list.mBuffers[0].mData = packet_buffer.get();
+ output_buffer_list.mBuffers[0].mDataByteSize = max_packet_size_;
+
+ // Encodes |num_packets| into |packet_buffer| by calling the
+ // ProvideInputCallback to fill an AudioBufferList that points into
+ // |input_bus|. See media::AudioConverter for a similar mechanism.
+ UInt32 num_packets = 1;
+ AudioStreamPacketDescription packet_description = {};
+ auto result = AudioConverterFillComplexBuffer(
+ encoder_, ProvideInputCallback, &input_data, &num_packets,
+ &output_buffer_list, &packet_description);
+
+ if ((result == kNoMoreDataError || result == noErr) && !num_packets) {
+ std::move(done_cb).Run(EncoderStatus::Codes::kOk);
+ return;
+ }
+
+ if (result != noErr && result != kNoMoreDataError) {
+ OSSTATUS_DLOG(ERROR, result)
+ << "AudioConverterFillComplexBuffer() failed";
+ std::move(done_cb).Run(EncoderStatus::Codes::kEncoderFailedEncode);
+ return;
+ }
+
+ DCHECK_LE(packet_description.mDataByteSize, max_packet_size_);
+
+ // All AAC-LC packets are 1024 frames in size. Note: If other AAC profiles
+ // are added later, this value must be updated.
+ auto num_frames = 1024 * num_packets;
+ DVLOG(1) << __func__ << ": Output: num_frames=" << num_frames;
+
+ EncodedAudioBuffer encoded_buffer(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ GuessChannelLayout(channel_count_), sample_rate_,
+ num_frames),
+ std::move(packet_buffer), packet_description.mDataByteSize,
+ base::TimeTicks() + timestamp_helper_->GetTimestamp(),
+ timestamp_helper_->GetFrameDuration(num_frames));
+
+ absl::optional<CodecDescription> desc;
+ if (timestamp_helper_->frame_count() == 0)
+ desc = codec_desc_;
+
+ timestamp_helper_->AddFrames(num_frames);
+ output_cb_.Run(std::move(encoded_buffer), desc);
+ } while (true);
+}
+
+void AudioToolboxAudioEncoder::Flush(EncoderStatusCB flush_cb) {
+ DVLOG(1) << __func__;
+
+ if (!encoder_) {
+ std::move(flush_cb).Run(
+ EncoderStatus::Codes::kEncoderInitializeNeverCompleted);
+ return;
+ }
+
+ // Flush any remaining output.
+ Encode(nullptr, base::TimeTicks(), base::DoNothing());
+
+ const auto result = AudioConverterReset(encoder_);
+
+ auto status_code = EncoderStatus::Codes::kOk;
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result) << "AudioConverterReset() failed";
+ status_code = EncoderStatus::Codes::kEncoderFailedFlush;
+ }
+
+ timestamp_helper_->SetBaseTimestamp(kNoTimestamp);
+ std::move(flush_cb).Run(status_code);
+}
+
+bool AudioToolboxAudioEncoder::CreateEncoder(
+ const Options& options,
+ const AudioStreamBasicDescription& output_format) {
+ // Input is always float planar.
+ AudioStreamBasicDescription input_format = {};
+ input_format.mFormatID = kAudioFormatLinearPCM;
+ input_format.mFormatFlags =
+ kLinearPCMFormatFlagIsFloat | kLinearPCMFormatFlagIsNonInterleaved;
+ input_format.mFramesPerPacket = 1;
+ input_format.mBitsPerChannel = 32;
+ input_format.mSampleRate = options.sample_rate;
+ input_format.mChannelsPerFrame = options.channels;
+
+ // Note: This is important to get right or AudioConverterNew will balk. For
+ // interleaved data, this value should be multiplied by the channel count.
+ input_format.mBytesPerPacket = input_format.mBytesPerFrame =
+ input_format.mBitsPerChannel / 8;
+
+ // Create the encoder.
+ auto result = AudioConverterNew(&input_format, &output_format, &encoder_);
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result) << "AudioConverterNew() failed";
+ return false;
+ }
+
+ // NOTE: We don't setup the AudioConverter channel layout here, though we may
+ // need to in the future to support obscure multichannel layouts.
+
+ if (options.bitrate && options.bitrate > 0) {
+ UInt32 rate = options.bitrate.value();
+ result = AudioConverterSetProperty(encoder_, kAudioConverterEncodeBitRate,
+ sizeof(rate), &rate);
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result) << "Failed to set encoder bitrate";
+ return false;
+ }
+ }
+
+ // AudioConverter requires we provided a suitably sized output for the encoded
+ // buffer, but won't tell us the size before we request it... so we need to
+ // ask it what the maximum possible size is to allocate our output buffers.
+ UInt32 prop_size = sizeof(UInt32);
+ result = AudioConverterGetProperty(
+ encoder_, kAudioConverterPropertyMaximumOutputPacketSize, &prop_size,
+ &max_packet_size_);
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result) << "Failed to retrieve maximum packet size";
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/filters/mac/audio_toolbox_audio_encoder.h b/chromium/media/filters/mac/audio_toolbox_audio_encoder.h
new file mode 100644
index 00000000000..02f7ddfcb0e
--- /dev/null
+++ b/chromium/media/filters/mac/audio_toolbox_audio_encoder.h
@@ -0,0 +1,65 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_MAC_AUDIO_TOOLBOX_AUDIO_ENCODER_H_
+#define MEDIA_FILTERS_MAC_AUDIO_TOOLBOX_AUDIO_ENCODER_H_
+
+#include <memory>
+
+#include <AudioToolbox/AudioToolbox.h>
+
+#include "media/base/audio_bus.h"
+#include "media/base/audio_encoder.h"
+#include "media/base/media_export.h"
+
+namespace media {
+class AudioTimestampHelper;
+
+// Audio encoder based on macOS's AudioToolbox API. The AudioToolbox
+// API is required to encode codecs that aren't supported by Chromium.
+class MEDIA_EXPORT AudioToolboxAudioEncoder : public AudioEncoder {
+ public:
+ AudioToolboxAudioEncoder();
+
+ AudioToolboxAudioEncoder(const AudioToolboxAudioEncoder&) = delete;
+ AudioToolboxAudioEncoder& operator=(const AudioToolboxAudioEncoder&) = delete;
+
+ ~AudioToolboxAudioEncoder() override;
+
+ // AudioEncoder implementation.
+ void Initialize(const Options& options,
+ OutputCB output_cb,
+ EncoderStatusCB done_cb) override;
+ void Encode(std::unique_ptr<AudioBus> audio_bus,
+ base::TimeTicks capture_time,
+ EncoderStatusCB done_cb) override;
+ void Flush(EncoderStatusCB flush_cb) override;
+
+ private:
+  bool CreateEncoder(const Options& options,
+                     const AudioStreamBasicDescription& output_format);
+
+ // "Converter" for turning raw audio into encoded samples.
+ AudioConverterRef encoder_ = nullptr;
+
+ // Actual channel count and layout from encoder, may be different than config.
+ uint32_t channel_count_ = 0u;
+
+ // Actual sample rate from the encoder, may be different than config.
+ uint32_t sample_rate_ = 0u;
+
+ // Callback that delivers encoded frames.
+ OutputCB output_cb_;
+
+ // Maximum possible output size for one call to AudioConverter.
+  uint32_t max_packet_size_ = 0u;
+
+ std::unique_ptr<AudioTimestampHelper> timestamp_helper_;
+
+ std::vector<uint8_t> codec_desc_;
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_MAC_AUDIO_TOOLBOX_AUDIO_ENCODER_H_
diff --git a/chromium/media/filters/pipeline_controller.cc b/chromium/media/filters/pipeline_controller.cc
index 7ea76b5789c..26074c045c6 100644
--- a/chromium/media/filters/pipeline_controller.cc
+++ b/chromium/media/filters/pipeline_controller.cc
@@ -443,6 +443,11 @@ void PipelineController::OnSelectedVideoTrackChanged(
Dispatch();
}
+void PipelineController::OnExternalVideoFrameRequest() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ pipeline_->OnExternalVideoFrameRequest();
+}
+
void PipelineController::FireOnTrackChangeCompleteForTesting(State set_to) {
previous_track_change_state_ = set_to;
OnTrackChangeComplete();
diff --git a/chromium/media/filters/pipeline_controller.h b/chromium/media/filters/pipeline_controller.h
index 425b1e1e1ae..116cae18973 100644
--- a/chromium/media/filters/pipeline_controller.h
+++ b/chromium/media/filters/pipeline_controller.h
@@ -148,6 +148,7 @@ class MEDIA_EXPORT PipelineController {
const std::vector<MediaTrack::Id>& enabled_track_ids);
void OnSelectedVideoTrackChanged(
absl::optional<MediaTrack::Id> selected_track_id);
+ void OnExternalVideoFrameRequest();
// Used to fire the OnTrackChangeComplete function which is captured in a
// OnceCallback, and doesn't play nicely with gmock.
diff --git a/chromium/media/filters/pipeline_controller_unittest.cc b/chromium/media/filters/pipeline_controller_unittest.cc
index 207ea530de8..8bd37dfefeb 100644
--- a/chromium/media/filters/pipeline_controller_unittest.cc
+++ b/chromium/media/filters/pipeline_controller_unittest.cc
@@ -141,6 +141,7 @@ class PipelineControllerTest : public ::testing::Test, public Pipeline::Client {
// Pipeline::Client overrides
void OnError(PipelineStatus status) override { NOTREACHED(); }
+ void OnFallback(PipelineStatus status) override { NOTREACHED(); }
void OnEnded() override {}
void OnMetadata(const PipelineMetadata& metadata) override {}
void OnBufferingStateChange(BufferingState state,
diff --git a/chromium/media/filters/source_buffer_range.cc b/chromium/media/filters/source_buffer_range.cc
index 1412275ecdd..c11f26ddd1a 100644
--- a/chromium/media/filters/source_buffer_range.cc
+++ b/chromium/media/filters/source_buffer_range.cc
@@ -952,9 +952,9 @@ std::string SourceBufferRange::ToStringForDebugging() const {
<< ", buffers.size()=" << buffers_.size()
<< ", keyframe_map_.size()=" << keyframe_map_.size()
<< ", keyframe_map_:\n";
- for (const auto& entry : keyframe_map_) {
- result << "\t pts " << entry.first.InMicroseconds()
- << ", unadjusted idx = " << entry.second << "\n";
+ for (const auto& [time_delta, idx] : keyframe_map_) {
+ result << "\t pts " << time_delta.InMicroseconds()
+ << ", unadjusted idx = " << idx << "\n";
}
#endif // !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
diff --git a/chromium/media/filters/source_buffer_state.cc b/chromium/media/filters/source_buffer_state.cc
index 74814c3be00..14b82f19792 100644
--- a/chromium/media/filters/source_buffer_state.cc
+++ b/chromium/media/filters/source_buffer_state.cc
@@ -718,37 +718,6 @@ bool SourceBufferState::OnNewConfigs(
<< " config: " << video_config.AsHumanReadableString();
DCHECK(video_config.IsValidConfig());
- if (video_config.codec() == VideoCodec::kHEVC) {
-#if BUILDFLAG(ENABLE_PLATFORM_ENCRYPTED_HEVC)
-#if BUILDFLAG(IS_CHROMEOS_LACROS)
- if (!base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kLacrosEnablePlatformEncryptedHevc)) {
- NOTREACHED() << "MSE parser must not emit HEVC tracks on runtime "
- "configurations that do not support HEVC playback "
- "via platform.";
- return false;
- }
-#endif // BUILDFLAG(IS_CHROMEOS_LACROS)
- // HEVC is only supported through EME under this build flag, so
- // require the config to be for an encrypted track. Even so,
- // conditionally allow clear HEVC if cmdline has test override.
- if (video_config.encryption_scheme() ==
- EncryptionScheme::kUnencrypted &&
- !base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableClearHevcForTesting)) {
- MEDIA_LOG(ERROR, media_log_)
- << "MSE playback of HEVC on is only supported via platform "
- "decryptor, but the provided HEVC "
- "track is not encrypted.";
- return false;
- }
-#elif !BUILDFLAG(ENABLE_PLATFORM_HEVC)
- NOTREACHED()
- << "MSE parser must not emit HEVC tracks on build configurations "
- "that do not support HEVC playback via platform.";
-#endif // BUILDFLAG(ENABLE_PLATFORM_ENCRYPTED_HEVC)
- }
-
const auto& it = std::find(expected_vcodecs.begin(),
expected_vcodecs.end(), video_config.codec());
if (it == expected_vcodecs.end()) {
@@ -978,10 +947,9 @@ bool SourceBufferState::OnNewBuffers(
DCHECK(timestamp_offset_during_append_);
DCHECK(parsing_media_segment_);
- for (const auto& it : buffer_queue_map) {
- const StreamParser::BufferQueue& bufq = it.second;
- DCHECK(!bufq.empty());
- media_segment_has_data_for_track_[it.first] = true;
+ for (const auto& [track_id, buffer_queue] : buffer_queue_map) {
+ DCHECK(!buffer_queue.empty());
+ media_segment_has_data_for_track_[track_id] = true;
}
const base::TimeDelta timestamp_offset_before_processing =
@@ -994,12 +962,11 @@ bool SourceBufferState::OnNewBuffers(
timestamp_offset_before_processing;
if (generate_timestamps_flag()) {
base::TimeDelta min_end_timestamp = kNoTimestamp;
- for (const auto& it : buffer_queue_map) {
- const StreamParser::BufferQueue& bufq = it.second;
- DCHECK(!bufq.empty());
+ for (const auto& [track_id, buffer_queue] : buffer_queue_map) {
+ DCHECK(!buffer_queue.empty());
if (min_end_timestamp == kNoTimestamp ||
- EndTimestamp(bufq) < min_end_timestamp) {
- min_end_timestamp = EndTimestamp(bufq);
+ EndTimestamp(buffer_queue) < min_end_timestamp) {
+ min_end_timestamp = EndTimestamp(buffer_queue);
DCHECK_NE(kNoTimestamp, min_end_timestamp);
}
}
diff --git a/chromium/media/filters/video_cadence_estimator.h b/chromium/media/filters/video_cadence_estimator.h
index 60c3f5e8326..d1cc14bacfb 100644
--- a/chromium/media/filters/video_cadence_estimator.h
+++ b/chromium/media/filters/video_cadence_estimator.h
@@ -8,6 +8,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <string>
#include <vector>
#include "base/time/time.h"
diff --git a/chromium/media/filters/vp9_compressed_header_parser.cc b/chromium/media/filters/vp9_compressed_header_parser.cc
index ec21d038484..4daae0d83eb 100644
--- a/chromium/media/filters/vp9_compressed_header_parser.cc
+++ b/chromium/media/filters/vp9_compressed_header_parser.cc
@@ -155,7 +155,7 @@ void Vp9CompressedHeaderParser::ReadCoefProbs(Vp9FrameHeader* fhdr) {
for (auto& ai : fhdr->frame_context.coef_probs[tx_size]) {
for (auto& aj : ai) {
for (auto& ak : aj) {
- int max_l = (ak == aj[0]) ? 3 : 6;
+ int max_l = (+ak == +aj[0]) ? 3 : 6;
for (int l = 0; l < max_l; l++) {
DiffUpdateProbArray(ak[l]);
}
diff --git a/chromium/media/filters/vp9_parser.cc b/chromium/media/filters/vp9_parser.cc
index 97e4525a68c..6de3dab48c1 100644
--- a/chromium/media/filters/vp9_parser.cc
+++ b/chromium/media/filters/vp9_parser.cc
@@ -373,7 +373,7 @@ bool Vp9FrameContext::IsValid() const {
for (auto& ai : a) {
for (auto& aj : ai) {
for (auto& ak : aj) {
- int max_l = (ak == aj[0]) ? 3 : 6;
+ int max_l = (+ak == +aj[0]) ? 3 : 6;
for (int l = 0; l < max_l; l++) {
for (auto& x : ak[l]) {
if (x == 0) {
diff --git a/chromium/media/filters/win/media_foundation_audio_decoder.cc b/chromium/media/filters/win/media_foundation_audio_decoder.cc
new file mode 100644
index 00000000000..66ddd3cd963
--- /dev/null
+++ b/chromium/media/filters/win/media_foundation_audio_decoder.cc
@@ -0,0 +1,463 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <mfapi.h>
+#include <mferror.h>
+#include <stdint.h>
+#include <wmcodecdsp.h>
+
+#include "base/auto_reset.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/task/single_thread_task_runner.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/windows_version.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_sample_types.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/base/limits.h"
+#include "media/base/status.h"
+#include "media/base/timestamp_constants.h"
+#include "media/base/win/mf_helpers.h"
+#include "media/base/win/mf_initializer.h"
+#include "media/filters/win/media_foundation_audio_decoder.h"
+#include "media/filters/win/media_foundation_utils.h"
+
+namespace media {
+
+namespace {
+
+bool PopulateInputSample(IMFSample* sample, const DecoderBuffer& input) {
+ Microsoft::WRL::ComPtr<IMFMediaBuffer> buffer;
+ HRESULT hr = sample->GetBufferByIndex(0, &buffer);
+ RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample", false);
+
+ DWORD max_length = 0;
+ DWORD current_length = 0;
+ uint8_t* destination = nullptr;
+ hr = buffer->Lock(&destination, &max_length, &current_length);
+ RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", false);
+
+ RETURN_ON_FAILURE(!current_length, "Input length is zero", false);
+ RETURN_ON_FAILURE(input.data_size() <= max_length, "Input length is too long",
+ false);
+ memcpy(destination, input.data(), input.data_size());
+
+ hr = buffer->SetCurrentLength(input.data_size());
+ RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", false);
+
+ hr = buffer->Unlock();
+ RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", false);
+
+ RETURN_ON_HR_FAILURE(
+ sample->SetSampleTime(input.timestamp().InNanoseconds() / 100),
+ "Failed to set input timestamp", false);
+ return true;
+}
+
+} // namespace
+
+// static
+std::unique_ptr<MediaFoundationAudioDecoder>
+MediaFoundationAudioDecoder::Create(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+ return InitializeMediaFoundation()
+ ? std::make_unique<MediaFoundationAudioDecoder>(
+ std::move(task_runner))
+ : nullptr;
+}
+
+MediaFoundationAudioDecoder::MediaFoundationAudioDecoder(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+ task_runner_ = task_runner;
+}
+
+MediaFoundationAudioDecoder::~MediaFoundationAudioDecoder() {}
+
+AudioDecoderType MediaFoundationAudioDecoder::GetDecoderType() const {
+ return AudioDecoderType::kMediaFoundation;
+}
+
+void MediaFoundationAudioDecoder::Initialize(const AudioDecoderConfig& config,
+ CdmContext* cdm_context,
+ InitCB init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
+#if BUILDFLAG(USE_PROPRIETARY_CODECS) && BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+ if (config.codec() != AudioCodec::kDTS &&
+ config.codec() != AudioCodec::kDTSXP2) {
+ std::move(init_cb).Run(
+ DecoderStatus(DecoderStatus::Codes::kUnsupportedCodec,
+ "MFT Codec does not support DTS content"));
+ return;
+ }
+#else
+#error "MediaFoundationAudioDecoder requires proprietary codecs and DTS audio"
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS) &&
+ // BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+
+ // FIXME: MFT will need to be signed by a Microsoft Certificate
+ // to support a secured chain of custody on Windows.
+ if (config.is_encrypted()) {
+ std::move(init_cb).Run(
+ DecoderStatus(DecoderStatus::Codes::kUnsupportedEncryptionMode,
+ "MFT Codec does not support encrypted content"));
+ return;
+ }
+
+ // This shouldn't be possible outside of tests since production code will use
+ // the Create() method above.
+ if (!InitializeMediaFoundation()) {
+ std::move(init_cb).Run(
+ DecoderStatus(DecoderStatus::Codes::kMediaFoundationNotAvailable,
+ "Unable to initialize Microsoft Media Foundation"));
+ return;
+ }
+
+ config_ = config;
+ output_cb_ = output_cb;
+
+#if BUILDFLAG(USE_PROPRIETARY_CODECS) && BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+ if (config.codec() == AudioCodec::kDTS ||
+ config.codec() == AudioCodec::kDTSXP2) {
+ std::move(init_cb).Run(
+ CreateDecoder() ? OkStatus() : DecoderStatus::Codes::kUnsupportedCodec);
+ }
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS) &&
+ // BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+}
+
+void MediaFoundationAudioDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
+ DecodeCB decode_cb) {
+ if (buffer->end_of_stream()) {
+ switch (decoder_->ProcessMessage(MFT_MESSAGE_COMMAND_DRAIN, 0)) {
+ case S_OK: {
+ OutputStatus rc;
+ do {
+ rc = PumpOutput(PumpState::kNormal);
+ } while (rc == OutputStatus::kSuccess);
+ // Return kOk if more input is needed since this is end of stream
+ std::move(decode_cb).Run(rc == OutputStatus::kFailed
+ ? DecoderStatus::Codes::kFailed
+ : DecoderStatus::Codes::kOk);
+ return;
+ }
+ case MF_E_TRANSFORM_TYPE_NOT_SET:
+ std::move(decode_cb).Run(DecoderStatus::Codes::kPlatformDecodeFailure);
+ return;
+ default:
+ std::move(decode_cb).Run(DecoderStatus::Codes::kFailed);
+ return;
+ }
+ }
+
+ if (buffer->timestamp() == kNoTimestamp) {
+ DVLOG(1) << "Received a buffer without timestamps!";
+ std::move(decode_cb).Run(DecoderStatus::Codes::kMissingTimestamp);
+ return;
+ }
+
+ if (timestamp_helper_->base_timestamp() == kNoTimestamp || has_reset_) {
+ has_reset_ = false;
+ timestamp_helper_->SetBaseTimestamp(buffer->timestamp());
+ }
+
+ auto sample = CreateEmptySampleWithBuffer(buffer->data_size(), 0);
+ if (!sample) {
+ std::move(decode_cb).Run(DecoderStatus::Codes::kFailed);
+ return;
+ }
+
+ if (!PopulateInputSample(sample.Get(), *buffer)) {
+ std::move(decode_cb).Run(DecoderStatus::Codes::kFailed);
+ return;
+ }
+
+ auto hr = decoder_->ProcessInput(0, sample.Get(), 0);
+ if (hr != S_OK && hr != MF_E_NOTACCEPTING) {
+ DecoderStatus::Codes rc;
+ switch (hr) {
+ case MF_E_NO_SAMPLE_DURATION:
+ rc = DecoderStatus::Codes::kDecoderStreamInErrorState;
+ break;
+ case MF_E_TRANSFORM_TYPE_NOT_SET:
+ rc = DecoderStatus::Codes::kPlatformDecodeFailure;
+ break;
+ case MF_E_NO_SAMPLE_TIMESTAMP:
+ rc = DecoderStatus::Codes::kMissingTimestamp;
+ break;
+ default:
+ rc = DecoderStatus::Codes::kFailed;
+ break;
+ }
+ // Drop remaining samples on error, no need to call PumpOutput
+ std::move(decode_cb).Run(rc);
+ return;
+ }
+
+ OutputStatus rc;
+ do {
+ rc = PumpOutput(PumpState::kNormal);
+ if (rc == OutputStatus::kNeedMoreInput)
+ break;
+ if (rc == OutputStatus::kFailed) {
+ std::move(decode_cb).Run(DecoderStatus::Codes::kFailed);
+ return;
+ }
+ } while (rc == OutputStatus::kSuccess);
+ std::move(decode_cb).Run(OkStatus());
+}
+
+void MediaFoundationAudioDecoder::Reset(base::OnceClosure reset_cb) {
+ has_reset_ = true;
+ auto hr = decoder_->ProcessMessage(MFT_MESSAGE_COMMAND_FLUSH, 0);
+ if (hr != S_OK) {
+ DLOG(ERROR) << "Reset failed with \"" << PrintHr(hr) << "\"";
+ }
+ std::move(reset_cb).Run();
+}
+
+bool MediaFoundationAudioDecoder::NeedsBitstreamConversion() const {
+ // DTS does not require any header/bit stream conversion
+ return false;
+}
+
+bool MediaFoundationAudioDecoder::CreateDecoder() {
+  // Find the decoder factory.
+  //
+  // Note: It'd be nice if there was an asynchronous DTS MFT (to avoid the need
+  // for a codec pump), but alas MFT_ENUM_FLAG_ASYNC_MFT returns no matches :(
+  MFT_REGISTER_TYPE_INFO type_info;
+  switch (config_.codec()) {
+#if BUILDFLAG(USE_PROPRIETARY_CODECS) && BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+    case AudioCodec::kDTSXP2:
+      type_info = {MFMediaType_Audio, MFAudioFormat_DTS_UHD};
+      break;
+    case AudioCodec::kDTS:
+      type_info = {MFMediaType_Audio, MFAudioFormat_DTS_RAW};
+      break;
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS) &&
+       // BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+    default:
+      type_info = {MFMediaType_Audio, MFAudioFormat_Base};
+  }
+
+  // Per the MFTEnumEx() contract the caller must Release() every IMFActivate
+  // in the returned array and free the array itself with CoTaskMemFree().
+  IMFActivate** acts = nullptr;
+  UINT32 acts_num = 0;
+  HRESULT hr = ::MFTEnumEx(MFT_CATEGORY_AUDIO_DECODER,
+                           MFT_ENUM_FLAG_SYNCMFT | MFT_ENUM_FLAG_LOCALMFT |
+                               MFT_ENUM_FLAG_SORTANDFILTER,
+                           &type_info, nullptr, &acts, &acts_num);
+  if (FAILED(hr) || acts_num < 1)
+    return false;
+
+  // Create the decoder by activating the first (best-ranked) MFT. Capture the
+  // HRESULT so the cleanup below runs even when activation fails; previously
+  // the early return leaked both the activate objects and the array.
+  hr = acts[0]->ActivateObject(IID_PPV_ARGS(&decoder_));
+
+  // Release all activated and unactivated objects, then free the array.
+  for (UINT32 curr_act = 0; curr_act < acts_num; ++curr_act)
+    acts[curr_act]->Release();
+  ::CoTaskMemFree(acts);
+
+  RETURN_ON_HR_FAILURE(hr, "Failed to activate DTS MFT", false);
+
+  // Configure DTS input.
+  Microsoft::WRL::ComPtr<IMFMediaType> input_type;
+  RETURN_ON_HR_FAILURE(GetDefaultAudioType(config_, &input_type),
+                       "Failed to create IMFMediaType for input data", false);
+
+  RETURN_ON_HR_FAILURE(decoder_->SetInputType(0, input_type.Get(), 0),
+                       "Failed to set input type for IMFTransform", false);
+
+  return ConfigureOutput();
+}
+
+bool MediaFoundationAudioDecoder::ConfigureOutput() {
+  // Walks the MFT's advertised output types, selects one, records the
+  // resulting channel count/layout and sample rate, and allocates the staging
+  // sample used by PumpOutput(). Returns false when no acceptable type is
+  // found or any MediaFoundation call fails.
+  Microsoft::WRL::ComPtr<IMFMediaType> output_type;
+  for (uint32_t i = 0;
+       SUCCEEDED(decoder_->GetOutputAvailableType(0, i, &output_type)); ++i) {
+    GUID out_type;
+    RETURN_ON_HR_FAILURE(output_type->GetGUID(MF_MT_MAJOR_TYPE, &out_type),
+                         "Failed to get output main type", false);
+    GUID out_subtype;
+    RETURN_ON_HR_FAILURE(output_type->GetGUID(MF_MT_SUBTYPE, &out_subtype),
+                         "Failed to get output subtype", false);
+
+#if BUILDFLAG(USE_PROPRIETARY_CODECS) && BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+    if (config_.codec() == AudioCodec::kDTS ||
+        config_.codec() == AudioCodec::kDTSXP2) {
+      // Configuration specific to DTS Sound Unbound MFT v1.3.0: the desired
+      // PCM output is identified by its index in the available-type list.
+      // DTS-CA 5.1 (6 channels)
+      constexpr uint32_t DTS_5_1 = 2;
+      // DTS:X P2 5.1 (6 channels) or 5.1.4 (downmix to 6 channels)
+      constexpr uint32_t DTSX_5_1_DOWNMIX = 3;
+      if ((out_subtype == MFAudioFormat_PCM && i == DTS_5_1 &&
+           config_.codec() == AudioCodec::kDTS) ||
+          (out_subtype == MFAudioFormat_PCM && i == DTSX_5_1_DOWNMIX &&
+           config_.codec() == AudioCodec::kDTSXP2)) {
+        RETURN_ON_HR_FAILURE(decoder_->SetOutputType(0, output_type.Get(), 0),
+                             "Failed to set output type IMFTransform", false);
+
+        // The audio MFT requires the caller to supply the output sample;
+        // size/alignment come from the stream info.
+        MFT_OUTPUT_STREAM_INFO info = {0};
+        RETURN_ON_HR_FAILURE(decoder_->GetOutputStreamInfo(0, &info),
+                             "Failed to get output stream info", false);
+
+        output_sample_ =
+            CreateEmptySampleWithBuffer(info.cbSize, info.cbAlignment);
+        RETURN_ON_FAILURE(!!output_sample_, "Failed to create staging sample",
+                          false);
+
+        RETURN_ON_HR_FAILURE(
+            output_type->GetUINT32(MF_MT_AUDIO_NUM_CHANNELS, &channel_count_),
+            "Failed to get output channel count", false);
+
+        // Only the 6-channel layouts are accepted.
+        // NOTE(review): when this rejects a type, output_sample_ stays set
+        // from this iteration, so the `!output_sample_` skip below no longer
+        // fires on later iterations — confirm that is intended.
+        if (channel_count_ != 6) {
+          output_type.Reset();
+          continue;
+        }
+      }
+    }
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS) &&
+       // BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+    // A type is only "selected" once the staging sample exists; otherwise
+    // keep scanning.
+    if (!output_sample_) {
+      output_type.Reset();
+      continue;
+    }
+
+    // Check the optional channel mask argument.
+    ChannelConfig mask = 0u;
+    auto hr = output_type->GetUINT32(MF_MT_AUDIO_CHANNEL_MASK, &mask);
+    if (hr == MF_E_ATTRIBUTENOTFOUND) {
+      // No mask provided by the MFT; derive a layout from the channel count.
+      channel_layout_ = GuessChannelLayout(channel_count_);
+    } else {
+      RETURN_ON_HR_FAILURE(hr, "Failed to get output channel mask", false);
+      channel_layout_ = ChannelConfigToChannelLayout(mask);
+
+      RETURN_ON_FAILURE(static_cast<uint32_t>(ChannelLayoutToChannelCount(
+                            channel_layout_)) == channel_count_ ||
+                            channel_layout_ == CHANNEL_LAYOUT_DISCRETE,
+                        "Channel layout and channel count don't match", false);
+    }
+
+    RETURN_ON_HR_FAILURE(
+        output_type->GetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND, &sample_rate_),
+        "Failed to get output sample rate", false);
+
+    RETURN_ON_FAILURE(
+        channel_count_ > 0 && channel_count_ <= limits::kMaxChannels,
+        "Channel count is not supported", false);
+
+    RETURN_ON_FAILURE(sample_rate_ >= limits::kMinSampleRate &&
+                          sample_rate_ <= limits::kMaxSampleRate,
+                      "Sample rate is not supported", false);
+
+    timestamp_helper_ = std::make_unique<AudioTimestampHelper>(sample_rate_);
+    return true;
+  }
+  // NOTE(review): this tail is only reached when every available type was
+  // rejected; `output_type` then holds the last rejected candidate (or null),
+  // so calling SetOutputType here before returning false looks suspicious —
+  // confirm the intent.
+  RETURN_ON_HR_FAILURE(decoder_->SetOutputType(0, output_type.Get(), 0),
+                       "Failed to set output type IMFTransform", false);
+  return false;
+}
+
+int GetBytesPerFrame(AudioCodec codec) {
+  // Despite the name, this returns the number of bytes per *sample* (one
+  // channel); callers divide the buffer length by this value and by the
+  // channel count to obtain the frame count.
+  switch (codec) {
+#if BUILDFLAG(USE_PROPRIETARY_CODECS) && BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+    // DTS Sound Unbound MFT v1.3 supports 24-bit PCM output only
+    case AudioCodec::kDTS:
+    case AudioCodec::kDTSXP2:
+      return 3;
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS) &&
+       // BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+    default:
+      // All other codecs are assumed to decode to 32-bit (float) samples.
+      return 4;
+  }
+}
+
+MediaFoundationAudioDecoder::OutputStatus
+MediaFoundationAudioDecoder::PumpOutput(PumpState pump_state) {
+  // Drains one decoded buffer from the MFT and delivers it via |output_cb_|.
+  //
+  // Unlike video, the audio MFT requires that we provide the output sample
+  // instead of allocating it for us.
+  MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
+  output_data_buffer.pSample = output_sample_.Get();
+
+  DWORD status = 0;
+  auto hr = decoder_->ProcessOutput(0, 1, &output_data_buffer, &status);
+  if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+    DVLOG(3) << __func__ << ": More input needed to decode outputs.";
+    return OutputStatus::kNeedMoreInput;
+  }
+
+  // A stream change means the output format (channels/sample rate) changed.
+  // Reconfigure and pump once more; kStreamChange guards against recursing.
+  if (hr == MF_E_TRANSFORM_STREAM_CHANGE &&
+      pump_state != PumpState::kStreamChange) {
+    if (!ConfigureOutput())
+      return OutputStatus::kFailed;
+
+    DVLOG(1) << "New config: ch=" << channel_count_ << ", sr=" << sample_rate_
+             << " (" << config_.AsHumanReadableString() << ")";
+    PumpOutput(PumpState::kStreamChange);
+    return OutputStatus::kStreamChange;
+  }
+
+  // The events collection is unused, but must be released if provided. Do so
+  // before the failure check below so it is not leaked on other errors.
+  IMFCollection* events = output_data_buffer.pEvents;
+  if (events)
+    events->Release();
+
+  RETURN_ON_HR_FAILURE(hr, "Failed to process output", OutputStatus::kFailed);
+
+  Microsoft::WRL::ComPtr<IMFMediaBuffer> output_buffer;
+  RETURN_ON_HR_FAILURE(
+      output_sample_->ConvertToContiguousBuffer(&output_buffer),
+      "Failed to map sample into a contiguous output buffer",
+      OutputStatus::kFailed);
+
+  DWORD current_length = 0;
+  uint8_t* destination = nullptr;
+  RETURN_ON_HR_FAILURE(output_buffer->Lock(&destination, NULL, &current_length),
+                       "Failed to lock output buffer", OutputStatus::kFailed);
+
+  // Derive the frame count from the decoder's bytes-per-sample (3 for the
+  // 24-bit DTS PCM output, 4 otherwise).
+  int sample_byte_len = GetBytesPerFrame(config_.codec());
+  size_t frames = (current_length / sample_byte_len / channel_count_);
+  if (frames == 0u) {
+    // Unlock before bailing out; previously this path returned with the
+    // buffer still locked.
+    output_buffer->Unlock();
+    DLOG(ERROR) << "Invalid output buffer size";
+    return OutputStatus::kFailed;
+  }
+
+  if (!pool_)
+    pool_ = base::MakeRefCounted<AudioBufferMemoryPool>();
+
+  auto audio_buffer =
+      AudioBuffer::CreateBuffer(kSampleFormatF32, channel_layout_,
+                                channel_count_, sample_rate_, frames, pool_);
+  audio_buffer->set_timestamp(timestamp_helper_->GetTimestamp());
+
+#if BUILDFLAG(USE_PROPRIETARY_CODECS) && BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+  // DTS Sound Unbound MFT v1.3.0 outputs 24-bit little-endian PCM samples;
+  // expand each to a sign-extended 32-bit integer, then convert to float.
+  if (config_.codec() == AudioCodec::kDTS ||
+      config_.codec() == AudioCodec::kDTSXP2) {
+    float* channel_data =
+        reinterpret_cast<float*>(audio_buffer->channel_data()[0]);
+    int8_t* pcm24 = reinterpret_cast<int8_t*>(destination);
+    for (uint64_t i = 0; i < frames; i++) {
+      for (uint64_t ch = 0; ch < channel_count_; ch++) {
+        int32_t pcmi = (*pcm24++ << 8) & 0xff00;
+        pcmi |= (*pcm24++ << 16) & 0xff0000;
+        pcmi |= (*pcm24++ << 24) & 0xff000000;
+        *channel_data++ = SignedInt32SampleTypeTraits::ToFloat(pcmi);
+      }
+    }
+  }
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS) &&
+       // BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+  // NOTE(review): for non-DTS codecs nothing copies |destination| into
+  // |audio_buffer| here — confirm this decoder is only reachable for DTS.
+
+  timestamp_helper_->AddFrames(frames);
+
+  output_buffer->Unlock();
+
+  output_cb_.Run(std::move(audio_buffer));
+  return OutputStatus::kSuccess;
+}
+
+} // namespace media
diff --git a/chromium/media/filters/win/media_foundation_audio_decoder.h b/chromium/media/filters/win/media_foundation_audio_decoder.h
new file mode 100644
index 00000000000..d96545d179c
--- /dev/null
+++ b/chromium/media/filters/win/media_foundation_audio_decoder.h
@@ -0,0 +1,122 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_WIN_MEDIA_FOUNDATION_AUDIO_DECODER_H_
+#define MEDIA_FILTERS_WIN_MEDIA_FOUNDATION_AUDIO_DECODER_H_
+
+#include <mfidl.h>
+#include <wrl/client.h>
+
+#include <memory>
+
+#include "base/callback.h"
+#include "base/sequence_checker.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_decoder.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/media_export.h"
+#include "media/base/media_log.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+namespace media {
+class AudioBufferMemoryPool;
+class AudioTimestampHelper;
+
+// MFAudioDecoder is based on Window's MediaFoundation API. The MediaFoundation
+// API is required to decode codecs that aren't supported by Chromium.
+class MEDIA_EXPORT MediaFoundationAudioDecoder : public AudioDecoder {
+ public:
+  // Creates a MediaFoundationAudioDecoder if MediaFoundation is supported,
+  // returns nullptr if not.
+  static std::unique_ptr<MediaFoundationAudioDecoder> Create(
+      scoped_refptr<base::SingleThreadTaskRunner> task_runner);
+
+  // |task_runner| is the sequence all decoder methods must be invoked on.
+  MediaFoundationAudioDecoder(
+      scoped_refptr<base::SingleThreadTaskRunner> task_runner);
+
+  MediaFoundationAudioDecoder(const MediaFoundationAudioDecoder&) = delete;
+  MediaFoundationAudioDecoder& operator=(const MediaFoundationAudioDecoder&) =
+      delete;
+
+  ~MediaFoundationAudioDecoder() override;
+
+  // AudioDecoder implementation.
+  AudioDecoderType GetDecoderType() const override;
+  void Initialize(const AudioDecoderConfig& config,
+                  CdmContext* cdm_context,
+                  InitCB init_cb,
+                  const OutputCB& output_cb,
+                  const WaitingCB& waiting_cb) override;
+  void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+  void Reset(base::OnceClosure reset_cb) override;
+  bool NeedsBitstreamConversion() const override;
+
+ private:
+  // There are four states the decoder can be in:
+  //
+  // - kUninitialized: The decoder is not initialized.
+  // - kNormal: This is the normal state. The decoder is idle and ready to
+  //            decode input buffers, or is decoding an input buffer.
+  // - kDecodeFinished: EOS buffer received, codec flushed and decode finished.
+  //                    No further Decode() call should be made.
+  // - kError: Unexpected error happened.
+  //
+  // These are the possible state transitions.
+  //
+  // kUninitialized -> kNormal:
+  //     The decoder is successfully initialized and is ready to decode buffers.
+  // kNormal -> kDecodeFinished:
+  //     When buffer->end_of_stream() is true.
+  // kNormal -> kError:
+  //     A decoding error occurs and decoding needs to stop.
+  // (any state) -> kNormal:
+  //     Any time Reset() is called.
+  enum class DecoderState { kUninitialized, kNormal, kDecodeFinished, kError };
+
+  // Finds and activates the platform MFT for the configured codec, then sets
+  // up its input type. Returns false on failure.
+  bool CreateDecoder();
+  // Selects an output type from the MFT and caches the resulting channel
+  // count/layout and sample rate. Returns false on failure.
+  bool ConfigureOutput();
+
+  enum class OutputStatus { kSuccess, kNeedMoreInput, kStreamChange, kFailed };
+  enum class PumpState { kNormal, kStreamChange };
+
+  // Drains one decoded buffer from the MFT, delivering it via |output_cb_|.
+  OutputStatus PumpOutput(PumpState pump_state);
+
+  // Used to post tasks. This class is single threaded and every method should
+  // run on this task runner.
+  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+  // Cached decoder config.
+  AudioDecoderConfig config_;
+
+  // The underlying MediaFoundation transform (synchronous MFT).
+  Microsoft::WRL::ComPtr<IMFTransform> decoder_;
+
+  // Actual channel count and layout from decoder, may be different than config.
+  uint32_t channel_count_ = 0u;
+  ChannelLayout channel_layout_ = CHANNEL_LAYOUT_UNSUPPORTED;
+
+  // Actual sample rate from the decoder, may be different than config.
+  uint32_t sample_rate_ = 0u;
+
+  // Output sample staging buffer, allocated by ConfigureOutput().
+  Microsoft::WRL::ComPtr<IMFSample> output_sample_;
+
+  // Callback that delivers output frames.
+  OutputCB output_cb_;
+
+  // Converts frame counts to timestamps at the decoder's sample rate.
+  std::unique_ptr<AudioTimestampHelper> timestamp_helper_;
+
+  // Pool which helps avoid thrashing memory when returning audio buffers.
+  scoped_refptr<AudioBufferMemoryPool> pool_;
+
+  // Used to reset timestamp_helper_ after Reset() is called.
+  bool has_reset_ = false;
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_WIN_MEDIA_FOUNDATION_AUDIO_DECODER_H_
diff --git a/chromium/media/filters/win/media_foundation_utils.cc b/chromium/media/filters/win/media_foundation_utils.cc
new file mode 100644
index 00000000000..ef3a7c311b0
--- /dev/null
+++ b/chromium/media/filters/win/media_foundation_utils.cc
@@ -0,0 +1,209 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Ensures MFAudioFormat_Xxx symbols are defined in mfapi.h which is included
+// by media_foundation_audio_stream.h.
+#include <initguid.h> // NOLINT(build/include_order)
+
+#include "media/filters/win/media_foundation_utils.h"
+
+#include <mfapi.h>
+#include <mferror.h> // NOLINT(build/include_order)
+#include <mfidl.h>
+#include <mmreg.h> // NOLINT(build/include_order)
+#include <wrl.h> // NOLINT(build/include_order)
+
+#include "base/win/scoped_co_mem.h"
+#include "media/base/audio_codecs.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/win/mf_helpers.h"
+#include "media/media_buildflags.h"
+
+namespace media {
+
+using Microsoft::WRL::ComPtr;
+using Microsoft::WRL::MakeAndInitialize;
+
+namespace {
+
+// Given an audio format tag |wave_format|, it returns an audio subtype GUID per
+// https://docs.microsoft.com/en-us/windows/win32/medfound/audio-subtype-guids
+// |wave_format| must be one of the WAVE_FORMAT_* constants defined in mmreg.h.
+GUID MediaFoundationSubTypeFromWaveFormat(uint32_t wave_format) {
+  // Audio subtype GUIDs are MFAudioFormat_Base with Data1 replaced by the
+  // WAVE_FORMAT_* tag, per the Media Foundation audio-subtype convention.
+  GUID subtype = MFAudioFormat_Base;
+  subtype.Data1 = wave_format;
+  return subtype;
+}
+
+// Maps an AudioCodec to its Media Foundation audio subtype GUID, or GUID_NULL
+// when the codec has no MF mapping.
+GUID AudioCodecToMediaFoundationSubtype(AudioCodec codec) {
+  DVLOG(1) << __func__ << ": codec=" << codec;
+
+  switch (codec) {
+    case AudioCodec::kAAC:
+      return MFAudioFormat_AAC;
+    case AudioCodec::kMP3:
+      return MFAudioFormat_MP3;
+    case AudioCodec::kPCM:
+      return MFAudioFormat_PCM;
+    case AudioCodec::kVorbis:
+      return MFAudioFormat_Vorbis;
+    case AudioCodec::kFLAC:
+      return MFAudioFormat_FLAC;
+    case AudioCodec::kAMR_NB:
+      return MFAudioFormat_AMR_NB;
+    case AudioCodec::kAMR_WB:
+      return MFAudioFormat_AMR_WB;
+    case AudioCodec::kPCM_MULAW:
+      return MediaFoundationSubTypeFromWaveFormat(WAVE_FORMAT_MULAW);
+    case AudioCodec::kGSM_MS:
+      return MediaFoundationSubTypeFromWaveFormat(WAVE_FORMAT_GSM610);
+    // NOTE(review): the big-endian PCM variants below map to
+    // MFAudioFormat_PCM (little-endian) — confirm a byte swap happens
+    // upstream before samples reach the MFT.
+    case AudioCodec::kPCM_S16BE:
+      return MFAudioFormat_PCM;
+    case AudioCodec::kPCM_S24BE:
+      return MFAudioFormat_PCM;
+    case AudioCodec::kOpus:
+      return MFAudioFormat_Opus;
+    case AudioCodec::kEAC3:
+      return MFAudioFormat_Dolby_DDPlus;
+    case AudioCodec::kPCM_ALAW:
+      return MediaFoundationSubTypeFromWaveFormat(WAVE_FORMAT_ALAW);
+    case AudioCodec::kALAC:
+      return MFAudioFormat_ALAC;
+    case AudioCodec::kAC3:
+      return MFAudioFormat_Dolby_AC3;
+#if BUILDFLAG(USE_PROPRIETARY_CODECS) && BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+    case AudioCodec::kDTS:
+      return MFAudioFormat_DTS_RAW;
+    case AudioCodec::kDTSXP2:
+      return MFAudioFormat_DTS_UHD;
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS) &&
+       // BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+    default:
+      return GUID_NULL;
+  }
+}
+
+// Returns true for raw PCM codec variants, which need the uncompressed
+// media-type attributes rather than MF_MT_COMPRESSED.
+bool IsUncompressedAudio(AudioCodec codec) {
+  return codec == AudioCodec::kPCM || codec == AudioCodec::kPCM_S16BE ||
+         codec == AudioCodec::kPCM_S24BE;
+}
+
+} // namespace
+
+// Given an AudioDecoderConfig, get its corresponding IMFMediaType format.
+// Note:
+// IMFMediaType is derived from IMFAttributes and hence all the of information
+// in a media type is store as attributes.
+// https://docs.microsoft.com/en-us/windows/win32/medfound/media-type-attributes
+// has a list of media type attributes.
+// NOTE(review): |decoder_config| is taken by value, copying the config on
+// every call; the signature is fixed by the matching header declaration, so
+// switching to const& requires a coordinated header change.
+HRESULT GetDefaultAudioType(const AudioDecoderConfig decoder_config,
+                            IMFMediaType** media_type_out) {
+  DVLOG(1) << __func__;
+
+  ComPtr<IMFMediaType> media_type;
+  RETURN_IF_FAILED(MFCreateMediaType(&media_type));
+  RETURN_IF_FAILED(media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Audio));
+
+  GUID mf_subtype = AudioCodecToMediaFoundationSubtype(decoder_config.codec());
+  if (mf_subtype == GUID_NULL) {
+    DLOG(ERROR) << "Unsupported codec type: " << decoder_config.codec();
+    return MF_E_TOPO_CODEC_NOT_FOUND;
+  }
+  RETURN_IF_FAILED(media_type->SetGUID(MF_MT_SUBTYPE, mf_subtype));
+
+  // Uncompressed PCM types get the independent-samples attribute; everything
+  // else is flagged as compressed.
+  bool uncompressed = IsUncompressedAudio(decoder_config.codec());
+
+  if (uncompressed) {
+    RETURN_IF_FAILED(media_type->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1));
+  } else {
+    RETURN_IF_FAILED(media_type->SetUINT32(MF_MT_COMPRESSED, 1));
+  }
+
+  // The remaining attributes are optional; only set those the config carries.
+  int channels = decoder_config.channels();
+  if (channels > 0) {
+    RETURN_IF_FAILED(media_type->SetUINT32(MF_MT_AUDIO_NUM_CHANNELS, channels));
+  }
+
+  int samples_per_second = decoder_config.samples_per_second();
+  if (samples_per_second > 0) {
+    RETURN_IF_FAILED(media_type->SetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND,
+                                           samples_per_second));
+  }
+
+  // NOTE(review): bytes_per_frame() spans all channels of one frame; confirm
+  // MF_MT_AUDIO_BITS_PER_SAMPLE is meant to receive that rather than bits for
+  // a single channel's sample.
+  int bits_per_sample = decoder_config.bytes_per_frame() * 8;
+  if (bits_per_sample > 0) {
+    RETURN_IF_FAILED(
+        media_type->SetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE, bits_per_sample));
+  }
+
+  if (uncompressed) {
+    // NOTE(review): if bits_per_sample already covers all channels, the
+    // multiplication by |channels| here double-counts — verify against the
+    // intended WAVEFORMATEX semantics (nBlockAlign = channels * bytes/sample).
+    unsigned long block_alignment = channels * (bits_per_sample / 8);
+    if (block_alignment > 0) {
+      RETURN_IF_FAILED(
+          media_type->SetUINT32(MF_MT_AUDIO_BLOCK_ALIGNMENT, block_alignment));
+    }
+    unsigned long average_bps = samples_per_second * (bits_per_sample / 8);
+    if (average_bps > 0) {
+      RETURN_IF_FAILED(
+          media_type->SetUINT32(MF_MT_AUDIO_AVG_BYTES_PER_SECOND, average_bps));
+    }
+  }
+  *media_type_out = media_type.Detach();
+  return S_OK;
+}
+
+#if BUILDFLAG(USE_PROPRIETARY_CODECS)
+// Builds an AAC IMFMediaType by starting from the default audio type and
+// layering a HEAACWAVEINFO (plus the AudioSpecificConfig extra data) on top
+// via MFInitMediaTypeFromWaveFormatEx.
+HRESULT GetAacAudioType(const AudioDecoderConfig decoder_config,
+                        IMFMediaType** media_type_out) {
+  DVLOG(1) << __func__;
+
+  ComPtr<IMFMediaType> media_type;
+  RETURN_IF_FAILED(GetDefaultAudioType(decoder_config, &media_type));
+
+  // On Windows `extra_data` is not populated for AAC in `decoder_config`. Use
+  // `aac_extra_data` instead. See crbug.com/1245123.
+  const auto& extra_data = decoder_config.aac_extra_data();
+
+  // Allocate one contiguous buffer: HEAACWAVEINFO followed immediately by the
+  // codec-specific extra data, as WAVEFORMATEX-style structs require.
+  size_t wave_format_size = sizeof(HEAACWAVEINFO) + extra_data.size();
+  std::vector<uint8_t> wave_format_buffer(wave_format_size);
+  HEAACWAVEINFO* aac_wave_format =
+      reinterpret_cast<HEAACWAVEINFO*>(wave_format_buffer.data());
+
+  aac_wave_format->wfx.wFormatTag = WAVE_FORMAT_MPEG_HEAAC;
+  aac_wave_format->wfx.nChannels = decoder_config.channels();
+  aac_wave_format->wfx.wBitsPerSample = decoder_config.bytes_per_channel() * 8;
+  aac_wave_format->wfx.nSamplesPerSec = decoder_config.samples_per_second();
+  aac_wave_format->wfx.nAvgBytesPerSec =
+      decoder_config.samples_per_second() * decoder_config.bytes_per_frame();
+  aac_wave_format->wfx.nBlockAlign = 1;
+
+  // cbSize counts everything past the base WAVEFORMATEX header: the HEAAC
+  // fields plus the appended extra data.
+  size_t extra_size = wave_format_size - sizeof(WAVEFORMATEX);
+  aac_wave_format->wfx.cbSize = static_cast<WORD>(extra_size);
+  aac_wave_format->wPayloadType = 0;  // RAW AAC
+  aac_wave_format->wAudioProfileLevelIndication =
+      0xFE;                          // no audio profile specified
+  aac_wave_format->wStructType = 0;  // audio specific config follows
+  aac_wave_format->wReserved1 = 0;
+  aac_wave_format->dwReserved2 = 0;
+
+  if (!extra_data.empty()) {
+    memcpy(reinterpret_cast<uint8_t*>(aac_wave_format) + sizeof(HEAACWAVEINFO),
+           extra_data.data(), extra_data.size());
+  }
+
+  RETURN_IF_FAILED(MFInitMediaTypeFromWaveFormatEx(
+      media_type.Get(), reinterpret_cast<const WAVEFORMATEX*>(aac_wave_format),
+      wave_format_size));
+  *media_type_out = media_type.Detach();
+  return S_OK;
+}
+
+} // namespace media
diff --git a/chromium/media/filters/win/media_foundation_utils.h b/chromium/media/filters/win/media_foundation_utils.h
new file mode 100644
index 00000000000..bedf5379c77
--- /dev/null
+++ b/chromium/media/filters/win/media_foundation_utils.h
@@ -0,0 +1,36 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_WIN_MEDIA_FOUNDATION_UTILS_H_
+#define MEDIA_FILTERS_WIN_MEDIA_FOUNDATION_UTILS_H_
+
+#include "media/base/audio_decoder_config.h"
+#include "media/base/media_export.h"
+#include "media/media_buildflags.h"
+
+class IMFMediaType;
+
+namespace media {
+
+// Given an AudioDecoderConfig, get its corresponding IMFMediaType format.
+// Note:
+// IMFMediaType is derived from IMFAttributes and hence all the of information
+// in a media type is store as attributes.
+// https://docs.microsoft.com/en-us/windows/win32/medfound/media-type-attributes
+// has a list of media type attributes.
+MEDIA_EXPORT HRESULT
+GetDefaultAudioType(const AudioDecoderConfig decoder_config,
+ IMFMediaType** media_type_out);
+
+#if BUILDFLAG(USE_PROPRIETARY_CODECS)
+// Given an AudioDecoderConfig which represents AAC audio, get its
+// corresponding IMFMediaType format (by calling GetDefaultAudioType)
+// and populate the aac_extra_data in the decoder_config into the
+// returned IMFMediaType.
+MEDIA_EXPORT HRESULT GetAacAudioType(const AudioDecoderConfig decoder_config,
+ IMFMediaType** media_type_out);
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS)
+} // namespace media
+
+#endif // MEDIA_FILTERS_WIN_MEDIA_FOUNDATION_UTILS_H_
diff --git a/chromium/media/formats/BUILD.gn b/chromium/media/formats/BUILD.gn
index e6d61a40cda..f45e689d29b 100644
--- a/chromium/media/formats/BUILD.gn
+++ b/chromium/media/formats/BUILD.gn
@@ -191,6 +191,8 @@ source_set("formats") {
"hls/media_playlist.h",
"hls/media_segment.cc",
"hls/media_segment.h",
+ "hls/multivariant_playlist.cc",
+ "hls/multivariant_playlist.h",
"hls/parse_status.cc",
"hls/parse_status.h",
"hls/playlist.cc",
@@ -207,6 +209,8 @@ source_set("formats") {
"hls/types.h",
"hls/variable_dictionary.cc",
"hls/variable_dictionary.h",
+ "hls/variant_stream.cc",
+ "hls/variant_stream.h",
]
deps += [ "//third_party/re2" ]
public_deps = [
@@ -322,9 +326,17 @@ source_set("unit_tests") {
# TODO(https://crbug.com/1266991): This should be gated behind `enable_hls_demuxer`, once that's enabled by default.
sources += [
+ "hls/common_playlist_unittest.cc",
"hls/items_unittest.cc",
+ "hls/media_playlist_test_builder.cc",
+ "hls/media_playlist_test_builder.h",
"hls/media_playlist_unittest.cc",
+ "hls/multivariant_playlist_test_builder.cc",
+ "hls/multivariant_playlist_test_builder.h",
+ "hls/multivariant_playlist_unittest.cc",
+ "hls/playlist_test_builder.h",
"hls/tags_unittest.cc",
+ "hls/test_util.h",
"hls/types_unittest.cc",
"hls/variable_dictionary_unittest.cc",
]
@@ -358,6 +370,16 @@ fuzzer_test("hls_media_playlist_fuzzer") {
]
}
+# TODO(https://crbug.com/1266991): This should be gated behind `enable_hls_demuxer`, once that's enabled by default.
+fuzzer_test("hls_multivariant_playlist_fuzzer") {
+ sources = [ "hls/multivariant_playlist_fuzzer.cc" ]
+ deps = [
+ "//base",
+ "//base:i18n",
+ "//media",
+ ]
+}
+
if (proprietary_codecs) {
fuzzer_test("h264_annex_b_converter_fuzzer") {
sources = [ "mp4/h264_annex_b_to_avc_bitstream_converter_fuzztest.cc" ]
diff --git a/chromium/media/formats/hls/common_playlist_unittest.cc b/chromium/media/formats/hls/common_playlist_unittest.cc
new file mode 100644
index 00000000000..aef8c96d698
--- /dev/null
+++ b/chromium/media/formats/hls/common_playlist_unittest.cc
@@ -0,0 +1,163 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/hls/media_playlist_test_builder.h"
+#include "media/formats/hls/multivariant_playlist_test_builder.h"
+#include "media/formats/hls/parse_status.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media::hls {
+
+namespace {
+
+// Typed fixture shared by the multivariant- and media-playlist parser tests;
+// each test below runs once per builder implementation.
+template <typename BuilderT>
+class HlsCommonPlaylistTest : public testing::Test {
+ public:
+  using Builder = BuilderT;
+};
+
+using Implementations =
+    testing::Types<MultivariantPlaylistTestBuilder, MediaPlaylistTestBuilder>;
+TYPED_TEST_SUITE(HlsCommonPlaylistTest, Implementations);
+
+// Adds any additional tags not relevant to these tests, but that are required
+// for this playlist type to be successfully parsed.
+void AddRequiredTags(MultivariantPlaylistTestBuilder&) {
+  // None required
+}
+void AddRequiredTags(MediaPlaylistTestBuilder& builder) {
+  // Media playlists must declare a target duration to parse successfully.
+  builder.AppendLine("#EXT-X-TARGETDURATION:10");
+}
+
+} // namespace
+
+// Line endings must be "\n" or "\r\n"; anything else is a parse error.
+TYPED_TEST(HlsCommonPlaylistTest, BadLineEndings) {
+  TypeParam builder;
+  builder.AppendLine("#EXTM3U");
+  AddRequiredTags(builder);
+
+  {
+    // Double carriage-return is not allowed
+    auto fork = builder;
+    fork.Append("\r\r\n");
+    fork.ExpectError(ParseStatusCode::kInvalidEOL);
+  }
+
+  {
+    // Carriage-return not followed by a newline is not allowed
+    auto fork = builder;
+    fork.Append("#EXT-X-VERSION:5\r");
+    fork.ExpectError(ParseStatusCode::kInvalidEOL);
+  }
+
+  // A trailing CRLF by itself is valid.
+  builder.Append("\r\n");
+  builder.ExpectOk();
+}
+
+// The #EXTM3U tag must be the first line of the playlist, with a valid line
+// ending and no content; otherwise parsing fails with kPlaylistMissingM3uTag.
+TYPED_TEST(HlsCommonPlaylistTest, MissingM3u) {
+  // #EXTM3U must be the very first line
+  TypeParam builder;
+  builder.AppendLine("");
+  builder.AppendLine("#EXTM3U");
+  builder.ExpectError(ParseStatusCode::kPlaylistMissingM3uTag);
+
+  builder = TypeParam();
+  builder.AppendLine("#EXT-X-VERSION:5");
+  builder.AppendLine("#EXTM3U");
+  builder.ExpectError(ParseStatusCode::kPlaylistMissingM3uTag);
+
+  // Test with invalid line ending
+  builder = TypeParam();
+  builder.Append("#EXTM3U");
+  builder.ExpectError(ParseStatusCode::kPlaylistMissingM3uTag);
+
+  // Test with invalid format
+  builder = TypeParam();
+  builder.AppendLine("#EXTM3U:");
+  builder.ExpectError(ParseStatusCode::kPlaylistMissingM3uTag);
+  builder = TypeParam();
+  builder.AppendLine("#EXTM3U:1");
+  builder.ExpectError(ParseStatusCode::kPlaylistMissingM3uTag);
+
+  // Extra M3U tag is OK
+  builder = TypeParam();
+  builder.AppendLine("#EXTM3U");
+  builder.AppendLine("#EXTM3U");
+  AddRequiredTags(builder);
+  builder.ExpectOk();
+}
+
+// Unknown tags must be ignored rather than rejected, for forward
+// compatibility with newer HLS revisions.
+TYPED_TEST(HlsCommonPlaylistTest, UnknownTag) {
+  TypeParam builder;
+  builder.AppendLine("#EXTM3U");
+  AddRequiredTags(builder);
+
+  // Unrecognized tags should not result in an error
+  builder.AppendLine("#EXT-UNKNOWN-TAG");
+  builder.ExpectOk();
+}
+
+// EXT-X-VERSION handling: defaults to 1, accepts 1-10, rejects non-positive
+// or malformed values and versions this parser does not support.
+TYPED_TEST(HlsCommonPlaylistTest, VersionChecks) {
+  TypeParam builder;
+  builder.AppendLine("#EXTM3U");
+  AddRequiredTags(builder);
+
+  {
+    // Default version is 1
+    auto fork = builder;
+    fork.ExpectPlaylist(HasVersion, 1);
+    fork.ExpectOk();
+  }
+
+  {
+    // "-1" is not a valid decimal-integer
+    auto fork = builder;
+    fork.AppendLine("#EXT-X-VERSION:-1");
+    fork.ExpectError(ParseStatusCode::kMalformedTag);
+  }
+
+  {
+    // "0" is not a valid version
+    auto fork = builder;
+    fork.AppendLine("#EXT-X-VERSION:0");
+    fork.ExpectError(ParseStatusCode::kInvalidPlaylistVersion);
+  }
+
+  // Versions 1 through 10 are accepted and reported back.
+  for (int i = 1; i <= 10; ++i) {
+    auto fork = builder;
+    fork.AppendLine("#EXT-X-VERSION:" + base::NumberToString(i));
+    fork.ExpectPlaylist(HasVersion, i);
+    fork.ExpectOk();
+  }
+
+  for (int i : {11, 12, 100, 999}) {
+    // Versions 11+ are not supported by this parser
+    auto fork = builder;
+    fork.AppendLine("#EXT-X-VERSION:" + base::NumberToString(i));
+    fork.ExpectError(ParseStatusCode::kPlaylistHasUnsupportedVersion);
+  }
+}
+
+// EXT-X-INDEPENDENT-SEGMENTS: absent means false, present means true, and a
+// duplicate occurrence is an error.
+TYPED_TEST(HlsCommonPlaylistTest, XIndependentSegmentsTag) {
+  TypeParam builder;
+  builder.AppendLine("#EXTM3U");
+  AddRequiredTags(builder);
+
+  // Without the 'EXT-X-INDEPENDENT-SEGMENTS' tag, the default is 'false'.
+  {
+    auto fork = builder;
+    fork.ExpectPlaylist(HasIndependentSegments, false);
+    fork.ExpectOk();
+  }
+
+  builder.AppendLine("#EXT-X-INDEPENDENT-SEGMENTS");
+  builder.ExpectPlaylist(HasIndependentSegments, true);
+  builder.ExpectOk();
+
+  // This tag should not appear twice
+  builder.AppendLine("#EXT-X-INDEPENDENT-SEGMENTS");
+  builder.ExpectError(ParseStatusCode::kPlaylistHasDuplicateTags);
+}
+
+} // namespace media::hls
diff --git a/chromium/media/formats/hls/items_unittest.cc b/chromium/media/formats/hls/items_unittest.cc
index 46976f38e72..9de96286d85 100644
--- a/chromium/media/formats/hls/items_unittest.cc
+++ b/chromium/media/formats/hls/items_unittest.cc
@@ -3,7 +3,10 @@
// found in the LICENSE file.
#include "media/formats/hls/items.h"
+
+#include "base/location.h"
#include "base/strings/string_piece.h"
+#include "media/formats/hls/source_string.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/abseil-cpp/absl/types/variant.h"
@@ -13,16 +16,20 @@ namespace {
using LineResult = GetNextLineItemResult;
-void CheckSourceString(SourceString expected, SourceString actual) {
- EXPECT_EQ(expected.Line(), actual.Line());
- EXPECT_EQ(expected.Column(), actual.Column());
- EXPECT_EQ(expected.Str(), actual.Str());
+void CheckSourceString(SourceString expected,
+ SourceString actual,
+ const base::Location& from) {
+ EXPECT_EQ(expected.Line(), actual.Line()) << from.ToString();
+ EXPECT_EQ(expected.Column(), actual.Column()) << from.ToString();
+ EXPECT_EQ(expected.Str(), actual.Str()) << from.ToString();
}
// Calls `GetNextLineItem` for each expectation, and verifies that the result
// matches.
template <typename T>
-void RunTest(base::StringPiece source, const T& expectations) {
+void RunTest(base::StringPiece source,
+ const T& expectations,
+ const base::Location& from = base::Location::Current()) {
auto line_iter = SourceLineIterator(source);
for (auto expectation : expectations) {
@@ -31,31 +38,34 @@ void RunTest(base::StringPiece source, const T& expectations) {
if (expectation.has_value()) {
auto expected_value = std::move(expectation).value();
- EXPECT_TRUE(result.has_value());
+ EXPECT_TRUE(result.has_value()) << from.ToString();
auto value = std::move(result).value();
// Ensure that resulting variants are the same
static_assert(absl::variant_size<LineResult>::value == 2, "");
if (auto* expected_tag = absl::get_if<TagItem>(&expected_value)) {
auto tag = absl::get<TagItem>(std::move(value));
- EXPECT_EQ(expected_tag->GetName(), tag.GetName());
- EXPECT_EQ(expected_tag->GetLineNumber(), tag.GetLineNumber());
+ EXPECT_EQ(expected_tag->GetName(), tag.GetName()) << from.ToString();
+ EXPECT_EQ(expected_tag->GetLineNumber(), tag.GetLineNumber())
+ << from.ToString();
EXPECT_EQ(expected_tag->GetContent().has_value(),
- tag.GetContent().has_value());
+ tag.GetContent().has_value())
+ << from.ToString();
if (expected_tag->GetContent().has_value() &&
tag.GetContent().has_value()) {
- CheckSourceString(*expected_tag->GetContent(), *tag.GetContent());
+ CheckSourceString(*expected_tag->GetContent(), *tag.GetContent(),
+ from);
}
} else {
auto expected_uri = absl::get<UriItem>(std::move(expected_value));
auto uri = absl::get<UriItem>(std::move(value));
- CheckSourceString(expected_uri.content, uri.content);
+ CheckSourceString(expected_uri.content, uri.content, from);
}
} else {
- EXPECT_TRUE(result.has_error());
+ EXPECT_TRUE(result.has_error()) << from.ToString();
auto error = std::move(result).error();
auto expected_error = std::move(expectation).error();
- EXPECT_EQ(error.code(), expected_error.code());
+ EXPECT_EQ(error.code(), expected_error.code()) << from.ToString();
}
}
}
@@ -89,7 +99,7 @@ ParseStatus::Or<LineResult> ExpectUri(size_t line,
} // namespace
-TEST(HlsFormatParserTest, GetNextLineItemTest1) {
+TEST(HlsItemsTest, GetNextLineItem1) {
constexpr base::StringPiece kManifest =
"#EXTM3U\n"
"\n"
@@ -135,7 +145,7 @@ TEST(HlsFormatParserTest, GetNextLineItemTest1) {
RunTest(kManifest, kExpectations);
}
-TEST(HlsFormatParserTest, GetNextLineItemTest2) {
+TEST(HlsItemsTest, GetNextLineItem2) {
constexpr base::StringPiece kManifest =
"#EXTM3U\n"
"https://ww\rw.example.com\n"
@@ -148,7 +158,7 @@ TEST(HlsFormatParserTest, GetNextLineItemTest2) {
RunTest(kManifest, kExpectations);
}
-TEST(HlsFormatParserTest, GetNextLineItemTest3) {
+TEST(HlsItemsTest, GetNextLineItem3) {
constexpr base::StringPiece kManifest = "#EXTM3U";
const ParseStatus::Or<LineResult> kExpectations[] = {
@@ -157,7 +167,7 @@ TEST(HlsFormatParserTest, GetNextLineItemTest3) {
RunTest(kManifest, kExpectations);
}
-TEST(HlsFormatParserTest, GetNextLineItemTest4) {
+TEST(HlsItemsTest, GetNextLineItem4) {
constexpr base::StringPiece kManifest = "#EXTM3U\r";
const ParseStatus::Or<LineResult> kExpectations[] = {
@@ -166,7 +176,7 @@ TEST(HlsFormatParserTest, GetNextLineItemTest4) {
RunTest(kManifest, kExpectations);
}
-TEST(HlsFormatParserTest, GetNextLineItemTest5) {
+TEST(HlsItemsTest, GetNextLineItem5) {
constexpr base::StringPiece kManifest = "\n";
const ParseStatus::Or<LineResult> kExpectations[] = {
diff --git a/chromium/media/formats/hls/media_playlist.cc b/chromium/media/formats/hls/media_playlist.cc
index 281b3efb079..9835ece59ff 100644
--- a/chromium/media/formats/hls/media_playlist.cc
+++ b/chromium/media/formats/hls/media_playlist.cc
@@ -4,25 +4,55 @@
#include "media/formats/hls/media_playlist.h"
+#include <cmath>
+#include <utility>
+#include <vector>
+
+#include "base/check.h"
#include "base/notreached.h"
+#include "base/numerics/clamped_math.h"
+#include "base/strings/string_piece.h"
#include "base/time/time.h"
#include "media/formats/hls/media_segment.h"
+#include "media/formats/hls/multivariant_playlist.h"
+#include "media/formats/hls/parse_status.h"
#include "media/formats/hls/playlist_common.h"
+#include "media/formats/hls/source_string.h"
+#include "media/formats/hls/tags.h"
#include "media/formats/hls/types.h"
#include "media/formats/hls/variable_dictionary.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#include "third_party/abseil-cpp/absl/types/variant.h"
#include "url/gurl.h"
namespace media::hls {
+struct MediaPlaylist::CtorArgs {
+ GURL uri;
+ types::DecimalInteger version;
+ bool independent_segments;
+ base::TimeDelta target_duration;
+ absl::optional<PartialSegmentInfo> partial_segment_info;
+ std::vector<MediaSegment> segments;
+ absl::optional<PlaylistType> playlist_type;
+ bool end_list;
+ bool i_frames_only;
+ bool has_media_sequence_tag;
+};
+
MediaPlaylist::MediaPlaylist(MediaPlaylist&&) = default;
MediaPlaylist& MediaPlaylist::operator=(MediaPlaylist&&) = default;
MediaPlaylist::~MediaPlaylist() = default;
-ParseStatus::Or<MediaPlaylist> MediaPlaylist::Parse(base::StringPiece source,
- GURL uri) {
- CHECK(uri.is_valid());
+ParseStatus::Or<MediaPlaylist> MediaPlaylist::Parse(
+ base::StringPiece source,
+ GURL uri,
+ const MultivariantPlaylist* parent_playlist) {
+ if (!uri.is_valid()) {
+ return ParseStatusCode::kInvalidUri;
+ }
SourceLineIterator src_iter{source};
@@ -36,12 +66,29 @@ ParseStatus::Or<MediaPlaylist> MediaPlaylist::Parse(base::StringPiece source,
CommonParserState common_state;
VariableDictionary::SubstitutionBuffer sub_buffer;
+ absl::optional<XTargetDurationTag> target_duration_tag;
absl::optional<InfTag> inf_tag;
absl::optional<XGapTag> gap_tag;
absl::optional<XDiscontinuityTag> discontinuity_tag;
+ absl::optional<XByteRangeTag> byterange_tag;
+ absl::optional<XBitrateTag> bitrate_tag;
absl::optional<XPlaylistTypeTag> playlist_type_tag;
+ absl::optional<XEndListTag> end_list_tag;
+ absl::optional<XIFramesOnlyTag> i_frames_only_tag;
+ absl::optional<XPartInfTag> part_inf_tag;
+ absl::optional<XMediaSequenceTag> media_sequence_tag;
+ absl::optional<XDiscontinuitySequenceTag> discontinuity_sequence_tag;
std::vector<MediaSegment> segments;
+ types::DecimalInteger discontinuity_sequence_number = 0;
+
+ // If this media playlist was found through a multivariant playlist, it may
+ // import variables from that playlist.
+ if (parent_playlist) {
+ common_state.parent_variable_dict =
+ &parent_playlist->GetVariableDictionary();
+ }
+
// Get segments out of the playlist
while (true) {
auto item_result = GetNextLineItem(&src_iter);
@@ -87,13 +134,29 @@ ParseStatus::Or<MediaPlaylist> MediaPlaylist::Parse(base::StringPiece source,
}
break;
}
- case MediaPlaylistTagName::kXDiscontinuity: {
- auto error = ParseUniqueTag(*tag, discontinuity_tag);
+ case MediaPlaylistTagName::kXTargetDuration: {
+ auto error = ParseUniqueTag(*tag, target_duration_tag);
if (error.has_value()) {
return std::move(error).value();
}
break;
}
+ case MediaPlaylistTagName::kXDiscontinuity: {
+ // Multiple occurrences of `EXT-X-DISCONTINUITY` per media segment are
+ // allowed, and each increments the segment's discontinuity sequence
+ // number by 1. The spec doesn't explicitly forbid this, and this
+ // seems to be how other HLS clients handle this scenario.
+ auto result = XDiscontinuityTag::Parse(*tag);
+ if (result.has_error()) {
+ return std::move(result).error();
+ }
+
+ // Even if there was a previous discontinuity tag, overwrite the value
+ // and increment the discontinuity sequence number by 1.
+ discontinuity_tag = std::move(result).value();
+ discontinuity_sequence_number += 1;
+ break;
+ }
case MediaPlaylistTagName::kXGap: {
auto error = ParseUniqueTag(*tag, gap_tag);
if (error.has_value()) {
@@ -101,12 +164,20 @@ ParseStatus::Or<MediaPlaylist> MediaPlaylist::Parse(base::StringPiece source,
}
break;
}
- case MediaPlaylistTagName::kXEndList:
- // TODO(crbug.com/1266991): Implement the #EXT-X-END-LIST Tag
+ case MediaPlaylistTagName::kXEndList: {
+ auto error = ParseUniqueTag(*tag, end_list_tag);
+ if (error.has_value()) {
+ return std::move(error).value();
+ }
break;
- case MediaPlaylistTagName::kXIFramesOnly:
- // TODO(crbug.com/1266991): Implement the #EXT-X-I-FRAMES-ONLY tag
+ }
+ case MediaPlaylistTagName::kXIFramesOnly: {
+ auto error = ParseUniqueTag(*tag, i_frames_only_tag);
+ if (error.has_value()) {
+ return std::move(error).value();
+ }
break;
+ }
case MediaPlaylistTagName::kXPlaylistType: {
auto error = ParseUniqueTag(*tag, playlist_type_tag);
if (error.has_value()) {
@@ -114,6 +185,61 @@ ParseStatus::Or<MediaPlaylist> MediaPlaylist::Parse(base::StringPiece source,
}
break;
}
+ case MediaPlaylistTagName::kXPartInf: {
+ auto error = ParseUniqueTag(*tag, part_inf_tag);
+ if (error.has_value()) {
+ return std::move(error).value();
+ }
+ break;
+ }
+ case MediaPlaylistTagName::kXMediaSequence: {
+ // This tag must appear before any media segment
+ if (!segments.empty()) {
+ return ParseStatusCode::kMediaSegmentBeforeMediaSequenceTag;
+ }
+
+ auto error = ParseUniqueTag(*tag, media_sequence_tag);
+ if (error.has_value()) {
+ return std::move(error).value();
+ }
+ break;
+ }
+ case MediaPlaylistTagName::kXDiscontinuitySequence: {
+ auto error = ParseUniqueTag(*tag, discontinuity_sequence_tag);
+ if (error.has_value()) {
+ return std::move(error).value();
+ }
+
+ // This tag must appear before any media segment or
+ // EXT-X-DISCONTINUITY tag.
+ if (!segments.empty()) {
+ return ParseStatusCode::kMediaSegmentBeforeDiscontinuitySequenceTag;
+ }
+ if (discontinuity_sequence_number != 0) {
+ return ParseStatusCode::
+ kDiscontinuityTagBeforeDiscontinuitySequenceTag;
+ }
+
+ discontinuity_sequence_number = discontinuity_sequence_tag->number;
+ break;
+ }
+ case MediaPlaylistTagName::kXByteRange: {
+ // TODO(https://crbug.com/1328528): Investigate supporting aspects of
+ // this tag not described by the spec
+ auto error = ParseUniqueTag(*tag, byterange_tag);
+ if (error.has_value()) {
+ return std::move(error).value();
+ }
+ break;
+ }
+ case MediaPlaylistTagName::kXBitrate: {
+ auto result = XBitrateTag::Parse(*tag);
+ if (result.has_error()) {
+ return std::move(result).error();
+ }
+ bitrate_tag = std::move(result).value();
+ break;
+ }
}
continue;
@@ -136,33 +262,120 @@ ParseStatus::Or<MediaPlaylist> MediaPlaylist::Parse(base::StringPiece source,
return ParseStatusCode::kMediaSegmentMissingInfTag;
}
- segments.emplace_back(inf_tag->duration, std::move(segment_uri),
- discontinuity_tag.has_value(), gap_tag.has_value());
+ // The media sequence number of this segment can be calculated by the value
+ // given by `EXT-X-MEDIA-SEQUENCE:n` (or 0), plus the number of prior
+ // segments in this playlist. It's an error for the EXT-X-MEDIA-SEQUENCE
+ // tag to appear after the first media segment (handled above).
+ const types::DecimalInteger media_sequence_number =
+ (media_sequence_tag ? media_sequence_tag->number : 0) + segments.size();
+
+ absl::optional<types::ByteRange> byterange;
+ if (byterange_tag.has_value()) {
+ auto range = byterange_tag->range;
+
+ // If this media segment had an EXT-X-BYTERANGE tag without an offset, the
+ // previous media segment must have been a byterange of the same resource.
+ // In that case, the offset is that of the byte following the previous
+ // media segment.
+ types::DecimalInteger offset;
+ if (range.offset.has_value()) {
+ offset = range.offset.value();
+ } else if (segments.empty()) {
+ return ParseStatusCode::kByteRangeRequiresOffset;
+ } else if (!segments.back().GetByteRange().has_value()) {
+ return ParseStatusCode::kByteRangeRequiresOffset;
+ } else if (segments.back().GetUri() != segment_uri) {
+ return ParseStatusCode::kByteRangeRequiresOffset;
+ } else {
+ offset = segments.back().GetByteRange()->GetEnd();
+ }
+
+ byterange = types::ByteRange::Validate(range.length, offset);
+ if (!byterange) {
+ return ParseStatusCode::kByteRangeInvalid;
+ }
+ }
+
+ // The previous occurrence of the EXT-X-BITRATE tag applies to this segment
+ // only if this segment is not a byterange of its resource.
+ absl::optional<types::DecimalInteger> bitrate;
+ if (bitrate_tag.has_value() && !byterange.has_value()) {
+ // The value in the tag is expressed in kilobits per-second, but we wish
+ // to normalize all bitrates to bits-per-second. The spec specifically
+ // uses 'kilobit' as opposed to 'kibibit', so we multiply by 1000 instead
+ // of 1024.
+ // Ensure we don't overflow `DecimalInteger` when doing this
+ // multiplication.
+ bitrate = base::ClampMul(bitrate_tag->bitrate, 1000u);
+ }
+
+ segments.emplace_back(inf_tag->duration, media_sequence_number,
+ discontinuity_sequence_number, std::move(segment_uri),
+ byterange, bitrate, discontinuity_tag.has_value(),
+ gap_tag.has_value());
// Reset per-segment tags
inf_tag.reset();
gap_tag.reset();
discontinuity_tag.reset();
+ byterange_tag.reset();
}
+ if (!target_duration_tag.has_value()) {
+ return ParseStatusCode::kMediaPlaylistMissingTargetDuration;
+ }
+
+ absl::optional<PartialSegmentInfo> partial_segment_info;
+ if (part_inf_tag.has_value()) {
+ partial_segment_info = MediaPlaylist::PartialSegmentInfo{
+ .target_duration = part_inf_tag->target_duration};
+ }
+
+ // Ensure that no segment exceeds the target duration
+ for (const auto& segment : segments) {
+ const auto duration =
+ static_cast<types::DecimalInteger>(std::round(segment.GetDuration()));
+ if (duration > target_duration_tag->duration) {
+ return ParseStatusCode::kMediaSegmentExceedsTargetDuration;
+ }
+ }
+
+ // Multivariant playlists may use the `EXT-X-INDEPENDENT-SEGMENTS` tag to
+ // indicate that every media playlist has independent segments. If that was
+ // the case, apply that to this playlist (this does not go in reverse).
+ // Otherwise, that property depends on whether that tag occurred in this
+ // playlist.
+ const bool independent_segments =
+ common_state.independent_segments_tag.has_value() ||
+ (parent_playlist && parent_playlist->AreSegmentsIndependent());
+
absl::optional<PlaylistType> playlist_type;
if (playlist_type_tag) {
playlist_type = playlist_type_tag->type;
}
- return MediaPlaylist(std::move(uri), common_state.GetVersion(),
- common_state.independent_segments_tag.has_value(),
- std::move(segments), playlist_type);
+ return MediaPlaylist(
+ CtorArgs{.uri = std::move(uri),
+ .version = common_state.GetVersion(),
+ .independent_segments = independent_segments,
+ .target_duration = base::Seconds(target_duration_tag->duration),
+ .partial_segment_info = std::move(partial_segment_info),
+ .segments = std::move(segments),
+ .playlist_type = playlist_type,
+ .end_list = end_list_tag.has_value(),
+ .i_frames_only = i_frames_only_tag.has_value(),
+ .has_media_sequence_tag = media_sequence_tag.has_value()});
}
-MediaPlaylist::MediaPlaylist(GURL uri,
- types::DecimalInteger version,
- bool independent_segments,
- std::vector<MediaSegment> segments,
- absl::optional<PlaylistType> playlist_type)
- : Playlist(std::move(uri), version, independent_segments),
- segments_(std::move(segments)),
- playlist_type_(playlist_type) {
+MediaPlaylist::MediaPlaylist(CtorArgs args)
+ : Playlist(std::move(args.uri), args.version, args.independent_segments),
+ target_duration_(args.target_duration),
+ partial_segment_info_(std::move(args.partial_segment_info)),
+ segments_(std::move(args.segments)),
+ playlist_type_(args.playlist_type),
+ end_list_(args.end_list),
+ i_frames_only_(args.i_frames_only),
+ has_media_sequence_tag_(args.has_media_sequence_tag) {
base::TimeDelta duration;
for (const auto& segment : segments_) {
duration += base::Seconds(segment.GetDuration());
diff --git a/chromium/media/formats/hls/media_playlist.h b/chromium/media/formats/hls/media_playlist.h
index 0f6263603a9..ddf353c67ef 100644
--- a/chromium/media/formats/hls/media_playlist.h
+++ b/chromium/media/formats/hls/media_playlist.h
@@ -6,18 +6,33 @@
#define MEDIA_FORMATS_HLS_MEDIA_PLAYLIST_H_
#include <vector>
+
#include "base/time/time.h"
#include "media/base/media_export.h"
+#include "media/formats/hls/parse_status.h"
#include "media/formats/hls/playlist.h"
#include "media/formats/hls/tags.h"
#include "media/formats/hls/types.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#include "url/gurl.h"
namespace media::hls {
class MediaSegment;
+class MultivariantPlaylist;
class MEDIA_EXPORT MediaPlaylist final : public Playlist {
public:
+ // This structure describes information about partial segments in the
+ // playlist.
+ struct PartialSegmentInfo {
+ // The maximum duration (in seconds) of any partial segment. Each partial
+ // segment must be at least 85% of this, except for any where
+ // `HasDiscontinuity() == true` or the final partial segment of a parent
+ // segment.
+ types::DecimalFloatingPoint target_duration;
+ };
+
MediaPlaylist(const MediaPlaylist&) = delete;
MediaPlaylist(MediaPlaylist&&);
MediaPlaylist& operator=(const MediaPlaylist&) = delete;
@@ -28,6 +43,10 @@ class MEDIA_EXPORT MediaPlaylist final : public Playlist {
// may be copied independently of this Playlist.
const std::vector<MediaSegment>& GetSegments() const { return segments_; }
+ // Returns the target duration (maximum length of any segment, rounded to the
+ // nearest integer) for this playlist.
+ base::TimeDelta GetTargetDuration() const { return target_duration_; }
+
// Returns the sum of the duration of all segments in this playlist.
// Computed via the 'EXTINF' attribute, so may be slightly longer than the
// actual duration.
@@ -43,23 +62,55 @@ class MEDIA_EXPORT MediaPlaylist final : public Playlist {
return playlist_type_;
}
- // Attempts to parse the playlist represented by `source`. `uri` must be a
- // valid, non-empty GURL referring to the URI of this playlist. If the
- // playlist is invalid, returns an error. Otherwise, returns the parsed
- // playlist.
- static ParseStatus::Or<MediaPlaylist> Parse(base::StringPiece source,
- GURL uri);
+ // Returns information about partial segments in this playlist. This will be
+ // non-empty if this playlist contains at least one partial segment, and may
+ // be empty if this playlist contains no partial segments.
+ absl::optional<PartialSegmentInfo> GetPartialSegmentInfo() const {
+ return partial_segment_info_;
+ }
+
+ // Returns whether this playlist contained the 'EXT-X-ENDLIST' tag. This
+  // indicates, in the case of EVENT or live playlists, that no further
+ // segments will be appended in future updates.
+ bool IsEndList() const { return end_list_; }
+
+  // Indicates that this playlist contained the 'EXT-X-I-FRAMES-ONLY' tag.
+ // This means that each media segment in this playlist contains a single
+ // I-frame, and that the media segment duration should be interpreted as the
+ // time between that I-frame and the following one, or the end of the
+ // presentation.
+ // https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis#section-4.4.3.6
+ bool IsIFramesOnly() const { return i_frames_only_; }
+
+ // The presence of the EXT-X-MEDIA-SEQUENCE tag is a hint that, in the case of
+ // live playlists, media segments may become unavailable after the time this
+ // playlist was loaded + the duration of this playlist.
+ // https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis#:~:text=nominal%20playback%20rate).-,If,-the%20Media%20Playlist
+ bool HasMediaSequenceTag() const { return has_media_sequence_tag_; }
+
+ // Attempts to parse the media playlist represented by `source`. `uri` must be
+ // a valid, non-empty GURL referring to the URI of this playlist. If this
+ // playlist was found through a multivariant playlist, `parent_playlist` must
+ // point to that playlist in order to support persistent properties and
+ // imported variables. Otherwise, it should be `nullptr`. If `source` is
+ // invalid, this returns an error. Otherwise, the parsed playlist is returned.
+ static ParseStatus::Or<MediaPlaylist> Parse(
+ base::StringPiece source,
+ GURL uri,
+ const MultivariantPlaylist* parent_playlist);
private:
- MediaPlaylist(GURL uri,
- types::DecimalInteger version,
- bool independent_segments,
- std::vector<MediaSegment> segments,
- absl::optional<PlaylistType> playlist_type);
+ struct CtorArgs;
+ explicit MediaPlaylist(CtorArgs);
+ base::TimeDelta target_duration_;
+ absl::optional<PartialSegmentInfo> partial_segment_info_;
std::vector<MediaSegment> segments_;
base::TimeDelta computed_duration_;
absl::optional<PlaylistType> playlist_type_;
+ bool end_list_;
+ bool i_frames_only_;
+ bool has_media_sequence_tag_;
};
} // namespace media::hls
diff --git a/chromium/media/formats/hls/media_playlist_fuzzer.cc b/chromium/media/formats/hls/media_playlist_fuzzer.cc
index 50a91a15873..e113c22a92d 100644
--- a/chromium/media/formats/hls/media_playlist_fuzzer.cc
+++ b/chromium/media/formats/hls/media_playlist_fuzzer.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <fuzzer/FuzzedDataProvider.h>
#include <cstddef>
#include <cstdint>
@@ -10,6 +11,7 @@
#include "base/i18n/icu_util.h"
#include "base/strings/string_piece.h"
#include "media/formats/hls/media_playlist.h"
+#include "media/formats/hls/multivariant_playlist.h"
#include "third_party/abseil-cpp/absl/types/variant.h"
#include "url/gurl.h"
@@ -22,12 +24,29 @@ struct IcuEnvironment {
IcuEnvironment* env = new IcuEnvironment();
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- // Create a StringPiece from the given input
- const base::StringPiece source(reinterpret_cast<const char*>(data), size);
+ FuzzedDataProvider data_provider(data, size);
- // Try to parse it as a media playlist
- media::hls::MediaPlaylist::Parse(source,
- GURL("http://localhost/playlist.m3u8"));
+ // Decide whether to create a multivariant playlist + media playlist or just a
+ // media playlist
+ std::unique_ptr<media::hls::MultivariantPlaylist> multivariant_playlist;
+ if (data_provider.ConsumeBool()) {
+ auto multivariant_playlist_source =
+ data_provider.ConsumeRandomLengthString();
+ auto multivariant_playlist_result = media::hls::MultivariantPlaylist::Parse(
+ multivariant_playlist_source,
+ GURL("http://localhost/multi_playlist.m3u8"));
+ if (multivariant_playlist_result.has_error()) {
+ return 0;
+ }
+
+ multivariant_playlist = std::make_unique<media::hls::MultivariantPlaylist>(
+ std::move(multivariant_playlist_result).value());
+ }
+
+ auto media_playlist_source = data_provider.ConsumeRemainingBytesAsString();
+ media::hls::MediaPlaylist::Parse(media_playlist_source,
+ GURL("http://localhost/playlist.m3u8"),
+ multivariant_playlist.get());
return 0;
}
diff --git a/chromium/media/formats/hls/media_playlist_test_builder.cc b/chromium/media/formats/hls/media_playlist_test_builder.cc
new file mode 100644
index 00000000000..2290d73e72d
--- /dev/null
+++ b/chromium/media/formats/hls/media_playlist_test_builder.cc
@@ -0,0 +1,59 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/hls/media_playlist_test_builder.h"
+
+#include "base/location.h"
+
+namespace media::hls {
+
+MediaPlaylistTestBuilder::MediaPlaylistTestBuilder() = default;
+
+MediaPlaylistTestBuilder::~MediaPlaylistTestBuilder() = default;
+
+MediaPlaylistTestBuilder::MediaPlaylistTestBuilder(
+ const MediaPlaylistTestBuilder&) = default;
+
+MediaPlaylistTestBuilder::MediaPlaylistTestBuilder(MediaPlaylistTestBuilder&&) =
+ default;
+
+MediaPlaylistTestBuilder& MediaPlaylistTestBuilder::operator=(
+ const MediaPlaylistTestBuilder&) = default;
+
+MediaPlaylistTestBuilder& MediaPlaylistTestBuilder::operator=(
+ MediaPlaylistTestBuilder&&) = default;
+
+MediaPlaylistTestBuilder::SegmentExpectations::SegmentExpectations() = default;
+
+MediaPlaylistTestBuilder::SegmentExpectations::~SegmentExpectations() = default;
+
+MediaPlaylistTestBuilder::SegmentExpectations::SegmentExpectations(
+ const SegmentExpectations&) = default;
+
+MediaPlaylistTestBuilder::SegmentExpectations::SegmentExpectations(
+ SegmentExpectations&&) = default;
+
+MediaPlaylistTestBuilder::SegmentExpectations&
+MediaPlaylistTestBuilder::SegmentExpectations::operator=(
+ const SegmentExpectations&) = default;
+
+MediaPlaylistTestBuilder::SegmentExpectations&
+MediaPlaylistTestBuilder::SegmentExpectations::operator=(
+ SegmentExpectations&&) = default;
+
+void MediaPlaylistTestBuilder::VerifyExpectations(
+ const MediaPlaylist& playlist,
+ const base::Location& from) const {
+ ASSERT_EQ(segment_expectations_.size(), playlist.GetSegments().size())
+ << from.ToString();
+ for (size_t i = 0; i < segment_expectations_.size(); ++i) {
+ const auto& segment = playlist.GetSegments().at(i);
+ const auto& expectations = segment_expectations_.at(i);
+ for (const auto& expectation : expectations.expectations) {
+ expectation.Run(segment);
+ }
+ }
+}
+
+} // namespace media::hls
diff --git a/chromium/media/formats/hls/media_playlist_test_builder.h b/chromium/media/formats/hls/media_playlist_test_builder.h
new file mode 100644
index 00000000000..9fb6ad1b082
--- /dev/null
+++ b/chromium/media/formats/hls/media_playlist_test_builder.h
@@ -0,0 +1,211 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_HLS_MEDIA_PLAYLIST_TEST_BUILDER_H_
+#define MEDIA_FORMATS_HLS_MEDIA_PLAYLIST_TEST_BUILDER_H_
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/memory/raw_ptr.h"
+#include "base/time/time.h"
+#include "media/formats/hls/media_playlist.h"
+#include "media/formats/hls/media_segment.h"
+#include "media/formats/hls/playlist_test_builder.h"
+#include "media/formats/hls/types.h"
+#include "url/gurl.h"
+
+namespace media::hls {
+
+class MultivariantPlaylist;
+
+// Helper for building media playlist test cases that allows writing assertions
+// next to the playlist lines they check, as well as "forking" test cases via
+// copying the builder.
+class MediaPlaylistTestBuilder : public PlaylistTestBuilder<MediaPlaylist> {
+ public:
+ MediaPlaylistTestBuilder();
+ ~MediaPlaylistTestBuilder();
+ MediaPlaylistTestBuilder(const MediaPlaylistTestBuilder&);
+ MediaPlaylistTestBuilder(MediaPlaylistTestBuilder&&);
+ MediaPlaylistTestBuilder& operator=(const MediaPlaylistTestBuilder&);
+ MediaPlaylistTestBuilder& operator=(MediaPlaylistTestBuilder&&);
+
+ // Sets the referring multivariant playlist.
+ void SetParent(const MultivariantPlaylist* parent) { parent_ = parent; }
+
+ // Increments the number of segments that are expected to be contained in the
+ // playlist.
+ void ExpectAdditionalSegment() { segment_expectations_.emplace_back(); }
+
+ // Adds a new expectation for the latest segment in the playlist, which will
+ // be checked during `ExpectOk`.
+ template <typename Fn, typename Arg>
+ void ExpectSegment(Fn fn,
+ Arg arg,
+ base::Location location = base::Location::Current()) {
+ segment_expectations_.back().expectations.push_back(base::BindRepeating(
+ std::move(fn), std::move(arg), std::move(location)));
+ }
+
+ void ExpectOk(const base::Location& from = base::Location::Current()) const {
+ PlaylistTestBuilder::ExpectOk(from, parent_);
+ }
+
+ void ExpectError(
+ ParseStatusCode code,
+ const base::Location& from = base::Location::Current()) const {
+ PlaylistTestBuilder::ExpectError(code, from, parent_);
+ }
+
+ private:
+ struct SegmentExpectations {
+ SegmentExpectations();
+ ~SegmentExpectations();
+ SegmentExpectations(const SegmentExpectations&);
+ SegmentExpectations(SegmentExpectations&&);
+ SegmentExpectations& operator=(const SegmentExpectations&);
+ SegmentExpectations& operator=(SegmentExpectations&&);
+
+ std::vector<base::RepeatingCallback<void(const MediaSegment&)>>
+ expectations;
+ };
+
+ void VerifyExpectations(const MediaPlaylist& playlist,
+ const base::Location& from) const override;
+
+ raw_ptr<const MultivariantPlaylist> parent_ = nullptr;
+ std::vector<SegmentExpectations> segment_expectations_;
+};
+
+// Checks that the media playlist has the given type (or `absl::nullopt`).
+inline void HasType(absl::optional<PlaylistType> type,
+ const base::Location& from,
+ const MediaPlaylist& playlist) {
+ EXPECT_EQ(playlist.GetPlaylistType(), type) << from.ToString();
+}
+
+// Checks that the media playlist has the given Target Duration.
+inline void HasTargetDuration(base::TimeDelta value,
+ const base::Location& from,
+ const MediaPlaylist& playlist) {
+ EXPECT_EQ(playlist.GetTargetDuration(), value) << from.ToString();
+}
+
+// Checks that the value of `GetComputedDuration()` matches the given value.
+inline void HasComputedDuration(base::TimeDelta value,
+ const base::Location& from,
+ const MediaPlaylist& playlist) {
+ EXPECT_EQ(playlist.GetComputedDuration(), value) << from.ToString();
+}
+
+// Checks that the value of `GetPartialSegmentInfo()` matches the given value.
+inline void HasPartialSegmentInfo(
+ absl::optional<MediaPlaylist::PartialSegmentInfo> partial_segment_info,
+ const base::Location& from,
+ const MediaPlaylist& playlist) {
+ ASSERT_EQ(partial_segment_info.has_value(),
+ playlist.GetPartialSegmentInfo().has_value())
+ << from.ToString();
+ if (partial_segment_info.has_value()) {
+ ASSERT_DOUBLE_EQ(partial_segment_info->target_duration,
+ playlist.GetPartialSegmentInfo()->target_duration)
+ << from.ToString();
+ }
+}
+
+// Checks the media playlist's `HasMediaSequenceTag` property against
+// the given value.
+inline void HasMediaSequenceTag(bool value,
+ const base::Location& from,
+ const MediaPlaylist& playlist) {
+ EXPECT_EQ(playlist.HasMediaSequenceTag(), value) << from.ToString();
+}
+
+// Checks that the latest media segment has the given duration.
+inline void HasDuration(types::DecimalFloatingPoint duration,
+ const base::Location& from,
+ const MediaSegment& segment) {
+ EXPECT_DOUBLE_EQ(segment.GetDuration(), duration) << from.ToString();
+}
+
+// Checks that the latest media segment has the given media sequence number.
+inline void HasMediaSequenceNumber(types::DecimalInteger number,
+ const base::Location& from,
+ const MediaSegment& segment) {
+ EXPECT_EQ(segment.GetMediaSequenceNumber(), number) << from.ToString();
+}
+
+// Checks that the latest media segment has the given discontinuity sequence
+// number.
+inline void HasDiscontinuitySequenceNumber(types::DecimalInteger number,
+ const base::Location& from,
+ const MediaSegment& segment) {
+ EXPECT_EQ(segment.GetDiscontinuitySequenceNumber(), number)
+ << from.ToString();
+}
+
+// Checks that the latest media segment has the given URI.
+inline void HasUri(GURL uri,
+ const base::Location& from,
+ const MediaSegment& segment) {
+ EXPECT_EQ(segment.GetUri(), uri) << from.ToString();
+}
+
+// Checks that the latest media segment has the given byte range.
+inline void HasByteRange(absl::optional<types::ByteRange> range,
+ const base::Location& from,
+ const MediaSegment& segment) {
+ ASSERT_EQ(segment.GetByteRange().has_value(), range.has_value())
+ << from.ToString();
+ if (range.has_value()) {
+ EXPECT_EQ(segment.GetByteRange()->GetOffset(), range->GetOffset())
+ << from.ToString();
+ EXPECT_EQ(segment.GetByteRange()->GetLength(), range->GetLength())
+ << from.ToString();
+ EXPECT_EQ(segment.GetByteRange()->GetEnd(), range->GetEnd())
+ << from.ToString();
+ }
+}
+
+// Checks the latest media segment's `GetBitRate` property against the given
+// value.
+inline void HasBitRate(absl::optional<types::DecimalInteger> bitrate,
+                       const base::Location& from,
+                       const MediaSegment& segment) {
+  // Stream `from` so a failure reports the caller's location, matching every
+  // other expectation helper in this header (previously `from` was unused).
+  EXPECT_EQ(segment.GetBitRate(), bitrate) << from.ToString();
+}
+
+// Checks the latest media segment's `HasDiscontinuity` property against the
+// given value.
+inline void HasDiscontinuity(bool value,
+ const base::Location& from,
+ const MediaSegment& segment) {
+ EXPECT_EQ(segment.HasDiscontinuity(), value) << from.ToString();
+}
+
+// Checks the latest media segment's `IsGap` property against the given value.
+inline void IsGap(bool value,
+ const base::Location& from,
+ const MediaSegment& segment) {
+ EXPECT_EQ(segment.IsGap(), value) << from.ToString();
+}
+
+// Checks the value of `IsEndList` against the given value.
+inline void IsEndList(bool value,
+ const base::Location& from,
+ const MediaPlaylist& playlist) {
+ EXPECT_EQ(playlist.IsEndList(), value) << from.ToString();
+}
+
+// Checks the value of `IsIFramesOnly` against the given value.
+inline void IsIFramesOnly(bool value,
+ const base::Location& from,
+ const MediaPlaylist& playlist) {
+ EXPECT_EQ(playlist.IsIFramesOnly(), value) << from.ToString();
+}
+
+} // namespace media::hls
+
+#endif  // MEDIA_FORMATS_HLS_MEDIA_PLAYLIST_TEST_BUILDER_H_
diff --git a/chromium/media/formats/hls/media_playlist_unittest.cc b/chromium/media/formats/hls/media_playlist_unittest.cc
index 0d2f5b35bfd..3e759ac642d 100644
--- a/chromium/media/formats/hls/media_playlist_unittest.cc
+++ b/chromium/media/formats/hls/media_playlist_unittest.cc
@@ -4,247 +4,88 @@
#include "media/formats/hls/media_playlist.h"
-#include <vector>
-
-#include "base/callback_list.h"
-#include "base/location.h"
-#include "media/formats/hls/items.h"
-#include "media/formats/hls/media_segment.h"
-#include "media/formats/hls/source_string.h"
+#include <initializer_list>
+#include <limits>
+#include <string>
+#include <utility>
+
+#include "base/strings/string_piece.h"
+#include "media/formats/hls/media_playlist_test_builder.h"
+#include "media/formats/hls/multivariant_playlist.h"
+#include "media/formats/hls/parse_status.h"
#include "media/formats/hls/tags.h"
-#include "media/formats/hls/types.h"
+#include "media/formats/hls/test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "url/gurl.h"
namespace media::hls {
namespace {
-class TestBuilder {
- public:
- void SetUri(GURL uri) { uri_ = std::move(uri); }
-
- // Appends text to the playlist, without a trailing newline.
- void Append(base::StringPiece text) {
- source_.append(text.data(), text.size());
- }
-
- // Appends a new line to the playlist.
- void AppendLine(base::StringPiece line) {
- Append(line);
- Append("\n");
- }
-
- // Adds a new expectation for the playlist, which will be checked during
- // `ExpectOk`.
- template <typename Fn, typename Arg>
- void ExpectPlaylist(Fn fn,
- Arg arg,
- base::Location location = base::Location::Current()) {
- playlist_expectations_.emplace_back(base::BindRepeating(
- std::move(fn), std::move(arg), std::move(location)));
- }
-
- // Increments the number of segments that are expected to be contained in the
- // playlist.
- void ExpectAdditionalSegment() { segment_expectations_.push_back({}); }
-
- // Adds a new expectation for the latest segment in the playlist, which will
- // be checked during `ExpectOk`.
- template <typename Fn, typename Arg>
- void ExpectSegment(Fn fn,
- Arg arg,
- base::Location location = base::Location::Current()) {
- segment_expectations_.back().expectations.emplace_back(base::BindRepeating(
- std::move(fn), std::move(arg), std::move(location)));
- }
-
- // Attempts to parse the playlist as-is, checking for the given
- // error code.
- void ExpectError(
- ParseStatusCode code,
- const base::Location& from = base::Location::Current()) const {
- auto result = MediaPlaylist::Parse(source_, uri_);
- ASSERT_TRUE(result.has_error()) << from.ToString();
- EXPECT_EQ(std::move(result).error().code(), code) << from.ToString();
- }
-
- // Attempts to parse the playlist as-is, checking all playlist and segment
- // expectations.
- void ExpectOk(const base::Location& from = base::Location::Current()) const {
- auto result = MediaPlaylist::Parse(source_, uri_);
- ASSERT_TRUE(result.has_value())
- << "Error: "
- << ParseStatusCodeToString(std::move(result).error().code()) << "\n"
- << from.ToString();
- auto playlist = std::move(result).value();
-
- for (const auto& expectation : playlist_expectations_) {
- expectation.Run(playlist);
- }
-
- ASSERT_EQ(segment_expectations_.size(), playlist.GetSegments().size())
- << from.ToString();
- for (size_t i = 0; i < segment_expectations_.size(); ++i) {
- const auto& segment = playlist.GetSegments().at(i);
- const auto& expectations = segment_expectations_.at(i);
- for (const auto& expectation : expectations.expectations) {
- expectation.Run(segment);
- }
- }
+MultivariantPlaylist CreateMultivariantPlaylist(
+ std::initializer_list<base::StringPiece> lines,
+ GURL uri = GURL("http://localhost/multi_playlist.m3u8")) {
+ std::string source;
+ for (auto line : lines) {
+ source.append(line.data(), line.size());
+ source.append("\n");
}
- private:
- struct SegmentExpectations {
- std::vector<base::RepeatingCallback<void(const MediaSegment&)>>
- expectations;
- };
-
- std::vector<SegmentExpectations> segment_expectations_;
- std::vector<base::RepeatingCallback<void(const MediaPlaylist&)>>
- playlist_expectations_;
- GURL uri_ = GURL("http://localhost/playlist.m3u8");
- std::string source_;
-};
-
-void HasVersion(types::DecimalInteger version,
- const base::Location& from,
- const MediaPlaylist& playlist) {
- EXPECT_EQ(playlist.GetVersion(), version) << from.ToString();
-}
-
-void HasType(absl::optional<PlaylistType> type,
- const base::Location& from,
- const MediaPlaylist& playlist) {
- EXPECT_EQ(playlist.GetPlaylistType(), type) << from.ToString();
-}
-
-void HasDuration(types::DecimalFloatingPoint duration,
- const base::Location& from,
- const MediaSegment& segment) {
- EXPECT_DOUBLE_EQ(segment.GetDuration(), duration) << from.ToString();
-}
-
-void HasUri(GURL uri, const base::Location& from, const MediaSegment& segment) {
- EXPECT_EQ(segment.GetUri(), uri) << from.ToString();
-}
-
-void HasDiscontinuity(bool value,
- const base::Location& from,
- const MediaSegment& segment) {
- EXPECT_EQ(segment.HasDiscontinuity(), value) << from.ToString();
-}
-
-void IsGap(bool value,
- const base::Location& from,
- const MediaSegment& segment) {
- EXPECT_EQ(segment.IsGap(), value) << from.ToString();
+ // Parse the given source. Failure here isn't supposed to be part of the test,
+ // so use a CHECK.
+ auto result = MultivariantPlaylist::Parse(source, std::move(uri));
+ CHECK(result.has_value());
+ return std::move(result).value();
}
} // namespace
-TEST(HlsFormatParserTest, ParseMediaPlaylist_BadLineEndings) {
- TestBuilder builder;
- builder.AppendLine("#EXTM3U");
-
- {
- // Double carriage-return is not allowed
- auto fork = builder;
- fork.Append("\r\r\n");
- fork.ExpectError(ParseStatusCode::kInvalidEOL);
- }
-
- {
- // Carriage-return not followed by a newline is not allowed
- auto fork = builder;
- fork.Append("#EXT-X-VERSION:5\r");
- fork.ExpectError(ParseStatusCode::kInvalidEOL);
- }
-
- builder.Append("\r\n");
- builder.ExpectOk();
-}
-
-TEST(HlsFormatParserTest, ParseMediaPlaylist_MissingM3u) {
- // #EXTM3U must be the very first line
- TestBuilder builder;
- builder.AppendLine("");
- builder.AppendLine("#EXTM3U");
- builder.ExpectError(ParseStatusCode::kPlaylistMissingM3uTag);
-
- builder = TestBuilder();
- builder.AppendLine("#EXT-X-VERSION:5");
- builder.AppendLine("#EXTM3U");
- builder.ExpectError(ParseStatusCode::kPlaylistMissingM3uTag);
-
- // Test with invalid line ending
- builder = TestBuilder();
- builder.Append("#EXTM3U");
- builder.ExpectError(ParseStatusCode::kPlaylistMissingM3uTag);
-
- // Test with invalid format
- builder = TestBuilder();
- builder.AppendLine("#EXTM3U:");
- builder.ExpectError(ParseStatusCode::kPlaylistMissingM3uTag);
- builder = TestBuilder();
- builder.AppendLine("#EXTM3U:1");
- builder.ExpectError(ParseStatusCode::kPlaylistMissingM3uTag);
-
- // Extra M3U tag is OK
- builder = TestBuilder();
- builder.AppendLine("#EXTM3U");
- builder.AppendLine("#EXTM3U");
- builder.ExpectOk();
-}
-
-TEST(HlsFormatParserTest, ParseMediaPlaylist_UnknownTag) {
- TestBuilder builder;
- builder.AppendLine("#EXTM3U");
-
- // Unrecognized tags should not result in an error
- builder.AppendLine("#EXT-UNKNOWN-TAG");
- builder.ExpectOk();
-}
-
-TEST(HlsFormatParserTest, ParseMediaPlaylist_XDiscontinuityTag) {
- TestBuilder builder;
+TEST(HlsMediaPlaylistTest, XDiscontinuityTag) {
+ MediaPlaylistTestBuilder builder;
builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+ builder.ExpectPlaylist(HasVersion, 1);
+ builder.ExpectPlaylist(HasTargetDuration, base::Seconds(10));
// Default discontinuity state is false
builder.AppendLine("#EXTINF:9.9,\t");
builder.AppendLine("video.ts");
builder.ExpectAdditionalSegment();
builder.ExpectSegment(HasDiscontinuity, false);
+ builder.ExpectSegment(HasDiscontinuitySequenceNumber, 0);
builder.AppendLine("#EXT-X-DISCONTINUITY");
builder.AppendLine("#EXTINF:9.9,\t");
builder.AppendLine("video.ts");
builder.ExpectAdditionalSegment();
builder.ExpectSegment(HasDiscontinuity, true);
+ builder.ExpectSegment(HasDiscontinuitySequenceNumber, 1);
// The discontinuity tag does not apply to subsequent segments
builder.AppendLine("#EXTINF:9.9,\t");
builder.AppendLine("video.ts");
builder.ExpectAdditionalSegment();
builder.ExpectSegment(HasDiscontinuity, false);
+ builder.ExpectSegment(HasDiscontinuitySequenceNumber, 1);
- // The discontinuity tag may only appear once per segment
- {
- auto fork = builder;
- fork.AppendLine("#EXT-X-DISCONTINUITY");
- fork.AppendLine("#EXT-X-DISCONTINUITY");
- fork.AppendLine("#EXTINF:9.9,\t");
- fork.AppendLine("video.ts");
- fork.ExpectAdditionalSegment();
- fork.ExpectSegment(HasDiscontinuity, true);
- fork.ExpectError(ParseStatusCode::kPlaylistHasDuplicateTags);
- }
+ // The discontinuity tag may appear multiple times per segment
+ builder.AppendLine("#EXT-X-DISCONTINUITY");
+ builder.AppendLine("#EXT-X-DISCONTINUITY");
+ builder.AppendLine("#EXTINF:9.9,\t");
+ builder.AppendLine("video.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasDiscontinuity, true);
+ builder.ExpectSegment(HasDiscontinuitySequenceNumber, 3);
builder.ExpectOk();
}
-TEST(HlsFormatParserTest, ParseMediaPlaylist_XGapTag) {
- TestBuilder builder;
+TEST(HlsMediaPlaylistTest, XGapTag) {
+ MediaPlaylistTestBuilder builder;
builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+ builder.ExpectPlaylist(HasVersion, 1);
+ builder.ExpectPlaylist(HasTargetDuration, base::Seconds(10));
// Default gap state is false
builder.AppendLine("#EXTINF:9.9,\t");
@@ -279,51 +120,13 @@ TEST(HlsFormatParserTest, ParseMediaPlaylist_XGapTag) {
builder.ExpectOk();
}
-TEST(HlsFormatParserTest, ParseMediaPlaylist_VersionChecks) {
- TestBuilder builder;
- builder.AppendLine("#EXTM3U");
-
- {
- // Default version is 1
- auto fork = builder;
- fork.ExpectPlaylist(HasVersion, 1);
- fork.ExpectOk();
- }
-
- {
- // "-1" is not a valid decimal-integer
- auto fork = builder;
- fork.AppendLine("#EXT-X-VERSION:-1");
- fork.ExpectError(ParseStatusCode::kMalformedTag);
- }
-
- {
- // "0" is not a valid version
- auto fork = builder;
- fork.AppendLine("#EXT-X-VERSION:0");
- fork.ExpectError(ParseStatusCode::kInvalidPlaylistVersion);
- }
-
- for (int i = 1; i <= 10; ++i) {
- auto fork = builder;
- fork.AppendLine("#EXT-X-VERSION:" + base::NumberToString(i));
- fork.ExpectPlaylist(HasVersion, i);
- fork.ExpectOk();
- }
-
- for (int i : {11, 12, 100, 999}) {
- // Versions 11+ are not supported by this parser
- auto fork = builder;
- fork.AppendLine("#EXT-X-VERSION:" + base::NumberToString(i));
- fork.ExpectError(ParseStatusCode::kPlaylistHasUnsupportedVersion);
- }
-}
-
-TEST(HlsFormatParserTest, ParseMediaPlaylist_Segments) {
- TestBuilder builder;
+TEST(HlsMediaPlaylistTest, Segments) {
+ MediaPlaylistTestBuilder builder;
builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
builder.AppendLine("#EXT-X-VERSION:5");
builder.ExpectPlaylist(HasVersion, 5);
+ builder.ExpectPlaylist(HasTargetDuration, base::Seconds(10));
builder.AppendLine("#EXTINF:9.2,\t");
builder.AppendLine("video.ts");
@@ -332,6 +135,7 @@ TEST(HlsFormatParserTest, ParseMediaPlaylist_Segments) {
builder.ExpectSegment(HasDuration, 9.2);
builder.ExpectSegment(HasUri, GURL("http://localhost/video.ts"));
builder.ExpectSegment(IsGap, false);
+ builder.ExpectSegment(HasMediaSequenceNumber, 0);
// Segments without #EXTINF tags are not allowed
{
@@ -348,6 +152,7 @@ TEST(HlsFormatParserTest, ParseMediaPlaylist_Segments) {
builder.ExpectSegment(HasDuration, 9.3);
builder.ExpectSegment(IsGap, false);
builder.ExpectSegment(HasUri, GURL("http://localhost/foo.ts"));
+ builder.ExpectSegment(HasMediaSequenceNumber, 1);
builder.AppendLine("#EXTINF:9.2,bar");
builder.AppendLine("http://foo/bar.ts");
@@ -356,15 +161,37 @@ TEST(HlsFormatParserTest, ParseMediaPlaylist_Segments) {
builder.ExpectSegment(HasDuration, 9.2);
builder.ExpectSegment(IsGap, false);
builder.ExpectSegment(HasUri, GURL("http://foo/bar.ts"));
+ builder.ExpectSegment(HasMediaSequenceNumber, 2);
+
+ // Segments must not exceed the playlist's target duration when rounded to the
+ // nearest integer
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXTINF:10.499,bar");
+ fork.AppendLine("bar.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectOk();
+
+ fork.AppendLine("#EXTINF:10.5,baz");
+ fork.AppendLine("baz.ts");
+ fork.ExpectError(ParseStatusCode::kMediaSegmentExceedsTargetDuration);
+ }
builder.ExpectOk();
}
-TEST(HlsFormatParserTest, ParseMediaPlaylist_Define) {
- TestBuilder builder;
+// This test is similar to the `HlsMultivariantPlaylistTest` test of the same
+// name, but due to subtle differences between media playlists and multivariant
+// playlists it's difficult to combine them. If new cases are added here that are
+// also relevant to multivariant playlists, they should be added to that test as
+// well.
+TEST(HlsMediaPlaylistTest, VariableSubstitution) {
+ MediaPlaylistTestBuilder builder;
builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
builder.AppendLine("#EXT-X-VERSION:8");
builder.ExpectPlaylist(HasVersion, 8);
+ builder.ExpectPlaylist(HasTargetDuration, base::Seconds(10));
builder.AppendLine(R"(#EXT-X-DEFINE:NAME="ROOT",VALUE="http://video.com")");
builder.AppendLine(R"(#EXT-X-DEFINE:NAME="MOVIE",VALUE="some_video/low")");
@@ -409,7 +236,71 @@ TEST(HlsFormatParserTest, ParseMediaPlaylist_Define) {
fork.ExpectError(ParseStatusCode::kImportedVariableInParentlessPlaylist);
}
- // Variables may not be substituted recursively
+ // Test importing variables in a playlist with a parent
+ auto parent = CreateMultivariantPlaylist(
+ {"#EXTM3U", "#EXT-X-VERSION:8",
+ R"(#EXT-X-DEFINE:NAME="IMPORTED",VALUE="HELLO")"});
+ {
+ // Referring to a parent playlist variable without importing it is an error
+ auto fork = builder;
+ fork.SetParent(&parent);
+ fork.AppendLine("#EXTINF:9.9,\t");
+ fork.AppendLine("segments/{$IMPORTED}.ts");
+ fork.ExpectError(ParseStatusCode::kVariableUndefined);
+ }
+ {
+ // Locally overwriting an unimported variable from a parent playlist is NOT
+ // an error
+ auto fork = builder;
+ fork.SetParent(&parent);
+ fork.AppendLine(R"(#EXT-X-DEFINE:NAME="IMPORTED",VALUE="WORLD")");
+ fork.AppendLine("#EXTINF:9.9,\t");
+ fork.AppendLine("segments/{$IMPORTED}.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segments/WORLD.ts"));
+ fork.ExpectOk();
+
+ // Importing a variable once it's been defined is an error
+ fork.AppendLine(R"(#EXT-X-DEFINE:IMPORT="IMPORTED")");
+ fork.ExpectError(ParseStatusCode::kVariableDefinedMultipleTimes);
+ }
+ {
+ // Defining a variable once it's been imported is an error
+ auto fork = builder;
+ fork.SetParent(&parent);
+ fork.AppendLine(R"(#EXT-X-DEFINE:IMPORT="IMPORTED")");
+ fork.AppendLine(R"(#EXT-X-DEFINE:NAME="IMPORTED",VALUE="WORLD")");
+ fork.ExpectError(ParseStatusCode::kVariableDefinedMultipleTimes);
+ }
+ {
+ // Importing the same variable twice is an error
+ auto fork = builder;
+ fork.SetParent(&parent);
+ fork.AppendLine(R"(#EXT-X-DEFINE:IMPORT="IMPORTED")");
+ fork.AppendLine(R"(#EXT-X-DEFINE:IMPORT="IMPORTED")");
+ fork.ExpectError(ParseStatusCode::kVariableDefinedMultipleTimes);
+ }
+ {
+ // Importing a variable that hasn't been defined in the parent playlist is
+ // an error
+ auto fork = builder;
+ fork.SetParent(&parent);
+ fork.AppendLine(R"(#EXT-X-DEFINE:IMPORT="FOO")");
+ fork.ExpectError(ParseStatusCode::kImportedVariableUndefined);
+ }
+ {
+ // Test actually using an imported variable
+ auto fork = builder;
+ fork.SetParent(&parent);
+ fork.AppendLine(R"(#EXT-X-DEFINE:IMPORT="IMPORTED")");
+ fork.AppendLine("#EXTINF:9.9,\t");
+ fork.AppendLine("segments/{$IMPORTED}.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segments/HELLO.ts"));
+ fork.ExpectOk();
+ }
+
+ // Variables are not resolved recursively
builder.AppendLine(R"(#EXT-X-DEFINE:NAME="BAR",VALUE="BAZ")");
builder.AppendLine(R"(#EXT-X-DEFINE:NAME="FOO",VALUE="{$BAR}")");
builder.AppendLine("#EXTINF:9.9,\t");
@@ -420,9 +311,10 @@ TEST(HlsFormatParserTest, ParseMediaPlaylist_Define) {
builder.ExpectOk();
}
-TEST(HlsFormatParserTest, ParseMediaPlaylist_PlaylistType) {
- TestBuilder builder;
+TEST(HlsMediaPlaylistTest, PlaylistType) {
+ MediaPlaylistTestBuilder builder;
builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
// Without the EXT-X-PLAYLIST-TYPE tag, the playlist has no type.
{
@@ -477,4 +369,732 @@ TEST(HlsFormatParserTest, ParseMediaPlaylist_PlaylistType) {
}
}
+TEST(HlsMediaPlaylistTest, MultivariantPlaylistTag) {
+ MediaPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+
+ // Media playlists may not contain tags exclusive to multivariant playlists
+ for (TagName name = ToTagName(MultivariantPlaylistTagName::kMinValue);
+ name <= ToTagName(MultivariantPlaylistTagName::kMaxValue); ++name) {
+ auto tag_line = "#" + std::string{TagNameToString(name)};
+ auto fork = builder;
+ fork.AppendLine(tag_line);
+ fork.ExpectError(ParseStatusCode::kMediaPlaylistHasMultivariantPlaylistTag);
+ }
+}
+
+TEST(HlsMediaPlaylistTest, XIndependentSegmentsTagInParent) {
+ auto parent1 = CreateMultivariantPlaylist({
+ "#EXTM3U",
+ "#EXT-X-INDEPENDENT-SEGMENTS",
+ });
+
+ // Parent value should carryover to media playlist
+ MediaPlaylistTestBuilder builder;
+ builder.SetParent(&parent1);
+ builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+ builder.ExpectPlaylist(HasIndependentSegments, true);
+ builder.ExpectOk();
+
+ // It's OK for this tag to reappear in the media playlist
+ builder.AppendLine("#EXT-X-INDEPENDENT-SEGMENTS");
+ builder.ExpectOk();
+
+ // Without that tag in the parent, the value depends entirely on its presence
+ // in the child
+ auto parent2 = CreateMultivariantPlaylist({"#EXTM3U"});
+ builder = MediaPlaylistTestBuilder();
+ builder.SetParent(&parent2);
+ builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+ {
+ auto fork = builder;
+ fork.ExpectPlaylist(HasIndependentSegments, false);
+ fork.ExpectOk();
+ }
+ builder.AppendLine("#EXT-X-INDEPENDENT-SEGMENTS");
+ builder.ExpectPlaylist(HasIndependentSegments, true);
+ builder.ExpectOk();
+ EXPECT_FALSE(parent2.AreSegmentsIndependent());
+}
+
+TEST(HlsMediaPlaylistTest, XTargetDurationTag) {
+ MediaPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+
+ // The XTargetDurationTag tag is required
+ builder.ExpectError(ParseStatusCode::kMediaPlaylistMissingTargetDuration);
+
+ // The XTargetDurationTag must appear exactly once
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+ builder.ExpectPlaylist(HasTargetDuration, base::Seconds(10));
+ builder.ExpectOk();
+
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-TARGETDURATION:10");
+ fork.ExpectError(ParseStatusCode::kPlaylistHasDuplicateTags);
+ }
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-TARGETDURATION:11");
+ fork.ExpectError(ParseStatusCode::kPlaylistHasDuplicateTags);
+ }
+
+ // The XTargetDurationTag must be a valid DecimalInteger (unsigned)
+ for (base::StringPiece x : {"-1", "0.5", "-1.5", "999999999999999999999"}) {
+ MediaPlaylistTestBuilder builder2;
+ builder2.AppendLine("#EXTM3U");
+ builder2.AppendLine("#EXT-X-TARGETDURATION:", x);
+ builder2.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+}
+
+TEST(HlsMediaPlaylistTest, XEndListTag) {
+ MediaPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+
+ // Without the 'EXT-X-ENDLIST' tag, the default value is false, regardless of
+ // the playlist type.
+ {
+ for (const base::StringPiece type : {"", "EVENT", "VOD"}) {
+ auto fork = builder;
+ if (!type.empty()) {
+ fork.AppendLine("#EXT-X-PLAYLIST-TYPE:", type);
+ }
+ fork.ExpectPlaylist(IsEndList, false);
+ fork.ExpectOk();
+ }
+ }
+
+ // The 'EXT-X-ENDLIST' tag may not have any content
+ {
+ for (const base::StringPiece x : {"", "FOO=BAR", "1"}) {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-ENDLIST:", x);
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+ }
+
+ // The EXT-X-ENDLIST tag can appear anywhere in the playlist
+ builder.AppendLine("#EXTINF:9.2,\t");
+ builder.AppendLine("segment0.ts");
+ builder.ExpectAdditionalSegment();
+
+ builder.AppendLine("#EXT-X-ENDLIST");
+ builder.ExpectPlaylist(IsEndList, true);
+
+ builder.AppendLine("#EXTINF:9.2,\n");
+ builder.AppendLine("segment1.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectOk();
+
+ // The EXT-X-ENDLIST tag may not appear twice
+ builder.AppendLine("#EXT-X-ENDLIST");
+ builder.ExpectError(ParseStatusCode::kPlaylistHasDuplicateTags);
+}
+
+TEST(HlsMediaPlaylistTest, XIFramesOnlyTag) {
+ MediaPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+
+ // Without the 'EXT-X-I-FRAMES-ONLY' tag, the default value is false.
+ {
+ auto fork = builder;
+ fork.ExpectPlaylist(IsIFramesOnly, false);
+ fork.ExpectOk();
+ }
+
+ // The 'EXT-X-I-FRAMES-ONLY' tag may not have any content
+ {
+ for (const base::StringPiece x : {"", "FOO=BAR", "1"}) {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-I-FRAMES-ONLY:", x);
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+ }
+
+ builder.AppendLine("#EXT-X-I-FRAMES-ONLY");
+ builder.ExpectPlaylist(IsIFramesOnly, true);
+
+ // This should not affect the calculation of the playlist's duration
+ builder.AppendLine("#EXTINF:10,\t");
+ builder.AppendLine("segment0.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasDuration, 10);
+
+ builder.AppendLine("#EXTINF:10,\t");
+ builder.AppendLine("segment1.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasDuration, 10);
+
+ builder.ExpectPlaylist(HasComputedDuration, base::Seconds(20));
+ builder.ExpectOk();
+
+ // The 'EXT-X-I-FRAMES-ONLY' tag should not appear twice
+ builder.AppendLine("#EXT-X-I-FRAMES-ONLY");
+ builder.ExpectError(ParseStatusCode::kPlaylistHasDuplicateTags);
+}
+
+TEST(HlsMediaPlaylistTest, XMediaSequenceTag) {
+ MediaPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+
+ // The EXT-X-MEDIA-SEQUENCE tag's content must be a valid DecimalInteger
+ {
+ for (const base::StringPiece x : {"", ":-1", ":{$foo}", ":1.5", ":one"}) {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-MEDIA-SEQUENCE", x);
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+ }
+ // The EXT-X-MEDIA-SEQUENCE tag may not appear twice
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-MEDIA-SEQUENCE:0");
+ fork.AppendLine("#EXT-X-MEDIA-SEQUENCE:1");
+ fork.ExpectError(ParseStatusCode::kPlaylistHasDuplicateTags);
+ }
+ // The EXT-X-MEDIA-SEQUENCE tag must appear before any media segment
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXTINF:9.8,\t");
+ fork.AppendLine("segment0.ts");
+ fork.AppendLine("#EXT-X-MEDIA-SEQUENCE:0");
+ fork.ExpectError(ParseStatusCode::kMediaSegmentBeforeMediaSequenceTag);
+ }
+
+ const auto fill_playlist = [](auto& builder, auto first_sequence_number) {
+ builder.AppendLine("#EXTINF:9.8,\t");
+ builder.AppendLine("segment0.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment0.ts"));
+ builder.ExpectSegment(HasMediaSequenceNumber, first_sequence_number);
+ builder.ExpectSegment(HasDiscontinuitySequenceNumber, 0);
+
+ builder.AppendLine("#EXTINF:9.8,\t");
+ builder.AppendLine("segment1.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, first_sequence_number + 1);
+ builder.ExpectSegment(HasDiscontinuitySequenceNumber, 0);
+
+ builder.AppendLine("#EXTINF:9.8,\t");
+ builder.AppendLine("segment2.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, first_sequence_number + 2);
+ builder.ExpectSegment(HasDiscontinuitySequenceNumber, 0);
+ };
+
+ // If the playlist does not contain the EXT-X-MEDIA-SEQUENCE tag, the default
+ // starting segment number is 0.
+ auto fork = builder;
+ fill_playlist(fork, 0);
+ fork.ExpectPlaylist(HasMediaSequenceTag, false);
+ fork.ExpectOk();
+
+ // If the playlist has the EXT-X-MEDIA-SEQUENCE tag, it specifies the starting
+ // segment number.
+ fork = builder;
+ fork.AppendLine("#EXT-X-MEDIA-SEQUENCE:0");
+ fill_playlist(fork, 0);
+ fork.ExpectPlaylist(HasMediaSequenceTag, true);
+ fork.ExpectOk();
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-MEDIA-SEQUENCE:15");
+ fill_playlist(fork, 15);
+ fork.ExpectPlaylist(HasMediaSequenceTag, true);
+ fork.ExpectOk();
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-MEDIA-SEQUENCE:9999");
+ fill_playlist(fork, 9999);
+ fork.ExpectPlaylist(HasMediaSequenceTag, true);
+ fork.ExpectOk();
+}
+
+TEST(HlsMediaPlaylistTest, XDiscontinuitySequenceTag) {
+ MediaPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+
+ // The EXT-X-DISCONTINUITY-SEQUENCE tag must be a valid DecimalInteger
+ {
+ for (const base::StringPiece x : {"", ":-1", ":{$foo}", ":1.5", ":one"}) {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-DISCONTINUITY-SEQUENCE", x);
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+ }
+ // The EXT-X-DISCONTINUITY-SEQUENCE tag may not appear twice
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-DISCONTINUITY-SEQUENCE:1");
+ fork.AppendLine("#EXT-X-DISCONTINUITY-SEQUENCE:1");
+ fork.ExpectError(ParseStatusCode::kPlaylistHasDuplicateTags);
+ }
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-DISCONTINUITY-SEQUENCE:0");
+ fork.AppendLine("#EXT-X-DISCONTINUITY");
+ fork.AppendLine("#EXT-X-DISCONTINUITY-SEQUENCE:1");
+ fork.ExpectError(ParseStatusCode::kPlaylistHasDuplicateTags);
+ }
+ // The EXT-X-DISCONTINUITY-SEQUENCE tag must appear before any media segment
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXTINF:9.8,\t");
+ fork.AppendLine("segment0.ts");
+ fork.AppendLine("#EXT-X-DISCONTINUITY-SEQUENCE:0");
+ fork.ExpectError(
+ ParseStatusCode::kMediaSegmentBeforeDiscontinuitySequenceTag);
+ }
+ // The EXT-X-DISCONTINUITY-SEQUENCE tag must appear before any
+ // EXT-X-DISCONTINUITY tag
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-DISCONTINUITY");
+ fork.AppendLine("#EXT-X-DISCONTINUITY-SEQUENCE:0");
+ fork.AppendLine("#EXTINF:9.8,\t");
+ fork.AppendLine("segment0.ts");
+ fork.ExpectError(
+ ParseStatusCode::kDiscontinuityTagBeforeDiscontinuitySequenceTag);
+ }
+
+ const auto fill_playlist = [](auto& builder, auto first_media_sequence_number,
+ auto first_discontinuity_sequence_number) {
+ builder.AppendLine("#EXTINF:9.8,\t");
+ builder.AppendLine("segment0.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment0.ts"));
+ builder.ExpectSegment(HasDiscontinuity, false);
+ builder.ExpectSegment(HasMediaSequenceNumber, first_media_sequence_number);
+ builder.ExpectSegment(HasDiscontinuitySequenceNumber,
+ first_discontinuity_sequence_number);
+
+ builder.AppendLine("#EXT-X-DISCONTINUITY");
+ builder.AppendLine("#EXTINF:9.8,\t");
+ builder.AppendLine("segment1.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasDiscontinuity, true);
+ builder.ExpectSegment(HasMediaSequenceNumber,
+ first_media_sequence_number + 1);
+ builder.ExpectSegment(HasDiscontinuitySequenceNumber,
+ first_discontinuity_sequence_number + 1);
+
+ builder.AppendLine("#EXTINF:9.8,\t");
+ builder.AppendLine("segment2.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasDiscontinuity, false);
+ builder.ExpectSegment(HasMediaSequenceNumber,
+ first_media_sequence_number + 2);
+ builder.ExpectSegment(HasDiscontinuitySequenceNumber,
+ first_discontinuity_sequence_number + 1);
+ };
+
+ // If the playlist does not contain the EXT-X-DISCONTINUITY-SEQUENCE tag, the
+ // default starting value is 0.
+ auto fork = builder;
+ fill_playlist(fork, 0, 0);
+ fork.ExpectOk();
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-MEDIA-SEQUENCE:10");
+ fill_playlist(fork, 10, 0);
+ fork.ExpectOk();
+
+ // If the playlist has the EXT-X-DISCONTINUITY-SEQUENCE tag, it specifies the
+ // starting value.
+ fork = builder;
+ fork.AppendLine("#EXT-X-DISCONTINUITY-SEQUENCE:5");
+ fill_playlist(fork, 0, 5);
+ fork.ExpectOk();
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-MEDIA-SEQUENCE:10");
+ fork.AppendLine("#EXT-X-DISCONTINUITY-SEQUENCE:5");
+ fill_playlist(fork, 10, 5);
+ fork.ExpectOk();
+
+ // If the very first segment is a discontinuity, it should still have a
+ // subsequent discontinuity sequence number.
+ fork = builder;
+ fork.AppendLine("#EXT-X-MEDIA-SEQUENCE:10");
+ fork.AppendLine("#EXT-X-DISCONTINUITY");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasDiscontinuity, true);
+ fork.ExpectSegment(HasMediaSequenceNumber, 10);
+ fork.ExpectSegment(HasDiscontinuitySequenceNumber, 1);
+ fill_playlist(fork, 11, 1);
+ fork.ExpectOk();
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-MEDIA-SEQUENCE:10");
+ fork.AppendLine("#EXT-X-DISCONTINUITY-SEQUENCE:5");
+ fork.AppendLine("#EXT-X-DISCONTINUITY");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasDiscontinuity, true);
+ fork.ExpectSegment(HasMediaSequenceNumber, 10);
+ fork.ExpectSegment(HasDiscontinuitySequenceNumber, 6);
+ fill_playlist(fork, 11, 6);
+ fork.ExpectOk();
+}
+
+TEST(HlsMediaPlaylistTest, XByteRangeTag) {
+ MediaPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+
+ // EXT-X-BYTERANGE content must be a valid ByteRange
+ {
+ for (base::StringPiece x :
+ {"", ":", ": 12@34", ":12@34 ", ":12@", ":12@{$offset}"}) {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE", x);
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment.ts");
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+ }
+ // EXT-X-BYTERANGE may not appear twice per-segment.
+ // TODO(https://crbug.com/1328528): Some players support this, using only the
+ // final occurrence.
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:12@34");
+ fork.AppendLine("#EXT-X-BYTERANGE:34@56");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment.ts");
+ fork.ExpectError(ParseStatusCode::kPlaylistHasDuplicateTags);
+ }
+ // Offset is required if this is the first media segment.
+ // TODO(https://crbug.com/1328528): Some players support this, default offset
+ // to 0.
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:12");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment.ts");
+ fork.ExpectError(ParseStatusCode::kByteRangeRequiresOffset);
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:12@34");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasByteRange, CreateByteRange(12, 34));
+ fork.ExpectOk();
+ }
+ // Offset is required if the previous media segment is not a byterange.
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment.ts");
+ fork.AppendLine("#EXT-X-BYTERANGE:12");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment.ts");
+ fork.ExpectError(ParseStatusCode::kByteRangeRequiresOffset);
+
+ fork = builder;
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segment.ts"));
+ fork.ExpectSegment(HasByteRange, absl::nullopt);
+ fork.AppendLine("#EXT-X-BYTERANGE:12@34");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segment.ts"));
+ fork.ExpectSegment(HasByteRange, CreateByteRange(12, 34));
+ fork.ExpectOk();
+ }
+ // Offset is required if the previous media segment is a byterange of a
+ // different resource.
+ // TODO(https://crbug.com/1328528): Some players support this.
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:12@34");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.AppendLine("#EXT-X-BYTERANGE:56");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment2.ts");
+ fork.ExpectError(ParseStatusCode::kByteRangeRequiresOffset);
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:12@34");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segment1.ts"));
+ fork.ExpectSegment(HasByteRange, CreateByteRange(12, 34));
+ fork.AppendLine("#EXT-X-BYTERANGE:56@78");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment2.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segment2.ts"));
+ fork.ExpectSegment(HasByteRange, CreateByteRange(56, 78));
+ fork.ExpectOk();
+ }
+ // Offset is required even if a prior segment is a byterange of the same
+ // resource, but not the immediately previous segment.
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:12@34");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment2.ts");
+ fork.AppendLine("#EXT-X-BYTERANGE:45");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectError(ParseStatusCode::kByteRangeRequiresOffset);
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:12@34");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segment1.ts"));
+ fork.ExpectSegment(HasByteRange, CreateByteRange(12, 34));
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment2.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segment2.ts"));
+ fork.ExpectSegment(HasByteRange, absl::nullopt);
+ fork.AppendLine("#EXT-X-BYTERANGE:56@78");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segment1.ts"));
+ fork.ExpectSegment(HasByteRange, CreateByteRange(56, 78));
+ fork.ExpectOk();
+ }
+ // Offset can be elided if the previous segment is a byterange of the same
+ // resource.
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:12@34");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segment1.ts"));
+ fork.ExpectSegment(HasByteRange, CreateByteRange(12, 34));
+ fork.AppendLine("#EXT-X-BYTERANGE:56");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segment1.ts"));
+ fork.ExpectSegment(HasByteRange, CreateByteRange(56, 46));
+
+    // If an explicit offset is given (even if it's eligible to be elided), it
+    // must be used.
+ fork.AppendLine("#EXT-X-BYTERANGE:78@99999");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasUri, GURL("http://localhost/segment1.ts"));
+ fork.ExpectSegment(HasByteRange, CreateByteRange(78, 99999));
+ fork.ExpectOk();
+ }
+ // Range given by tag may not be empty or overflow a uint64, even across
+ // segments.
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:0@0");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectError(ParseStatusCode::kByteRangeInvalid);
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:18446744073709551615@1");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectError(ParseStatusCode::kByteRangeInvalid);
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:1@18446744073709551615");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectError(ParseStatusCode::kByteRangeInvalid);
+
+ fork = builder;
+ fork.AppendLine(
+ "#EXT-X-BYTERANGE:18446744073709551615@18446744073709551615");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectError(ParseStatusCode::kByteRangeInvalid);
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-BYTERANGE:1@18446744073709551614");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectAdditionalSegment();
+ fork.ExpectSegment(HasByteRange, CreateByteRange(1, 18446744073709551614u));
+ fork.ExpectOk();
+
+ // Since the previous segment ends at uint64_t::max, an additional
+ // contiguous byterange would overflow.
+ fork.AppendLine("#EXT-X-BYTERANGE:1");
+ fork.AppendLine("#EXTINF:9.2,\t");
+ fork.AppendLine("segment1.ts");
+ fork.ExpectError(ParseStatusCode::kByteRangeInvalid);
+ }
+}
+
+TEST(HlsMediaPlaylistTest, XBitrateTag) {
+ MediaPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+
+ // The EXT-X-BITRATE tag must be a valid DecimalInteger
+ {
+ for (base::StringPiece x : {"", ":", ": 1", ":1 ", ":-1", ":{$bitrate}"}) {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-BITRATE", x);
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+ }
+
+ // The EXT-X-BITRATE tag applies only to the segments that it appears after
+ builder.AppendLine("#EXTINF:9.2,");
+ builder.AppendLine("segment0.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, 0);
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment0.ts"));
+ builder.ExpectSegment(HasBitRate, absl::nullopt);
+
+ builder.AppendLine("#EXT-X-BITRATE:15");
+ builder.AppendLine("#EXTINF:9.2,");
+ builder.AppendLine("segment1.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, 1);
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment1.ts"));
+ builder.ExpectSegment(HasBitRate, 15000);
+
+ builder.AppendLine("#EXTINF:9.2,");
+ builder.AppendLine("segment2.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, 2);
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment2.ts"));
+ builder.ExpectSegment(HasBitRate, 15000);
+
+ // The EXT-X-BITRATE tag does not apply to segments that are byteranges
+ builder.AppendLine("#EXT-X-BYTERANGE:1024@0");
+ builder.AppendLine("#EXTINF:9.2,");
+ builder.AppendLine("segment3.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, 3);
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment3.ts"));
+ builder.ExpectSegment(HasByteRange, CreateByteRange(1024, 0));
+ builder.ExpectSegment(HasBitRate, absl::nullopt);
+
+ builder.AppendLine("#EXTINF:9.2,");
+ builder.AppendLine("segment4.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, 4);
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment4.ts"));
+ builder.ExpectSegment(HasByteRange, absl::nullopt);
+ builder.ExpectSegment(HasBitRate, 15000);
+
+ // The EXT-X-BITRATE tag is allowed to appear twice
+ builder.AppendLine("#EXT-X-BITRATE:20");
+ builder.AppendLine("#EXT-X-BITRATE:21");
+ builder.AppendLine("#EXTINF:9.2,");
+ builder.AppendLine("segment5.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, 5);
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment5.ts"));
+ builder.ExpectSegment(HasBitRate, 21000);
+
+ // A value of 0 is tolerated
+ builder.AppendLine("#EXT-X-BITRATE:0");
+ builder.AppendLine("#EXTINF:9.2,");
+ builder.AppendLine("segment6.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, 6);
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment6.ts"));
+ builder.ExpectSegment(HasBitRate, 0);
+
+ // Large values should saturate to `DecimalInteger::max`
+ builder.AppendLine("#EXT-X-BITRATE:18446744073709551");
+ builder.AppendLine("#EXTINF:9.2,");
+ builder.AppendLine("segment7.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, 7);
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment7.ts"));
+ builder.ExpectSegment(HasBitRate, 18446744073709551000u);
+
+ builder.AppendLine("#EXT-X-BITRATE:18446744073709552");
+ builder.AppendLine("#EXTINF:9.2,");
+ builder.AppendLine("segment8.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, 8);
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment8.ts"));
+ builder.ExpectSegment(HasBitRate,
+ std::numeric_limits<types::DecimalInteger>::max());
+
+ builder.AppendLine("#EXT-X-BITRATE:18446744073709551615");
+ builder.AppendLine("#EXTINF:9.2,");
+ builder.AppendLine("segment9.ts");
+ builder.ExpectAdditionalSegment();
+ builder.ExpectSegment(HasMediaSequenceNumber, 9);
+ builder.ExpectSegment(HasUri, GURL("http://localhost/segment9.ts"));
+ builder.ExpectSegment(HasBitRate,
+ std::numeric_limits<types::DecimalInteger>::max());
+
+ builder.ExpectOk();
+}
+
+TEST(HlsMediaPlaylistTest, XPartInfTag) {
+ MediaPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-TARGETDURATION:10");
+
+ // EXT-X-PART-INF tag must be well-formed
+ for (base::StringPiece x : {"", ":", ":TARGET=1", ":PART-TARGET=two"}) {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-PART-INF", x);
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-PART-INF:PART-TARGET=0");
+ fork.ExpectPlaylist(HasPartialSegmentInfo,
+ MediaPlaylist::PartialSegmentInfo{.target_duration = 0});
+ fork.ExpectOk();
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-PART-INF:PART-TARGET=1");
+ fork.ExpectPlaylist(HasPartialSegmentInfo,
+ MediaPlaylist::PartialSegmentInfo{.target_duration = 1});
+ fork.ExpectOk();
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-PART-INF:PART-TARGET=1.2");
+ fork.ExpectPlaylist(HasPartialSegmentInfo, MediaPlaylist::PartialSegmentInfo{
+ .target_duration = 1.2});
+ fork.ExpectOk();
+
+ fork = builder;
+ fork.AppendLine("#EXT-X-PART-INF:PART-TARGET=99.99");
+ fork.ExpectPlaylist(HasPartialSegmentInfo, MediaPlaylist::PartialSegmentInfo{
+ .target_duration = 99.99});
+ fork.ExpectOk();
+
+ // The EXT-X-PART-INF tag may not appear twice
+ fork.AppendLine("#EXT-X-PART-INF:PART-TARGET=10");
+ fork.ExpectError(ParseStatusCode::kPlaylistHasDuplicateTags);
+}
+
} // namespace media::hls
diff --git a/chromium/media/formats/hls/media_segment.cc b/chromium/media/formats/hls/media_segment.cc
index 7f278402e90..943b999cb17 100644
--- a/chromium/media/formats/hls/media_segment.cc
+++ b/chromium/media/formats/hls/media_segment.cc
@@ -10,11 +10,19 @@
namespace media::hls {
MediaSegment::MediaSegment(types::DecimalFloatingPoint duration,
+ types::DecimalInteger media_sequence_number,
+ types::DecimalInteger discontinuity_sequence_number,
GURL uri,
+ absl::optional<types::ByteRange> byte_range,
+ absl::optional<types::DecimalInteger> bitrate,
bool has_discontinuity,
bool is_gap)
: duration_(duration),
+ media_sequence_number_(media_sequence_number),
+ discontinuity_sequence_number_(discontinuity_sequence_number),
uri_(std::move(uri)),
+ byte_range_(byte_range),
+ bitrate_(bitrate),
has_discontinuity_(has_discontinuity),
is_gap_(is_gap) {}
MediaSegment::~MediaSegment() = default;
diff --git a/chromium/media/formats/hls/media_segment.h b/chromium/media/formats/hls/media_segment.h
index 020b8551fc8..f3a8b33cbb9 100644
--- a/chromium/media/formats/hls/media_segment.h
+++ b/chromium/media/formats/hls/media_segment.h
@@ -14,7 +14,11 @@ namespace media::hls {
class MEDIA_EXPORT MediaSegment {
public:
MediaSegment(types::DecimalFloatingPoint duration,
+ types::DecimalInteger media_sequence_number,
+ types::DecimalInteger discontinuity_sequence_number,
GURL uri,
+ absl::optional<types::ByteRange> byte_range,
+ absl::optional<types::DecimalInteger> bitrate,
bool has_discontinuity,
bool is_gap);
~MediaSegment();
@@ -26,11 +30,25 @@ class MEDIA_EXPORT MediaSegment {
// The approximate duration of this media segment in seconds.
types::DecimalFloatingPoint GetDuration() const { return duration_; }
+ // Returns the media sequence number of this media segment.
+ types::DecimalInteger GetMediaSequenceNumber() const {
+ return media_sequence_number_;
+ }
+
+ // Returns the discontinuity sequence number of this media segment.
+ types::DecimalInteger GetDiscontinuitySequenceNumber() const {
+ return discontinuity_sequence_number_;
+ }
+
// The URI of the media resource. This will have already been resolved against
// the playlist URI. This is guaranteed to be valid and non-empty, unless
// `gap` is true, in which case this URI should not be used.
const GURL& GetUri() const { return uri_; }
+ // If this media segment is a subrange of its resource, this indicates the
+ // range.
+ absl::optional<types::ByteRange> GetByteRange() const { return byte_range_; }
+
// Whether there is a decoding discontinuity between the previous media
// segment and this one.
bool HasDiscontinuity() const { return has_discontinuity_; }
@@ -39,9 +57,17 @@ class MEDIA_EXPORT MediaSegment {
// absent and the client should not attempt to fetch it.
bool IsGap() const { return is_gap_; }
+ // Returns the approximate bitrate of this segment (+-10%), expressed in
+ // bits-per-second.
+ absl::optional<types::DecimalInteger> GetBitRate() const { return bitrate_; }
+
private:
types::DecimalFloatingPoint duration_;
+ types::DecimalInteger media_sequence_number_;
+ types::DecimalInteger discontinuity_sequence_number_;
GURL uri_;
+ absl::optional<types::ByteRange> byte_range_;
+ absl::optional<types::DecimalInteger> bitrate_;
bool has_discontinuity_;
bool is_gap_;
};
diff --git a/chromium/media/formats/hls/multivariant_playlist.cc b/chromium/media/formats/hls/multivariant_playlist.cc
new file mode 100644
index 00000000000..c3269934a7e
--- /dev/null
+++ b/chromium/media/formats/hls/multivariant_playlist.cc
@@ -0,0 +1,175 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/hls/multivariant_playlist.h"
+
+#include <utility>
+#include <vector>
+
+#include "base/check.h"
+#include "base/notreached.h"
+#include "base/strings/string_piece.h"
+#include "media/formats/hls/items.h"
+#include "media/formats/hls/parse_status.h"
+#include "media/formats/hls/playlist_common.h"
+#include "media/formats/hls/source_string.h"
+#include "media/formats/hls/tags.h"
+#include "media/formats/hls/types.h"
+#include "media/formats/hls/variable_dictionary.h"
+#include "media/formats/hls/variant_stream.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#include "third_party/abseil-cpp/absl/types/variant.h"
+#include "url/gurl.h"
+
+namespace media::hls {
+
+MultivariantPlaylist::MultivariantPlaylist(MultivariantPlaylist&&) = default;
+
+MultivariantPlaylist& MultivariantPlaylist::operator=(MultivariantPlaylist&&) =
+ default;
+
+MultivariantPlaylist::~MultivariantPlaylist() = default;
+
+ParseStatus::Or<MultivariantPlaylist> MultivariantPlaylist::Parse(
+ base::StringPiece source,
+ GURL uri) {
+ if (!uri.is_valid()) {
+ return ParseStatusCode::kInvalidUri;
+ }
+
+ SourceLineIterator src_iter{source};
+
+ // Parse the first line of the playlist. This must be an M3U tag.
+ {
+ auto m3u_tag_result = CheckM3uTag(&src_iter);
+ if (m3u_tag_result.has_error()) {
+ return std::move(m3u_tag_result).error();
+ }
+ }
+
+ CommonParserState common_state;
+ VariableDictionary::SubstitutionBuffer sub_buffer;
+ absl::optional<XStreamInfTag> inf_tag;
+ std::vector<VariantStream> variants;
+
+ // Get variants out of the playlist
+ while (true) {
+ auto item_result = GetNextLineItem(&src_iter);
+ if (item_result.has_error()) {
+ auto error = std::move(item_result).error();
+
+ // Only tolerated error is EOF
+ if (error.code() == ParseStatusCode::kReachedEOF) {
+ break;
+ }
+
+ return std::move(error);
+ }
+
+ auto item = std::move(item_result).value();
+
+ // Handle tags
+ if (auto* tag = absl::get_if<TagItem>(&item)) {
+ // The HLS spec requires that there may be no tags between the
+ // X-STREAM-INF tag and its URI.
+ if (inf_tag.has_value()) {
+ return ParseStatusCode::kXStreamInfTagNotFollowedByUri;
+ }
+
+ if (!tag->GetName().has_value()) {
+ HandleUnknownTag(*tag);
+ continue;
+ }
+
+ switch (GetTagKind(*tag->GetName())) {
+ case TagKind::kCommonTag: {
+ auto error = ParseCommonTag(*tag, &common_state);
+ if (error.has_value()) {
+ return std::move(error).value();
+ }
+ continue;
+ }
+ case TagKind::kMediaPlaylistTag:
+ return ParseStatusCode::kMultivariantPlaylistHasMediaPlaylistTag;
+ case TagKind::kMultivariantPlaylistTag:
+ // Handled below
+ break;
+ }
+
+ switch (static_cast<MultivariantPlaylistTagName>(*tag->GetName())) {
+ case MultivariantPlaylistTagName::kXMedia:
+ // TODO(crbug.com/1266991): Implement the EXT-X-MEDIA tag
+ break;
+ case MultivariantPlaylistTagName::kXStreamInf: {
+ auto error = ParseUniqueTag(*tag, inf_tag, common_state.variable_dict,
+ sub_buffer);
+ if (error.has_value()) {
+ return std::move(error).value();
+ }
+ break;
+ }
+ case MultivariantPlaylistTagName::kXIFrameStreamInf:
+ // TODO(crbug.com/1266991): Implement the EXT-X-I-FRAME-STREAM-INF tag
+ break;
+ case MultivariantPlaylistTagName::kXSessionData:
+ // TODO(crbug.com/1266991): Implement the EXT-X-SESSION-DATA tag
+ break;
+ case MultivariantPlaylistTagName::kXSessionKey:
+ // TODO(crbug.com/1266991): Implement the EXT-X-SESSION-KEY tag
+ break;
+ case MultivariantPlaylistTagName::kXContentSteering:
+ // TODO(crbug.com/1266991): Implement the EXT-X-CONTENT-STEERING tag
+ break;
+ }
+
+ continue;
+ }
+
+ // Handle URIs
+ // `GetNextLineItem` should return either a TagItem (handled above) or a
+ // UriItem.
+ static_assert(absl::variant_size<GetNextLineItemResult>() == 2);
+ auto variant_uri_result = ParseUri(absl::get<UriItem>(std::move(item)), uri,
+ common_state, sub_buffer);
+ if (variant_uri_result.has_error()) {
+ return std::move(variant_uri_result).error();
+ }
+ auto variant_uri = std::move(variant_uri_result).value();
+
+ // For this to be a valid variant, we must have previously parsed an
+ // X-STREAM-INF tag.
+ if (!inf_tag.has_value()) {
+ return ParseStatusCode::kVariantMissingStreamInfTag;
+ }
+
+ variants.emplace_back(std::move(variant_uri), inf_tag->bandwidth,
+ inf_tag->average_bandwidth, inf_tag->score,
+ std::move(inf_tag->codecs), inf_tag->resolution,
+ inf_tag->frame_rate);
+
+ // Reset per-variant tags
+ inf_tag.reset();
+ }
+
+ if (inf_tag.has_value()) {
+ return ParseStatusCode::kXStreamInfTagNotFollowedByUri;
+ }
+
+ return MultivariantPlaylist(std::move(uri), common_state.GetVersion(),
+ common_state.independent_segments_tag.has_value(),
+ std::move(variants),
+ std::move(common_state.variable_dict));
+}
+
+MultivariantPlaylist::MultivariantPlaylist(
+ GURL uri,
+ types::DecimalInteger version,
+ bool independent_segments,
+ std::vector<VariantStream> variants,
+ VariableDictionary variable_dictionary)
+ : Playlist(std::move(uri), version, independent_segments),
+ variants_(std::move(variants)),
+ variable_dictionary_(std::move(variable_dictionary)) {}
+
+} // namespace media::hls
diff --git a/chromium/media/formats/hls/multivariant_playlist.h b/chromium/media/formats/hls/multivariant_playlist.h
new file mode 100644
index 00000000000..6411bab2b0a
--- /dev/null
+++ b/chromium/media/formats/hls/multivariant_playlist.h
@@ -0,0 +1,60 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_HLS_MULTIVARIANT_PLAYLIST_H_
+#define MEDIA_FORMATS_HLS_MULTIVARIANT_PLAYLIST_H_
+
+#include <vector>
+
+#include "base/strings/string_piece.h"
+#include "media/base/media_export.h"
+#include "media/formats/hls/parse_status.h"
+#include "media/formats/hls/playlist.h"
+#include "media/formats/hls/types.h"
+#include "media/formats/hls/variable_dictionary.h"
+#include "url/gurl.h"
+
+namespace media::hls {
+
+class VariantStream;
+
+class MEDIA_EXPORT MultivariantPlaylist final : public Playlist {
+ public:
+ MultivariantPlaylist(const MultivariantPlaylist&) = delete;
+ MultivariantPlaylist(MultivariantPlaylist&&);
+ MultivariantPlaylist& operator=(const MultivariantPlaylist&) = delete;
+ MultivariantPlaylist& operator=(MultivariantPlaylist&&);
+ ~MultivariantPlaylist();
+
+ // Returns all variants described by this playlist.
+ const std::vector<VariantStream>& GetVariants() const { return variants_; }
+
+ // Returns the dictionary of variables defined by this playlist.
+ const VariableDictionary& GetVariableDictionary() const {
+ return variable_dictionary_;
+ }
+
+ // Attempts to parse the multivariant playlist represented by `source`. `uri`
+ // must be a valid, non-empty GURL referring to the URI of this playlist. If
+ // the playlist source is invalid, returns an error. If the playlist source
+ // contains tags specific to media playlists, the error code will be
+ // `kMultivariantPlaylistHasMediaPlaylistTag`, at which point the caller may
+ // choose to parse it as a media playlist instead.
+ static ParseStatus::Or<MultivariantPlaylist> Parse(base::StringPiece source,
+ GURL uri);
+
+ private:
+ MultivariantPlaylist(GURL uri,
+ types::DecimalInteger version,
+ bool independent_segments,
+ std::vector<VariantStream> variants,
+ VariableDictionary variable_dictionary);
+
+ std::vector<VariantStream> variants_;
+ VariableDictionary variable_dictionary_;
+};
+
+} // namespace media::hls
+
+#endif // MEDIA_FORMATS_HLS_MULTIVARIANT_PLAYLIST_H_
diff --git a/chromium/media/formats/hls/multivariant_playlist_fuzzer.cc b/chromium/media/formats/hls/multivariant_playlist_fuzzer.cc
new file mode 100644
index 00000000000..b22286a638b
--- /dev/null
+++ b/chromium/media/formats/hls/multivariant_playlist_fuzzer.cc
@@ -0,0 +1,33 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstddef>
+#include <cstdint>
+
+#include "base/at_exit.h"
+#include "base/check.h"
+#include "base/i18n/icu_util.h"
+#include "base/strings/string_piece.h"
+#include "media/formats/hls/multivariant_playlist.h"
+#include "third_party/abseil-cpp/absl/types/variant.h"
+#include "url/gurl.h"
+
+struct IcuEnvironment {
+ IcuEnvironment() { CHECK(base::i18n::InitializeICU()); }
+ // used by ICU integration.
+ base::AtExitManager at_exit_manager;
+};
+
+IcuEnvironment* env = new IcuEnvironment();
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ // Create a StringPiece from the given input
+ const base::StringPiece source(reinterpret_cast<const char*>(data), size);
+
+ // Try to parse it as a multivariant playlist
+ media::hls::MultivariantPlaylist::Parse(
+ source, GURL("http://localhost/playlist.m3u8"));
+
+ return 0;
+}
diff --git a/chromium/media/formats/hls/multivariant_playlist_test_builder.cc b/chromium/media/formats/hls/multivariant_playlist_test_builder.cc
new file mode 100644
index 00000000000..34d6b40f53c
--- /dev/null
+++ b/chromium/media/formats/hls/multivariant_playlist_test_builder.cc
@@ -0,0 +1,64 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/hls/multivariant_playlist_test_builder.h"
+
+#include "base/callback.h"
+#include "base/location.h"
+#include "media/formats/hls/multivariant_playlist.h"
+#include "media/formats/hls/variant_stream.h"
+
+namespace media::hls {
+
+MultivariantPlaylistTestBuilder::MultivariantPlaylistTestBuilder() = default;
+
+MultivariantPlaylistTestBuilder::~MultivariantPlaylistTestBuilder() = default;
+
+MultivariantPlaylistTestBuilder::MultivariantPlaylistTestBuilder(
+ const MultivariantPlaylistTestBuilder&) = default;
+
+MultivariantPlaylistTestBuilder::MultivariantPlaylistTestBuilder(
+ MultivariantPlaylistTestBuilder&&) = default;
+
+MultivariantPlaylistTestBuilder& MultivariantPlaylistTestBuilder::operator=(
+ const MultivariantPlaylistTestBuilder&) = default;
+
+MultivariantPlaylistTestBuilder& MultivariantPlaylistTestBuilder::operator=(
+ MultivariantPlaylistTestBuilder&&) = default;
+
+MultivariantPlaylistTestBuilder::VariantExpectations::VariantExpectations() =
+ default;
+
+MultivariantPlaylistTestBuilder::VariantExpectations::~VariantExpectations() =
+ default;
+
+MultivariantPlaylistTestBuilder::VariantExpectations::VariantExpectations(
+ const VariantExpectations&) = default;
+
+MultivariantPlaylistTestBuilder::VariantExpectations::VariantExpectations(
+ VariantExpectations&&) = default;
+
+MultivariantPlaylistTestBuilder::VariantExpectations&
+MultivariantPlaylistTestBuilder::VariantExpectations::operator=(
+ const VariantExpectations&) = default;
+
+MultivariantPlaylistTestBuilder::VariantExpectations&
+MultivariantPlaylistTestBuilder::VariantExpectations::operator=(
+ VariantExpectations&&) = default;
+
+void MultivariantPlaylistTestBuilder::VerifyExpectations(
+ const MultivariantPlaylist& playlist,
+ const base::Location& from) const {
+ ASSERT_EQ(variant_expectations_.size(), playlist.GetVariants().size())
+ << from.ToString();
+ for (size_t i = 0; i < variant_expectations_.size(); ++i) {
+ const auto& variant = playlist.GetVariants().at(i);
+ const auto& expectations = variant_expectations_.at(i);
+ for (const auto& expectation : expectations.expectations) {
+ expectation.Run(variant);
+ }
+ }
+}
+
+} // namespace media::hls
diff --git a/chromium/media/formats/hls/multivariant_playlist_test_builder.h b/chromium/media/formats/hls/multivariant_playlist_test_builder.h
new file mode 100644
index 00000000000..915b20f97d9
--- /dev/null
+++ b/chromium/media/formats/hls/multivariant_playlist_test_builder.h
@@ -0,0 +1,146 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_HLS_MULTIVARIANT_PLAYLIST_TEST_BUILDER_H_
+#define MEDIA_FORMATS_HLS_MULTIVARIANT_PLAYLIST_TEST_BUILDER_H_
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/strings/string_piece.h"
+#include "media/formats/hls/multivariant_playlist.h"
+#include "media/formats/hls/playlist_test_builder.h"
+#include "media/formats/hls/types.h"
+#include "media/formats/hls/variant_stream.h"
+
+namespace media::hls {
+
+// Helper for building multivariant playlist test cases that allows writing
+// assertions next to the playlist lines they check, as well as "forking" test
+// cases via copying the builder.
+class MultivariantPlaylistTestBuilder
+ : public PlaylistTestBuilder<MultivariantPlaylist> {
+ public:
+ MultivariantPlaylistTestBuilder();
+ ~MultivariantPlaylistTestBuilder();
+ MultivariantPlaylistTestBuilder(const MultivariantPlaylistTestBuilder&);
+ MultivariantPlaylistTestBuilder(MultivariantPlaylistTestBuilder&&);
+ MultivariantPlaylistTestBuilder& operator=(
+ const MultivariantPlaylistTestBuilder&);
+ MultivariantPlaylistTestBuilder& operator=(MultivariantPlaylistTestBuilder&&);
+
+ // Increments the number of variants that are expected to be contained in the
+ // playlist.
+ void ExpectAdditionalVariant() { variant_expectations_.emplace_back(); }
+
+ // Adds a new expectation for the latest variant in the playlist, which will
+ // be checked during `ExpectOk`.
+ template <typename Fn, typename Arg>
+ void ExpectVariant(Fn fn,
+ Arg arg,
+ base::Location location = base::Location::Current()) {
+ variant_expectations_.back().expectations.push_back(base::BindRepeating(
+ std::move(fn), std::move(arg), std::move(location)));
+ }
+
+ void ExpectOk(const base::Location& from = base::Location::Current()) const {
+ PlaylistTestBuilder::ExpectOk(from);
+ }
+
+ void ExpectError(
+ ParseStatusCode code,
+ const base::Location& from = base::Location::Current()) const {
+ PlaylistTestBuilder::ExpectError(code, from);
+ }
+
+ private:
+ struct VariantExpectations {
+ VariantExpectations();
+ ~VariantExpectations();
+ VariantExpectations(const VariantExpectations&);
+ VariantExpectations(VariantExpectations&&);
+ VariantExpectations& operator=(const VariantExpectations&);
+ VariantExpectations& operator=(VariantExpectations&&);
+
+ std::vector<base::RepeatingCallback<void(const VariantStream&)>>
+ expectations;
+ };
+
+ void VerifyExpectations(const MultivariantPlaylist& playlist,
+ const base::Location& from) const override;
+
+ std::vector<VariantExpectations> variant_expectations_;
+};
+
+// Checks that the latest variant has the given primary rendition URI.
+inline void HasPrimaryRenditionUri(const GURL& uri,
+ const base::Location& from,
+ const VariantStream& variant) {
+ EXPECT_EQ(variant.GetPrimaryRenditionUri(), uri) << from.ToString();
+}
+
+// Checks the value of `GetBandwidth` on the latest variant against the given
+// value.
+inline void HasBandwidth(types::DecimalInteger bandwidth,
+ const base::Location& from,
+ const VariantStream& variant) {
+ EXPECT_EQ(variant.GetBandwidth(), bandwidth) << from.ToString();
+}
+
+// Checks the value of `GetAverageBandwidth` on the latest variant against the
+// given value.
+inline void HasAverageBandwidth(
+ absl::optional<types::DecimalInteger> average_bandwidth,
+ const base::Location& from,
+ const VariantStream& variant) {
+ EXPECT_EQ(variant.GetAverageBandwidth(), average_bandwidth)
+ << from.ToString();
+}
+
+// Checks the value of `GetScore` on the latest variant against the given value.
+inline void HasScore(absl::optional<types::DecimalFloatingPoint> score,
+ const base::Location& from,
+ const VariantStream& variant) {
+ EXPECT_EQ(variant.GetScore(), score) << from.ToString();
+}
+
+// Checks the value of `GetCodecs` on the latest variant against the given
+// value.
+inline void HasCodecs(absl::optional<base::StringPiece> codecs,
+ const base::Location& from,
+ const VariantStream& variant) {
+ EXPECT_EQ(variant.GetCodecs(), codecs) << from.ToString();
+}
+
+// Checks the value of `GetResolution` on the latest variant against the given
+// value.
+inline void HasResolution(absl::optional<types::DecimalResolution> resolution,
+ const base::Location& from,
+ const VariantStream& variant) {
+ ASSERT_EQ(variant.GetResolution().has_value(), resolution.has_value())
+ << from.ToString();
+ if (resolution.has_value()) {
+ EXPECT_EQ(variant.GetResolution()->width, resolution->width)
+ << from.ToString();
+ EXPECT_EQ(variant.GetResolution()->height, resolution->height)
+ << from.ToString();
+ }
+}
+
+// Checks the value of `GetFrameRate` on the latest variant against the given
+// value.
+inline void HasFrameRate(absl::optional<types::DecimalFloatingPoint> frame_rate,
+ const base::Location& from,
+ const VariantStream& variant) {
+ ASSERT_EQ(variant.GetFrameRate().has_value(), frame_rate.has_value())
+ << from.ToString();
+ if (frame_rate.has_value()) {
+ EXPECT_DOUBLE_EQ(variant.GetFrameRate().value(), frame_rate.value())
+ << from.ToString();
+ }
+}
+
+} // namespace media::hls
+
+#endif  // MEDIA_FORMATS_HLS_MULTIVARIANT_PLAYLIST_TEST_BUILDER_H_
diff --git a/chromium/media/formats/hls/multivariant_playlist_unittest.cc b/chromium/media/formats/hls/multivariant_playlist_unittest.cc
new file mode 100644
index 00000000000..71b32122b2f
--- /dev/null
+++ b/chromium/media/formats/hls/multivariant_playlist_unittest.cc
@@ -0,0 +1,280 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/hls/multivariant_playlist.h"
+
+#include "media/formats/hls/multivariant_playlist_test_builder.h"
+#include "media/formats/hls/parse_status.h"
+#include "media/formats/hls/variant_stream.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+namespace media::hls {
+
+TEST(HlsMultivariantPlaylistTest, XStreamInfTag) {
+ MultivariantPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+
+ // 'BANDWIDTH' attribute is required
+ builder.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=100");
+ builder.AppendLine("playlist1.m3u8");
+ builder.ExpectAdditionalVariant();
+ builder.ExpectVariant(HasPrimaryRenditionUri,
+ GURL("http://localhost/playlist1.m3u8"));
+ builder.ExpectVariant(HasBandwidth, 100);
+ builder.ExpectVariant(HasAverageBandwidth, absl::nullopt);
+ builder.ExpectVariant(HasScore, absl::nullopt);
+ builder.ExpectVariant(HasCodecs, absl::nullopt);
+ builder.ExpectVariant(HasResolution, absl::nullopt);
+ builder.ExpectVariant(HasFrameRate, absl::nullopt);
+ builder.ExpectOk();
+
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-STREAM-INF:AVERAGE-BANDWIDTH=101");
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+
+ // EXT-X-STREAM-INF tags that are not immediately followed by URIs are
+ // invalid.
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=101");
+ fork.ExpectError(ParseStatusCode::kXStreamInfTagNotFollowedByUri);
+ }
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=101");
+ fork.AppendLine("#EXTM3U");
+ fork.AppendLine("playlist2.m3u8");
+ fork.ExpectError(ParseStatusCode::kXStreamInfTagNotFollowedByUri);
+ }
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=102");
+ fork.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=103");
+ fork.AppendLine("playlist3.m3u8");
+ fork.ExpectError(ParseStatusCode::kXStreamInfTagNotFollowedByUri);
+ }
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=104");
+ fork.AppendLine("#EXT-X-FOO-BAR");
+ fork.AppendLine("playlist4.m3u8");
+ fork.ExpectError(ParseStatusCode::kXStreamInfTagNotFollowedByUri);
+ }
+
+ // Blank lines are tolerated
+ builder.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=105");
+ builder.AppendLine("");
+ builder.AppendLine("playlist4.m3u8");
+ builder.ExpectAdditionalVariant();
+ builder.ExpectVariant(HasPrimaryRenditionUri,
+ GURL("http://localhost/playlist4.m3u8"));
+ builder.ExpectVariant(HasBandwidth, 105);
+ builder.ExpectVariant(HasAverageBandwidth, absl::nullopt);
+ builder.ExpectVariant(HasScore, absl::nullopt);
+ builder.ExpectVariant(HasCodecs, absl::nullopt);
+ builder.ExpectVariant(HasResolution, absl::nullopt);
+ builder.ExpectVariant(HasFrameRate, absl::nullopt);
+ builder.ExpectOk();
+
+ // URIs without corresponding EXT-X-STREAM-INF tags are not allowed
+ {
+ auto fork = builder;
+ fork.AppendLine("playlist5.m3u8");
+ fork.ExpectError(ParseStatusCode::kVariantMissingStreamInfTag);
+ }
+
+ // Check the value of the 'AVERAGE-BANDWIDTH' attribute
+ builder.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=106,AVERAGE-BANDWIDTH=105");
+ builder.AppendLine("playlist5.m3u8");
+ builder.ExpectAdditionalVariant();
+ builder.ExpectVariant(HasPrimaryRenditionUri,
+ GURL("http://localhost/playlist5.m3u8"));
+ builder.ExpectVariant(HasBandwidth, 106u);
+ builder.ExpectVariant(HasAverageBandwidth, 105u);
+ builder.ExpectVariant(HasScore, absl::nullopt);
+ builder.ExpectVariant(HasCodecs, absl::nullopt);
+ builder.ExpectVariant(HasResolution, absl::nullopt);
+ builder.ExpectVariant(HasFrameRate, absl::nullopt);
+ builder.ExpectOk();
+
+ // Check the value of the 'SCORE' attribute
+ builder.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=107,SCORE=10.5");
+ builder.AppendLine("playlist6.m3u8");
+ builder.ExpectAdditionalVariant();
+ builder.ExpectVariant(HasPrimaryRenditionUri,
+ GURL("http://localhost/playlist6.m3u8"));
+ builder.ExpectVariant(HasBandwidth, 107u);
+ builder.ExpectVariant(HasAverageBandwidth, absl::nullopt);
+ builder.ExpectVariant(HasScore, 10.5);
+ builder.ExpectVariant(HasCodecs, absl::nullopt);
+ builder.ExpectVariant(HasResolution, absl::nullopt);
+ builder.ExpectVariant(HasFrameRate, absl::nullopt);
+ builder.ExpectOk();
+
+ // Check the value of the 'CODECS' attribute
+ builder.AppendLine(R"(#EXT-X-STREAM-INF:BANDWIDTH=108,CODECS="foo,bar")");
+ builder.AppendLine("playlist7.m3u8");
+ builder.ExpectAdditionalVariant();
+ builder.ExpectVariant(HasPrimaryRenditionUri,
+ GURL("http://localhost/playlist7.m3u8"));
+ builder.ExpectVariant(HasBandwidth, 108u);
+ builder.ExpectVariant(HasAverageBandwidth, absl::nullopt);
+ builder.ExpectVariant(HasScore, absl::nullopt);
+ builder.ExpectVariant(HasCodecs, "foo,bar");
+ builder.ExpectVariant(HasResolution, absl::nullopt);
+ builder.ExpectVariant(HasFrameRate, absl::nullopt);
+ builder.ExpectOk();
+
+ // Check the value of the 'RESOLUTION' attribute
+ builder.AppendLine(R"(#EXT-X-STREAM-INF:BANDWIDTH=109,RESOLUTION=1920x1080)");
+ builder.AppendLine("playlist8.m3u8");
+ builder.ExpectAdditionalVariant();
+ builder.ExpectVariant(HasPrimaryRenditionUri,
+ GURL("http://localhost/playlist8.m3u8"));
+ builder.ExpectVariant(HasBandwidth, 109u);
+ builder.ExpectVariant(HasAverageBandwidth, absl::nullopt);
+ builder.ExpectVariant(HasScore, absl::nullopt);
+ builder.ExpectVariant(HasCodecs, absl::nullopt);
+ builder.ExpectVariant(
+ HasResolution, types::DecimalResolution{.width = 1920, .height = 1080});
+ builder.ExpectVariant(HasFrameRate, absl::nullopt);
+ builder.ExpectOk();
+
+ // Check the value of the 'FRAME-RATE' attribute
+ builder.AppendLine(R"(#EXT-X-STREAM-INF:BANDWIDTH=110,FRAME-RATE=59.94)");
+ builder.AppendLine("playlist9.m3u8");
+ builder.ExpectAdditionalVariant();
+ builder.ExpectVariant(HasPrimaryRenditionUri,
+ GURL("http://localhost/playlist9.m3u8"));
+ builder.ExpectVariant(HasBandwidth, 110u);
+ builder.ExpectVariant(HasAverageBandwidth, absl::nullopt);
+ builder.ExpectVariant(HasScore, absl::nullopt);
+ builder.ExpectVariant(HasCodecs, absl::nullopt);
+ builder.ExpectVariant(HasResolution, absl::nullopt);
+ builder.ExpectVariant(HasFrameRate, 59.94);
+ builder.ExpectOk();
+}
+
+// This test is similar to the `HlsMediaPlaylistTest` test of the same name, but
+// due to subtle differences between media playlists and multivariant playlists
+// it's difficult to combine them. If new cases are added here that are also
+// relevant to media playlists, they should be added to that test as well.
+TEST(HlsMultivariantPlaylistTest, VariableSubstitution) {
+ MultivariantPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+ builder.AppendLine("#EXT-X-VERSION:8");
+ builder.ExpectPlaylist(HasVersion, 8);
+
+ builder.AppendLine(
+ R"(#EXT-X-DEFINE:NAME="HOST",VALUE="http://www.example.com")");
+ builder.AppendLine(
+ R"(#EXT-X-DEFINE:NAME="CODECS",VALUE="mp4a.40.2,avc1.4d401e")");
+
+ // Valid variable references within URIs or quoted-string values may be
+ // substituted
+ builder.AppendLine(R"(#EXT-X-STREAM-INF:BANDWIDTH=100,CODECS="{$CODECS}")");
+ builder.AppendLine("{$HOST}/playlist1.m3u8");
+ builder.ExpectAdditionalVariant();
+ builder.ExpectVariant(HasPrimaryRenditionUri,
+ GURL("http://www.example.com/playlist1.m3u8"));
+ builder.ExpectVariant(HasCodecs, "mp4a.40.2,avc1.4d401e");
+
+ // Invalid variable references should result in an error
+ {
+ auto fork = builder;
+ fork.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=101");
+ fork.AppendLine("{$HOST}/{$movie}/playlist2.m3u8");
+ fork.ExpectError(ParseStatusCode::kVariableUndefined);
+ }
+ {
+ auto fork = builder;
+ fork.AppendLine(R"(#EXT-X-STREAM-INF:BANDWIDTH=101,CODECS="{$CODEX}")");
+ fork.AppendLine("{$HOST}/playlist2.m3u8");
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+
+ // Variable references outside of valid substitution points should not be
+ // substituted
+ {
+ auto fork = builder;
+ fork.AppendLine(R"(#EXT-X-DEFINE:NAME="BW",VALUE="102")");
+ fork.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH={$BW}");
+ fork.AppendLine("playlist3.m3u8");
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+ {
+ auto fork = builder;
+ fork.AppendLine(R"(#EXT-X-DEFINE:NAME="AVG-BW",VALUE="102")");
+ fork.AppendLine(
+ "#EXT-X-STREAM-INF:BANDWIDTH=100,AVERAGE-BANDWIDTH={$AVG-BW}");
+ fork.AppendLine("playlist3.m3u8");
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+ {
+ auto fork = builder;
+ fork.AppendLine(R"(#EXT-X-DEFINE:NAME="SCORE",VALUE="10")");
+ fork.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=100,SCORE={$SCORE}");
+ fork.AppendLine("playlist3.m3u8");
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+ {
+ auto fork = builder;
+ fork.AppendLine(R"(#EXT-X-DEFINE:NAME="RES",VALUE="1920x1080")");
+ fork.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=100,RESOLUTION={$RES}");
+ fork.AppendLine("playlist3.m3u8");
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+ {
+ auto fork = builder;
+ fork.AppendLine(R"(#EXT-X-DEFINE:NAME="FR",VALUE="30")");
+ fork.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=100,FRAME-RATE={$FR}");
+ fork.AppendLine("playlist3.m3u8");
+ fork.ExpectError(ParseStatusCode::kMalformedTag);
+ }
+
+ // Redefinition is not allowed
+ {
+ auto fork = builder;
+ fork.AppendLine(
+ R"(#EXT-X-DEFINE:NAME="HOST",VALUE="https://www.google.com")");
+ fork.ExpectError(ParseStatusCode::kVariableDefinedMultipleTimes);
+ }
+
+ // Importing in a parentless playlist is not allowed
+ {
+ auto fork = builder;
+ fork.AppendLine(R"(#EXT-X-DEFINE:IMPORT="IMPORTED")");
+ fork.ExpectError(ParseStatusCode::kImportedVariableInParentlessPlaylist);
+ }
+
+ // Variables are not resolved recursively
+ builder.AppendLine(R"(#EXT-X-DEFINE:NAME="BAR",VALUE="BAZ")");
+ builder.AppendLine(R"(#EXT-X-DEFINE:NAME="FOO",VALUE="{$BAR}")");
+ builder.AppendLine("#EXT-X-STREAM-INF:BANDWIDTH=101");
+ builder.AppendLine("http://{$FOO}.com/playlist4.m3u8");
+ builder.ExpectAdditionalVariant();
+ builder.ExpectVariant(HasPrimaryRenditionUri,
+ GURL("http://{$BAR}.com/playlist4.m3u8"));
+
+ builder.ExpectOk();
+}
+
+TEST(HlsMultivariantPlaylistTest, MediaPlaylistTag) {
+ MultivariantPlaylistTestBuilder builder;
+ builder.AppendLine("#EXTM3U");
+
+ // Multivariant playlists may not contain tags exclusive to media playlists
+ for (TagName name = ToTagName(MediaPlaylistTagName::kMinValue);
+ name <= ToTagName(MediaPlaylistTagName::kMaxValue); ++name) {
+ auto tag_line = "#" + std::string{TagNameToString(name)};
+ auto fork = builder;
+ fork.AppendLine(tag_line);
+ fork.ExpectError(ParseStatusCode::kMultivariantPlaylistHasMediaPlaylistTag);
+ }
+}
+
+} // namespace media::hls
diff --git a/chromium/media/formats/hls/parse_status.cc b/chromium/media/formats/hls/parse_status.cc
index b5888dc76c0..9c5ee9d8540 100644
--- a/chromium/media/formats/hls/parse_status.cc
+++ b/chromium/media/formats/hls/parse_status.cc
@@ -21,7 +21,9 @@ base::StringPiece ParseStatusCodeToString(ParseStatusCode code) {
PARSE_STATUS_CODE_CASE(kFailedToParseDecimalInteger);
PARSE_STATUS_CODE_CASE(kFailedToParseDecimalFloatingPoint);
PARSE_STATUS_CODE_CASE(kFailedToParseSignedDecimalFloatingPoint);
+ PARSE_STATUS_CODE_CASE(kFailedToParseDecimalResolution);
PARSE_STATUS_CODE_CASE(kFailedToParseQuotedString);
+ PARSE_STATUS_CODE_CASE(kFailedToParseByteRange);
PARSE_STATUS_CODE_CASE(kInvalidPlaylistVersion);
PARSE_STATUS_CODE_CASE(kUnknownPlaylistType);
PARSE_STATUS_CODE_CASE(kMalformedAttributeList);
@@ -29,14 +31,24 @@ base::StringPiece ParseStatusCodeToString(ParseStatusCode code) {
PARSE_STATUS_CODE_CASE(kMalformedVariableName);
PARSE_STATUS_CODE_CASE(kInvalidUri);
PARSE_STATUS_CODE_CASE(kPlaylistMissingM3uTag);
+ PARSE_STATUS_CODE_CASE(kMediaPlaylistMissingTargetDuration);
PARSE_STATUS_CODE_CASE(kMediaSegmentMissingInfTag);
+ PARSE_STATUS_CODE_CASE(kMediaSegmentExceedsTargetDuration);
PARSE_STATUS_CODE_CASE(kPlaylistHasDuplicateTags);
PARSE_STATUS_CODE_CASE(kPlaylistHasUnsupportedVersion);
PARSE_STATUS_CODE_CASE(kMediaPlaylistHasMultivariantPlaylistTag);
+ PARSE_STATUS_CODE_CASE(kMultivariantPlaylistHasMediaPlaylistTag);
PARSE_STATUS_CODE_CASE(kVariableUndefined);
PARSE_STATUS_CODE_CASE(kVariableDefinedMultipleTimes);
PARSE_STATUS_CODE_CASE(kImportedVariableInParentlessPlaylist);
PARSE_STATUS_CODE_CASE(kImportedVariableUndefined);
+ PARSE_STATUS_CODE_CASE(kXStreamInfTagNotFollowedByUri);
+ PARSE_STATUS_CODE_CASE(kVariantMissingStreamInfTag);
+ PARSE_STATUS_CODE_CASE(kMediaSegmentBeforeMediaSequenceTag);
+ PARSE_STATUS_CODE_CASE(kMediaSegmentBeforeDiscontinuitySequenceTag);
+ PARSE_STATUS_CODE_CASE(kDiscontinuityTagBeforeDiscontinuitySequenceTag);
+ PARSE_STATUS_CODE_CASE(kByteRangeRequiresOffset);
+ PARSE_STATUS_CODE_CASE(kByteRangeInvalid);
}
NOTREACHED();
diff --git a/chromium/media/formats/hls/parse_status.h b/chromium/media/formats/hls/parse_status.h
index 4ca05d199ea..a1fcc12167c 100644
--- a/chromium/media/formats/hls/parse_status.h
+++ b/chromium/media/formats/hls/parse_status.h
@@ -18,7 +18,9 @@ enum class ParseStatusCode : StatusCodeType {
kFailedToParseDecimalInteger,
kFailedToParseDecimalFloatingPoint,
kFailedToParseSignedDecimalFloatingPoint,
+ kFailedToParseDecimalResolution,
kFailedToParseQuotedString,
+ kFailedToParseByteRange,
kInvalidPlaylistVersion,
kUnknownPlaylistType,
kMalformedAttributeList,
@@ -26,14 +28,24 @@ enum class ParseStatusCode : StatusCodeType {
kMalformedVariableName,
kInvalidUri,
kPlaylistMissingM3uTag,
+ kMediaPlaylistMissingTargetDuration,
kMediaSegmentMissingInfTag,
+ kMediaSegmentExceedsTargetDuration,
kPlaylistHasDuplicateTags,
kPlaylistHasUnsupportedVersion,
kMediaPlaylistHasMultivariantPlaylistTag,
+ kMultivariantPlaylistHasMediaPlaylistTag,
kVariableUndefined,
kVariableDefinedMultipleTimes,
kImportedVariableInParentlessPlaylist,
kImportedVariableUndefined,
+ kXStreamInfTagNotFollowedByUri,
+ kVariantMissingStreamInfTag,
+ kMediaSegmentBeforeMediaSequenceTag,
+ kMediaSegmentBeforeDiscontinuitySequenceTag,
+ kDiscontinuityTagBeforeDiscontinuitySequenceTag,
+ kByteRangeRequiresOffset,
+ kByteRangeInvalid,
};
struct ParseStatusTraits {
diff --git a/chromium/media/formats/hls/playlist.h b/chromium/media/formats/hls/playlist.h
index 4222fad79d7..4854afc5071 100644
--- a/chromium/media/formats/hls/playlist.h
+++ b/chromium/media/formats/hls/playlist.h
@@ -13,6 +13,10 @@ namespace media::hls {
class MEDIA_EXPORT Playlist {
public:
+ // Unless explicitly specified via the `EXT-X-VERSION` tag, the default
+ // playlist version is `1`.
+ static constexpr types::DecimalInteger kDefaultVersion = 1;
+
Playlist(const Playlist&) = delete;
Playlist& operator=(const Playlist&) = delete;
diff --git a/chromium/media/formats/hls/playlist_common.cc b/chromium/media/formats/hls/playlist_common.cc
index 81d1bcd3772..c35c933c7dc 100644
--- a/chromium/media/formats/hls/playlist_common.cc
+++ b/chromium/media/formats/hls/playlist_common.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "media/formats/hls/playlist_common.h"
+
#include "base/notreached.h"
+#include "media/formats/hls/playlist.h"
namespace media::hls {
@@ -11,7 +13,7 @@ types::DecimalInteger CommonParserState::GetVersion() const {
if (version_tag.has_value()) {
return version_tag.value().version;
} else {
- return 1;
+ return Playlist::kDefaultVersion;
}
}
diff --git a/chromium/media/formats/hls/playlist_common.h b/chromium/media/formats/hls/playlist_common.h
index 5eb3c2a682c..a6965ff211a 100644
--- a/chromium/media/formats/hls/playlist_common.h
+++ b/chromium/media/formats/hls/playlist_common.h
@@ -5,6 +5,9 @@
#ifndef MEDIA_FORMATS_HLS_PLAYLIST_COMMON_H_
#define MEDIA_FORMATS_HLS_PLAYLIST_COMMON_H_
+#include <utility>
+
+#include "base/memory/raw_ptr.h"
#include "media/formats/hls/items.h"
#include "media/formats/hls/tag_name.h"
#include "media/formats/hls/tags.h"
@@ -26,7 +29,7 @@ struct CommonParserState {
// The dictionary of variables defined in the parent playlist. This may remain
// null if there is no parent playlist (in the case of a multivariant
// playlist, or a media playlist without other variants).
- VariableDictionary* parent_variable_dict = nullptr;
+ raw_ptr<const VariableDictionary> parent_variable_dict = nullptr;
// Returns the version specified by `version_tag`, or the default version if
// the playlist did not contain a version tag.
@@ -45,9 +48,10 @@ absl::optional<ParseStatus> ParseCommonTag(TagItem, CommonParserState* state);
// Attempts to parse a tag from the given item, ensuring it has not been
// already appeared in the playlist.
-template <typename T>
+template <typename T, typename... Args>
absl::optional<ParseStatus> ParseUniqueTag(TagItem tag,
- absl::optional<T>& out) {
+ absl::optional<T>& out,
+ Args&&... args) {
DCHECK(tag.GetName() == ToTagName(T::kName));
// Ensure this tag has not already appeared.
@@ -55,7 +59,7 @@ absl::optional<ParseStatus> ParseUniqueTag(TagItem tag,
return ParseStatusCode::kPlaylistHasDuplicateTags;
}
- auto tag_result = T::Parse(tag);
+ auto tag_result = T::Parse(tag, std::forward<Args>(args)...);
if (tag_result.has_error()) {
return std::move(tag_result).error();
}
diff --git a/chromium/media/formats/hls/playlist_test_builder.h b/chromium/media/formats/hls/playlist_test_builder.h
new file mode 100644
index 00000000000..2970d227cab
--- /dev/null
+++ b/chromium/media/formats/hls/playlist_test_builder.h
@@ -0,0 +1,124 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_HLS_PLAYLIST_TEST_BUILDER_H_
+#define MEDIA_FORMATS_HLS_PLAYLIST_TEST_BUILDER_H_
+
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "media/formats/hls/playlist.h"
+#include "media/formats/hls/source_string.h"
+#include "media/formats/hls/tags.h"
+#include "media/formats/hls/types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "url/gurl.h"
+
+namespace media::hls {
+
+// Base helper for building playlist test cases. This should be extended by a
+// playlist-type specific builder with additional methods for creating
+// assertions specific to that type of playlist, and parameterized by the type
+// of playlist.
+template <typename PlaylistT>
+class PlaylistTestBuilder {
+ public:
+ static_assert(std::is_base_of_v<Playlist, PlaylistT>);
+
+ // Sets the URI for the playlist being built.
+ void SetUri(GURL uri) { uri_ = std::move(uri); }
+
+ // Appends fragments of text to the playlist, without a trailing newline.
+ template <typename... T>
+ void Append(base::StringPiece text1, T&&... rem) {
+ for (auto text : {text1, base::StringPiece(rem)...}) {
+ source_.append(text.data(), text.size());
+ }
+ }
+
+ // Appends fragments of text to the playlist, followed by a newline.
+ template <typename... T>
+ void AppendLine(base::StringPiece part1, T&&... rem) {
+ this->Append(part1, std::forward<T>(rem)..., "\n");
+ }
+
+ // Adds a new expectation for the playlist, which will be checked during
+ // `ExpectOk`.
+ template <typename Fn, typename Arg>
+ void ExpectPlaylist(Fn fn,
+ Arg arg,
+ base::Location location = base::Location::Current()) {
+ playlist_expectations_.push_back(base::BindRepeating(
+ [](Fn fn, Arg arg, const base::Location& from,
+ const PlaylistT& playlist) { fn(arg, from, playlist); },
+ std::move(fn), std::move(arg), std::move(location)));
+ }
+
+ protected:
+ // Attempts to parse the playlist as-is, checking for the given
+ // error code.
+ template <typename... Args>
+ void ExpectError(ParseStatusCode code,
+ const base::Location& from,
+ Args&&... args) const {
+ auto result = PlaylistT::Parse(source_, uri_, std::forward<Args>(args)...);
+ ASSERT_TRUE(result.has_error()) << from.ToString();
+
+ auto actual_code = std::move(result).error().code();
+ EXPECT_EQ(actual_code, code)
+ << "Error: " << ParseStatusCodeToString(actual_code) << "\n"
+ << "Expected Error: " << ParseStatusCodeToString(code) << "\n"
+ << from.ToString();
+ }
+
+ // Attempts to parse the playlist as-is, checking all playlist and segment
+ // expectations.
+ template <typename... Args>
+ void ExpectOk(const base::Location& from, Args&&... args) const {
+ auto result = PlaylistT::Parse(source_, uri_, std::forward<Args>(args)...);
+ ASSERT_TRUE(result.has_value())
+ << "Error: "
+ << ParseStatusCodeToString(std::move(result).error().code()) << "\n"
+ << from.ToString();
+ auto playlist = std::move(result).value();
+
+ for (const auto& expectation : playlist_expectations_) {
+ expectation.Run(playlist);
+ }
+
+ this->VerifyExpectations(playlist, from);
+ }
+
+ private:
+ virtual void VerifyExpectations(const PlaylistT&,
+ const base::Location& from) const = 0;
+
+ std::vector<base::RepeatingCallback<void(const PlaylistT&)>>
+ playlist_expectations_;
+ GURL uri_ = GURL("http://localhost/playlist.m3u8");
+ std::string source_;
+};
+
+// Checks that the playlist has the given version.
+inline void HasVersion(types::DecimalInteger version,
+ const base::Location& from,
+ const Playlist& playlist) {
+ EXPECT_EQ(playlist.GetVersion(), version) << from.ToString();
+}
+
+// Checks the playlist's `AreSegmentsIndependent` property against the given
+// value.
+inline void HasIndependentSegments(bool value,
+ const base::Location& from,
+ const Playlist& playlist) {
+ EXPECT_EQ(playlist.AreSegmentsIndependent(), value) << from.ToString();
+}
+
+} // namespace media::hls
+
+#endif // MEDIA_FORMATS_HLS_PLAYLIST_TEST_BUILDER_H_
diff --git a/chromium/media/formats/hls/tag_name.cc b/chromium/media/formats/hls/tag_name.cc
index 2125d670808..5494b573f9b 100644
--- a/chromium/media/formats/hls/tag_name.cc
+++ b/chromium/media/formats/hls/tag_name.cc
@@ -50,11 +50,15 @@ constexpr std::pair<base::StringPiece, TagName> TagNameEntry(
// Mapping of tag names to their constants. This must remain sorted by the
// string value.
constexpr auto kTagNames = base::MakeFixedFlatMap({
+ TagNameEntry("EXT-X-BITRATE", MediaPlaylistTagName::kXBitrate),
+ TagNameEntry("EXT-X-BYTERANGE", MediaPlaylistTagName::kXByteRange),
TagNameEntry("EXT-X-CONTENT-STEERING",
MultivariantPlaylistTagName::kXContentSteering),
TagNameEntry("EXT-X-DEFINE", CommonTagName::kXDefine),
TagNameEntry("EXT-X-DISCONTINUITY", MediaPlaylistTagName::kXDiscontinuity),
- TagNameEntry("EXT-X-END-LIST", MediaPlaylistTagName::kXEndList),
+ TagNameEntry("EXT-X-DISCONTINUITY-SEQUENCE",
+ MediaPlaylistTagName::kXDiscontinuitySequence),
+ TagNameEntry("EXT-X-ENDLIST", MediaPlaylistTagName::kXEndList),
TagNameEntry("EXT-X-GAP", MediaPlaylistTagName::kXGap),
TagNameEntry("EXT-X-I-FRAME-STREAM-INF",
MultivariantPlaylistTagName::kXIFrameStreamInf),
@@ -62,12 +66,16 @@ constexpr auto kTagNames = base::MakeFixedFlatMap({
TagNameEntry("EXT-X-INDEPENDENT-SEGMENTS",
CommonTagName::kXIndependentSegments),
TagNameEntry("EXT-X-MEDIA", MultivariantPlaylistTagName::kXMedia),
+ TagNameEntry("EXT-X-MEDIA-SEQUENCE", MediaPlaylistTagName::kXMediaSequence),
+ TagNameEntry("EXT-X-PART-INF", MediaPlaylistTagName::kXPartInf),
TagNameEntry("EXT-X-PLAYLIST-TYPE", MediaPlaylistTagName::kXPlaylistType),
TagNameEntry("EXT-X-SESSION-DATA",
MultivariantPlaylistTagName::kXSessionData),
TagNameEntry("EXT-X-SESSION-KEY",
MultivariantPlaylistTagName::kXSessionKey),
TagNameEntry("EXT-X-STREAM-INF", MultivariantPlaylistTagName::kXStreamInf),
+ TagNameEntry("EXT-X-TARGETDURATION",
+ MediaPlaylistTagName::kXTargetDuration),
TagNameEntry("EXT-X-VERSION", CommonTagName::kXVersion),
TagNameEntry("EXTINF", MediaPlaylistTagName::kInf),
TagNameEntry("EXTM3U", CommonTagName::kM3u),
diff --git a/chromium/media/formats/hls/tag_name.h b/chromium/media/formats/hls/tag_name.h
index 8698e583cf0..df106282d91 100644
--- a/chromium/media/formats/hls/tag_name.h
+++ b/chromium/media/formats/hls/tag_name.h
@@ -51,12 +51,18 @@ enum class MultivariantPlaylistTagName : TagName {
enum class MediaPlaylistTagName : TagName {
kMinValue = static_cast<TagName>(MultivariantPlaylistTagName::kMaxValue) + 1,
kInf = kMinValue,
+ kXTargetDuration,
kXEndList,
kXIFramesOnly,
kXDiscontinuity,
kXGap,
kXPlaylistType,
- kMaxValue = kXPlaylistType,
+ kXPartInf,
+ kXMediaSequence,
+ kXDiscontinuitySequence,
+ kXByteRange,
+ kXBitrate,
+ kMaxValue = kXBitrate,
};
constexpr TagKind GetTagKind(CommonTagName) {
@@ -83,7 +89,7 @@ constexpr TagName ToTagName(MediaPlaylistTagName name) {
return static_cast<TagName>(name);
}
-TagKind MEDIA_EXPORT GetTagKind(TagName name);
+MEDIA_EXPORT TagKind GetTagKind(TagName name);
// No-op conversion function for generic code.
constexpr TagName ToTagName(TagName name) {
@@ -92,10 +98,10 @@ constexpr TagName ToTagName(TagName name) {
// Parses the tag name, converting it to one of the `*TagName` enum values.
// If the tag is not recognized, returns `absl::nullopt`.
-absl::optional<TagName> MEDIA_EXPORT ParseTagName(base::StringPiece name);
+MEDIA_EXPORT absl::optional<TagName> ParseTagName(base::StringPiece name);
// Prints the corresponding string representation of the given `TagName`.
-base::StringPiece MEDIA_EXPORT TagNameToString(TagName name);
+MEDIA_EXPORT base::StringPiece TagNameToString(TagName name);
constexpr TagName kMinTagName = ToTagName(CommonTagName::kMinValue);
constexpr TagName kMaxTagName = ToTagName(MediaPlaylistTagName::kMaxValue);
diff --git a/chromium/media/formats/hls/tags.cc b/chromium/media/formats/hls/tags.cc
index d6c21ca07b4..4d52b517c8b 100644
--- a/chromium/media/formats/hls/tags.cc
+++ b/chromium/media/formats/hls/tags.cc
@@ -26,6 +26,25 @@ ParseStatus::Or<T> ParseEmptyTag(TagItem tag) {
return T{};
}
+template <typename T>
+ParseStatus::Or<T> ParseDecimalIntegerTag(TagItem tag,
+ types::DecimalInteger T::*field) {
+ DCHECK(tag.GetName() == ToTagName(T::kName));
+ if (!tag.GetContent().has_value()) {
+ return ParseStatusCode::kMalformedTag;
+ }
+
+ auto value = types::ParseDecimalInteger(*tag.GetContent());
+ if (value.has_error()) {
+ return ParseStatus(ParseStatusCode::kMalformedTag)
+ .AddCause(std::move(value).error());
+ }
+
+ T out;
+ out.*field = std::move(value).value();
+ return out;
+}
+
// Attributes expected in `EXT-X-DEFINE` tag contents.
// These must remain sorted alphabetically.
enum class XDefineTagAttribute {
@@ -55,7 +74,9 @@ enum class XStreamInfTagAttribute {
kAverageBandwidth,
kBandwidth,
kCodecs,
+ kFrameRate,
kProgramId, // Ignored for backwards compatibility
+ kResolution,
kScore,
kMaxValue = kScore,
};
@@ -68,8 +89,12 @@ constexpr base::StringPiece GetAttributeName(XStreamInfTagAttribute attribute) {
return "BANDWIDTH";
case XStreamInfTagAttribute::kCodecs:
return "CODECS";
+ case XStreamInfTagAttribute::kFrameRate:
+ return "FRAME-RATE";
case XStreamInfTagAttribute::kProgramId:
return "PROGRAM-ID";
+ case XStreamInfTagAttribute::kResolution:
+ return "RESOLUTION";
case XStreamInfTagAttribute::kScore:
return "SCORE";
}
@@ -78,6 +103,23 @@ constexpr base::StringPiece GetAttributeName(XStreamInfTagAttribute attribute) {
return "";
}
+// Attributes expected in `EXT-X-PART-INF` tag contents.
+// These must remain sorted alphabetically.
+enum class XPartInfTagAttribute {
+ kPartTarget,
+ kMaxValue = kPartTarget,
+};
+
+constexpr base::StringPiece GetAttributeName(XPartInfTagAttribute attribute) {
+ switch (attribute) {
+ case XPartInfTagAttribute::kPartTarget:
+ return "PART-TARGET";
+ }
+
+ NOTREACHED();
+ return "";
+}
+
template <typename T, size_t kLast>
constexpr bool IsAttributeEnumSorted(std::index_sequence<kLast>) {
return true;
@@ -146,26 +188,19 @@ ParseStatus::Or<M3uTag> M3uTag::Parse(TagItem tag) {
}
ParseStatus::Or<XVersionTag> XVersionTag::Parse(TagItem tag) {
- DCHECK(tag.GetName() == ToTagName(XVersionTag::kName));
-
- if (!tag.GetContent().has_value()) {
- return ParseStatusCode::kMalformedTag;
- }
-
- auto value_result = types::ParseDecimalInteger(*tag.GetContent());
- if (value_result.has_error()) {
- return ParseStatus(ParseStatusCode::kMalformedTag)
- .AddCause(std::move(value_result).error());
+ auto result = ParseDecimalIntegerTag(tag, &XVersionTag::version);
+ if (result.has_error()) {
+ return std::move(result).error();
}
// Reject invalid version numbers.
// For valid version numbers, caller will decide if the version is supported.
- auto value = std::move(value_result).value();
- if (value == 0) {
+ auto out = std::move(result).value();
+ if (out.version == 0) {
return ParseStatusCode::kInvalidPlaylistVersion;
}
- return XVersionTag{.version = value};
+ return out;
}
ParseStatus::Or<InfTag> InfTag::Parse(TagItem tag) {
@@ -398,7 +433,95 @@ ParseStatus::Or<XStreamInfTag> XStreamInfTag::Parse(
out.codecs = std::string{std::move(codecs).value()};
}
+ // Extract the 'RESOLUTION' attribute
+ if (map.HasValue(XStreamInfTagAttribute::kResolution)) {
+ auto resolution = types::DecimalResolution::Parse(
+ map.GetValue(XStreamInfTagAttribute::kResolution));
+ if (resolution.has_error()) {
+ return ParseStatus(ParseStatusCode::kMalformedTag)
+ .AddCause(std::move(resolution).error());
+ }
+ out.resolution = std::move(resolution).value();
+ }
+
+ // Extract the 'FRAME-RATE' attribute
+ if (map.HasValue(XStreamInfTagAttribute::kFrameRate)) {
+ auto frame_rate = types::ParseDecimalFloatingPoint(
+ map.GetValue(XStreamInfTagAttribute::kFrameRate));
+ if (frame_rate.has_error()) {
+ return ParseStatus(ParseStatusCode::kMalformedTag)
+ .AddCause(std::move(frame_rate).error());
+ }
+ out.frame_rate = std::move(frame_rate).value();
+ }
+
+ return out;
+}
+
+ParseStatus::Or<XTargetDurationTag> XTargetDurationTag::Parse(TagItem tag) {
+ return ParseDecimalIntegerTag(tag, &XTargetDurationTag::duration);
+}
+
+ParseStatus::Or<XPartInfTag> XPartInfTag::Parse(TagItem tag) {
+ DCHECK(tag.GetName() == ToTagName(XPartInfTag::kName));
+ if (!tag.GetContent().has_value()) {
+ return ParseStatusCode::kMalformedTag;
+ }
+
+ // Parse the attribute-list
+ TypedAttributeMap<XPartInfTagAttribute> map;
+ types::AttributeListIterator iter(*tag.GetContent());
+ auto map_result = map.FillUntilError(&iter);
+
+ if (map_result.code() != ParseStatusCode::kReachedEOF) {
+ return ParseStatus(ParseStatusCode::kMalformedTag)
+ .AddCause(std::move(map_result));
+ }
+
+ XPartInfTag out;
+
+ // Extract the 'PART-TARGET' attribute
+ if (map.HasValue(XPartInfTagAttribute::kPartTarget)) {
+ auto target_duration = types::ParseDecimalFloatingPoint(
+ map.GetValue(XPartInfTagAttribute::kPartTarget));
+ if (target_duration.has_error()) {
+ return ParseStatus(ParseStatusCode::kMalformedTag)
+ .AddCause(std::move(target_duration).error());
+ }
+ out.target_duration = std::move(target_duration).value();
+ } else {
+ return ParseStatusCode::kMalformedTag;
+ }
+
return out;
}
+ParseStatus::Or<XMediaSequenceTag> XMediaSequenceTag::Parse(TagItem tag) {
+ return ParseDecimalIntegerTag(tag, &XMediaSequenceTag::number);
+}
+
+ParseStatus::Or<XDiscontinuitySequenceTag> XDiscontinuitySequenceTag::Parse(
+ TagItem tag) {
+ return ParseDecimalIntegerTag(tag, &XDiscontinuitySequenceTag::number);
+}
+
+ParseStatus::Or<XByteRangeTag> XByteRangeTag::Parse(TagItem tag) {
+ DCHECK(tag.GetName() == ToTagName(XByteRangeTag::kName));
+ if (!tag.GetContent().has_value()) {
+ return ParseStatusCode::kMalformedTag;
+ }
+
+ auto range = types::ByteRangeExpression::Parse(*tag.GetContent());
+ if (range.has_error()) {
+ return ParseStatus(ParseStatusCode::kMalformedTag)
+ .AddCause(std::move(range).error());
+ }
+
+ return XByteRangeTag{.range = std::move(range).value()};
+}
+
+ParseStatus::Or<XBitrateTag> XBitrateTag::Parse(TagItem tag) {
+ return ParseDecimalIntegerTag(tag, &XBitrateTag::bitrate);
+}
+
} // namespace media::hls
diff --git a/chromium/media/formats/hls/tags.h b/chromium/media/formats/hls/tags.h
index 6d35a4b3495..a0aa37dac55 100644
--- a/chromium/media/formats/hls/tags.h
+++ b/chromium/media/formats/hls/tags.h
@@ -17,36 +17,36 @@ namespace media::hls {
class TagItem;
// Represents the contents of the #EXTM3U tag
-struct M3uTag {
+struct MEDIA_EXPORT M3uTag {
static constexpr auto kName = CommonTagName::kM3u;
- static MEDIA_EXPORT ParseStatus::Or<M3uTag> Parse(TagItem);
+ static ParseStatus::Or<M3uTag> Parse(TagItem);
};
// Represents the contents of the #EXT-X-VERSION tag
-struct XVersionTag {
+struct MEDIA_EXPORT XVersionTag {
static constexpr auto kName = CommonTagName::kXVersion;
- static MEDIA_EXPORT ParseStatus::Or<XVersionTag> Parse(TagItem);
+ static ParseStatus::Or<XVersionTag> Parse(TagItem);
types::DecimalInteger version;
};
// Represents the contents of the #EXT-X-INDEPENDENT-SEGMENTS tag
-struct XIndependentSegmentsTag {
+struct MEDIA_EXPORT XIndependentSegmentsTag {
static constexpr auto kName = CommonTagName::kXIndependentSegments;
- static MEDIA_EXPORT ParseStatus::Or<XIndependentSegmentsTag> Parse(TagItem);
+ static ParseStatus::Or<XIndependentSegmentsTag> Parse(TagItem);
};
// Represents the contents of the #EXT-X-DEFINE tag
-struct XDefineTag {
+struct MEDIA_EXPORT XDefineTag {
static constexpr auto kName = CommonTagName::kXDefine;
- static MEDIA_EXPORT ParseStatus::Or<XDefineTag> Parse(TagItem);
+ static ParseStatus::Or<XDefineTag> Parse(TagItem);
// Constructs an XDefineTag representing a variable definition.
- static MEDIA_EXPORT XDefineTag CreateDefinition(types::VariableName name,
- base::StringPiece value);
+ static XDefineTag CreateDefinition(types::VariableName name,
+ base::StringPiece value);
// Constructs an XDefineTag representing an imported variable definition.
- static MEDIA_EXPORT XDefineTag CreateImport(types::VariableName name);
+ static XDefineTag CreateImport(types::VariableName name);
// The name of the variable being defined.
types::VariableName name;
@@ -57,9 +57,9 @@ struct XDefineTag {
};
// Represents the contents of the #EXTINF tag
-struct InfTag {
+struct MEDIA_EXPORT InfTag {
static constexpr auto kName = MediaPlaylistTagName::kInf;
- static MEDIA_EXPORT ParseStatus::Or<InfTag> Parse(TagItem);
+ static ParseStatus::Or<InfTag> Parse(TagItem);
// Target duration of the media segment, in seconds.
types::DecimalFloatingPoint duration;
@@ -69,27 +69,27 @@ struct InfTag {
};
// Represents the contents of the #EXT-X-ENDLIST tag
-struct XEndListTag {
+struct MEDIA_EXPORT XEndListTag {
static constexpr auto kName = MediaPlaylistTagName::kXEndList;
- static MEDIA_EXPORT ParseStatus::Or<XEndListTag> Parse(TagItem);
+ static ParseStatus::Or<XEndListTag> Parse(TagItem);
};
// Represents the contents of the #EXT-X-I-FRAMES-ONLY tag
-struct XIFramesOnlyTag {
+struct MEDIA_EXPORT XIFramesOnlyTag {
static constexpr auto kName = MediaPlaylistTagName::kXIFramesOnly;
- static MEDIA_EXPORT ParseStatus::Or<XIFramesOnlyTag> Parse(TagItem);
+ static ParseStatus::Or<XIFramesOnlyTag> Parse(TagItem);
};
// Represents the contents of the #EXT-X-DISCONTINUITY tag
-struct XDiscontinuityTag {
+struct MEDIA_EXPORT XDiscontinuityTag {
static constexpr auto kName = MediaPlaylistTagName::kXDiscontinuity;
- static MEDIA_EXPORT ParseStatus::Or<XDiscontinuityTag> Parse(TagItem);
+ static ParseStatus::Or<XDiscontinuityTag> Parse(TagItem);
};
// Represents the contents of the #EXT-X-GAP tag
-struct XGapTag {
+struct MEDIA_EXPORT XGapTag {
static constexpr auto kName = MediaPlaylistTagName::kXGap;
- static MEDIA_EXPORT ParseStatus::Or<XGapTag> Parse(TagItem);
+ static ParseStatus::Or<XGapTag> Parse(TagItem);
};
enum class PlaylistType {
@@ -103,9 +103,9 @@ enum class PlaylistType {
};
// Represents the contents of the #EXT-X-PLAYLIST-TYPE tag
-struct XPlaylistTypeTag {
+struct MEDIA_EXPORT XPlaylistTypeTag {
static constexpr auto kName = MediaPlaylistTagName::kXPlaylistType;
- static MEDIA_EXPORT ParseStatus::Or<XPlaylistTypeTag> Parse(TagItem);
+ static ParseStatus::Or<XPlaylistTypeTag> Parse(TagItem);
PlaylistType type;
};
@@ -145,6 +145,76 @@ struct MEDIA_EXPORT XStreamInfTag {
// optional here so that the caller may decide how they wish to handle its
// absence.
absl::optional<std::string> codecs;
+
+ // The optimal pixel resolution at which to display all video in this variant
+ // stream.
+ absl::optional<types::DecimalResolution> resolution;
+
+ // This describes the maximum framerate for all video in this variant stream.
+ absl::optional<types::DecimalFloatingPoint> frame_rate;
+};
+
+// Represents the contents of the #EXT-X-TARGETDURATION tag.
+struct MEDIA_EXPORT XTargetDurationTag {
+ static constexpr auto kName = MediaPlaylistTagName::kXTargetDuration;
+ static ParseStatus::Or<XTargetDurationTag> Parse(TagItem);
+
+ // The upper bound on the duration (in seconds) of all media segments in the
+ // media playlist. The EXTINF duration of each Media Segment in a Playlist
+ // file, when rounded to the nearest integer, MUST be less than or equal to
+ // this duration.
+ types::DecimalInteger duration;
+};
+
+// Represents the contents of the #EXT-X-PART-INF tag.
+struct MEDIA_EXPORT XPartInfTag {
+ static constexpr auto kName = MediaPlaylistTagName::kXPartInf;
+ static ParseStatus::Or<XPartInfTag> Parse(TagItem);
+
+ // This value indicates the target duration for partial media segments.
+ types::DecimalFloatingPoint target_duration;
+};
+
+// Represents the contents of the #EXT-X-MEDIA-SEQUENCE tag.
+struct MEDIA_EXPORT XMediaSequenceTag {
+ static constexpr auto kName = MediaPlaylistTagName::kXMediaSequence;
+ static ParseStatus::Or<XMediaSequenceTag> Parse(TagItem);
+
+ // Indicates the media sequence number to assign to the first media segment in
+ // this playlist. These numbers are useful for validating the same media
+ // playlist across reloads, but not for synchronizing media segments between
+ // playlists.
+ types::DecimalInteger number;
+};
+
+// Represents the contents of the #EXT-X-DISCONTINUITY-SEQUENCE tag.
+struct MEDIA_EXPORT XDiscontinuitySequenceTag {
+ static constexpr auto kName = MediaPlaylistTagName::kXDiscontinuitySequence;
+ static ParseStatus::Or<XDiscontinuitySequenceTag> Parse(TagItem);
+
+ // Indicates the discontinuity sequence number to assign to the first media
+ // segment in this playlist. These numbers are useful for synchronizing
+ // between variant stream timelines.
+ types::DecimalInteger number;
+};
+
+// Represents the contents of the #EXT-X-BYTERANGE tag.
+struct MEDIA_EXPORT XByteRangeTag {
+ static constexpr auto kName = MediaPlaylistTagName::kXByteRange;
+ static ParseStatus::Or<XByteRangeTag> Parse(TagItem);
+
+ types::ByteRangeExpression range;
+};
+
+// Represents the contents of the #EXT-X-BITRATE tag.
+struct MEDIA_EXPORT XBitrateTag {
+ static constexpr auto kName = MediaPlaylistTagName::kXBitrate;
+ static ParseStatus::Or<XBitrateTag> Parse(TagItem);
+
+  // The approximate bitrate of the following media segments (except those that
+ // have the EXT-X-BYTERANGE tag) expressed in kilobits per second. The value
+ // must be within +-10% of the actual segment bitrate.
+ types::DecimalInteger bitrate;
};
} // namespace media::hls
diff --git a/chromium/media/formats/hls/tags_unittest.cc b/chromium/media/formats/hls/tags_unittest.cc
index a7cbc5a0f5b..55da16d6565 100644
--- a/chromium/media/formats/hls/tags_unittest.cc
+++ b/chromium/media/formats/hls/tags_unittest.cc
@@ -3,10 +3,18 @@
// found in the LICENSE file.
#include "media/formats/hls/tags.h"
+
+#include <utility>
+
#include "base/location.h"
+#include "base/strings/string_piece.h"
#include "media/formats/hls/items.h"
+#include "media/formats/hls/parse_status.h"
#include "media/formats/hls/source_string.h"
+#include "media/formats/hls/test_util.h"
+#include "media/formats/hls/variable_dictionary.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
namespace media::hls {
@@ -37,7 +45,7 @@ void ErrorTest(absl::optional<base::StringPiece> content,
auto result = T::Parse(tag, variable_dict, sub_buffer);
ASSERT_TRUE(result.has_error()) << from.ToString();
auto error = std::move(result).error();
- EXPECT_EQ(error.code(), expected_status);
+ EXPECT_EQ(error.code(), expected_status) << from.ToString();
}
template <typename T>
@@ -60,7 +68,7 @@ T OkTest(absl::optional<base::StringPiece> content,
SourceString::CreateForTesting(*content))
: TagItem::CreateEmpty(ToTagName(T::kName), 1);
auto result = T::Parse(tag, variable_dict, sub_buffer);
- EXPECT_TRUE(result.has_value()) << from.ToString();
+ CHECK(result.has_value()) << from.ToString();
return std::move(result).value();
}
@@ -102,9 +110,37 @@ void RunEmptyTagTest() {
ErrorTest<T>("\t", ParseStatusCode::kMalformedTag);
}
-types::VariableName CreateVarName(base::StringPiece name) {
- return types::VariableName::Parse(SourceString::CreateForTesting(name))
- .value();
+// There are a couple of tags that are defined simply as `#EXT-X-TAG:n` where
+// `n` must be a valid DecimalInteger. This helper provides coverage for those
+// tags.
+template <typename T>
+void RunDecimalIntegerTagTest(types::DecimalInteger T::*field) {
+ // Content is required
+ ErrorTest<T>(absl::nullopt, ParseStatusCode::kMalformedTag);
+ ErrorTest<T>("", ParseStatusCode::kMalformedTag);
+
+ // Content must be a valid decimal-integer
+ ErrorTest<T>("-1", ParseStatusCode::kMalformedTag);
+ ErrorTest<T>("-1.5", ParseStatusCode::kMalformedTag);
+ ErrorTest<T>("-.5", ParseStatusCode::kMalformedTag);
+ ErrorTest<T>(".5", ParseStatusCode::kMalformedTag);
+ ErrorTest<T>("0.5", ParseStatusCode::kMalformedTag);
+ ErrorTest<T>("9999999999999999999999", ParseStatusCode::kMalformedTag);
+ ErrorTest<T>("one", ParseStatusCode::kMalformedTag);
+ ErrorTest<T>(" 1 ", ParseStatusCode::kMalformedTag);
+ ErrorTest<T>("1,", ParseStatusCode::kMalformedTag);
+ ErrorTest<T>("{$X}", ParseStatusCode::kMalformedTag);
+
+ auto tag = OkTest<T>("0");
+ EXPECT_EQ(tag.*field, 0u);
+ tag = OkTest<T>("1");
+ EXPECT_EQ(tag.*field, 1u);
+ tag = OkTest<T>("10");
+ EXPECT_EQ(tag.*field, 10u);
+ tag = OkTest<T>("14");
+ EXPECT_EQ(tag.*field, 14u);
+ tag = OkTest<T>("999999999999999999");
+ EXPECT_EQ(tag.*field, 999999999999999999u);
}
VariableDictionary CreateBasicDictionary(
@@ -118,7 +154,7 @@ VariableDictionary CreateBasicDictionary(
} // namespace
-TEST(HlsFormatParserTest, TagNameIdentifyTest) {
+TEST(HlsTagsTest, TagNameIdentity) {
std::set<base::StringPiece> names;
for (TagName name = kMinTagName; name <= kMaxTagName; ++name) {
@@ -133,12 +169,12 @@ TEST(HlsFormatParserTest, TagNameIdentifyTest) {
}
}
-TEST(HlsFormatParserTest, ParseM3uTagTest) {
+TEST(HlsTagsTest, ParseM3uTag) {
RunTagIdenficationTest<M3uTag>("#EXTM3U\n", absl::nullopt);
RunEmptyTagTest<M3uTag>();
}
-TEST(HlsFormatParserTest, ParseXVersionTagTest) {
+TEST(HlsTagsTest, ParseXVersionTag) {
RunTagIdenficationTest<XVersionTag>("#EXT-X-VERSION:123\n", "123");
// Test valid versions
@@ -179,7 +215,7 @@ TEST(HlsFormatParserTest, ParseXVersionTagTest) {
ErrorTest<XVersionTag>(" 1 ", ParseStatusCode::kMalformedTag);
}
-TEST(HlsFormatParserTest, ParseInfTagTest) {
+TEST(HlsTagsTest, ParseInfTag) {
RunTagIdenficationTest<InfTag>("#EXTINF:123,\t\n", "123,\t");
// Test some valid tags
@@ -214,35 +250,35 @@ TEST(HlsFormatParserTest, ParseInfTagTest) {
ErrorTest<InfTag>("asdf,", ParseStatusCode::kMalformedTag);
}
-TEST(HlsFormatParserTest, ParseXIndependentSegmentsTest) {
+TEST(HlsTagsTest, ParseXIndependentSegmentsTag) {
RunTagIdenficationTest<XIndependentSegmentsTag>(
"#EXT-X-INDEPENDENT-SEGMENTS\n", absl::nullopt);
RunEmptyTagTest<XIndependentSegmentsTag>();
}
-TEST(HlsFormatParserTest, ParseXEndListTagTest) {
- RunTagIdenficationTest<XEndListTag>("#EXT-X-END-LIST\n", absl::nullopt);
+TEST(HlsTagsTest, ParseXEndListTag) {
+ RunTagIdenficationTest<XEndListTag>("#EXT-X-ENDLIST\n", absl::nullopt);
RunEmptyTagTest<XEndListTag>();
}
-TEST(HlsFormatParserTest, ParseXIFramesOnlyTagTest) {
+TEST(HlsTagsTest, ParseXIFramesOnlyTag) {
RunTagIdenficationTest<XIFramesOnlyTag>("#EXT-X-I-FRAMES-ONLY\n",
absl::nullopt);
RunEmptyTagTest<XIFramesOnlyTag>();
}
-TEST(HlsFormatParserTest, ParseXDiscontinuityTagTest) {
+TEST(HlsTagsTest, ParseXDiscontinuityTag) {
RunTagIdenficationTest<XDiscontinuityTag>("#EXT-X-DISCONTINUITY\n",
absl::nullopt);
RunEmptyTagTest<XDiscontinuityTag>();
}
-TEST(HlsFormatParserTest, ParseXGapTagTest) {
+TEST(HlsTagsTest, ParseXGapTag) {
RunTagIdenficationTest<XGapTag>("#EXT-X-GAP\n", absl::nullopt);
RunEmptyTagTest<XGapTag>();
}
-TEST(HlsFormatParserTest, ParseXDefineTagTest) {
+TEST(HlsTagsTest, ParseXDefineTag) {
RunTagIdenficationTest<XDefineTag>(
"#EXT-X-DEFINE:NAME=\"FOO\",VALUE=\"Bar\",\n",
"NAME=\"FOO\",VALUE=\"Bar\",");
@@ -299,7 +335,7 @@ TEST(HlsFormatParserTest, ParseXDefineTagTest) {
ParseStatusCode::kMalformedTag);
}
-TEST(HlsFormatParserTest, ParseXPlaylistTypeTagTest) {
+TEST(HlsTagsTest, ParseXPlaylistTypeTag) {
RunTagIdenficationTest<XPlaylistTypeTag>("#EXT-X-PLAYLIST-TYPE:VOD\n", "VOD");
RunTagIdenficationTest<XPlaylistTypeTag>("#EXT-X-PLAYLIST-TYPE:EVENT\n",
"EVENT");
@@ -317,7 +353,7 @@ TEST(HlsFormatParserTest, ParseXPlaylistTypeTagTest) {
ErrorTest<XPlaylistTypeTag>(absl::nullopt, ParseStatusCode::kMalformedTag);
}
-TEST(HlsFormatParserTest, ParseXStreamInfTest) {
+TEST(HlsTagsTest, ParseXStreamInfTag) {
RunTagIdenficationTest<XStreamInfTag>(
"#EXT-X-STREAM-INF:BANDWIDTH=1010,CODECS=\"foo,bar\"\n",
"BANDWIDTH=1010,CODECS=\"foo,bar\"");
@@ -332,6 +368,20 @@ TEST(HlsFormatParserTest, ParseXStreamInfTest) {
EXPECT_EQ(tag.average_bandwidth, 1000u);
EXPECT_DOUBLE_EQ(tag.score.value(), 12.2);
EXPECT_EQ(tag.codecs, "foo,bar");
+ EXPECT_EQ(tag.resolution, absl::nullopt);
+ EXPECT_EQ(tag.frame_rate, absl::nullopt);
+
+ tag = OkTest<XStreamInfTag>(
+ R"(BANDWIDTH=1010,RESOLUTION=1920x1080,FRAME-RATE=29.97)", variable_dict,
+ sub_buffer);
+ EXPECT_EQ(tag.bandwidth, 1010u);
+ EXPECT_EQ(tag.average_bandwidth, absl::nullopt);
+ EXPECT_EQ(tag.score, absl::nullopt);
+ EXPECT_EQ(tag.codecs, absl::nullopt);
+ ASSERT_TRUE(tag.resolution.has_value());
+ EXPECT_EQ(tag.resolution->width, 1920u);
+ EXPECT_EQ(tag.resolution->height, 1080u);
+ EXPECT_DOUBLE_EQ(tag.frame_rate.value(), 29.97);
// "BANDWIDTH" is the only required attribute
tag = OkTest<XStreamInfTag>(R"(BANDWIDTH=5050)", variable_dict, sub_buffer);
@@ -339,6 +389,8 @@ TEST(HlsFormatParserTest, ParseXStreamInfTest) {
EXPECT_EQ(tag.average_bandwidth, absl::nullopt);
EXPECT_EQ(tag.score, absl::nullopt);
EXPECT_EQ(tag.codecs, absl::nullopt);
+ EXPECT_EQ(tag.resolution, absl::nullopt);
+ EXPECT_EQ(tag.frame_rate, absl::nullopt);
ErrorTest<XStreamInfTag>(absl::nullopt, variable_dict, sub_buffer,
ParseStatusCode::kMalformedTag);
@@ -389,6 +441,92 @@ TEST(HlsFormatParserTest, ParseXStreamInfTest) {
EXPECT_EQ(tag.average_bandwidth, absl::nullopt);
EXPECT_EQ(tag.score, absl::nullopt);
EXPECT_EQ(tag.codecs, "bar,baz");
+ EXPECT_EQ(tag.resolution, absl::nullopt);
+
+ // "RESOLUTION" must be a valid decimal-resolution
+ ErrorTest<XStreamInfTag>(R"(BANDWIDTH=1010,RESOLUTION=1920x)", variable_dict,
+ sub_buffer, ParseStatusCode::kMalformedTag);
+ ErrorTest<XStreamInfTag>(R"(BANDWIDTH=1010,RESOLUTION=x123)", variable_dict,
+ sub_buffer, ParseStatusCode::kMalformedTag);
+
+ // "FRAME-RATE" must be a valid decimal-floating-point (unsigned)
+ ErrorTest<XStreamInfTag>(R"(BANDWIDTH=1010,FRAME-RATE=-1)", variable_dict,
+ sub_buffer, ParseStatusCode::kMalformedTag);
+ ErrorTest<XStreamInfTag>(R"(BANDWIDTH=1010,FRAME-RATE=One)", variable_dict,
+ sub_buffer, ParseStatusCode::kMalformedTag);
+ ErrorTest<XStreamInfTag>(R"(BANDWIDTH=1010,FRAME-RATE=30.0.0)", variable_dict,
+ sub_buffer, ParseStatusCode::kMalformedTag);
+}
+
+TEST(HlsTagsTest, ParseXTargetDurationTag) {
+ RunTagIdenficationTest<XTargetDurationTag>("#EXT-X-TARGETDURATION:10\n",
+ "10");
+ RunDecimalIntegerTagTest(&XTargetDurationTag::duration);
+}
+
+TEST(HlsTagsTest, ParseXMediaSequenceTag) {
+ RunTagIdenficationTest<XMediaSequenceTag>("#EXT-X-MEDIA-SEQUENCE:3\n", "3");
+ RunDecimalIntegerTagTest(&XMediaSequenceTag::number);
+}
+
+TEST(HlsTagsTest, ParseXDiscontinuitySequenceTag) {
+ RunTagIdenficationTest<XDiscontinuitySequenceTag>(
+ "#EXT-X-DISCONTINUITY-SEQUENCE:3\n", "3");
+ RunDecimalIntegerTagTest(&XDiscontinuitySequenceTag::number);
+}
+
+TEST(HlsTagsTest, ParseXByteRangeTag) {
+ RunTagIdenficationTest<XByteRangeTag>("#EXT-X-BYTERANGE:12@34\n", "12@34");
+
+ auto tag = OkTest<XByteRangeTag>("12");
+ EXPECT_EQ(tag.range.length, 12u);
+ EXPECT_EQ(tag.range.offset, absl::nullopt);
+ tag = OkTest<XByteRangeTag>("12@34");
+ EXPECT_EQ(tag.range.length, 12u);
+ EXPECT_EQ(tag.range.offset, 34u);
+
+ ErrorTest<XByteRangeTag>("FOOBAR", ParseStatusCode::kMalformedTag);
+ ErrorTest<XByteRangeTag>("12@", ParseStatusCode::kMalformedTag);
+ ErrorTest<XByteRangeTag>("@34", ParseStatusCode::kMalformedTag);
+ ErrorTest<XByteRangeTag>("@", ParseStatusCode::kMalformedTag);
+ ErrorTest<XByteRangeTag>(" 12@34", ParseStatusCode::kMalformedTag);
+ ErrorTest<XByteRangeTag>("12@34 ", ParseStatusCode::kMalformedTag);
+ ErrorTest<XByteRangeTag>("", ParseStatusCode::kMalformedTag);
+ ErrorTest<XByteRangeTag>(absl::nullopt, ParseStatusCode::kMalformedTag);
+}
+
+TEST(HlsTagsTest, ParseXBitrateTag) {
+ RunTagIdenficationTest<XBitrateTag>("#EXT-X-BITRATE:3\n", "3");
+ RunDecimalIntegerTagTest(&XBitrateTag::bitrate);
+}
+
+TEST(HlsTagsTest, ParseXPartInfTag) {
+ RunTagIdenficationTest<XPartInfTag>("#EXT-X-PART-INF:PART-TARGET=1.0\n",
+ "PART-TARGET=1.0");
+
+ // PART-TARGET is required, and must be a valid DecimalFloatingPoint
+ ErrorTest<XPartInfTag>(absl::nullopt, ParseStatusCode::kMalformedTag);
+ ErrorTest<XPartInfTag>("", ParseStatusCode::kMalformedTag);
+ ErrorTest<XPartInfTag>("1", ParseStatusCode::kMalformedTag);
+ ErrorTest<XPartInfTag>("PART-TARGET=-1", ParseStatusCode::kMalformedTag);
+ ErrorTest<XPartInfTag>("PART-TARGET={$part-target}",
+ ParseStatusCode::kMalformedTag);
+ ErrorTest<XPartInfTag>("PART-TARGET=\"1\"", ParseStatusCode::kMalformedTag);
+ ErrorTest<XPartInfTag>("PART-TARGET=one", ParseStatusCode::kMalformedTag);
+ ErrorTest<XPartInfTag>("FOO=BAR", ParseStatusCode::kMalformedTag);
+ ErrorTest<XPartInfTag>("PART-TARGET=10,PART-TARGET=10",
+ ParseStatusCode::kMalformedTag);
+
+ auto tag = OkTest<XPartInfTag>("PART-TARGET=1.2");
+ EXPECT_DOUBLE_EQ(tag.target_duration, 1.2);
+ tag = OkTest<XPartInfTag>("PART-TARGET=1");
+ EXPECT_DOUBLE_EQ(tag.target_duration, 1);
+ tag = OkTest<XPartInfTag>("PART-TARGET=0");
+ EXPECT_DOUBLE_EQ(tag.target_duration, 0);
+ tag = OkTest<XPartInfTag>("PART-TARGET=99999999.99");
+ EXPECT_DOUBLE_EQ(tag.target_duration, 99999999.99);
+ tag = OkTest<XPartInfTag>("FOO=BAR,PART-TARGET=100,BAR=BAZ");
+ EXPECT_DOUBLE_EQ(tag.target_duration, 100);
}
} // namespace media::hls
diff --git a/chromium/media/formats/hls/test_util.h b/chromium/media/formats/hls/test_util.h
new file mode 100644
index 00000000000..fdc5506e51d
--- /dev/null
+++ b/chromium/media/formats/hls/test_util.h
@@ -0,0 +1,26 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_HLS_TEST_UTIL_H_
+#define MEDIA_FORMATS_HLS_TEST_UTIL_H_
+
+#include "base/strings/string_piece.h"
+#include "media/formats/hls/source_string.h"
+#include "media/formats/hls/types.h"
+
+namespace media::hls {
+
+inline types::VariableName CreateVarName(base::StringPiece name) {
+ return types::VariableName::Parse(SourceString::CreateForTesting(name))
+ .value();
+}
+
+inline types::ByteRange CreateByteRange(types::DecimalInteger length,
+ types::DecimalInteger offset) {
+ return types::ByteRange::Validate(length, offset).value();
+}
+
+} // namespace media::hls
+
+#endif  // MEDIA_FORMATS_HLS_TEST_UTIL_H_
diff --git a/chromium/media/formats/hls/types.cc b/chromium/media/formats/hls/types.cc
index dafc688eb19..264f44d95fd 100644
--- a/chromium/media/formats/hls/types.cc
+++ b/chromium/media/formats/hls/types.cc
@@ -6,9 +6,11 @@
#include <algorithm>
#include <cmath>
+#include <limits>
#include "base/no_destructor.h"
#include "base/strings/string_number_conversions.h"
+#include "media/formats/hls/parse_status.h"
#include "media/formats/hls/source_string.h"
#include "third_party/re2/src/re2/re2.h"
@@ -199,6 +201,76 @@ ParseStatus::Or<SignedDecimalFloatingPoint> ParseSignedDecimalFloatingPoint(
return result;
}
+ParseStatus::Or<DecimalResolution> DecimalResolution::Parse(
+ SourceString source_str) {
+ // decimal-resolution values are in the format: DecimalInteger 'x'
+ // DecimalInteger
+ const auto x_index = source_str.Str().find_first_of('x');
+ if (x_index == base::StringPiece::npos) {
+ return ParseStatusCode::kFailedToParseDecimalResolution;
+ }
+
+ // Extract width and height strings
+ const auto width_str = source_str.Consume(x_index);
+ source_str.Consume(1);
+ const auto height_str = source_str;
+
+ auto width = ParseDecimalInteger(width_str);
+ auto height = ParseDecimalInteger(height_str);
+ for (auto* x : {&width, &height}) {
+ if (x->has_error()) {
+ return ParseStatus(ParseStatusCode::kFailedToParseDecimalResolution)
+ .AddCause(std::move(*x).error());
+ }
+ }
+
+ return DecimalResolution{.width = std::move(width).value(),
+ .height = std::move(height).value()};
+}
+
+ParseStatus::Or<ByteRangeExpression> ByteRangeExpression::Parse(
+ SourceString source_str) {
+ // If this ByteRange has an offset, it will be separated from the length by
+ // '@'.
+ const auto at_index = source_str.Str().find_first_of('@');
+ const auto length_str = source_str.Consume(at_index);
+ auto length = ParseDecimalInteger(length_str);
+ if (length.has_error()) {
+ return ParseStatus(ParseStatusCode::kFailedToParseByteRange)
+ .AddCause(std::move(length).error());
+ }
+
+ // If the offset was present, try to parse it
+ absl::optional<types::DecimalInteger> offset;
+ if (at_index != base::StringPiece::npos) {
+ source_str.Consume(1);
+ auto offset_result = ParseDecimalInteger(source_str);
+ if (offset_result.has_error()) {
+ return ParseStatus(ParseStatusCode::kFailedToParseByteRange)
+ .AddCause(std::move(offset_result).error());
+ }
+
+ offset = std::move(offset_result).value();
+ }
+
+ return ByteRangeExpression{.length = std::move(length).value(),
+ .offset = offset};
+}
+
+absl::optional<ByteRange> ByteRange::Validate(DecimalInteger length,
+ DecimalInteger offset) {
+ if (length == 0) {
+ return absl::nullopt;
+ }
+
+ // Ensure that `length+offset` won't overflow `DecimalInteger`
+ if (std::numeric_limits<DecimalInteger>::max() - offset < length) {
+ return absl::nullopt;
+ }
+
+ return ByteRange(length, offset);
+}
+
ParseStatus::Or<base::StringPiece> ParseQuotedString(
SourceString source_str,
const VariableDictionary& variable_dict,
@@ -281,12 +353,6 @@ AttributeMap::AttributeMap(base::span<Item> sorted_items)
std::is_sorted(items_.begin(), items_.end(), AttributeMapComparator()));
}
-AttributeMap::~AttributeMap() = default;
-AttributeMap::AttributeMap(const AttributeMap&) = default;
-AttributeMap::AttributeMap(AttributeMap&&) = default;
-AttributeMap& AttributeMap::operator=(const AttributeMap&) = default;
-AttributeMap& AttributeMap::operator=(AttributeMap&&) = default;
-
ParseStatus::Or<AttributeListIterator::Item> AttributeMap::Fill(
AttributeListIterator* iter) {
while (true) {
diff --git a/chromium/media/formats/hls/types.h b/chromium/media/formats/hls/types.h
index d615e25b0ea..22f08ead90c 100644
--- a/chromium/media/formats/hls/types.h
+++ b/chromium/media/formats/hls/types.h
@@ -14,35 +14,90 @@
namespace media::hls::types {
-// Data-types used in HLS, as described by the spec
+// A `DecimalInteger` is an unsigned integer value.
+// https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis#:~:text=of%20the%20following%3A%0A%0A%20%20%20o-,decimal%2Dinteger,-%3A%20an%20unquoted%20string
using DecimalInteger = uint64_t;
-ParseStatus::Or<DecimalInteger> MEDIA_EXPORT
-ParseDecimalInteger(SourceString source_str);
+MEDIA_EXPORT ParseStatus::Or<DecimalInteger> ParseDecimalInteger(
+ SourceString source_str);
+// A `DecimalFloatingPoint` is an unsigned floating-point value.
+// https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis#:~:text=on%20its%20AttributeNames.%0A%0A%20%20%20o-,decimal%2Dfloating%2Dpoint,-%3A%20an%20unquoted%20string
using DecimalFloatingPoint = double;
-ParseStatus::Or<DecimalFloatingPoint> MEDIA_EXPORT
-ParseDecimalFloatingPoint(SourceString source_str);
+MEDIA_EXPORT ParseStatus::Or<DecimalFloatingPoint> ParseDecimalFloatingPoint(
+ SourceString source_str);
+// A `SignedDecimalFloatingPoint` is a signed floating-point value.
+// https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis#:~:text=decimal%20positional%20notation.%0A%0A%20%20%20o-,signed%2Ddecimal%2Dfloating%2Dpoint,-%3A%20an%20unquoted%20string
using SignedDecimalFloatingPoint = double;
-ParseStatus::Or<SignedDecimalFloatingPoint> MEDIA_EXPORT
+MEDIA_EXPORT ParseStatus::Or<SignedDecimalFloatingPoint>
ParseSignedDecimalFloatingPoint(SourceString source_str);
+// A `DecimalResolution` is a set of two `DecimalInteger`s describing width and
+// height.
+// https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis#:~:text=enumerated%2Dstring%2Dlist.%0A%0A%20%20%20o-,decimal%2Dresolution,-%3A%20two%20decimal%2Dintegers
+struct MEDIA_EXPORT DecimalResolution {
+ static ParseStatus::Or<DecimalResolution> Parse(SourceString source_str);
+
+ types::DecimalInteger width;
+ types::DecimalInteger height;
+};
+
+// A `ByteRangeExpression` represents the 'length[@offset]' syntax that appears
+// in tags describing byte ranges of a resource.
+// https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis#section-4.4.4.2
+struct MEDIA_EXPORT ByteRangeExpression {
+ static ParseStatus::Or<ByteRangeExpression> Parse(SourceString source_str);
+
+ // The length of the sub-range, in bytes.
+ types::DecimalInteger length;
+
+ // If present, the offset in bytes from the beginning of the resource.
+ // If not present, the sub-range begins at the next byte following that of the
+ // previous segment. The previous segment must be a subrange of the same
+ // resource.
+ absl::optional<types::DecimalInteger> offset;
+};
+
+// This is similar to `ByteRangeExpression`, but with a stronger contract:
+// - `length` is non-zero
+// - `offset` is non-optional
+// - `offset+length` may not overflow `types::DecimalInteger`
+class MEDIA_EXPORT ByteRange {
+ public:
+ // Validates that the range given by `[offset,offset+length)` is non-empty and
+ // that `GetEnd()` would not exceed the max value representable by a
+ // `DecimalInteger`.
+ static absl::optional<ByteRange> Validate(DecimalInteger length,
+ DecimalInteger offset);
+
+ DecimalInteger GetLength() const { return length_; }
+ DecimalInteger GetOffset() const { return offset_; }
+ DecimalInteger GetEnd() const { return offset_ + length_; }
+
+ private:
+ ByteRange(DecimalInteger length, DecimalInteger offset)
+ : length_(length), offset_(offset) {}
+
+ DecimalInteger length_;
+ DecimalInteger offset_;
+};
+
// Parses a string surrounded by double-quotes ("), returning the inner string.
// These appear in the context of attribute-lists, and are subject to variable
// substitution. `sub_buffer` must outlive the returned string.
-ParseStatus::Or<base::StringPiece> MEDIA_EXPORT
-ParseQuotedString(SourceString source_str,
- const VariableDictionary& variable_dict,
- VariableDictionary::SubstitutionBuffer& sub_buffer);
+MEDIA_EXPORT ParseStatus::Or<base::StringPiece> ParseQuotedString(
+ SourceString source_str,
+ const VariableDictionary& variable_dict,
+ VariableDictionary::SubstitutionBuffer& sub_buffer);
// Parses a string surrounded by double-quotes ("), returning the interior
// string. These appear in the context of attribute-lists, however certain tags
// disallow variable substitution so this function exists to serve those.
-ParseStatus::Or<SourceString> MEDIA_EXPORT
-ParseQuotedStringWithoutSubstitution(SourceString source_str);
+MEDIA_EXPORT ParseStatus::Or<SourceString> ParseQuotedStringWithoutSubstitution(
+ SourceString source_str);
// Provides an iterator-style interface over attribute-lists.
// Since the number of attributes expected in an attribute-list for a tag varies
@@ -76,13 +131,6 @@ struct MEDIA_EXPORT AttributeMap {
// values. The keys present must be unique and sorted in alphabetical order.
explicit AttributeMap(base::span<Item> sorted_items);
- // TODO(crbug.com/1275317): These constructors should be removed
- ~AttributeMap();
- AttributeMap(const AttributeMap&);
- AttributeMap(AttributeMap&&);
- AttributeMap& operator=(const AttributeMap&);
- AttributeMap& operator=(AttributeMap&&);
-
// Fills this map with the given iterator until one of the following occurs:
// - iter->Next() returns a error. The error will be forwarded to the caller.
// - iter->Next() returns an Item with an unrecognized name. The item will be
@@ -114,10 +162,9 @@ struct MEDIA_EXPORT AttributeMap {
// Represents a string that is guaranteed to be a non-empty, and consisting only
// of characters in the set {[a-z], [A-Z], [0-9], _, -}. Variable names are
// case-sensitive.
-class VariableName {
+class MEDIA_EXPORT VariableName {
public:
- static MEDIA_EXPORT ParseStatus::Or<VariableName> Parse(
- SourceString source_str);
+ static ParseStatus::Or<VariableName> Parse(SourceString source_str);
base::StringPiece GetName() const { return name_; }
diff --git a/chromium/media/formats/hls/types_unittest.cc b/chromium/media/formats/hls/types_unittest.cc
index b79551e8341..1b482cdd3f6 100644
--- a/chromium/media/formats/hls/types_unittest.cc
+++ b/chromium/media/formats/hls/types_unittest.cc
@@ -3,33 +3,32 @@
// found in the LICENSE file.
#include "media/formats/hls/types.h"
+
+#include <utility>
+
#include "base/location.h"
+#include "base/strings/string_piece.h"
+#include "media/formats/hls/parse_status.h"
#include "media/formats/hls/source_string.h"
+#include "media/formats/hls/test_util.h"
+#include "media/formats/hls/variable_dictionary.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media::hls {
-namespace {
-
-types::VariableName CreateVarName(base::StringPiece name) {
- return types::VariableName::Parse(SourceString::CreateForTesting(name))
- .value();
-}
-
-} // namespace
-
-TEST(HlsFormatParserTest, ParseDecimalIntegerTest) {
- auto const error_test = [](base::StringPiece input,
+TEST(HlsTypesTest, ParseDecimalInteger) {
+ const auto error_test = [](base::StringPiece input,
const base::Location& from =
base::Location::Current()) {
auto result =
types::ParseDecimalInteger(SourceString::CreateForTesting(1, 1, input));
ASSERT_TRUE(result.has_error()) << from.ToString();
auto error = std::move(result).error();
- EXPECT_EQ(error.code(), ParseStatusCode::kFailedToParseDecimalInteger);
+ EXPECT_EQ(error.code(), ParseStatusCode::kFailedToParseDecimalInteger)
+ << from.ToString();
};
- auto const ok_test = [](base::StringPiece input,
+ const auto ok_test = [](base::StringPiece input,
types::DecimalInteger expected,
const base::Location& from =
base::Location::Current()) {
@@ -37,7 +36,7 @@ TEST(HlsFormatParserTest, ParseDecimalIntegerTest) {
types::ParseDecimalInteger(SourceString::CreateForTesting(1, 1, input));
ASSERT_TRUE(result.has_value()) << from.ToString();
auto value = std::move(result).value();
- EXPECT_EQ(value, expected);
+ EXPECT_EQ(value, expected) << from.ToString();
};
// Empty string is not allowed
@@ -72,19 +71,19 @@ TEST(HlsFormatParserTest, ParseDecimalIntegerTest) {
error_test("18446744073709551616");
}
-TEST(HlsFormatParserTest, ParseDecimalFloatingPointTest) {
- auto const error_test = [](base::StringPiece input,
+TEST(HlsTypesTest, ParseDecimalFloatingPoint) {
+ const auto error_test = [](base::StringPiece input,
const base::Location& from =
base::Location::Current()) {
auto result = types::ParseDecimalFloatingPoint(
SourceString::CreateForTesting(1, 1, input));
ASSERT_TRUE(result.has_error()) << from.ToString();
auto error = std::move(result).error();
- EXPECT_EQ(error.code(),
- ParseStatusCode::kFailedToParseDecimalFloatingPoint);
+ EXPECT_EQ(error.code(), ParseStatusCode::kFailedToParseDecimalFloatingPoint)
+ << from.ToString();
};
- auto const ok_test = [](base::StringPiece input,
+ const auto ok_test = [](base::StringPiece input,
types::DecimalFloatingPoint expected,
const base::Location& from =
base::Location::Current()) {
@@ -92,7 +91,7 @@ TEST(HlsFormatParserTest, ParseDecimalFloatingPointTest) {
SourceString::CreateForTesting(1, 1, input));
ASSERT_TRUE(result.has_value()) << from.ToString();
auto value = std::move(result).value();
- EXPECT_DOUBLE_EQ(value, expected);
+ EXPECT_DOUBLE_EQ(value, expected) << from.ToString();
};
// Empty string is not allowed
@@ -124,8 +123,8 @@ TEST(HlsFormatParserTest, ParseDecimalFloatingPointTest) {
ok_test("0000000.000001", 0.000001);
}
-TEST(HlsFormatParserTest, ParseSignedDecimalFloatingPointTest) {
- auto const error_test = [](base::StringPiece input,
+TEST(HlsTypesTest, ParseSignedDecimalFloatingPoint) {
+ const auto error_test = [](base::StringPiece input,
const base::Location& from =
base::Location::Current()) {
auto result = types::ParseSignedDecimalFloatingPoint(
@@ -133,10 +132,11 @@ TEST(HlsFormatParserTest, ParseSignedDecimalFloatingPointTest) {
ASSERT_TRUE(result.has_error()) << from.ToString();
auto error = std::move(result).error();
EXPECT_EQ(error.code(),
- ParseStatusCode::kFailedToParseSignedDecimalFloatingPoint);
+ ParseStatusCode::kFailedToParseSignedDecimalFloatingPoint)
+ << from.ToString();
};
- auto const ok_test = [](base::StringPiece input,
+ const auto ok_test = [](base::StringPiece input,
types::SignedDecimalFloatingPoint expected,
const base::Location& from =
base::Location::Current()) {
@@ -144,7 +144,7 @@ TEST(HlsFormatParserTest, ParseSignedDecimalFloatingPointTest) {
SourceString::CreateForTesting(1, 1, input));
ASSERT_TRUE(result.has_value()) << from.ToString();
auto value = std::move(result).value();
- EXPECT_DOUBLE_EQ(value, expected);
+ EXPECT_DOUBLE_EQ(value, expected) << from.ToString();
};
// Empty string is not allowed
@@ -178,7 +178,7 @@ TEST(HlsFormatParserTest, ParseSignedDecimalFloatingPointTest) {
ok_test("0000000.000001", 0.000001);
}
-TEST(HlsFormatParserTest, AttributeListIteratorTest) {
+TEST(HlsTypesTest, AttributeListIterator) {
using Items =
std::initializer_list<std::pair<base::StringPiece, base::StringPiece>>;
@@ -189,17 +189,17 @@ TEST(HlsFormatParserTest, AttributeListIteratorTest) {
auto result = iter.Next();
ASSERT_TRUE(result.has_value()) << from.ToString();
auto value = std::move(result).value();
- EXPECT_EQ(value.name.Str(), item.first);
- EXPECT_EQ(value.value.Str(), item.second);
+ EXPECT_EQ(value.name.Str(), item.first) << from.ToString();
+ EXPECT_EQ(value.value.Str(), item.second) << from.ToString();
}
// Afterwards, iterator should fail
auto result = iter.Next();
- ASSERT_TRUE(result.has_error());
- EXPECT_EQ(std::move(result).error().code(), error);
+ ASSERT_TRUE(result.has_error()) << from.ToString();
+ EXPECT_EQ(std::move(result).error().code(), error) << from.ToString();
result = iter.Next();
- ASSERT_TRUE(result.has_error());
- EXPECT_EQ(std::move(result).error().code(), error);
+ ASSERT_TRUE(result.has_error()) << from.ToString();
+ EXPECT_EQ(std::move(result).error().code(), error) << from.ToString();
};
// Checks for valid items, followed by an error
@@ -286,7 +286,7 @@ TEST(HlsFormatParserTest, AttributeListIteratorTest) {
error_test("FOO=\"as\ndf\"", {});
}
-TEST(HlsFormatParserTest, AttributeMapTest) {
+TEST(HlsTypesTest, AttributeMap) {
auto make_iter = [](auto str) {
return types::AttributeListIterator(SourceString::CreateForTesting(str));
};
@@ -416,24 +416,25 @@ TEST(HlsFormatParserTest, AttributeMapTest) {
}
}
-TEST(HlsFormatParserTest, ParseVariableNameTest) {
- auto const ok_test = [](base::StringPiece input,
+TEST(HlsTypesTest, ParseVariableName) {
+ const auto ok_test = [](base::StringPiece input,
const base::Location& from =
base::Location::Current()) {
auto result =
types::VariableName::Parse(SourceString::CreateForTesting(input));
ASSERT_TRUE(result.has_value()) << from.ToString();
- EXPECT_EQ(std::move(result).value().GetName(), input);
+ EXPECT_EQ(std::move(result).value().GetName(), input) << from.ToString();
};
- auto const error_test = [](base::StringPiece input,
+ const auto error_test = [](base::StringPiece input,
const base::Location& from =
base::Location::Current()) {
auto result =
types::VariableName::Parse(SourceString::CreateForTesting(input));
ASSERT_TRUE(result.has_error()) << from.ToString();
EXPECT_EQ(std::move(result).error().code(),
- ParseStatusCode::kMalformedVariableName);
+ ParseStatusCode::kMalformedVariableName)
+ << from.ToString();
};
// Variable names may not be empty
@@ -459,21 +460,25 @@ TEST(HlsFormatParserTest, ParseVariableNameTest) {
ok_test("______-___-__---");
}
-TEST(HlsFormatParserTest, ParseQuotedStringWithoutSubstitutionTest) {
- const auto ok_test = [](base::StringPiece in,
- base::StringPiece expected_out) {
+TEST(HlsTypesTest, ParseQuotedStringWithoutSubstitution) {
+ const auto ok_test = [](base::StringPiece in, base::StringPiece expected_out,
+ const base::Location& from =
+ base::Location::Current()) {
auto in_str = SourceString::CreateForTesting(in);
auto out = types::ParseQuotedStringWithoutSubstitution(in_str);
- ASSERT_TRUE(out.has_value());
- EXPECT_EQ(std::move(out).value().Str(), expected_out);
+ ASSERT_TRUE(out.has_value()) << from.ToString();
+ EXPECT_EQ(std::move(out).value().Str(), expected_out) << from.ToString();
};
- const auto error_test = [](base::StringPiece in) {
+ const auto error_test = [](base::StringPiece in,
+ const base::Location& from =
+ base::Location::Current()) {
auto in_str = SourceString::CreateForTesting(in);
auto out = types::ParseQuotedStringWithoutSubstitution(in_str);
- ASSERT_TRUE(out.has_error());
+ ASSERT_TRUE(out.has_error()) << from.ToString();
EXPECT_EQ(std::move(out).error().code(),
- ParseStatusCode::kFailedToParseQuotedString);
+ ParseStatusCode::kFailedToParseQuotedString)
+ << from.ToString();
};
// Test some basic examples
@@ -503,27 +508,30 @@ TEST(HlsFormatParserTest, ParseQuotedStringWithoutSubstitutionTest) {
error_test("");
}
-TEST(HlsFormatParserTest, ParseQuotedStringTest) {
+TEST(HlsTypesTest, ParseQuotedString) {
VariableDictionary dict;
EXPECT_TRUE(dict.Insert(CreateVarName("FOO"), "bar"));
EXPECT_TRUE(dict.Insert(CreateVarName("BAZ"), "\"foo\""));
- const auto ok_test = [&dict](base::StringPiece in,
- base::StringPiece expected_out) {
- auto in_str = SourceString::CreateForTesting(in);
- VariableDictionary::SubstitutionBuffer sub_buffer;
- auto out = types::ParseQuotedString(in_str, dict, sub_buffer);
- ASSERT_TRUE(out.has_value());
- EXPECT_EQ(std::move(out).value(), expected_out);
- };
+ const auto ok_test =
+ [&dict](base::StringPiece in, base::StringPiece expected_out,
+ const base::Location& from = base::Location::Current()) {
+ auto in_str = SourceString::CreateForTesting(in);
+ VariableDictionary::SubstitutionBuffer sub_buffer;
+ auto out = types::ParseQuotedString(in_str, dict, sub_buffer);
+ ASSERT_TRUE(out.has_value()) << from.ToString();
+ EXPECT_EQ(std::move(out).value(), expected_out) << from.ToString();
+ };
const auto error_test = [&dict](base::StringPiece in,
- ParseStatusCode expected_error) {
+ ParseStatusCode expected_error,
+ const base::Location& from =
+ base::Location::Current()) {
auto in_str = SourceString::CreateForTesting(in);
VariableDictionary::SubstitutionBuffer sub_buffer;
auto out = types::ParseQuotedString(in_str, dict, sub_buffer);
- ASSERT_TRUE(out.has_error());
- EXPECT_EQ(std::move(out).error().code(), expected_error);
+ ASSERT_TRUE(out.has_error()) << from.ToString();
+ EXPECT_EQ(std::move(out).error().code(), expected_error) << from.ToString();
};
// Test some basic examples
@@ -555,4 +563,191 @@ TEST(HlsFormatParserTest, ParseQuotedStringTest) {
error_test("", ParseStatusCode::kFailedToParseQuotedString);
}
+TEST(HlsTypesTest, ParseDecimalResolution) {
+ const auto error_test = [](base::StringPiece input,
+ const base::Location& from =
+ base::Location::Current()) {
+ auto result = types::DecimalResolution::Parse(
+ SourceString::CreateForTesting(1, 1, input));
+ ASSERT_TRUE(result.has_error()) << from.ToString();
+ auto error = std::move(result).error();
+ EXPECT_EQ(error.code(), ParseStatusCode::kFailedToParseDecimalResolution)
+ << from.ToString();
+ };
+
+ const auto ok_test =
+ [](base::StringPiece input, types::DecimalResolution expected,
+ const base::Location& from = base::Location::Current()) {
+ auto result = types::DecimalResolution::Parse(
+ SourceString::CreateForTesting(1, 1, input));
+ ASSERT_TRUE(result.has_value()) << from.ToString();
+ auto value = std::move(result).value();
+ EXPECT_EQ(value.width, expected.width) << from.ToString();
+ EXPECT_EQ(value.height, expected.height) << from.ToString();
+ };
+
+ // Empty string is not allowed
+ error_test("");
+
+ // Decimal-resolution must have a single lower-case 'x' between two
+ // DecimalIntegers
+ error_test("123");
+ error_test("123X456");
+ error_test("123*456");
+ error_test("123x");
+ error_test("x456");
+ error_test("123x456x");
+ error_test("x123x456");
+ error_test("x123x456x");
+ error_test("0X123");
+
+ // Decimal-resolutions may not be quoted
+ error_test("'123x456'");
+ error_test("\"123x456\"");
+
+ // Decimal-resolutions may not be negative
+ error_test("-123x456");
+ error_test("123x-456");
+ error_test("-123x-456");
+ error_test("-0x456");
+
+ // Decimal-integers may not contain junk or leading/trailing spaces
+ error_test("12.3x456");
+ error_test(" 123x456");
+ error_test("123 x456");
+ error_test("123x456 ");
+ error_test("123x 456");
+
+ // Decimal-integers may not exceed 20 characters
+ error_test("000000000000000000001x456");
+ error_test("123x000000000000000000001");
+
+ // Test some valid inputs
+ ok_test("00000000000000000001x456",
+ types::DecimalResolution{.width = 1, .height = 456});
+ ok_test("0x0", types::DecimalResolution{.width = 0, .height = 0});
+ ok_test("1x1", types::DecimalResolution{.width = 1, .height = 1});
+ ok_test("123x456", types::DecimalResolution{.width = 123, .height = 456});
+ ok_test("123x0", types::DecimalResolution{.width = 123, .height = 0});
+ ok_test("0x123", types::DecimalResolution{.width = 0, .height = 123});
+
+ // Test max supported value
+ ok_test("18446744073709551615x18446744073709551615",
+ types::DecimalResolution{.width = 18446744073709551615u,
+ .height = 18446744073709551615u});
+ error_test("18446744073709551616x18446744073709551616");
+}
+
+TEST(HlsTypesTest, ParseByteRangeExpression) {
+ const auto error_test = [](base::StringPiece input,
+ const base::Location& from =
+ base::Location::Current()) {
+ auto result = types::ByteRangeExpression::Parse(
+ SourceString::CreateForTesting(input));
+ ASSERT_TRUE(result.has_error()) << from.ToString();
+ auto error = std::move(result).error();
+ EXPECT_EQ(error.code(), ParseStatusCode::kFailedToParseByteRange)
+ << from.ToString();
+ };
+ const auto ok_test =
+ [](base::StringPiece input, types::ByteRangeExpression expected,
+ const base::Location& from = base::Location::Current()) {
+ auto result = types::ByteRangeExpression::Parse(
+ SourceString::CreateForTesting(input));
+ ASSERT_TRUE(result.has_value()) << from.ToString();
+ auto value = std::move(result).value();
+ EXPECT_EQ(value.length, expected.length) << from.ToString();
+ EXPECT_EQ(value.offset, expected.offset) << from.ToString();
+ };
+
+ // Empty string is not allowed
+ error_test("");
+
+ // Length must be a valid DecimalInteger
+ error_test("-1");
+ error_test(" 1");
+ error_test("1 ");
+ error_test(" 1 ");
+ error_test("1.2");
+ error_test("one");
+ error_test("{$length}");
+ error_test("@34");
+
+ // Offset must be a valid DecimalInteger
+ error_test("12@");
+ error_test("12@-3");
+ error_test("12@ 3");
+ error_test("12@3 ");
+ error_test("12@ 3 ");
+ error_test("12@3.4");
+ error_test("12@three");
+ error_test("12@{$offset}");
+ error_test("12@34@");
+
+ // ByteRange may not be quoted
+ error_test("'12@34'");
+ error_test("\"12@34\"");
+
+ // Test some valid inputs
+ ok_test("0",
+ types::ByteRangeExpression{.length = 0, .offset = absl::nullopt});
+ ok_test("12",
+ types::ByteRangeExpression{.length = 12, .offset = absl::nullopt});
+ ok_test("12@0", types::ByteRangeExpression{.length = 12, .offset = 0});
+ ok_test("12@34", types::ByteRangeExpression{.length = 12, .offset = 34});
+ ok_test("0@34", types::ByteRangeExpression{.length = 0, .offset = 34});
+ ok_test("0@0", types::ByteRangeExpression{.length = 0, .offset = 0});
+
+ // Test max supported values. These are valid ByteRangeExpressions, but not
+ // necessarily valid ByteRanges.
+ ok_test(
+ "18446744073709551615@0",
+ types::ByteRangeExpression{.length = 18446744073709551615u, .offset = 0});
+ error_test("18446744073709551616@0");
+ ok_test(
+ "0@18446744073709551615",
+ types::ByteRangeExpression{.length = 0, .offset = 18446744073709551615u});
+ error_test("0@18446744073709551616");
+ ok_test("18446744073709551615@18446744073709551615",
+ types::ByteRangeExpression{.length = 18446744073709551615u,
+ .offset = 18446744073709551615u});
+ error_test("18446744073709551616@18446744073709551615");
+ error_test("18446744073709551615@18446744073709551616");
+ error_test("18446744073709551616@18446744073709551616");
+}
+
+TEST(HlsTypesTest, ValidateByteRange) {
+ // Any non-empty range where `ByteRange::GetEnd()` doesn't overflow
+ // `DecimalInteger` is valid.
+ constexpr auto ok_test =
+ [](types::DecimalInteger length, types::DecimalInteger offset,
+ const base::Location& from = base::Location::Current()) {
+ const auto result = types::ByteRange::Validate(length, offset);
+ EXPECT_TRUE(result.has_value()) << from.ToString();
+ };
+ constexpr auto error_test =
+ [](types::DecimalInteger length, types::DecimalInteger offset,
+ const base::Location& from = base::Location::Current()) {
+ const auto result = types::ByteRange::Validate(length, offset);
+ EXPECT_FALSE(result.has_value()) << from.ToString();
+ };
+
+ ok_test(1, 1);
+ ok_test(1, 0);
+
+ // Empty range is not allowed
+ error_test(0, 0);
+ error_test(0, 1);
+ error_test(0, 18446744073709551615u);
+
+ // Overflowing range is not allowed
+ ok_test(18446744073709551615u, 0);
+ error_test(18446744073709551615u, 1);
+ error_test(1, 18446744073709551615u);
+ error_test(18446744073709551615u, 18446744073709551615u);
+ error_test(9223372036854775808u, 9223372036854775808u);
+ ok_test(9223372036854775808u, 9223372036854775807u);
+ ok_test(9223372036854775807u, 9223372036854775808u);
+}
+
} // namespace media::hls
diff --git a/chromium/media/formats/hls/variable_dictionary_unittest.cc b/chromium/media/formats/hls/variable_dictionary_unittest.cc
index 8f46a61b03a..c64ced4a4a9 100644
--- a/chromium/media/formats/hls/variable_dictionary_unittest.cc
+++ b/chromium/media/formats/hls/variable_dictionary_unittest.cc
@@ -4,20 +4,21 @@
#include "media/formats/hls/variable_dictionary.h"
+#include <utility>
+
#include "base/location.h"
+#include "base/strings/string_piece.h"
+#include "media/formats/hls/parse_status.h"
#include "media/formats/hls/source_string.h"
+#include "media/formats/hls/test_util.h"
#include "media/formats/hls/types.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
namespace media::hls {
namespace {
-types::VariableName CreateVarName(base::StringPiece name) {
- return types::VariableName::Parse(SourceString::CreateForTesting(name))
- .value();
-}
-
VariableDictionary CreateBasicDictionary(
const base::Location& from = base::Location::Current()) {
VariableDictionary dict;
@@ -53,7 +54,7 @@ void ErrorTest(const VariableDictionary& dict,
} // namespace
-TEST(HlsVariableDictionary, BasicSubstitution) {
+TEST(HlsVariableDictionaryTest, BasicSubstitution) {
VariableDictionary dict = CreateBasicDictionary();
OkTest(dict, "The NAME's {$NAME}, {$_0THER-1dent} {$NAME}. Agent {$IDENT}",
"The NAME's bond, {$james} bond. Agent 007");
@@ -61,7 +62,7 @@ TEST(HlsVariableDictionary, BasicSubstitution) {
"This $tring {has} ${no} v{}{}ar}}s");
}
-TEST(HlsVariableDictionary, VariableUndefined) {
+TEST(HlsVariableDictionaryTest, VariableUndefined) {
VariableDictionary dict;
// Names are case-sensitive
@@ -76,7 +77,7 @@ TEST(HlsVariableDictionary, VariableUndefined) {
ParseStatusCode::kVariableUndefined);
}
-TEST(HlsVariableDictionary, RedefinitionNotAllowed) {
+TEST(HlsVariableDictionaryTest, RedefinitionNotAllowed) {
VariableDictionary dict;
EXPECT_TRUE(dict.Insert(CreateVarName("TEST"), "FOO"));
EXPECT_EQ(dict.Find(CreateVarName("TEST")),
@@ -104,7 +105,7 @@ TEST(HlsVariableDictionary, RedefinitionNotAllowed) {
absl::make_optional<base::StringPiece>("FOO"));
}
-TEST(HlsVariableDictionary, IgnoreInvalidRefSequence) {
+TEST(HlsVariableDictionaryTest, IgnoreInvalidRefSequence) {
auto dict = CreateBasicDictionary();
// Variable refs with invalid variable names are ignored
@@ -129,7 +130,7 @@ TEST(HlsVariableDictionary, IgnoreInvalidRefSequence) {
OkTest(dict, "http://{$ {$NAME}}.com", "http://{$ bond}.com");
}
-TEST(HlsVariableDictionary, ExplosiveVariableDefs) {
+TEST(HlsVariableDictionaryTest, ExplosiveVariableDefs) {
// Variable substitution is by design not recursive
VariableDictionary dict;
EXPECT_TRUE(dict.Insert(CreateVarName("LOL1"), "LOLLOLLOL"));
diff --git a/chromium/media/formats/hls/variant_stream.cc b/chromium/media/formats/hls/variant_stream.cc
new file mode 100644
index 00000000000..3eaa46f4334
--- /dev/null
+++ b/chromium/media/formats/hls/variant_stream.cc
@@ -0,0 +1,31 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/hls/variant_stream.h"
+
+namespace media::hls {
+
+VariantStream::VariantStream(
+ GURL primary_rendition_uri,
+ types::DecimalInteger bandwidth,
+ absl::optional<types::DecimalInteger> average_bandwidth,
+ absl::optional<types::DecimalFloatingPoint> score,
+ absl::optional<std::string> codecs,
+ absl::optional<types::DecimalResolution> resolution,
+ absl::optional<types::DecimalFloatingPoint> frame_rate)
+ : primary_rendition_uri_(std::move(primary_rendition_uri)),
+ bandwidth_(bandwidth),
+ average_bandwidth_(average_bandwidth),
+ score_(score),
+ codecs_(std::move(codecs)),
+ resolution_(resolution),
+ frame_rate_(frame_rate) {}
+
+VariantStream::VariantStream(VariantStream&&) = default;
+
+VariantStream::~VariantStream() = default;
+
+VariantStream& VariantStream::operator=(VariantStream&&) = default;
+
+} // namespace media::hls
diff --git a/chromium/media/formats/hls/variant_stream.h b/chromium/media/formats/hls/variant_stream.h
new file mode 100644
index 00000000000..a7936a4107a
--- /dev/null
+++ b/chromium/media/formats/hls/variant_stream.h
@@ -0,0 +1,106 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_HLS_VARIANT_STREAM_H_
+#define MEDIA_FORMATS_HLS_VARIANT_STREAM_H_
+
+#include "base/strings/string_piece.h"
+#include "media/base/media_export.h"
+#include "media/formats/hls/types.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#include "url/gurl.h"
+
+namespace media::hls {
+
+class MEDIA_EXPORT VariantStream {
+ public:
+ VariantStream(GURL primary_rendition_uri,
+ types::DecimalInteger bandwidth,
+ absl::optional<types::DecimalInteger> average_bandwidth,
+ absl::optional<types::DecimalFloatingPoint> score,
+ absl::optional<std::string> codecs,
+ absl::optional<types::DecimalResolution> resolution,
+ absl::optional<types::DecimalFloatingPoint> frame_rate);
+ VariantStream(const VariantStream&) = delete;
+ VariantStream(VariantStream&&);
+ ~VariantStream();
+ VariantStream& operator=(const VariantStream&) = delete;
+ VariantStream& operator=(VariantStream&&);
+
+ // The URI of the rendition provided by the playlist for clients that do not
+ // support multiple renditions.
+ const GURL& GetPrimaryRenditionUri() const { return primary_rendition_uri_; }
+
+ // Returns the peak segment bitrate (bits/s) of this variant stream.
+ //
+ // "If all the Media Segments in a Variant Stream have already been
+ // created, the BANDWIDTH value MUST be the largest sum of peak
+ // segment bit rates that is produced by any playable combination of
+ // Renditions. (For a Variant Stream with a single Media Playlist,
+ // this is just the peak segment bit rate of that Media Playlist.)
+ // An inaccurate value can cause playback stalls or prevent clients
+ // from playing the variant.
+ //
+ // If the Multivariant Playlist is to be made available before all
+ // Media Segments in the presentation have been encoded, the
+ // BANDWIDTH value SHOULD be the BANDWIDTH value of a representative
+ // period of similar content, encoded using the same settings."
+ // https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis#:~:text=attributes%20are%20defined%3A-,BANDWIDTH,-The%20value%20is
+ types::DecimalInteger GetBandwidth() const { return bandwidth_; }
+
+ // This represents the average segment bitrate of this variant stream.
+ //
+ // "If all the Media Segments in a Variant Stream have already been
+ // created, the AVERAGE-BANDWIDTH value MUST be the largest sum of
+ // average segment bit rates that is produced by any playable
+ // combination of Renditions. (For a Variant Stream with a single
+ // Media Playlist, this is just the average segment bit rate of that
+ // Media Playlist.) An inaccurate value can cause playback stalls or
+ // prevent clients from playing the variant.
+ //
+ // If the Multivariant Playlist is to be made available before all
+ // Media Segments in the presentation have been encoded, the AVERAGE-
+ // BANDWIDTH value SHOULD be the AVERAGE-BANDWIDTH value of a
+ // representative period of similar content, encoded using the same
+ // settings."
+ // https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis#:~:text=the%20BANDWIDTH%20attribute.-,AVERAGE%2DBANDWIDTH,-The%20value%20is
+ absl::optional<types::DecimalInteger> GetAverageBandwidth() const {
+ return average_bandwidth_;
+ }
+
+ // A metric computed by the HLS server to provide a relative measure of
+ // desirability for each variant. A higher score indicates that this variant
+ // should be preferred over other variants with lower scores.
+ absl::optional<types::DecimalFloatingPoint> GetScore() const {
+ return score_;
+ }
+
+ // A comma-separated list of media sample formats present in one or more
+ // renditions of this variant.
+ const absl::optional<std::string>& GetCodecs() const { return codecs_; }
+
+ // A value representing the optimal pixel resolution at which to display all
+ // video in this variant stream.
+ const absl::optional<types::DecimalResolution> GetResolution() const {
+ return resolution_;
+ }
+
+ // This represents the maximum framerate for all video in this variant stream.
+ const absl::optional<types::DecimalFloatingPoint> GetFrameRate() const {
+ return frame_rate_;
+ }
+
+ private:
+ GURL primary_rendition_uri_;
+ types::DecimalInteger bandwidth_;
+ absl::optional<types::DecimalInteger> average_bandwidth_;
+ absl::optional<types::DecimalFloatingPoint> score_;
+ absl::optional<std::string> codecs_;
+ absl::optional<types::DecimalResolution> resolution_;
+ absl::optional<types::DecimalFloatingPoint> frame_rate_;
+};
+
+} // namespace media::hls
+
+#endif  // MEDIA_FORMATS_HLS_VARIANT_STREAM_H_
diff --git a/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc b/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
index decb1fcb243..27734436a31 100644
--- a/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
+++ b/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
@@ -280,15 +280,15 @@ class Mp2tStreamParserTest : public testing::Test {
bool OnNewBuffers(const StreamParser::BufferQueueMap& buffer_queue_map) {
EXPECT_GT(config_count_, 0);
// Ensure that track ids are properly assigned on all emitted buffers.
- for (const auto& it : buffer_queue_map) {
- DVLOG(3) << "Buffers for track_id=" << it.first;
- for (const auto& buf : it.second) {
+ for (const auto& [track_id, buffer] : buffer_queue_map) {
+ DVLOG(3) << "Buffers for track_id=" << track_id;
+ for (const auto& buf : buffer) {
DVLOG(3) << " track_id=" << buf->track_id()
<< ", size=" << buf->data_size()
<< ", pts=" << buf->timestamp().InSecondsF()
<< ", dts=" << buf->GetDecodeTimestamp().InSecondsF()
<< ", dur=" << buf->duration().InSecondsF();
- EXPECT_EQ(it.first, buf->track_id());
+ EXPECT_EQ(track_id, buf->track_id());
}
}
diff --git a/chromium/media/formats/mp2t/ts_section_pmt.cc b/chromium/media/formats/mp2t/ts_section_pmt.cc
index af9129f764e..1746bb8ceb1 100644
--- a/chromium/media/formats/mp2t/ts_section_pmt.cc
+++ b/chromium/media/formats/mp2t/ts_section_pmt.cc
@@ -105,8 +105,10 @@ bool TsSectionPmt::ParsePsiSection(BitReader* bit_reader) {
RCHECK(bit_reader->ReadBits(32, &crc32));
// Once the PMT has been proved to be correct, register the PIDs.
- for (const auto& it : pid_map)
- register_pes_cb_.Run(it.first, it.second.first, it.second.second);
+ for (const auto& [pid_es, stream_info] : pid_map) {
+ const auto& [stream_type, descriptors] = stream_info;
+ register_pes_cb_.Run(pid_es, stream_type, descriptors);
+ }
return true;
}
diff --git a/chromium/media/formats/mp4/avc.cc b/chromium/media/formats/mp4/avc.cc
index a91f43b237f..c3a3b29ea12 100644
--- a/chromium/media/formats/mp4/avc.cc
+++ b/chromium/media/formats/mp4/avc.cc
@@ -17,8 +17,8 @@
namespace media {
namespace mp4 {
-static const uint8_t kAnnexBStartCode[] = {0, 0, 0, 1};
-static const int kAnnexBStartCodeSize = 4;
+static constexpr uint8_t kAnnexBStartCode[] = {0, 0, 0, 1};
+static constexpr int kAnnexBStartCodeSize = 4;
static bool ConvertAVCToAnnexBInPlaceForLengthSize4(std::vector<uint8_t>* buf) {
const size_t kLengthSize = 4;
diff --git a/chromium/media/formats/mp4/box_definitions.cc b/chromium/media/formats/mp4/box_definitions.cc
index 4e99b0f822c..69299b3d102 100644
--- a/chromium/media/formats/mp4/box_definitions.cc
+++ b/chromium/media/formats/mp4/box_definitions.cc
@@ -1126,6 +1126,9 @@ bool VideoSampleEntry::Parse(BoxReader* reader) {
RCHECK(reader->ReadChild(hevcConfig.get()));
video_codec = VideoCodec::kHEVC;
video_codec_profile = hevcConfig->GetVideoProfile();
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
+ video_color_space = hevcConfig->GetColorSpace();
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
frame_bitstream_converter =
base::MakeRefCounted<HEVCBitstreamConverter>(std::move(hevcConfig));
#if BUILDFLAG(ENABLE_PLATFORM_DOLBY_VISION)
@@ -1166,6 +1169,9 @@ bool VideoSampleEntry::Parse(BoxReader* reader) {
std::unique_ptr<HEVCDecoderConfigurationRecord> hevcConfig(
new HEVCDecoderConfigurationRecord());
RCHECK(reader->ReadChild(hevcConfig.get()));
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
+ video_color_space = hevcConfig->GetColorSpace();
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
frame_bitstream_converter =
base::MakeRefCounted<HEVCBitstreamConverter>(std::move(hevcConfig));
DVLOG(2) << __func__ << " reading DolbyVisionConfiguration (dvcC/dvvC)";
diff --git a/chromium/media/formats/mp4/hevc.cc b/chromium/media/formats/mp4/hevc.cc
index d8c2efd83f0..20fbb8c52af 100644
--- a/chromium/media/formats/mp4/hevc.cc
+++ b/chromium/media/formats/mp4/hevc.cc
@@ -15,11 +15,18 @@
#include "media/formats/mp4/avc.h"
#include "media/formats/mp4/box_definitions.h"
#include "media/formats/mp4/box_reader.h"
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
+#include "media/video/h265_parser.h"
+#else
#include "media/video/h265_nalu_parser.h"
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
namespace media {
namespace mp4 {
+static constexpr uint8_t kAnnexBStartCode[] = {0, 0, 0, 1};
+static constexpr int kAnnexBStartCodeSize = 4;
+
HEVCDecoderConfigurationRecord::HEVCDecoderConfigurationRecord()
: configurationVersion(0),
general_profile_space(0),
@@ -123,7 +130,7 @@ bool HEVCDecoderConfigurationRecord::ParseInternal(BufferReader* reader,
VideoCodecProfile HEVCDecoderConfigurationRecord::GetVideoProfile() const {
// The values of general_profile_idc are taken from the HEVC standard, see
- // the latest https://www.itu.int/rec/T-REC-H.265/en section A.3
+ // the latest https://www.itu.int/rec/T-REC-H.265/en
switch (general_profile_idc) {
case 1:
return HEVCPROFILE_MAIN;
@@ -131,12 +138,66 @@ VideoCodecProfile HEVCDecoderConfigurationRecord::GetVideoProfile() const {
return HEVCPROFILE_MAIN10;
case 3:
return HEVCPROFILE_MAIN_STILL_PICTURE;
+ case 4:
+ return HEVCPROFILE_REXT;
+ case 5:
+ return HEVCPROFILE_HIGH_THROUGHPUT;
+ case 6:
+ return HEVCPROFILE_MULTIVIEW_MAIN;
+ case 7:
+ return HEVCPROFILE_SCALABLE_MAIN;
+ case 8:
+ return HEVCPROFILE_3D_MAIN;
+ case 9:
+ return HEVCPROFILE_SCREEN_EXTENDED;
+ case 10:
+ return HEVCPROFILE_SCALABLE_REXT;
+ case 11:
+ return HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED;
}
return VIDEO_CODEC_PROFILE_UNKNOWN;
}
-static const uint8_t kAnnexBStartCode[] = {0, 0, 0, 1};
-static const int kAnnexBStartCodeSize = 4;
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
+VideoColorSpace HEVCDecoderConfigurationRecord::GetColorSpace() {
+ if (!arrays.size()) {
+ DVLOG(1) << "HVCCNALArray not found, fallback to default colorspace";
+ return VideoColorSpace();
+ }
+
+ std::vector<uint8_t> param_sets;
+ if (!HEVC::ConvertConfigToAnnexB(*this, &param_sets))
+ return VideoColorSpace();
+
+ H265Parser parser;
+ H265NALU nalu;
+ parser.SetStream(param_sets.data(), param_sets.size());
+ while (true) {
+ H265Parser::Result result = parser.AdvanceToNextNALU(&nalu);
+
+ if (result != H265Parser::kOk)
+ return VideoColorSpace();
+
+ switch (nalu.nal_unit_type) {
+ case H265NALU::SPS_NUT: {
+ int sps_id = -1;
+ result = parser.ParseSPS(&sps_id);
+ if (result != H265Parser::kOk) {
+ DVLOG(1) << "Could not parse SPS, fallback to default colorspace";
+ return VideoColorSpace();
+ }
+
+ const H265SPS* sps = parser.GetSPS(sps_id);
+ DCHECK(sps);
+ return sps->GetColorSpace();
+ }
+ default:
+ break;
+ }
+ }
+ NOTREACHED();
+}
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
// static
bool HEVC::InsertParamSetsAnnexB(
diff --git a/chromium/media/formats/mp4/hevc.h b/chromium/media/formats/mp4/hevc.h
index 7ec661ab578..c5747a96879 100644
--- a/chromium/media/formats/mp4/hevc.h
+++ b/chromium/media/formats/mp4/hevc.h
@@ -62,6 +62,9 @@ struct MEDIA_EXPORT HEVCDecoderConfigurationRecord : Box {
std::vector<HVCCNALArray> arrays;
VideoCodecProfile GetVideoProfile() const;
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
+ VideoColorSpace GetColorSpace();
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
private:
bool ParseInternal(BufferReader* reader, MediaLog* media_log);
diff --git a/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc b/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
index fedb1ed5e5f..9ffeb0ab0ee 100644
--- a/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
+++ b/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
@@ -158,22 +158,22 @@ class MP4StreamParserTest : public testing::Test {
bool NewBuffersF(const StreamParser::BufferQueueMap& buffer_queue_map) {
DecodeTimestamp lowest_end_dts = kNoDecodeTimestamp;
- for (const auto& it : buffer_queue_map) {
- DVLOG(3) << "Buffers for track_id=" << it.first;
- DCHECK(!it.second.empty());
+ for (const auto& [track_id, buffer_queue] : buffer_queue_map) {
+ DVLOG(3) << "Buffers for track_id=" << track_id;
+ DCHECK(!buffer_queue.empty());
if (lowest_end_dts == kNoDecodeTimestamp ||
- lowest_end_dts > it.second.back()->GetDecodeTimestamp())
- lowest_end_dts = it.second.back()->GetDecodeTimestamp();
+ lowest_end_dts > buffer_queue.back()->GetDecodeTimestamp())
+ lowest_end_dts = buffer_queue.back()->GetDecodeTimestamp();
- for (const auto& buf : it.second) {
+ for (const auto& buf : buffer_queue) {
DVLOG(3) << " track_id=" << buf->track_id()
<< ", size=" << buf->data_size()
<< ", pts=" << buf->timestamp().InSecondsF()
<< ", dts=" << buf->GetDecodeTimestamp().InSecondsF()
<< ", dur=" << buf->duration().InSecondsF();
// Ensure that track ids are properly assigned on all emitted buffers.
- EXPECT_EQ(it.first, buf->track_id());
+ EXPECT_EQ(track_id, buf->track_id());
// Let single-track tests verify the sequence of keyframes/nonkeyframes.
if (verifying_keyframeness_sequence_) {
diff --git a/chromium/media/formats/mp4/nalu_test_helper.cc b/chromium/media/formats/mp4/nalu_test_helper.cc
index 4949f032b5e..ea0f2d58500 100644
--- a/chromium/media/formats/mp4/nalu_test_helper.cc
+++ b/chromium/media/formats/mp4/nalu_test_helper.cc
@@ -10,7 +10,7 @@
#include "media/video/h264_parser.h"
#if BUILDFLAG(ENABLE_PLATFORM_HEVC)
-#include "media/video/h265_parser.h"
+#include "media/video/h265_nalu_parser.h"
#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC)
namespace media {
diff --git a/chromium/media/formats/webm/webm_cluster_parser_unittest.cc b/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
index 13b24dbfc98..ab0f2eb135c 100644
--- a/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
+++ b/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
@@ -176,8 +176,8 @@ bool VerifyBuffers(const StreamParser::BufferQueueMap& buffer_queue_map,
const BlockInfo* block_info,
int block_count) {
int buffer_count = 0;
- for (const auto& it : buffer_queue_map)
- buffer_count += it.second.size();
+ for (const auto& [track_id, buffer_queue] : buffer_queue_map)
+ buffer_count += buffer_queue.size();
if (block_count != buffer_count) {
DVLOG(1) << __func__ << " : block_count (" << block_count
<< ") mismatches buffer_count (" << buffer_count << ")";
diff --git a/chromium/media/fuchsia/DIR_METADATA b/chromium/media/fuchsia/DIR_METADATA
new file mode 100644
index 00000000000..210aa6a954b
--- /dev/null
+++ b/chromium/media/fuchsia/DIR_METADATA
@@ -0,0 +1 @@
+mixins: "//build/fuchsia/COMMON_METADATA"
diff --git a/chromium/media/fuchsia/audio/fuchsia_audio_output_device_test.cc b/chromium/media/fuchsia/audio/fuchsia_audio_output_device_test.cc
index 66df5b17fd9..d7f80d327a6 100644
--- a/chromium/media/fuchsia/audio/fuchsia_audio_output_device_test.cc
+++ b/chromium/media/fuchsia/audio/fuchsia_audio_output_device_test.cc
@@ -75,7 +75,14 @@ class FuchsiaAudioOutputDeviceTest : public testing::Test {
std::move(audio_consumer), base::ThreadTaskRunnerHandle::Get());
}
- ~FuchsiaAudioOutputDeviceTest() override { output_device_->Stop(); }
+ ~FuchsiaAudioOutputDeviceTest() override {
+ // Stop() must be called before destruction to release resources.
+ output_device_->Stop();
+ // FuchsiaAudioOutputDevice::Stop() posts a task to run StopOnAudioThread()
+ // on `task_runner_`. RunUntilIdle() ensures the request to stop is
+ // fulfilled.
+ task_environment_.RunUntilIdle();
+ }
protected:
void Initialize() {
diff --git a/chromium/media/fuchsia/common/vmo_buffer_writer_queue.cc b/chromium/media/fuchsia/common/vmo_buffer_writer_queue.cc
index 902d19cb5ab..cd25d06ad30 100644
--- a/chromium/media/fuchsia/common/vmo_buffer_writer_queue.cc
+++ b/chromium/media/fuchsia/common/vmo_buffer_writer_queue.cc
@@ -13,33 +13,29 @@
namespace media {
-struct VmoBufferWriterQueue::PendingBuffer {
- PendingBuffer(scoped_refptr<DecoderBuffer> buffer) : buffer(buffer) {
- DCHECK(buffer);
- }
- ~PendingBuffer() = default;
+VmoBufferWriterQueue::PendingBuffer::PendingBuffer(
+ scoped_refptr<DecoderBuffer> buffer)
+ : buffer(buffer) {
+ DCHECK(buffer);
+}
- PendingBuffer(PendingBuffer&& other) = default;
- PendingBuffer& operator=(PendingBuffer&& other) = default;
+VmoBufferWriterQueue::PendingBuffer::~PendingBuffer() = default;
- const uint8_t* data() const { return buffer->data() + buffer_pos; }
- size_t bytes_left() const { return buffer->data_size() - buffer_pos; }
- void AdvanceCurrentPos(size_t bytes) {
- DCHECK_LE(bytes, bytes_left());
- buffer_pos += bytes;
- }
+VmoBufferWriterQueue::PendingBuffer::PendingBuffer(PendingBuffer&& other) =
+ default;
- scoped_refptr<DecoderBuffer> buffer;
- size_t buffer_pos = 0;
+const uint8_t* VmoBufferWriterQueue::PendingBuffer::data() const {
+ return buffer->data() + buffer_pos;
+}
- // Set to true when the consumer has finished processing the buffer and it can
- // be released.
- bool is_complete = false;
+size_t VmoBufferWriterQueue::PendingBuffer::bytes_left() const {
+ return buffer->data_size() - buffer_pos;
+}
- // Index of the last buffer in the sysmem buffer collection that was used to
- // send this input buffer. Should be set only when |bytes_left()==0|.
- absl::optional<size_t> tail_sysmem_buffer_index;
-};
+void VmoBufferWriterQueue::PendingBuffer::AdvanceCurrentPos(size_t bytes) {
+ DCHECK_LE(bytes, bytes_left());
+ buffer_pos += bytes;
+}
VmoBufferWriterQueue::VmoBufferWriterQueue() {
DETACH_FROM_THREAD(thread_checker_);
diff --git a/chromium/media/fuchsia/common/vmo_buffer_writer_queue.h b/chromium/media/fuchsia/common/vmo_buffer_writer_queue.h
index d8c701dbb2a..2969297e877 100644
--- a/chromium/media/fuchsia/common/vmo_buffer_writer_queue.h
+++ b/chromium/media/fuchsia/common/vmo_buffer_writer_queue.h
@@ -82,7 +82,29 @@ class MEDIA_EXPORT VmoBufferWriterQueue {
bool IsBlocked() const;
private:
- struct PendingBuffer;
+ struct PendingBuffer {
+ PendingBuffer(scoped_refptr<DecoderBuffer> buffer);
+ ~PendingBuffer();
+
+ PendingBuffer(PendingBuffer&& other);
+ PendingBuffer& operator=(PendingBuffer&& other) = default;
+
+ const uint8_t* data() const;
+ size_t bytes_left() const;
+ void AdvanceCurrentPos(size_t bytes);
+
+ scoped_refptr<DecoderBuffer> buffer;
+ size_t buffer_pos = 0;
+
+ // Set to true when the consumer has finished processing the buffer and it
+ // can be released.
+ bool is_complete = false;
+
+ // Index of the last buffer in the sysmem buffer collection that was used to
+ // send this input buffer. Should be set only when |bytes_left()==0|.
+ absl::optional<size_t> tail_sysmem_buffer_index;
+ };
+
class SysmemBuffer;
// Pumps pending buffers to SendPacketCB.
diff --git a/chromium/media/fuchsia/mojom/BUILD.gn b/chromium/media/fuchsia/mojom/BUILD.gn
index 3918575472a..b6f18091352 100644
--- a/chromium/media/fuchsia/mojom/BUILD.gn
+++ b/chromium/media/fuchsia/mojom/BUILD.gn
@@ -6,6 +6,7 @@ import("//mojo/public/tools/bindings/mojom.gni")
mojom("fuchsia_media_resource_provider") {
sources = [ "fuchsia_media_resource_provider.mojom" ]
+ deps = [ "//media/mojo/mojom" ]
export_class_attribute_blink = "BLINK_PLATFORM_EXPORT"
export_define_blink = "BLINK_PLATFORM_IMPLEMENTATION=1"
@@ -18,10 +19,16 @@ mojom("fuchsia_media_resource_provider") {
cpp = "::fidl::InterfaceRequest<::fuchsia::media::drm::ContentDecryptionModule>"
move_only = true
},
+ {
+ mojom = "media.mojom.StreamProcessorRequest"
+ cpp = "::fidl::InterfaceRequest<::fuchsia::media::StreamProcessor>"
+ move_only = true
+ },
]
traits_headers = [ "fuchsia_media_resource_provider_mojom_traits.h" ]
traits_public_deps = [
- "//fuchsia/mojom:traits",
+ "//mojo/public/cpp/base/fuchsia:traits",
+ "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media.drm",
]
}
diff --git a/chromium/media/fuchsia/mojom/DEPS b/chromium/media/fuchsia/mojom/DEPS
index 207080cb397..5222e05d18b 100644
--- a/chromium/media/fuchsia/mojom/DEPS
+++ b/chromium/media/fuchsia/mojom/DEPS
@@ -1,5 +1,5 @@
specific_include_rules = {
"fuchsia_.*_provider_mojom_traits\.h": [
- "+fuchsia/mojom/fidl_interface_request_mojom_traits.h",
+ "+mojo/public/cpp/base/fuchsia/fidl_interface_request_mojom_traits.h",
],
}
diff --git a/chromium/media/fuchsia/mojom/fuchsia_media_resource_provider.mojom b/chromium/media/fuchsia/mojom/fuchsia_media_resource_provider.mojom
index a91ba32b77b..6544c66df53 100644
--- a/chromium/media/fuchsia/mojom/fuchsia_media_resource_provider.mojom
+++ b/chromium/media/fuchsia/mojom/fuchsia_media_resource_provider.mojom
@@ -4,17 +4,29 @@
module media.mojom;
+import "media/mojo/mojom/media_types.mojom";
+
// Mojo struct for
-// fidl::InterfaceRequest<fuchsia::media::drm::ContentDecryptionModule>.
+// `fidl::InterfaceRequest<fuchsia::media::drm::ContentDecryptionModule>`.
struct CdmRequest {
handle<platform> request;
};
+// Mojo struct for `fidl::InterfaceRequest<fuchsia::media::StreamProcessor>`.
+struct StreamProcessorRequest {
+ handle<platform> request;
+};
+
// Interface used by the renderer to connect to CDM and mediacodec resources.
// Instances are document-scoped.
interface FuchsiaMediaResourceProvider {
// Create connection to fuchsia::media::drm::ContentDecryptionModule for
- // |key_system|. Implementation should make sure the persistent storage is
+ // `key_system`. Implementation should make sure the persistent storage is
// isolated per web origin.
CreateCdm(string key_system, CdmRequest cdm_request);
+
+ // Create connection to fuchsia::media::StreamProcessor for the specified
+ // `codec`.
+ CreateVideoDecoder(VideoCodec codec, bool secure_memory,
+ StreamProcessorRequest stream_processor_request);
};
diff --git a/chromium/media/fuchsia/mojom/fuchsia_media_resource_provider_mojom_traits.h b/chromium/media/fuchsia/mojom/fuchsia_media_resource_provider_mojom_traits.h
index 30b18a09c1e..49475a5ccb3 100644
--- a/chromium/media/fuchsia/mojom/fuchsia_media_resource_provider_mojom_traits.h
+++ b/chromium/media/fuchsia/mojom/fuchsia_media_resource_provider_mojom_traits.h
@@ -5,9 +5,10 @@
#ifndef MEDIA_FUCHSIA_MOJOM_FUCHSIA_MEDIA_RESOURCE_PROVIDER_MOJOM_TRAITS_H_
#define MEDIA_FUCHSIA_MOJOM_FUCHSIA_MEDIA_RESOURCE_PROVIDER_MOJOM_TRAITS_H_
+#include <fuchsia/media/cpp/fidl.h>
#include <fuchsia/media/drm/cpp/fidl.h>
-#include "fuchsia/mojom/fidl_interface_request_mojom_traits.h"
+#include "mojo/public/cpp/base/fuchsia/fidl_interface_request_mojom_traits.h"
namespace mojo {
@@ -19,6 +20,13 @@ struct StructTraits<
media::mojom::CdmRequestDataView,
fuchsia::media::drm::ContentDecryptionModule> {};
+template <>
+struct StructTraits<media::mojom::StreamProcessorRequestDataView,
+ fidl::InterfaceRequest<fuchsia::media::StreamProcessor>>
+ : public FidlInterfaceRequestStructTraits<
+ media::mojom::StreamProcessorRequestDataView,
+ fuchsia::media::StreamProcessor> {};
+
} // namespace mojo
#endif // MEDIA_FUCHSIA_MOJOM_FUCHSIA_MEDIA_RESOURCE_PROVIDER_MOJOM_TRAITS_H_
diff --git a/chromium/media/fuchsia/video/BUILD.gn b/chromium/media/fuchsia/video/BUILD.gn
new file mode 100644
index 00000000000..addeb301c99
--- /dev/null
+++ b/chromium/media/fuchsia/video/BUILD.gn
@@ -0,0 +1,52 @@
+# Copyright 2022 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+source_set("video") {
+ visibility = [
+ ":unittests",
+ "//content/renderer/*",
+ ]
+ sources = [
+ "fuchsia_decoder_factory.cc",
+ "fuchsia_decoder_factory.h",
+ "fuchsia_video_decoder.cc",
+ "fuchsia_video_decoder.h",
+ ]
+ public_deps = [ "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media" ]
+ deps = [
+ "//components/viz/common",
+ "//gpu/command_buffer/client",
+ "//gpu/command_buffer/common",
+ "//gpu/ipc/common",
+ "//media/fuchsia/cdm",
+ "//media/fuchsia/common",
+ "//media/fuchsia/mojom:fuchsia_media_resource_provider",
+ "//third_party/blink/public/common",
+ "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.mediacodec",
+ "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sysmem",
+ "//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
+ "//ui/ozone",
+ ]
+}
+
+source_set("unittests") {
+ testonly = true
+ sources = [ "fuchsia_video_decoder_unittest.cc" ]
+ deps = [
+ ":video",
+ "//base/test:test_support",
+ "//components/viz/common",
+ "//components/viz/test:test_support",
+ "//gpu/command_buffer/client",
+ "//gpu/config",
+ "//media:test_support",
+ "//media/fuchsia/mojom:fuchsia_media_resource_provider",
+ "//testing/gmock",
+ "//testing/gtest",
+ "//third_party/blink/public/common",
+ "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.mediacodec",
+ "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sysmem",
+ "//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
+ ]
+}
diff --git a/chromium/media/filters/fuchsia/DEPS b/chromium/media/fuchsia/video/DEPS
index 03a54a2749f..505bcdb71cd 100644
--- a/chromium/media/filters/fuchsia/DEPS
+++ b/chromium/media/fuchsia/video/DEPS
@@ -1,3 +1,5 @@
include_rules = [
"+components/viz/common/gpu/raster_context_provider.h",
+ "+mojo/public",
+ "+third_party/blink/public",
] \ No newline at end of file
diff --git a/chromium/media/filters/fuchsia/OWNERS b/chromium/media/fuchsia/video/OWNERS
index e7034eabb1e..fca2c201c25 100644
--- a/chromium/media/filters/fuchsia/OWNERS
+++ b/chromium/media/fuchsia/video/OWNERS
@@ -1 +1,2 @@
+sergeyu@chromium.org
file://build/fuchsia/OWNERS
diff --git a/chromium/media/fuchsia/video/fuchsia_decoder_factory.cc b/chromium/media/fuchsia/video/fuchsia_decoder_factory.cc
new file mode 100644
index 00000000000..19008176627
--- /dev/null
+++ b/chromium/media/fuchsia/video/fuchsia_decoder_factory.cc
@@ -0,0 +1,65 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/fuchsia/video/fuchsia_decoder_factory.h"
+
+#include "components/viz/common/gpu/raster_context_provider.h"
+#include "media/fuchsia/video/fuchsia_video_decoder.h"
+#include "media/video/gpu_video_accelerator_factories.h"
+#include "third_party/blink/public/common/browser_interface_broker_proxy.h"
+
+namespace media {
+
+FuchsiaDecoderFactory::FuchsiaDecoderFactory(
+ blink::BrowserInterfaceBrokerProxy* interface_broker) {
+ interface_broker->GetInterface(
+ media_resource_provider_handle_.InitWithNewPipeAndPassReceiver());
+}
+
+FuchsiaDecoderFactory::~FuchsiaDecoderFactory() = default;
+
+void FuchsiaDecoderFactory::CreateAudioDecoders(
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ MediaLog* media_log,
+ std::vector<std::unique_ptr<AudioDecoder>>* audio_decoders) {
+ // There are no Fuchsia-specific audio decoders.
+}
+
+SupportedVideoDecoderConfigs
+FuchsiaDecoderFactory::GetSupportedVideoDecoderConfigsForWebRTC() {
+ // TODO(crbug.com/1207991) Enable HW decoder support for WebRTC.
+ return {};
+}
+
+void FuchsiaDecoderFactory::CreateVideoDecoders(
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ GpuVideoAcceleratorFactories* gpu_factories,
+ MediaLog* media_log,
+ RequestOverlayInfoCB request_overlay_info_cb,
+ const gfx::ColorSpace& target_color_space,
+ std::vector<std::unique_ptr<VideoDecoder>>* video_decoders) {
+ // Bind `media_resource_provider_` the first time this function is called.
+ if (media_resource_provider_handle_)
+ media_resource_provider_.Bind(std::move(media_resource_provider_handle_));
+
+ if (gpu_factories && gpu_factories->IsGpuVideoDecodeAcceleratorEnabled()) {
+ auto* context_provider = gpu_factories->GetMediaContextProvider();
+
+ // GetMediaContextProvider() may return nullptr when the context was lost
+ // (e.g. after GPU process crash). To handle this case, RenderThreadImpl
+ // creates a new GpuVideoAcceleratorFactories with a new ContextProvider
+ // instance, but there is no way to get it here. For now just don't add
+ // FuchsiaVideoDecoder in that scenario.
+ //
+ // TODO(crbug.com/995902) Handle lost context.
+ if (context_provider) {
+ video_decoders->push_back(std::make_unique<FuchsiaVideoDecoder>(
+ context_provider, media_resource_provider_.get()));
+ } else {
+ LOG(ERROR) << "Can't create FuchsiaVideoDecoder due to GPU context loss.";
+ }
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/fuchsia/video/fuchsia_decoder_factory.h b/chromium/media/fuchsia/video/fuchsia_decoder_factory.h
new file mode 100644
index 00000000000..5d99a3e21ab
--- /dev/null
+++ b/chromium/media/fuchsia/video/fuchsia_decoder_factory.h
@@ -0,0 +1,47 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FUCHSIA_VIDEO_FUCHSIA_DECODER_FACTORY_H_
+#define MEDIA_FUCHSIA_VIDEO_FUCHSIA_DECODER_FACTORY_H_
+
+#include "media/base/decoder_factory.h"
+#include "media/fuchsia/mojom/fuchsia_media_resource_provider.mojom.h"
+#include "mojo/public/cpp/bindings/remote.h"
+
+namespace blink {
+class BrowserInterfaceBrokerProxy;
+} // namespace blink
+
+namespace media {
+
+class FuchsiaDecoderFactory final : public DecoderFactory {
+ public:
+ explicit FuchsiaDecoderFactory(
+ blink::BrowserInterfaceBrokerProxy* interface_broker);
+ ~FuchsiaDecoderFactory() final;
+
+ // DecoderFactory implementation.
+ void CreateAudioDecoders(
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ MediaLog* media_log,
+ std::vector<std::unique_ptr<AudioDecoder>>* audio_decoders) final;
+ SupportedVideoDecoderConfigs GetSupportedVideoDecoderConfigsForWebRTC() final;
+ void CreateVideoDecoders(
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ GpuVideoAcceleratorFactories* gpu_factories,
+ MediaLog* media_log,
+ RequestOverlayInfoCB request_overlay_info_cb,
+ const gfx::ColorSpace& target_color_space,
+ std::vector<std::unique_ptr<VideoDecoder>>* video_decoders) final;
+
+ private:
+ mojo::PendingRemote<media::mojom::FuchsiaMediaResourceProvider>
+ media_resource_provider_handle_;
+ mojo::Remote<media::mojom::FuchsiaMediaResourceProvider>
+ media_resource_provider_;
+};
+
+} // namespace media
+
+#endif // MEDIA_FUCHSIA_VIDEO_FUCHSIA_DECODER_FACTORY_H_ \ No newline at end of file
diff --git a/chromium/media/filters/fuchsia/fuchsia_video_decoder.cc b/chromium/media/fuchsia/video/fuchsia_video_decoder.cc
index d630c3a6a5c..28e71308053 100644
--- a/chromium/media/filters/fuchsia/fuchsia_video_decoder.cc
+++ b/chromium/media/fuchsia/video/fuchsia_video_decoder.cc
@@ -2,10 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/fuchsia/fuchsia_video_decoder.h"
+#include "media/fuchsia/video/fuchsia_video_decoder.h"
-#include <fuchsia/mediacodec/cpp/fidl.h>
-#include <lib/sys/cpp/component_context.h>
#include <vulkan/vulkan.h>
#include "base/bind.h"
@@ -37,7 +35,8 @@
#include "media/fuchsia/common/decrypting_sysmem_buffer_stream.h"
#include "media/fuchsia/common/passthrough_sysmem_buffer_stream.h"
#include "media/fuchsia/common/stream_processor_helper.h"
-#include "third_party/libyuv/include/libyuv/video_common.h"
+#include "media/fuchsia/mojom/fuchsia_media_resource_provider.mojom.h"
+#include "third_party/blink/public/common/browser_interface_broker_proxy.h"
#include "ui/gfx/buffer_types.h"
#include "ui/gfx/client_native_pixmap_factory.h"
#include "ui/ozone/public/client_native_pixmap_factory_ozone.h"
@@ -176,27 +175,11 @@ class FuchsiaVideoDecoder::OutputMailbox {
base::WeakPtrFactory<OutputMailbox> weak_factory_;
};
-// static
-std::unique_ptr<VideoDecoder> FuchsiaVideoDecoder::Create(
- scoped_refptr<viz::RasterContextProvider> raster_context_provider) {
- return std::make_unique<FuchsiaVideoDecoder>(
- std::move(raster_context_provider),
- /*enable_sw_decoding=*/false);
-}
-
-// static
-std::unique_ptr<VideoDecoder> FuchsiaVideoDecoder::CreateForTests(
- scoped_refptr<viz::RasterContextProvider> raster_context_provider,
- bool enable_sw_decoding) {
- return std::make_unique<FuchsiaVideoDecoder>(
- std::move(raster_context_provider), enable_sw_decoding);
-}
-
FuchsiaVideoDecoder::FuchsiaVideoDecoder(
scoped_refptr<viz::RasterContextProvider> raster_context_provider,
- bool enable_sw_decoding)
+ media::mojom::FuchsiaMediaResourceProvider* media_resource_provider)
: raster_context_provider_(raster_context_provider),
- enable_sw_decoding_(enable_sw_decoding),
+ media_resource_provider_(media_resource_provider),
use_overlays_for_video_(base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kUseOverlaysForVideo)),
sysmem_allocator_("CrFuchsiaVideoDecoder"),
@@ -271,51 +254,9 @@ void FuchsiaVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Reset output buffers since we won't be able to re-use them.
ReleaseOutputBuffers();
- fuchsia::mediacodec::CreateDecoder_Params decoder_params;
- decoder_params.mutable_input_details()->set_format_details_version_ordinal(0);
-
- switch (config.codec()) {
- case VideoCodec::kH264:
- decoder_params.mutable_input_details()->set_mime_type("video/h264");
- break;
- case VideoCodec::kVP8:
- decoder_params.mutable_input_details()->set_mime_type("video/vp8");
- break;
- case VideoCodec::kVP9:
- decoder_params.mutable_input_details()->set_mime_type("video/vp9");
- break;
- case VideoCodec::kHEVC:
- decoder_params.mutable_input_details()->set_mime_type("video/hevc");
- break;
- case VideoCodec::kAV1:
- decoder_params.mutable_input_details()->set_mime_type("video/av1");
- break;
-
- default:
- std::move(done_callback).Run(DecoderStatus::Codes::kUnsupportedCodec);
- return;
- }
-
- if (secure_mode) {
- decoder_params.set_secure_input_mode(
- fuchsia::mediacodec::SecureMemoryMode::ON);
- }
-
- if (secure_mode || base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kForceProtectedVideoOutputBuffers)) {
- decoder_params.set_secure_output_mode(
- fuchsia::mediacodec::SecureMemoryMode::ON);
- }
-
- decoder_params.set_promise_separate_access_units_on_input(true);
- decoder_params.set_require_hw(!enable_sw_decoding_);
-
- auto decoder_factory = base::ComponentContextForProcess()
- ->svc()
- ->Connect<fuchsia::mediacodec::CodecFactory>();
fuchsia::media::StreamProcessorPtr decoder;
- decoder_factory->CreateDecoder(std::move(decoder_params),
- decoder.NewRequest());
+ media_resource_provider_->CreateVideoDecoder(config.codec(), secure_mode,
+ decoder.NewRequest());
decoder_ = std::make_unique<StreamProcessorHelper>(std::move(decoder), this);
current_config_ = config;
@@ -558,8 +499,8 @@ void FuchsiaVideoDecoder::OnStreamProcessorOutputPacket(
base::Unretained(this), std::move(output_packet)));
// Currently sysmem doesn't specify location of chroma samples relative to
- // luma (see fxb/13677). Assume they are cosited with luma. YCbCr info here
- // must match the values passed for the same buffer in
+ // luma (see fxbug.dev/13677). Assume they are cosited with luma. YCbCr info
+ // here must match the values passed for the same buffer in
// ui::SysmemBufferCollection::CreateVkImage() (see
// ui/ozone/platform/scenic/sysmem_buffer_collection.cc). |format_features|
// are resolved later in the GPU process before this info is passed to Skia.
@@ -569,11 +510,9 @@ void FuchsiaVideoDecoder::OnStreamProcessorOutputPacket(
VK_SAMPLER_YCBCR_RANGE_ITU_NARROW, VK_CHROMA_LOCATION_COSITED_EVEN,
VK_CHROMA_LOCATION_COSITED_EVEN, /*format_features=*/0));
- // Mark the frame as power-efficient when software decoders are disabled. The
- // codec may still decode on hardware even when |enable_sw_decoding_| is set
- // (i.e. power_efficient flag would not be set correctly in that case). It
- // doesn't matter because software decoders can be enabled only for tests.
- frame->metadata().power_efficient = !enable_sw_decoding_;
+  // Mark the frame as power-efficient, since software decoders are used only
+  // in tests.
+ frame->metadata().power_efficient = true;
// Allow this video frame to be promoted as an overlay, because it was
// registered with an ImagePipe.
diff --git a/chromium/media/filters/fuchsia/fuchsia_video_decoder.h b/chromium/media/fuchsia/video/fuchsia_video_decoder.h
index bdabdb9d523..22a115f339c 100644
--- a/chromium/media/filters/fuchsia/fuchsia_video_decoder.h
+++ b/chromium/media/fuchsia/video/fuchsia_video_decoder.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_FILTERS_FUCHSIA_FUCHSIA_VIDEO_DECODER_H_
-#define MEDIA_FILTERS_FUCHSIA_FUCHSIA_VIDEO_DECODER_H_
+#ifndef MEDIA_FUCHSIA_VIDEO_FUCHSIA_VIDEO_DECODER_H_
+#define MEDIA_FUCHSIA_VIDEO_FUCHSIA_VIDEO_DECODER_H_
#include <deque>
#include <memory>
@@ -29,25 +29,17 @@ class RasterContextProvider;
namespace media {
+namespace mojom {
+class FuchsiaMediaResourceProvider;
+} // namespace mojom
+
class MEDIA_EXPORT FuchsiaVideoDecoder : public VideoDecoder,
public SysmemBufferStream::Sink,
public StreamProcessorHelper::Client {
public:
- // Creates VideoDecoder that uses fuchsia.mediacodec API. The returned
- // VideoDecoder instance will only try to use hardware video codecs.
- MEDIA_EXPORT static std::unique_ptr<VideoDecoder> Create(
- scoped_refptr<viz::RasterContextProvider> raster_context_provider);
-
- // Same as above, but also allows to enable software codecs. This is useful
- // for FuchsiaVideoDecoder tests that run on systems that don't have hardware
- // decoder support.
- MEDIA_EXPORT static std::unique_ptr<VideoDecoder> CreateForTests(
- scoped_refptr<viz::RasterContextProvider> raster_context_provider,
- bool enable_sw_decoding);
-
FuchsiaVideoDecoder(
scoped_refptr<viz::RasterContextProvider> raster_context_provider,
- bool enable_sw_decoding);
+ media::mojom::FuchsiaMediaResourceProvider* media_resource_provider);
~FuchsiaVideoDecoder() override;
FuchsiaVideoDecoder(const FuchsiaVideoDecoder&) = delete;
@@ -121,7 +113,8 @@ class MEDIA_EXPORT FuchsiaVideoDecoder : public VideoDecoder,
void ReleaseOutputBuffers();
const scoped_refptr<viz::RasterContextProvider> raster_context_provider_;
- const bool enable_sw_decoding_;
+ media::mojom::FuchsiaMediaResourceProvider* media_resource_provider_;
+
const bool use_overlays_for_video_;
OutputCB output_cb_;
@@ -168,4 +161,4 @@ class MEDIA_EXPORT FuchsiaVideoDecoder : public VideoDecoder,
} // namespace media
-#endif // MEDIA_FILTERS_FUCHSIA_FUCHSIA_VIDEO_DECODER_H_
+#endif // MEDIA_FUCHSIA_VIDEO_FUCHSIA_VIDEO_DECODER_H_
diff --git a/chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc b/chromium/media/fuchsia/video/fuchsia_video_decoder_unittest.cc
index fc20438fea8..fbc6f3dbbbb 100644
--- a/chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc
+++ b/chromium/media/fuchsia/video/fuchsia_video_decoder_unittest.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/fuchsia/fuchsia_video_decoder.h"
+#include "media/fuchsia/video/fuchsia_video_decoder.h"
+#include <fuchsia/mediacodec/cpp/fidl.h>
#include <fuchsia/sysmem/cpp/fidl.h>
#include <lib/sys/cpp/component_context.h>
@@ -24,6 +25,7 @@
#include "media/base/test_helpers.h"
#include "media/base/video_decoder.h"
#include "media/base/video_frame.h"
+#include "media/fuchsia/mojom/fuchsia_media_resource_provider.mojom.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/gpu_fence.h"
#include "ui/gfx/gpu_memory_buffer.h"
@@ -286,6 +288,52 @@ class TestRasterContextProvider
base::OnceClosure on_destroyed_;
};
+class TestFuchsiaMediaResourceProvider
+ : public media::mojom::FuchsiaMediaResourceProvider {
+ public:
+ // media::mojom::FuchsiaMediaResourceProvider implementation.
+ void CreateCdm(
+ const std::string& key_system,
+ fidl::InterfaceRequest<fuchsia::media::drm::ContentDecryptionModule>
+ request) final {
+ ADD_FAILURE();
+ }
+ void CreateVideoDecoder(
+ media::VideoCodec codec,
+ bool secure_memory,
+ fidl::InterfaceRequest<fuchsia::media::StreamProcessor>
+ stream_processor_request) final {
+ EXPECT_FALSE(secure_memory);
+
+ fuchsia::mediacodec::CreateDecoder_Params decoder_params;
+ decoder_params.mutable_input_details()->set_format_details_version_ordinal(
+ 0);
+
+ switch (codec) {
+ case VideoCodec::kH264:
+ decoder_params.mutable_input_details()->set_mime_type("video/h264");
+ break;
+ case VideoCodec::kVP9:
+ decoder_params.mutable_input_details()->set_mime_type("video/vp9");
+ break;
+
+ default:
+ ADD_FAILURE() << "CreateVideoDecoder() called with unexpected codec: "
+ << static_cast<int>(codec);
+ return;
+ }
+
+ decoder_params.set_promise_separate_access_units_on_input(true);
+ decoder_params.set_require_hw(false);
+
+ auto decoder_factory = base::ComponentContextForProcess()
+ ->svc()
+ ->Connect<fuchsia::mediacodec::CodecFactory>();
+ decoder_factory->CreateDecoder(std::move(decoder_params),
+ std::move(stream_processor_request));
+ }
+};
+
} // namespace
class FuchsiaVideoDecoderTest : public testing::Test {
@@ -293,14 +341,20 @@ class FuchsiaVideoDecoderTest : public testing::Test {
FuchsiaVideoDecoderTest()
: raster_context_provider_(
base::MakeRefCounted<TestRasterContextProvider>()),
- decoder_(
- FuchsiaVideoDecoder::CreateForTests(raster_context_provider_.get(),
- /*enable_sw_decoding=*/true)) {}
+ decoder_(std::make_unique<FuchsiaVideoDecoder>(
+ raster_context_provider_.get(),
+ &test_media_resource_provider_)) {}
FuchsiaVideoDecoderTest(const FuchsiaVideoDecoderTest&) = delete;
FuchsiaVideoDecoderTest& operator=(const FuchsiaVideoDecoderTest&) = delete;
- ~FuchsiaVideoDecoderTest() override = default;
+ ~FuchsiaVideoDecoderTest() override {
+ // The decoder uses async destruction callbacks for VideoFrames, so we need
+ // to run the message loop after releasing the frames to avoid memory leaks
+ // (see crbug.com/1287362).
+ output_frames_.clear();
+ task_environment_.RunUntilIdle();
+ }
[[nodiscard]] bool InitializeDecoder(VideoDecoderConfig config) {
base::RunLoop run_loop;
@@ -376,6 +430,8 @@ class FuchsiaVideoDecoderTest : public testing::Test {
base::test::SingleThreadTaskEnvironment task_environment_{
base::test::SingleThreadTaskEnvironment::MainThreadType::IO};
+ TestFuchsiaMediaResourceProvider test_media_resource_provider_;
+
scoped_refptr<TestRasterContextProvider> raster_context_provider_;
std::unique_ptr<VideoDecoder> decoder_;
diff --git a/chromium/media/gpu/BUILD.gn b/chromium/media/gpu/BUILD.gn
index 39e97190879..71d39a33705 100644
--- a/chromium/media/gpu/BUILD.gn
+++ b/chromium/media/gpu/BUILD.gn
@@ -143,6 +143,8 @@ component("gpu") {
"android/media_codec_video_decoder.cc",
"android/media_codec_video_decoder.h",
"android/ndk_constants.cc",
+ "android/ndk_video_encode_accelerator.cc",
+ "android/ndk_video_encode_accelerator.h",
"android/pooled_shared_image_video_provider.cc",
"android/pooled_shared_image_video_provider.h",
"android/promotion_hint_aggregator.h",
@@ -235,7 +237,7 @@ component("gpu") {
"windows/supported_profile_helpers.cc",
"windows/supported_profile_helpers.h",
]
- if (enable_platform_hevc_decoding) {
+ if (enable_hevc_parser_and_hw_decoder) {
sources += [
"windows/d3d11_h265_accelerator.cc",
"windows/d3d11_h265_accelerator.h",
@@ -295,7 +297,7 @@ source_set("common") {
"vp9_reference_frame_vector.h",
]
- if (enable_platform_hevc_decoding) {
+ if (enable_hevc_parser_and_hw_decoder) {
sources += [
"h265_decoder.cc",
"h265_decoder.h",
@@ -482,6 +484,7 @@ source_set("android_video_decode_accelerator_unittests") {
"android/mock_promotion_hint_aggregator.h",
"android/mock_shared_image_video_provider.cc",
"android/mock_shared_image_video_provider.h",
+ "android/ndk_video_encode_accelerator_tests.cc",
"android/pooled_shared_image_video_provider_unittest.cc",
"android/promotion_hint_aggregator_impl_unittest.cc",
"android/surface_chooser_helper_unittest.cc",
@@ -496,6 +499,7 @@ source_set("android_video_decode_accelerator_unittests") {
"//media:test_support",
"//testing/gmock",
"//testing/gtest",
+ "//third_party/libyuv:libyuv",
"//ui/gl",
"//ui/gl/init",
]
@@ -531,7 +535,7 @@ source_set("unit_tests") {
"//ui/gl:test_support",
]
sources = [ "h264_decoder_unittest.cc" ]
- if (enable_platform_hevc_decoding) {
+ if (enable_hevc_parser_and_hw_decoder) {
sources += [ "h265_decoder_unittest.cc" ]
}
if (is_linux || is_chromeos) {
@@ -556,7 +560,7 @@ source_set("unit_tests") {
sources += [ "vp8_decoder_unittest.cc" ]
}
- if (use_libgav1_parser) {
+ if (use_libgav1_parser && media_use_ffmpeg) {
sources += [ "av1_decoder_unittest.cc" ]
deps += [
"//build:chromeos_buildflags",
@@ -661,7 +665,7 @@ if (use_v4l2_codec || use_vaapi) {
}
}
-if (enable_platform_hevc_decoding) {
+if (enable_hevc_parser_and_hw_decoder) {
fuzzer_test("media_h265_decoder_fuzzer") {
sources = [ "h265_decoder_fuzzertest.cc" ]
deps = [
diff --git a/chromium/media/gpu/DEPS b/chromium/media/gpu/DEPS
index f8a66ffce7a..2a7e6a79cef 100644
--- a/chromium/media/gpu/DEPS
+++ b/chromium/media/gpu/DEPS
@@ -26,5 +26,11 @@ include_rules = [
"+components/chromeos_camera/mjpeg_decode_accelerator.h",
# video_decode_accelerator_perf_tests uses it.
- "+sandbox/linux/services/resource_limits.h"
+ "+sandbox/linux/services/resource_limits.h",
+
+ # Mojo bindings
+ "+mojo/public/cpp/bindings/pending_remote.h",
+ "+mojo/public/cpp/bindings/receiver.h",
+ "+mojo/public/cpp/bindings/receiver_set.h",
+ "+mojo/public/cpp/bindings/remote.h"
]
diff --git a/chromium/media/gpu/android/android_video_encode_accelerator.cc b/chromium/media/gpu/android/android_video_encode_accelerator.cc
index f566c2b7327..7a1890b67ba 100644
--- a/chromium/media/gpu/android/android_video_encode_accelerator.cc
+++ b/chromium/media/gpu/android/android_video_encode_accelerator.cc
@@ -11,6 +11,8 @@
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/metrics/histogram_macros.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -20,7 +22,6 @@
#include "media/base/bitstream_buffer.h"
#include "media/base/limits.h"
#include "media/base/media_log.h"
-#include "media/base/unaligned_shared_memory.h"
#include "media/video/picture.h"
#include "third_party/libyuv/include/libyuv/convert_from.h"
#include "ui/gl/android/scoped_java_surface.h"
@@ -134,6 +135,7 @@ AndroidVideoEncodeAccelerator::GetSupportedProfiles() {
profile.max_resolution.SetSize(kMaxEncodeFrameWidth, kMaxEncodeFrameHeight);
profile.max_framerate_numerator = kMaxFramerateNumerator;
profile.max_framerate_denominator = kMaxFramerateDenominator;
+ profile.rate_control_modes = media::VideoEncodeAccelerator::kConstantMode;
profiles.push_back(profile);
}
return profiles;
@@ -467,18 +469,18 @@ void AndroidVideoEncodeAccelerator::DequeueOutput() {
BitstreamBuffer bitstream_buffer =
std::move(available_bitstream_buffers_.back());
available_bitstream_buffers_.pop_back();
- auto shm = std::make_unique<UnalignedSharedMemory>(
- bitstream_buffer.TakeRegion(), bitstream_buffer.size(), false);
- RETURN_ON_FAILURE(
- shm->MapAt(bitstream_buffer.offset(), bitstream_buffer.size()),
- "Failed to map SHM", kPlatformFailureError);
+ base::UnsafeSharedMemoryRegion region = bitstream_buffer.TakeRegion();
+ auto mapping =
+ region.MapAt(bitstream_buffer.offset(), bitstream_buffer.size());
+ RETURN_ON_FAILURE(mapping.IsValid(), "Failed to map SHM",
+ kPlatformFailureError);
RETURN_ON_FAILURE(
size <= bitstream_buffer.size(),
"Encoded buffer too large: " << size << ">" << bitstream_buffer.size(),
kPlatformFailureError);
- status = media_codec_->CopyFromOutputBuffer(buf_index, offset, shm->memory(),
- size);
+ status = media_codec_->CopyFromOutputBuffer(buf_index, offset,
+ mapping.memory(), size);
RETURN_ON_FAILURE(status == MEDIA_CODEC_OK, "CopyFromOutputBuffer failed",
kPlatformFailureError);
media_codec_->ReleaseOutputBuffer(buf_index, false);
diff --git a/chromium/media/gpu/android/codec_image_unittest.cc b/chromium/media/gpu/android/codec_image_unittest.cc
index 308d57415aa..e55f6858ec2 100644
--- a/chromium/media/gpu/android/codec_image_unittest.cc
+++ b/chromium/media/gpu/android/codec_image_unittest.cc
@@ -45,19 +45,21 @@ class CodecImageTest : public testing::Test {
codec_ = codec.get();
wrapper_ = std::make_unique<CodecWrapper>(
CodecSurfacePair(std::move(codec), new CodecSurfaceBundle()),
- base::DoNothing(), base::SequencedTaskRunnerHandle::Get());
+ base::DoNothing(), base::SequencedTaskRunnerHandle::Get(),
+ gfx::Size(640, 480));
ON_CALL(*codec_, DequeueOutputBuffer(_, _, _, _, _, _, _))
.WillByDefault(Return(MEDIA_CODEC_OK));
gl::init::InitializeStaticGLBindingsImplementation(
gl::GLImplementationParts(gl::kGLImplementationEGLGLES2), false);
- gl::init::InitializeGLOneOffPlatformImplementation(
+ display_ = gl::init::InitializeGLOneOffPlatformImplementation(
/*fallback_to_software_gl=*/false,
/*disable_gl_drawing=*/false,
/*init_extensions=*/false,
/*system_device_id=*/0);
- surface_ = new gl::PbufferGLSurfaceEGL(gfx::Size(320, 240));
+ surface_ = new gl::PbufferGLSurfaceEGL(gl::GLSurfaceEGL::GetGLDisplayEGL(),
+ gfx::Size(320, 240));
surface_->Initialize();
share_group_ = new gl::GLShareGroup();
context_ = new gl::GLContextEGL(share_group_.get());
@@ -81,7 +83,7 @@ class CodecImageTest : public testing::Test {
context_ = nullptr;
share_group_ = nullptr;
surface_ = nullptr;
- gl::init::ShutdownGL(false);
+ gl::init::ShutdownGL(display_, false);
wrapper_->TakeCodecSurfacePair();
}
@@ -119,6 +121,7 @@ class CodecImageTest : public testing::Test {
scoped_refptr<gl::GLShareGroup> share_group_;
scoped_refptr<gl::GLSurface> surface_;
GLuint texture_id_ = 0;
+ gl::GLDisplay* display_ = nullptr;
class PromotionHintReceiver {
public:
@@ -281,8 +284,8 @@ TEST_F(CodecImageTestExplicitBind, RenderToFrontBufferDoesNotBindTexture) {
TEST_F(CodecImageTest, RenderToFrontBufferRestoresGLContext) {
// Make a new context current.
- scoped_refptr<gl::GLSurface> surface(
- new gl::PbufferGLSurfaceEGL(gfx::Size(320, 240)));
+ scoped_refptr<gl::GLSurface> surface(new gl::PbufferGLSurfaceEGL(
+ gl::GLSurfaceEGL::GetGLDisplayEGL(), gfx::Size(320, 240)));
surface->Initialize();
scoped_refptr<gl::GLShareGroup> share_group(new gl::GLShareGroup());
scoped_refptr<gl::GLContext> context(new gl::GLContextEGL(share_group.get()));
diff --git a/chromium/media/gpu/android/codec_wrapper.cc b/chromium/media/gpu/android/codec_wrapper.cc
index a275c93ca98..62744a4c5c6 100644
--- a/chromium/media/gpu/android/codec_wrapper.cc
+++ b/chromium/media/gpu/android/codec_wrapper.cc
@@ -24,10 +24,10 @@ namespace media {
// CodecOutputBuffer are the only two things that hold references to it.
class CodecWrapperImpl : public base::RefCountedThreadSafe<CodecWrapperImpl> {
public:
- CodecWrapperImpl(
- CodecSurfacePair codec_surface_pair,
- CodecWrapper::OutputReleasedCB output_buffer_release_cb,
- scoped_refptr<base::SequencedTaskRunner> release_task_runner);
+ CodecWrapperImpl(CodecSurfacePair codec_surface_pair,
+ CodecWrapper::OutputReleasedCB output_buffer_release_cb,
+ scoped_refptr<base::SequencedTaskRunner> release_task_runner,
+ const gfx::Size& initial_expected_size);
CodecWrapperImpl(const CodecWrapperImpl&) = delete;
CodecWrapperImpl& operator=(const CodecWrapperImpl&) = delete;
@@ -143,11 +143,13 @@ bool CodecOutputBuffer::ReleaseToSurface() {
CodecWrapperImpl::CodecWrapperImpl(
CodecSurfacePair codec_surface_pair,
CodecWrapper::OutputReleasedCB output_buffer_release_cb,
- scoped_refptr<base::SequencedTaskRunner> release_task_runner)
+ scoped_refptr<base::SequencedTaskRunner> release_task_runner,
+ const gfx::Size& initial_expected_size)
: state_(State::kFlushed),
codec_(std::move(codec_surface_pair.first)),
surface_bundle_(std::move(codec_surface_pair.second)),
next_buffer_id_(0),
+ size_(initial_expected_size),
output_buffer_release_cb_(std::move(output_buffer_release_cb)),
release_task_runner_(std::move(release_task_runner)) {
DVLOG(2) << __func__;
@@ -349,15 +351,25 @@ CodecWrapperImpl::DequeueStatus CodecWrapperImpl::DequeueOutputBuffer(
return DequeueStatus::kError;
}
case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED: {
- if (codec_->GetOutputSize(&size_) == MEDIA_CODEC_ERROR) {
+ gfx::Size temp_size;
+ if (codec_->GetOutputSize(&temp_size) == MEDIA_CODEC_ERROR) {
state_ = State::kError;
return DequeueStatus::kError;
}
+ // In automated testing, we regularly see a blip where MediaCodec sends
+ // a format change to size 0,0, some number of output buffer available
+ // signals, and then finally the real size. Ignore this transient size
+ // change to avoid output errors. We'll either reuse the previous size
+ // information or the size provided during configure.
+ // See https://crbug.com/1207682.
+ if (!temp_size.IsEmpty())
+ size_ = temp_size;
+
bool error =
codec_->GetOutputColorSpace(&color_space_) == MEDIA_CODEC_ERROR;
UMA_HISTOGRAM_BOOLEAN("Media.Android.GetColorSpaceError", error);
- if (error) {
+ if (error && !size_.IsEmpty()) {
// If we get back an unsupported color space, then just default to
// sRGB for < 720p, or 709 otherwise. It's better than nothing.
color_space_ = size_.width() >= 1280 ? gfx::ColorSpace::CreateREC709()
@@ -453,10 +465,12 @@ bool CodecWrapperImpl::ReleaseCodecOutputBuffer(int64_t id, bool render) {
CodecWrapper::CodecWrapper(
CodecSurfacePair codec_surface_pair,
OutputReleasedCB output_buffer_release_cb,
- scoped_refptr<base::SequencedTaskRunner> release_task_runner)
+ scoped_refptr<base::SequencedTaskRunner> release_task_runner,
+ const gfx::Size& initial_expected_size)
: impl_(new CodecWrapperImpl(std::move(codec_surface_pair),
std::move(output_buffer_release_cb),
- std::move(release_task_runner))) {}
+ std::move(release_task_runner),
+ initial_expected_size)) {}
CodecWrapper::~CodecWrapper() {
// The codec must have already been taken.
diff --git a/chromium/media/gpu/android/codec_wrapper.h b/chromium/media/gpu/android/codec_wrapper.h
index 1d378402e55..0637d85291a 100644
--- a/chromium/media/gpu/android/codec_wrapper.h
+++ b/chromium/media/gpu/android/codec_wrapper.h
@@ -107,7 +107,8 @@ class MEDIA_GPU_EXPORT CodecWrapper {
using OutputReleasedCB = base::RepeatingCallback<void(bool)>;
CodecWrapper(CodecSurfacePair codec_surface_pair,
OutputReleasedCB output_buffer_release_cb,
- scoped_refptr<base::SequencedTaskRunner> release_task_runner);
+ scoped_refptr<base::SequencedTaskRunner> release_task_runner,
+ const gfx::Size& initial_expected_size);
CodecWrapper(const CodecWrapper&) = delete;
CodecWrapper& operator=(const CodecWrapper&) = delete;
diff --git a/chromium/media/gpu/android/codec_wrapper_unittest.cc b/chromium/media/gpu/android/codec_wrapper_unittest.cc
index a31019a60eb..7fc605625eb 100644
--- a/chromium/media/gpu/android/codec_wrapper_unittest.cc
+++ b/chromium/media/gpu/android/codec_wrapper_unittest.cc
@@ -21,15 +21,17 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using testing::_;
using testing::DoAll;
using testing::Invoke;
using testing::NiceMock;
using testing::Return;
using testing::SetArgPointee;
-using testing::_;
namespace media {
+constexpr gfx::Size kInitialCodedSize(640, 480);
+
class CodecWrapperTest : public testing::Test {
public:
CodecWrapperTest() : other_thread_("Other thread") {
@@ -40,7 +42,7 @@ class CodecWrapperTest : public testing::Test {
CodecSurfacePair(std::move(codec), surface_bundle_),
output_buffer_release_cb_.Get(),
// Unrendered output buffers are released on our thread.
- base::SequencedTaskRunnerHandle::Get());
+ base::SequencedTaskRunnerHandle::Get(), kInitialCodedSize);
ON_CALL(*codec_, DequeueOutputBuffer(_, _, _, _, _, _, _))
.WillByDefault(Return(MEDIA_CODEC_OK));
ON_CALL(*codec_, DequeueInputBuffer(_, _))
@@ -400,4 +402,23 @@ TEST_F(CodecWrapperTest, CodecWrapperDefaultsToSRGB) {
ASSERT_EQ(codec_buffer->color_space(), gfx::ColorSpace::CreateSRGB());
}
+TEST_F(CodecWrapperTest, CodecOutputsIgnoreZeroSize) {
+ EXPECT_CALL(*codec_, DequeueOutputBuffer(_, _, _, _, _, _, _))
+ .WillOnce(Return(MEDIA_CODEC_OUTPUT_FORMAT_CHANGED))
+ .WillOnce(Return(MEDIA_CODEC_OK))
+ .WillOnce(Return(MEDIA_CODEC_OUTPUT_FORMAT_CHANGED))
+ .WillOnce(Return(MEDIA_CODEC_OK));
+
+ constexpr gfx::Size kNewSize(1280, 720);
+ EXPECT_CALL(*codec_, GetOutputSize(_))
+ .WillOnce(DoAll(SetArgPointee<0>(gfx::Size()), Return(MEDIA_CODEC_OK)))
+ .WillOnce(DoAll(SetArgPointee<0>(kNewSize), Return(MEDIA_CODEC_OK)));
+
+ auto codec_buffer = DequeueCodecOutputBuffer();
+ ASSERT_EQ(codec_buffer->size(), kInitialCodedSize);
+
+ codec_buffer = DequeueCodecOutputBuffer();
+ ASSERT_EQ(codec_buffer->size(), kNewSize);
+}
+
} // namespace media
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.cc b/chromium/media/gpu/android/media_codec_video_decoder.cc
index 404d5ed6e26..94215b3ede7 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.cc
+++ b/chromium/media/gpu/android/media_codec_video_decoder.cc
@@ -169,10 +169,12 @@ std::vector<SupportedVideoDecoderConfig> GetSupportedConfigsInternal(
false); // require_encrypted
#if BUILDFLAG(ENABLE_PLATFORM_HEVC)
- supported_configs.emplace_back(HEVCPROFILE_MIN, HEVCPROFILE_MAX,
- gfx::Size(0, 0), gfx::Size(3840, 2160),
- true, // allow_encrypted
- false); // require_encrypted
+ if (base::FeatureList::IsEnabled(kPlatformHEVCDecoderSupport)) {
+ supported_configs.emplace_back(HEVCPROFILE_MIN, HEVCPROFILE_MAX,
+ gfx::Size(0, 0), gfx::Size(3840, 2160),
+ true, // allow_encrypted
+ false); // require_encrypted
+ }
#endif
#if BUILDFLAG(ENABLE_PLATFORM_DOLBY_VISION)
// Technically we should check which profiles are supported, but we can
@@ -382,8 +384,10 @@ void MediaCodecVideoDecoder::Initialize(const VideoDecoderConfig& config,
// We only support setting CDM at first initialization. Even if the initial
// config is clear, we'll still try to set CDM since we may switch to an
// encrypted config later.
+ const int width = decoder_config_.coded_size().width();
if (first_init && cdm_context && cdm_context->GetMediaCryptoContext()) {
DCHECK(media_crypto_.is_null());
+ last_width_ = width;
SetCdm(cdm_context, std::move(init_cb));
return;
}
@@ -399,7 +403,6 @@ void MediaCodecVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Do the rest of the initialization lazily on the first decode.
BindToCurrentLoop(std::move(init_cb)).Run(DecoderStatus::Codes::kOk);
- const int width = decoder_config_.coded_size().width();
// On re-init, reallocate the codec if the size has changed too much.
// Restrict this behavior to Q, where the behavior changed.
if (first_init) {
@@ -414,6 +417,9 @@ void MediaCodecVideoDecoder::Initialize(const VideoDecoderConfig& config,
// sure, request a deferred flush.
deferred_flush_pending_ = true;
deferred_reallocation_pending_ = true;
+ // Since this will re-use the same surface, allow a retry to work around a
+ // race condition in the android framework.
+ should_retry_codec_allocation_ = true;
last_width_ = width;
} // else leave |last_width_| unmodified, since we're re-using the codec.
}
@@ -704,6 +710,21 @@ void MediaCodecVideoDecoder::OnCodecConfigured(
std::unique_ptr<MediaCodecBridge> codec) {
DCHECK(!codec_);
DCHECK_EQ(state_, State::kRunning);
+ bool should_retry_codec_allocation = should_retry_codec_allocation_;
+ should_retry_codec_allocation_ = false;
+
+ // In rare cases, the framework can fail transiently when trying to re-use a
+ // surface. If we're in one of those cases, then retry codec allocation.
+ // This only happens on R and S, so skip it otherwise.
+ if (!codec && should_retry_codec_allocation &&
+ device_info_->SdkVersion() >= base::android::SDK_VERSION_R &&
+ device_info_->SdkVersion() <= 32 /* SDK_VERSION_S_V2 */
+ ) {
+ // We might want to post this with a short delay, but there is already quite
+ // a lot of overhead in codec allocation.
+ CreateCodec();
+ return;
+ }
if (!codec) {
EnterTerminalState(State::kError, "Unable to allocate codec");
@@ -717,7 +738,7 @@ void MediaCodecVideoDecoder::OnCodecConfigured(
BindToCurrentLoop(base::BindRepeating(
&MediaCodecVideoDecoder::StartTimerOrPumpCodec,
weak_factory_.GetWeakPtr()))),
- base::SequencedTaskRunnerHandle::Get());
+ base::SequencedTaskRunnerHandle::Get(), decoder_config_.coded_size());
// If the target surface changed while codec creation was in progress,
// transition to it immediately.
@@ -775,6 +796,8 @@ void MediaCodecVideoDecoder::FlushCodec() {
if (deferred_reallocation_pending_) {
deferred_reallocation_pending_ = false;
ReleaseCodec();
+ // Re-initializing the codec with the same surface may need to retry.
+ should_retry_codec_allocation_ = !SurfaceTransitionPending();
CreateCodec();
}
@@ -1057,6 +1080,12 @@ void MediaCodecVideoDecoder::ForwardVideoFrame(
base::Milliseconds(1), base::Milliseconds(100),
25);
+ // Attach the HDR metadata if the color space got this far and is still an HDR
+ // color space. Note that it might be converted to something else along the
+ // way, often sRGB. In that case, don't confuse things with HDR metadata.
+ if (frame->ColorSpace().IsHDR() && decoder_config_.hdr_metadata())
+ frame->set_hdr_metadata(decoder_config_.hdr_metadata());
+
// No |frame| indicates an error creating it.
if (!frame) {
DLOG(ERROR) << __func__ << " |frame| is null";
@@ -1164,7 +1193,7 @@ void MediaCodecVideoDecoder::EnterTerminalState(State state,
target_surface_bundle_ = nullptr;
texture_owner_bundle_ = nullptr;
if (state == State::kError)
- CancelPendingDecodes(DecoderStatus::Codes::kFailed);
+ CancelPendingDecodes({DecoderStatus::Codes::kFailed, reason});
if (drain_type_)
OnCodecDrained();
}
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.h b/chromium/media/gpu/android/media_codec_video_decoder.h
index 1c87405176b..c8277943fce 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.h
+++ b/chromium/media/gpu/android/media_codec_video_decoder.h
@@ -20,6 +20,7 @@
#include "media/base/callback_registry.h"
#include "media/base/cdm_context.h"
#include "media/base/overlay_info.h"
+#include "media/base/scoped_async_trace.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
#include "media/gpu/android/android_video_surface_chooser.h"
@@ -34,7 +35,6 @@
namespace media {
class MediaLog;
-class ScopedAsyncTrace;
struct SupportedVideoDecoderConfig;
struct PendingDecode {
@@ -353,6 +353,10 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder final
// See https://crbug.com/1081346 .
bool allow_nonsecure_overlays_ = true;
+ // If set, then the next call to `CodecConfig()` will be allowed to retry if
+ // it fails to get a codec. This is to work around b/191966399.
+ bool should_retry_codec_allocation_ = false;
+
base::WeakPtrFactory<MediaCodecVideoDecoder> weak_factory_{this};
base::WeakPtrFactory<MediaCodecVideoDecoder> codec_allocator_weak_factory_{
this};
diff --git a/chromium/media/gpu/android/ndk_video_encode_accelerator.cc b/chromium/media/gpu/android/ndk_video_encode_accelerator.cc
new file mode 100644
index 00000000000..b945f451474
--- /dev/null
+++ b/chromium/media/gpu/android/ndk_video_encode_accelerator.cc
@@ -0,0 +1,711 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/android/ndk_video_encode_accelerator.h"
+
+#include "base/android/build_info.h"
+#include "base/feature_list.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+#include "media/base/android/media_codec_util.h"
+#include "media/base/bitstream_buffer.h"
+#include "media/base/video_frame.h"
+#include "media/gpu/android/mediacodec_stubs.h"
+#include "third_party/libyuv/include/libyuv/convert_from.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
+
+namespace media {
+
+namespace {
+// Default distance between key frames. About 100 seconds between key frames,
+// the same default value we use on Windows.
+constexpr uint32_t kDefaultGOPLength = 3000;
+
+// Deliberately breaking naming convention rules, to match names from
+// MediaCodec SDK.
+constexpr int32_t BUFFER_FLAG_KEY_FRAME = 1;
+
+enum PixelFormat {
+ // Subset of MediaCodecInfo.CodecCapabilities.
+ COLOR_FORMAT_YUV420_PLANAR = 19,
+ COLOR_FORMAT_YUV420_SEMIPLANAR = 21, // Same as NV12
+};
+
+struct AMediaFormatDeleter {
+ inline void operator()(AMediaFormat* ptr) const {
+ if (ptr)
+ AMediaFormat_delete(ptr);
+ }
+};
+
+using MediaFormatPtr = std::unique_ptr<AMediaFormat, AMediaFormatDeleter>;
+
+absl::optional<PixelFormat> GetSupportedColorFormatForMime(
+ const std::string& mime) {
+ if (mime.empty())
+ return {};
+
+ auto formats = MediaCodecUtil::GetEncoderColorFormats(mime);
+ if (formats.count(COLOR_FORMAT_YUV420_SEMIPLANAR) > 0)
+ return COLOR_FORMAT_YUV420_SEMIPLANAR;
+
+ return {};
+}
+
+MediaFormatPtr CreateVideoFormat(const std::string& mime,
+ const VideoEncodeAccelerator::Config& config,
+ int framerate,
+ PixelFormat format) {
+ const int iframe_interval = config.gop_length.value_or(kDefaultGOPLength);
+ const gfx::Size& frame_size = config.input_visible_size;
+ const Bitrate& bitrate = config.bitrate;
+ MediaFormatPtr result(AMediaFormat_new());
+ AMediaFormat_setString(result.get(), AMEDIAFORMAT_KEY_MIME, mime.c_str());
+ AMediaFormat_setInt32(result.get(), AMEDIAFORMAT_KEY_WIDTH,
+ frame_size.width());
+ AMediaFormat_setInt32(result.get(), AMEDIAFORMAT_KEY_HEIGHT,
+ frame_size.height());
+
+ AMediaFormat_setInt32(result.get(), AMEDIAFORMAT_KEY_FRAME_RATE, framerate);
+ AMediaFormat_setInt32(result.get(), AMEDIAFORMAT_KEY_I_FRAME_INTERVAL,
+ iframe_interval);
+ AMediaFormat_setInt32(result.get(), AMEDIAFORMAT_KEY_COLOR_FORMAT, format);
+ if (config.require_low_delay) {
+ AMediaFormat_setInt32(result.get(), AMEDIAFORMAT_KEY_LATENCY, 1);
+ // MediaCodec supports two priorities: 0 - realtime, 1 - best effort
+ AMediaFormat_setInt32(result.get(), AMEDIAFORMAT_KEY_PRIORITY, 0);
+ }
+
+ constexpr int32_t BITRATE_MODE_VBR = 1;
+ constexpr int32_t BITRATE_MODE_CBR = 2;
+ switch (bitrate.mode()) {
+ case Bitrate::Mode::kConstant:
+ AMediaFormat_setInt32(result.get(), AMEDIAFORMAT_KEY_BITRATE_MODE,
+ BITRATE_MODE_CBR);
+ break;
+ case Bitrate::Mode::kVariable:
+ AMediaFormat_setInt32(result.get(), AMEDIAFORMAT_KEY_BITRATE_MODE,
+ BITRATE_MODE_VBR);
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ AMediaFormat_setInt32(result.get(), AMEDIAFORMAT_KEY_BIT_RATE,
+ base::saturated_cast<int32_t>(bitrate.target_bps()));
+ return result;
+}
+
+const base::Feature kAndroidNdkVideoEncoder{"AndroidNdkVideoEncoder",
+ base::FEATURE_ENABLED_BY_DEFAULT};
+
+static bool InitMediaCodec() {
+ // We need at least Android P for AMediaCodec_getInputFormat(), but in
+ // Android P we have issues with CFI and dynamic linker on arm64.
+ const base::android::SdkVersion min_supported_version =
+#if defined(ARCH_CPU_ARMEL)
+ base::android::SDK_VERSION_P;
+#else
+ base::android::SDK_VERSION_Q;
+#endif
+
+ if (base::android::BuildInfo::GetInstance()->sdk_int() <
+ min_supported_version) {
+ return false;
+ }
+
+ if (!base::FeatureList::IsEnabled(kAndroidNdkVideoEncoder))
+ return false;
+
+ media_gpu_android::StubPathMap paths;
+ static const base::FilePath::CharType kMediacodecPath[] =
+ FILE_PATH_LITERAL("libmediandk.so");
+
+ paths[media_gpu_android::kModuleMediacodec].push_back(kMediacodecPath);
+ if (!media_gpu_android::InitializeStubs(paths)) {
+ LOG(ERROR) << "Failed on loading libmediandk.so symbols";
+ return false;
+ }
+ return true;
+}
+
+bool IsThereGoodMediaCodecFor(VideoCodec codec) {
+ switch (codec) {
+ case VideoCodec::kH264:
+ if (!MediaCodecUtil::IsH264EncoderAvailable())
+ return false;
+ break;
+ case VideoCodec::kVP8:
+ if (!MediaCodecUtil::IsVp8EncoderAvailable())
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ // TODO(eugene): We should allow unaccelerated MediaCodecs for H.264
+ // because on Android we don't ship a software codec of our own.
+ // It's not enough to remove a call to IsKnownUnaccelerated(), we'll also need
+ // to change MediaCodecUtil::IsH264EncoderAvailable() to allow software
+ // encoders.
+ return !MediaCodecUtil::IsKnownUnaccelerated(codec,
+ MediaCodecDirection::ENCODER);
+}
+
+} // namespace
+
+NdkVideoEncodeAccelerator::NdkVideoEncodeAccelerator(
+ scoped_refptr<base::SequencedTaskRunner> runner)
+ : task_runner_(runner) {}
+NdkVideoEncodeAccelerator::~NdkVideoEncodeAccelerator() {
+ // It's supposed to be cleared by Destroy(), it basically checks
+ // that we destroy `this` correctly.
+ DCHECK(!media_codec_);
+}
+
+bool NdkVideoEncodeAccelerator::IsSupported() {
+ static const bool is_loaded = InitMediaCodec();
+ return is_loaded;
+}
+
+VideoEncodeAccelerator::SupportedProfiles
+NdkVideoEncodeAccelerator::GetSupportedProfiles() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ SupportedProfiles profiles;
+
+ if (!IsSupported())
+ return profiles;
+
+ // That's what Android CTS uses, so all compliant devices should support it.
+ SupportedProfile supported_profile;
+ supported_profile.max_resolution.SetSize(1920, 1080);
+ supported_profile.max_framerate_numerator = 30;
+ supported_profile.max_framerate_denominator = 1;
+ supported_profile.rate_control_modes =
+ media::VideoEncodeAccelerator::kConstantMode |
+ media::VideoEncodeAccelerator::kVariableMode;
+
+ for (auto profile : {H264PROFILE_BASELINE, VP8PROFILE_ANY}) {
+ if (!IsThereGoodMediaCodecFor(VideoCodecProfileToVideoCodec(profile)))
+ continue;
+ supported_profile.profile = profile;
+ profiles.push_back(supported_profile);
+ }
+ return profiles;
+}
+
+bool NdkVideoEncodeAccelerator::Initialize(
+ const Config& config,
+ Client* client,
+ std::unique_ptr<MediaLog> media_log) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(!media_codec_);
+ DCHECK(client);
+
+ if (!IsSupported()) {
+ MEDIA_LOG(ERROR, log_) << "Unsupported Android version.";
+ return false;
+ }
+
+ callback_weak_ptr_ = callback_weak_factory_.GetWeakPtr();
+ client_ptr_factory_ = std::make_unique<base::WeakPtrFactory<Client>>(client);
+ config_ = config;
+ effective_bitrate_ = config.bitrate;
+ log_ = std::move(media_log);
+ VideoCodec codec = VideoCodecProfileToVideoCodec(config.output_profile);
+
+ if (config.input_format != PIXEL_FORMAT_I420 &&
+ config.input_format != PIXEL_FORMAT_NV12) {
+ MEDIA_LOG(ERROR, log_) << "Unexpected combo: " << config.input_format
+ << ", " << GetProfileName(config.output_profile);
+ return false;
+ }
+
+ // Non 16x16 aligned resolutions don't work with MediaCodec unfortunately, see
+ // https://crbug.com/1084702 for details.
+ if (config.input_visible_size.width() % 16 != 0 ||
+ config.input_visible_size.height() % 16 != 0) {
+ MEDIA_LOG(ERROR, log_) << "MediaCodec is only tested with resolutions "
+ "that are 16x16 aligned.";
+ return false;
+ }
+
+ auto mime = MediaCodecUtil::CodecToAndroidMimeType(codec);
+ if (!IsThereGoodMediaCodecFor(codec)) {
+ MEDIA_LOG(ERROR, log_) << "No suitable MedicCodec found for: " << mime;
+ return false;
+ }
+
+ auto format = GetSupportedColorFormatForMime(mime);
+ if (!format.has_value()) {
+ MEDIA_LOG(ERROR, log_) << "Unsupported pixel format";
+ return false;
+ }
+ effective_framerate_ = config.initial_framerate.value_or(kDefaultFramerate);
+ auto media_format =
+ CreateVideoFormat(mime, config, effective_framerate_, format.value());
+
+ media_codec_.reset(AMediaCodec_createEncoderByType(mime.c_str()));
+ if (!media_codec_) {
+ MEDIA_LOG(ERROR, log_) << "Can't create media codec for mime type: "
+ << mime;
+ return false;
+ }
+ media_status_t status =
+ AMediaCodec_configure(media_codec_.get(), media_format.get(), nullptr,
+ nullptr, AMEDIACODEC_CONFIGURE_FLAG_ENCODE);
+ if (status != AMEDIA_OK) {
+ MEDIA_LOG(ERROR, log_) << "Can't configure media codec. Error " << status;
+ return false;
+ }
+
+ if (!SetInputBufferLayout()) {
+ MEDIA_LOG(ERROR, log_) << "Can't get input buffer layout from MediaCodec";
+ return false;
+ }
+
+ // Set MediaCodec callbacks and switch it to async mode
+ AMediaCodecOnAsyncNotifyCallback callbacks{
+ &NdkVideoEncodeAccelerator::OnAsyncInputAvailable,
+ &NdkVideoEncodeAccelerator::OnAsyncOutputAvailable,
+ &NdkVideoEncodeAccelerator::OnAsyncFormatChanged,
+ &NdkVideoEncodeAccelerator::OnAsyncError,
+ };
+ status =
+ AMediaCodec_setAsyncNotifyCallback(media_codec_.get(), callbacks, this);
+ if (status != AMEDIA_OK) {
+ MEDIA_LOG(ERROR, log_) << "Can't set media codec callback. Error "
+ << status;
+ return false;
+ }
+
+ status = AMediaCodec_start(media_codec_.get());
+ if (status != AMEDIA_OK) {
+ MEDIA_LOG(ERROR, log_) << "Can't start media codec. Error " << status;
+ return false;
+ }
+
+ // Conservative upper bound for output buffer size: decoded size + 2KB.
+ // Adding 2KB just in case the frame is really small, we don't want to
+ // end up with no space for a video codec's headers.
+ const size_t output_buffer_capacity =
+ VideoFrame::AllocationSize(config.input_format,
+ config.input_visible_size) +
+ 2048;
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&VideoEncodeAccelerator::Client::RequireBitstreamBuffers,
+ client_ptr_factory_->GetWeakPtr(), 1,
+ config.input_visible_size, output_buffer_capacity));
+
+ return true;
+}
+
+void NdkVideoEncodeAccelerator::Encode(scoped_refptr<VideoFrame> frame,
+ bool force_keyframe) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(media_codec_);
+ VideoEncoder::PendingEncode encode;
+ encode.frame = std::move(frame);
+ encode.key_frame = force_keyframe;
+ pending_frames_.push_back(std::move(encode));
+ FeedInput();
+}
+
+void NdkVideoEncodeAccelerator::UseOutputBitstreamBuffer(
+ BitstreamBuffer buffer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ available_bitstream_buffers_.push_back(std::move(buffer));
+ DrainOutput();
+}
+
+void NdkVideoEncodeAccelerator::RequestEncodingParametersChange(
+ const Bitrate& bitrate,
+ uint32_t framerate) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ MediaFormatPtr format(AMediaFormat_new());
+
+ if (effective_framerate_ != framerate)
+ AMediaFormat_setInt32(format.get(), AMEDIAFORMAT_KEY_FRAME_RATE, framerate);
+ if (effective_bitrate_ != bitrate) {
+ AMediaFormat_setInt32(format.get(), AMEDIACODEC_KEY_VIDEO_BITRATE,
+ bitrate.target_bps());
+ }
+ media_status_t status =
+ AMediaCodec_setParameters(media_codec_.get(), format.get());
+
+ if (status != AMEDIA_OK) {
+ NotifyMediaCodecError("Failed to change bitrate and framerate", status);
+ return;
+ }
+ effective_framerate_ = framerate;
+ effective_bitrate_ = bitrate;
+}
+
+void NdkVideoEncodeAccelerator::Destroy() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ client_ptr_factory_.reset();
+ callback_weak_factory_.InvalidateWeakPtrs();
+ if (media_codec_) {
+ AMediaCodec_stop(media_codec_.get());
+
+ // Internally this calls AMediaFormat_delete(), and before exiting
+ // AMediaFormat_delete() drains all calls on the internal thread that
+ // calls OnAsyncXXXXX() functions. (Even though this fact is not documented)
+ // It means by the time we actually destruct `this`, no OnAsyncXXXXX()
+ // functions will use it via saved `userdata` pointers.
+ media_codec_.reset();
+ }
+ delete this;
+}
+
+bool NdkVideoEncodeAccelerator::SetInputBufferLayout() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ if (!media_codec_)
+ return false;
+
+ MediaFormatPtr input_format(AMediaCodec_getInputFormat(media_codec_.get()));
+ if (!input_format) {
+ return false;
+ }
+
+ if (!AMediaFormat_getInt32(input_format.get(), AMEDIAFORMAT_KEY_STRIDE,
+ &input_buffer_stride_)) {
+ input_buffer_stride_ = config_.input_visible_size.width();
+ }
+ if (!AMediaFormat_getInt32(input_format.get(), AMEDIAFORMAT_KEY_SLICE_HEIGHT,
+ &input_buffer_yplane_height_)) {
+ input_buffer_yplane_height_ = config_.input_visible_size.height();
+ }
+ return true;
+}
+
+base::TimeDelta NdkVideoEncodeAccelerator::AssignMonotonicTimestamp(
+ base::TimeDelta real_timestamp) {
+ base::TimeDelta step = base::Seconds(1) / effective_framerate_;
+ auto result = next_timestamp_;
+ generated_to_real_timestamp_map_[result] = real_timestamp;
+ next_timestamp_ += step;
+ return result;
+}
+
+base::TimeDelta NdkVideoEncodeAccelerator::RetrieveRealTimestamp(
+ base::TimeDelta monotonic_timestamp) {
+ base::TimeDelta result;
+ auto it = generated_to_real_timestamp_map_.find(monotonic_timestamp);
+ if (it != generated_to_real_timestamp_map_.end()) {
+ result = it->second;
+ generated_to_real_timestamp_map_.erase(it);
+ }
+ return result;
+}
+
+void NdkVideoEncodeAccelerator::FeedInput() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(media_codec_);
+
+ if (error_occurred_)
+ return;
+
+ if (media_codec_input_buffers_.empty() || pending_frames_.empty())
+ return;
+
+ scoped_refptr<VideoFrame> frame = std::move(pending_frames_.front().frame);
+ bool key_frame = pending_frames_.front().key_frame;
+ pending_frames_.pop_front();
+
+ if (key_frame) {
+ // Signal to the media codec that it needs to include a key frame
+ MediaFormatPtr format(AMediaFormat_new());
+ AMediaFormat_setInt32(format.get(), AMEDIACODEC_KEY_REQUEST_SYNC_FRAME, 0);
+ media_status_t status =
+ AMediaCodec_setParameters(media_codec_.get(), format.get());
+
+ if (status != AMEDIA_OK) {
+ NotifyMediaCodecError("Failed to request a keyframe", status);
+ return;
+ }
+ }
+
+ size_t buffer_idx = media_codec_input_buffers_.front();
+ media_codec_input_buffers_.pop_front();
+
+ size_t capacity = 0;
+ uint8_t* buffer_ptr =
+ AMediaCodec_getInputBuffer(media_codec_.get(), buffer_idx, &capacity);
+ if (!buffer_ptr) {
+ NotifyError("Can't obtain input buffer from media codec.",
+ kPlatformFailureError);
+ return;
+ }
+
+ uint8_t* dst_y = buffer_ptr;
+ const int dst_stride_y = input_buffer_stride_;
+ const int uv_plane_offset =
+ input_buffer_yplane_height_ * input_buffer_stride_;
+ uint8_t* dst_uv = buffer_ptr + uv_plane_offset;
+ const int dst_stride_uv = input_buffer_stride_;
+
+ const gfx::Size uv_plane_size = VideoFrame::PlaneSizeInSamples(
+ PIXEL_FORMAT_NV12, VideoFrame::kUVPlane, frame->coded_size());
+ const size_t queued_size =
+ // size of Y-plane plus padding till UV-plane
+ uv_plane_offset +
+ // size of all UV-plane lines but the last one
+ (uv_plane_size.height() - 1) * dst_stride_uv +
+ // size of the very last line in UV-plane (it's not padded to full stride)
+ uv_plane_size.width() * 2;
+
+ if (queued_size > capacity) {
+ auto message = base::StringPrintf(
+ "Frame doesn't fit into the input buffer. queued_size: %zu capacity: "
+ "%zu",
+ queued_size, capacity);
+ NotifyError(message, kPlatformFailureError);
+ return;
+ }
+
+ bool converted = false;
+ if (frame->format() == PIXEL_FORMAT_I420) {
+ converted = !libyuv::I420ToNV12(
+ frame->data(VideoFrame::kYPlane), frame->stride(VideoFrame::kYPlane),
+ frame->data(VideoFrame::kUPlane), frame->stride(VideoFrame::kUPlane),
+ frame->data(VideoFrame::kVPlane), frame->stride(VideoFrame::kVPlane),
+ dst_y, dst_stride_y, dst_uv, dst_stride_uv, frame->coded_size().width(),
+ frame->coded_size().height());
+ } else if (frame->format() == PIXEL_FORMAT_NV12) {
+ // No actual scaling will be performed since src and dst sizes are the same,
+ // so NV12Scale() works simply as a glorified memcpy.
+ converted = !libyuv::NV12Scale(
+ frame->visible_data(VideoFrame::kYPlane),
+ frame->stride(VideoFrame::kYPlane),
+ frame->visible_data(VideoFrame::kUVPlane),
+ frame->stride(VideoFrame::kUVPlane), frame->coded_size().width(),
+ frame->coded_size().height(), dst_y, dst_stride_y, dst_uv,
+ dst_stride_uv, frame->coded_size().width(),
+ frame->coded_size().height(), libyuv::kFilterBox);
+ }
+
+ if (!converted) {
+ NotifyError("Failed to copy pixels to input buffer.",
+ kPlatformFailureError);
+ return;
+ }
+
+ // MediaCodec uses timestamps for rate control purposes, but we can't rely
+ // on real frame timestamps to be consistent with the configured frame rate.
+ // That's why we map real frame timestamps to generated ones that
+ // monotonically increase according to the configured frame rate.
+ // We do the opposite for each output buffer, to restore accurate frame
+ // timestamps.
+ auto generate_timestamp = AssignMonotonicTimestamp(frame->timestamp());
+ uint64_t flags = 0; // Unfortunately BUFFER_FLAG_KEY_FRAME has no effect here
+ media_status_t status = AMediaCodec_queueInputBuffer(
+ media_codec_.get(), buffer_idx, /*offset=*/0, queued_size,
+ generate_timestamp.InMicroseconds(), flags);
+ if (status != AMEDIA_OK) {
+ NotifyMediaCodecError("Failed to queueInputBuffer", status);
+ return;
+ }
+}
+
+void NdkVideoEncodeAccelerator::NotifyMediaCodecError(std::string message,
+ media_status_t error) {
+ auto message_and_code = base::StringPrintf("%s MediaCodec error code: %d",
+ message.c_str(), error);
+ NotifyError(message_and_code, kPlatformFailureError);
+}
+
+void NdkVideoEncodeAccelerator::NotifyError(base::StringPiece message,
+ Error code) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ MEDIA_LOG(ERROR, log_) << message;
+ LOG(ERROR) << message;
+ if (!error_occurred_) {
+ client_ptr_factory_->GetWeakPtr()->NotifyError(code);
+ error_occurred_ = true;
+ }
+}
+
+void NdkVideoEncodeAccelerator::OnAsyncInputAvailable(AMediaCodec* codec,
+ void* userdata,
+ int32_t index) {
+ auto* self = reinterpret_cast<NdkVideoEncodeAccelerator*>(userdata);
+ DCHECK(self);
+
+ self->task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&NdkVideoEncodeAccelerator::OnInputAvailable,
+ self->callback_weak_ptr_, index));
+}
+
+void NdkVideoEncodeAccelerator::OnInputAvailable(int32_t index) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ media_codec_input_buffers_.push_back(index);
+ FeedInput();
+}
+
+void NdkVideoEncodeAccelerator::OnAsyncOutputAvailable(
+ AMediaCodec* codec,
+ void* userdata,
+ int32_t index,
+ AMediaCodecBufferInfo* bufferInfo) {
+ auto* self = reinterpret_cast<NdkVideoEncodeAccelerator*>(userdata);
+ DCHECK(self);
+
+ self->task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&NdkVideoEncodeAccelerator::OnOutputAvailable,
+ self->callback_weak_ptr_, index, *bufferInfo));
+}
+
+void NdkVideoEncodeAccelerator::OnOutputAvailable(int32_t index,
+ AMediaCodecBufferInfo info) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ media_codec_output_buffers_.push_back({index, info});
+ DrainOutput();
+}
+
+void NdkVideoEncodeAccelerator::OnAsyncError(AMediaCodec* codec,
+ void* userdata,
+ media_status_t error,
+ int32_t actionCode,
+ const char* detail) {
+ auto* self = reinterpret_cast<NdkVideoEncodeAccelerator*>(userdata);
+ DCHECK(self);
+
+ self->task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&NdkVideoEncodeAccelerator::NotifyMediaCodecError,
+ self->callback_weak_ptr_, "Media codec async error.",
+ error));
+}
+
+bool NdkVideoEncodeAccelerator::DrainConfig() {
+ if (media_codec_output_buffers_.empty())
+ return false;
+
+ MCOutput output_buffer = media_codec_output_buffers_.front();
+ AMediaCodecBufferInfo& mc_buffer_info = output_buffer.info;
+ const size_t mc_buffer_size = static_cast<size_t>(mc_buffer_info.size);
+
+ // Check that the first buffer in the queue contains config data.
+ if ((mc_buffer_info.flags & AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG) == 0)
+ return false;
+
+ media_codec_output_buffers_.pop_front();
+ size_t capacity = 0;
+ uint8_t* buf_data = AMediaCodec_getOutputBuffer(
+ media_codec_.get(), output_buffer.buffer_index, &capacity);
+
+ if (!buf_data) {
+ NotifyError("Can't obtain output buffer from media codec.",
+ kPlatformFailureError);
+ return false;
+ }
+
+ if (mc_buffer_info.offset + mc_buffer_size > capacity) {
+ auto message = base::StringPrintf(
+ "Invalid output buffer layout."
+ "offset: %d size: %zu capacity: %zu",
+ mc_buffer_info.offset, mc_buffer_size, capacity);
+ NotifyError(message, kPlatformFailureError);
+ return false;
+ }
+
+ config_data_.resize(mc_buffer_size);
+ memcpy(config_data_.data(), buf_data + mc_buffer_info.offset, mc_buffer_size);
+ AMediaCodec_releaseOutputBuffer(media_codec_.get(),
+ output_buffer.buffer_index, false);
+ return true;
+}
+
+void NdkVideoEncodeAccelerator::DrainOutput() {
+ if (error_occurred_)
+ return;
+
+ // Config data (e.g. PPS and SPS for H.264) needs to be handled differently,
+ // because we save it for later rather than giving it as an output
+ // straight away.
+ if (DrainConfig())
+ return;
+
+ if (media_codec_output_buffers_.empty() ||
+ available_bitstream_buffers_.empty()) {
+ return;
+ }
+
+ MCOutput output_buffer = media_codec_output_buffers_.front();
+ AMediaCodecBufferInfo& mc_buffer_info = output_buffer.info;
+ const size_t mc_buffer_size = static_cast<size_t>(mc_buffer_info.size);
+ media_codec_output_buffers_.pop_front();
+
+ if ((mc_buffer_info.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) != 0)
+ return;
+ const bool key_frame = (mc_buffer_info.flags & BUFFER_FLAG_KEY_FRAME) != 0;
+
+ BitstreamBuffer bitstream_buffer =
+ std::move(available_bitstream_buffers_.back());
+ available_bitstream_buffers_.pop_back();
+
+ const size_t config_size = key_frame ? config_data_.size() : 0u;
+ if (config_size + mc_buffer_size > bitstream_buffer.size()) {
+ auto message = base::StringPrintf(
+ "Encoded output is too large. mc output size: %zu"
+ " bitstream buffer size: %zu"
+ " config size: %zu",
+ mc_buffer_size, bitstream_buffer.size(), config_size);
+ NotifyError(message, kPlatformFailureError);
+ return;
+ }
+
+ size_t capacity = 0;
+ uint8_t* buf_data = AMediaCodec_getOutputBuffer(
+ media_codec_.get(), output_buffer.buffer_index, &capacity);
+
+ if (!buf_data) {
+ NotifyError("Can't obtain output buffer from media codec.",
+ kPlatformFailureError);
+ return;
+ }
+
+ if (mc_buffer_info.offset + mc_buffer_size > capacity) {
+ auto message = base::StringPrintf(
+ "Invalid output buffer layout."
+ "offset: %d size: %zu capacity: %zu",
+ mc_buffer_info.offset, mc_buffer_size, capacity);
+ NotifyError(message, kPlatformFailureError);
+ return;
+ }
+
+ base::UnsafeSharedMemoryRegion region = bitstream_buffer.TakeRegion();
+ auto mapping =
+ region.MapAt(bitstream_buffer.offset(), bitstream_buffer.size());
+ if (!mapping.IsValid()) {
+ NotifyError("Failed to map SHM", kPlatformFailureError);
+ return;
+ }
+
+ uint8_t* output_dst = mapping.GetMemoryAs<uint8_t>();
+ if (config_size > 0) {
+ memcpy(output_dst, config_data_.data(), config_size);
+ output_dst += config_size;
+ }
+ memcpy(output_dst, buf_data, mc_buffer_size);
+
+ auto timestamp = base::Microseconds(mc_buffer_info.presentationTimeUs);
+ timestamp = RetrieveRealTimestamp(timestamp);
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&VideoEncodeAccelerator::Client::BitstreamBufferReady,
+ client_ptr_factory_->GetWeakPtr(), bitstream_buffer.id(),
+ BitstreamBufferMetadata(mc_buffer_size + config_size,
+ key_frame, timestamp)));
+ AMediaCodec_releaseOutputBuffer(media_codec_.get(),
+ output_buffer.buffer_index, false);
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/android/ndk_video_encode_accelerator.h b/chromium/media/gpu/android/ndk_video_encode_accelerator.h
new file mode 100644
index 00000000000..21e6e8b5cff
--- /dev/null
+++ b/chromium/media/gpu/android/ndk_video_encode_accelerator.h
@@ -0,0 +1,176 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_ANDROID_NDK_VIDEO_ENCODE_ACCELERATOR_H_
+#define MEDIA_GPU_ANDROID_NDK_VIDEO_ENCODE_ACCELERATOR_H_
+#include <stddef.h>
+#include <stdint.h>
+
+#include <media/NdkMediaCodec.h>
+#include <memory>
+#include <vector>
+
+#include "base/containers/circular_deque.h"
+#include "base/containers/flat_map.h"
+#include "base/memory/weak_ptr.h"
+#include "base/sequence_checker.h"
+#include "base/strings/string_piece.h"
+#include "base/task/sequenced_task_runner.h"
+#include "base/timer/timer.h"
+#include "media/base/bitrate.h"
+#include "media/base/media_log.h"
+#include "media/base/video_encoder.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/video/video_encode_accelerator.h"
+
+namespace media {
+
+class BitstreamBuffer;
+struct AMediaCodecDeleter {
+ inline void operator()(AMediaCodec* ptr) const {
+ if (ptr)
+ AMediaCodec_delete(ptr);
+ }
+};
+
+class MEDIA_GPU_EXPORT NdkVideoEncodeAccelerator final
+ : public VideoEncodeAccelerator {
+ public:
+ // |runner| - a task runner that will be used for all callbacks and external
+ // calls to this instance.
+ NdkVideoEncodeAccelerator(scoped_refptr<base::SequencedTaskRunner> runner);
+
+ NdkVideoEncodeAccelerator(const NdkVideoEncodeAccelerator&) = delete;
+ NdkVideoEncodeAccelerator& operator=(const NdkVideoEncodeAccelerator&) =
+ delete;
+ ~NdkVideoEncodeAccelerator() override;
+
+ static bool IsSupported();
+
+ // VideoEncodeAccelerator implementation.
+ VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
+ bool Initialize(const Config& config,
+ Client* client,
+ std::unique_ptr<MediaLog> media_log) override;
+ void Encode(scoped_refptr<VideoFrame> frame, bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(BitstreamBuffer buffer) override;
+ void RequestEncodingParametersChange(const Bitrate& bitrate,
+ uint32_t framerate) override;
+ void Destroy() override;
+
+ private:
+ // Called by MediaCodec when an input buffer becomes available.
+ static void OnAsyncInputAvailable(AMediaCodec* codec,
+ void* userdata,
+ int32_t index);
+ void OnInputAvailable(int32_t index);
+
+ // Called by MediaCodec when an output buffer becomes available.
+ static void OnAsyncOutputAvailable(AMediaCodec* codec,
+ void* userdata,
+ int32_t index,
+ AMediaCodecBufferInfo* bufferInfo);
+ void OnOutputAvailable(int32_t index, AMediaCodecBufferInfo bufferInfo);
+
+ // Called by MediaCodec when the output format has changed.
+ static void OnAsyncFormatChanged(AMediaCodec* codec,
+ void* userdata,
+ AMediaFormat* format) {}
+
+ // Called when the MediaCodec encountered an error.
+ static void OnAsyncError(AMediaCodec* codec,
+ void* userdata,
+ media_status_t error,
+ int32_t actionCode,
+ const char* detail);
+
+ // Ask MediaCodec what input buffer layout it prefers and set values of
+ // |input_buffer_stride_| and |input_buffer_yplane_height_|
+ bool SetInputBufferLayout();
+
+ // Read a frame from |pending_frames_| put it into an input buffer
+ // available in |media_codec_input_buffers_| and ask |media_codec_| to encode
+ // it.
+ void FeedInput();
+
+ // Read encoded data from |media_codec_output_buffers_| copy it to a buffer
+ // available in |available_bitstream_buffers_| and tell |client_ptr_factory_|
+ // that encoded data is ready.
+ void DrainOutput();
+
+ // Read config data from |media_codec_output_buffers_| and copy it to
+ // |config_data_|. |config_data_| is later used to prepend it to
+ // key-frame encoded chunks.
+ bool DrainConfig();
+
+ void NotifyMediaCodecError(std::string message, media_status_t status);
+ void NotifyError(base::StringPiece message, Error code);
+
+ base::TimeDelta AssignMonotonicTimestamp(base::TimeDelta real_timestamp);
+ base::TimeDelta RetrieveRealTimestamp(base::TimeDelta monotonic_timestamp);
+
+ SEQUENCE_CHECKER(sequence_checker_);
+
+ // VideoEncodeAccelerator::Client callbacks go here. Invalidated once any
+ // error triggers.
+ std::unique_ptr<base::WeakPtrFactory<Client>> client_ptr_factory_;
+
+ using MediaCodecPtr = std::unique_ptr<AMediaCodec, AMediaCodecDeleter>;
+
+ MediaCodecPtr media_codec_;
+
+ Config config_;
+
+ bool error_occurred_ = false;
+
+ uint32_t effective_framerate_ = 0;
+ Bitrate effective_bitrate_;
+
+ // Y and UV plane strides in the encoder's input buffer
+ int32_t input_buffer_stride_ = 0;
+
+ // Y-plane height in the encoder's input
+ int32_t input_buffer_yplane_height_ = 0;
+
+ // A runner for all callbacks and external calls to public methods.
+ scoped_refptr<base::SequencedTaskRunner> task_runner_;
+
+ // Indices of input buffers currently pending in media codec.
+ base::circular_deque<size_t> media_codec_input_buffers_;
+
+ // Info about output buffers currently pending in media codec.
+ struct MCOutput {
+ int32_t buffer_index;
+ AMediaCodecBufferInfo info;
+ };
+ base::circular_deque<MCOutput> media_codec_output_buffers_;
+
+ // Frames waiting to be passed to the codec, queued until an input buffer is
+ // available.
+ base::circular_deque<VideoEncoder::PendingEncode> pending_frames_;
+
+ // Bitstream buffers waiting to be populated & returned to the client.
+ std::vector<BitstreamBuffer> available_bitstream_buffers_;
+
+ // Monotonically-growing timestamp that will be assigned to the next frame
+ base::TimeDelta next_timestamp_;
+
+ // Map from artificial monotonically-growing to real frame timestamp.
+ base::flat_map<base::TimeDelta, base::TimeDelta>
+ generated_to_real_timestamp_map_;
+
+ std::unique_ptr<MediaLog> log_;
+
+ // SPS and PPS NALs etc.
+ std::vector<uint8_t> config_data_;
+
+ // Declared last to ensure that all weak pointers are invalidated before
+ // other destructors run.
+ base::WeakPtr<NdkVideoEncodeAccelerator> callback_weak_ptr_;
+ base::WeakPtrFactory<NdkVideoEncodeAccelerator> callback_weak_factory_{this};
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_ANDROID_NDK_VIDEO_ENCODE_ACCELERATOR_H_
diff --git a/chromium/media/gpu/android/ndk_video_encode_accelerator_tests.cc b/chromium/media/gpu/android/ndk_video_encode_accelerator_tests.cc
new file mode 100644
index 00000000000..3e714755753
--- /dev/null
+++ b/chromium/media/gpu/android/ndk_video_encode_accelerator_tests.cc
@@ -0,0 +1,321 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <map>
+#include <vector>
+
+#include "base/android/build_info.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/run_loop.h"
+#include "base/test/task_environment.h"
+#include "media/base/bitstream_buffer.h"
+#include "media/base/media_util.h"
+#include "media/base/test_helpers.h"
+#include "media/base/video_codecs.h"
+#include "media/base/video_frame.h"
+#include "media/base/video_util.h"
+#include "media/gpu/android/ndk_video_encode_accelerator.h"
+#include "media/video/fake_gpu_memory_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#include "third_party/libyuv/include/libyuv.h"
+#include "third_party/libyuv/include/libyuv/convert_from.h"
+
+using testing::Return;
+
+namespace media {
+
+struct VideoParams {
+ VideoCodecProfile profile;
+ VideoPixelFormat pixel_format;
+};
+
+class NdkVideoEncoderAcceleratorTest
+ : public ::testing::TestWithParam<VideoParams>,
+ public VideoEncodeAccelerator::Client {
+ public:
+ void SetUp() override {
+ if (!NdkVideoEncodeAccelerator::IsSupported())
+ GTEST_SKIP() << "Not supported Android version";
+
+ auto args = GetParam();
+ profile_ = args.profile;
+ codec_ = VideoCodecProfileToVideoCodec(profile_);
+ pixel_format_ = args.pixel_format;
+
+ auto profiles = MakeNdkAccelerator()->GetSupportedProfiles();
+ bool codec_supported =
+ std::any_of(profiles.begin(), profiles.end(),
+ [this](VideoEncodeAccelerator::SupportedProfile p) {
+ return p.profile == profile_;
+ });
+
+ if (!codec_supported) {
+ GTEST_SKIP() << "Device doesn't have hw encoder for: "
+ << GetProfileName(profile_);
+ }
+ }
+
+ void TearDown() override {}
+
+ // Implementation for VEA::Client
+ void RequireBitstreamBuffers(unsigned int input_count,
+ const gfx::Size& input_coded_size,
+ size_t output_buffer_size) override {
+ output_buffer_size_ = output_buffer_size;
+ input_buffer_size_ =
+ VideoFrame::AllocationSize(PIXEL_FORMAT_I420, input_coded_size);
+ SendNewBuffer();
+ if (!OnRequireBuffer())
+ loop_.Quit();
+ }
+
+ void BitstreamBufferReady(int32_t bitstream_buffer_id,
+ const BitstreamBufferMetadata& metadata) override {
+ outputs_.push_back({bitstream_buffer_id, metadata});
+ SendNewBuffer();
+ if (!OnBufferReady())
+ loop_.Quit();
+ }
+
+ void NotifyError(VideoEncodeAccelerator::Error error) override {
+ error_ = error;
+ if (!OnError())
+ loop_.Quit();
+ }
+
+ MOCK_METHOD(bool, OnRequireBuffer, ());
+ MOCK_METHOD(bool, OnBufferReady, ());
+ MOCK_METHOD(bool, OnError, ());
+
+ protected:
+ void SendNewBuffer() {
+ auto buffer = output_pool_->MaybeAllocateBuffer(output_buffer_size_);
+ if (!buffer) {
+ FAIL() << "Can't allocate memory buffer";
+ }
+
+ const base::UnsafeSharedMemoryRegion& region = buffer->GetRegion();
+ auto mapping = region.Map();
+ memset(mapping.memory(), 0, mapping.size());
+
+ auto id = ++last_buffer_id_;
+ accelerator_->UseOutputBitstreamBuffer(
+ BitstreamBuffer(id, region.Duplicate(), region.GetSize()));
+ id_to_buffer_[id] = std::move(buffer);
+ }
+
+ scoped_refptr<VideoFrame> CreateI420Frame(gfx::Size size,
+ uint32_t color,
+ base::TimeDelta timestamp) {
+ auto frame = VideoFrame::CreateFrame(PIXEL_FORMAT_I420, size,
+ gfx::Rect(size), size, timestamp);
+ auto y = color & 0xFF;
+ auto u = (color >> 8) & 0xFF;
+ auto v = (color >> 16) & 0xFF;
+ libyuv::I420Rect(
+ frame->data(VideoFrame::kYPlane), frame->stride(VideoFrame::kYPlane),
+ frame->data(VideoFrame::kUPlane), frame->stride(VideoFrame::kUPlane),
+ frame->data(VideoFrame::kVPlane), frame->stride(VideoFrame::kVPlane),
+ 0, // left
+ 0, // top
+ frame->visible_rect().width(), // right
+ frame->visible_rect().height(), // bottom
+ y, // Y color
+ u, // U color
+ v); // V color
+ return frame;
+ }
+
+ scoped_refptr<VideoFrame> CreateNV12Frame(gfx::Size size,
+ uint32_t color,
+ base::TimeDelta timestamp) {
+ auto i420_frame = CreateI420Frame(size, color, timestamp);
+ auto nv12_frame = VideoFrame::CreateFrame(PIXEL_FORMAT_NV12, size,
+ gfx::Rect(size), size, timestamp);
+ auto status = ConvertAndScaleFrame(*i420_frame, *nv12_frame, resize_buff_);
+ EXPECT_TRUE(status.is_ok());
+ return nv12_frame;
+ }
+
+ scoped_refptr<VideoFrame> CreateRGBFrame(gfx::Size size,
+ uint32_t color,
+ base::TimeDelta timestamp) {
+ auto frame = VideoFrame::CreateFrame(PIXEL_FORMAT_XRGB, size,
+ gfx::Rect(size), size, timestamp);
+
+ libyuv::ARGBRect(frame->data(VideoFrame::kARGBPlane),
+ frame->stride(VideoFrame::kARGBPlane),
+ 0, // left
+ 0, // top
+ frame->visible_rect().width(), // right
+ frame->visible_rect().height(), // bottom
+ color);
+
+ return frame;
+ }
+
+ scoped_refptr<VideoFrame> CreateFrame(gfx::Size size,
+ VideoPixelFormat format,
+ base::TimeDelta timestamp,
+ uint32_t color = 0x964050) {
+ switch (format) {
+ case PIXEL_FORMAT_I420:
+ return CreateI420Frame(size, color, timestamp);
+ case PIXEL_FORMAT_NV12:
+ return CreateNV12Frame(size, color, timestamp);
+ case PIXEL_FORMAT_XRGB:
+ return CreateRGBFrame(size, color, timestamp);
+ default:
+ EXPECT_TRUE(false) << "not supported pixel format";
+ return nullptr;
+ }
+ }
+
+ VideoEncodeAccelerator::Config GetDefaultConfig() {
+ gfx::Size frame_size(640, 480);
+ uint32_t framerate = 30;
+ auto bitrate = Bitrate::ConstantBitrate(1000000u);
+ return VideoEncodeAccelerator::Config(pixel_format_, frame_size, profile_,
+ bitrate, framerate, 1000);
+ }
+
+ void Run() { loop_.Run(); }
+
+ std::unique_ptr<NullMediaLog> NullLog() {
+ return std::make_unique<NullMediaLog>();
+ }
+
+ std::unique_ptr<VideoEncodeAccelerator> MakeNdkAccelerator() {
+ auto runner = task_environment_.GetMainThreadTaskRunner();
+ return base::WrapUnique<VideoEncodeAccelerator>(
+ new NdkVideoEncodeAccelerator(runner));
+ }
+
+ VideoCodec codec_;
+ VideoCodecProfile profile_;
+ VideoPixelFormat pixel_format_;
+
+ base::test::TaskEnvironment task_environment_;
+ base::RunLoop loop_;
+ std::unique_ptr<VideoEncodeAccelerator> accelerator_;
+ size_t output_buffer_size_ = 0;
+ scoped_refptr<base::UnsafeSharedMemoryPool> output_pool_ =
+ base::MakeRefCounted<base::UnsafeSharedMemoryPool>();
+ std::map<int32_t, std::unique_ptr<base::UnsafeSharedMemoryPool::Handle>>
+ id_to_buffer_;
+ struct Output {
+ int32_t id;
+ BitstreamBufferMetadata md;
+ };
+ std::vector<Output> outputs_;
+ absl::optional<VideoEncodeAccelerator::Error> error_;
+ size_t input_buffer_size_ = 0;
+ int32_t last_buffer_id_ = 0;
+ std::vector<uint8_t> resize_buff_;
+};
+
+TEST_P(NdkVideoEncoderAcceleratorTest, InitializeAndDestroy) {
+ auto config = GetDefaultConfig();
+ accelerator_ = MakeNdkAccelerator();
+ EXPECT_CALL(*this, OnRequireBuffer()).WillOnce(Return(false));
+
+ bool result = accelerator_->Initialize(config, this, NullLog());
+ ASSERT_TRUE(result);
+ Run();
+ EXPECT_GE(id_to_buffer_.size(), 1u);
+ accelerator_.reset();
+ EXPECT_FALSE(error_.has_value());
+}
+
+TEST_P(NdkVideoEncoderAcceleratorTest, HandleEncodingError) {
+ auto config = GetDefaultConfig();
+ accelerator_ = MakeNdkAccelerator();
+ EXPECT_CALL(*this, OnRequireBuffer()).WillOnce(Return(true));
+ EXPECT_CALL(*this, OnError()).WillOnce(Return(false));
+
+ bool result = accelerator_->Initialize(config, this, NullLog());
+ ASSERT_TRUE(result);
+
+ auto size = config.input_visible_size;
+ // A frame with an unsupported pixel format works as a way to induce an error.
+ auto frame = VideoFrame::CreateFrame(PIXEL_FORMAT_NV21, size, gfx::Rect(size),
+ size, {});
+ accelerator_->Encode(frame, true);
+
+ Run();
+ EXPECT_EQ(outputs_.size(), 0u);
+ EXPECT_TRUE(error_.has_value());
+}
+
+TEST_P(NdkVideoEncoderAcceleratorTest, EncodeSeveralFrames) {
+ const size_t total_frames_count = 10;
+ const size_t key_frame_index = 7;
+ auto config = GetDefaultConfig();
+ accelerator_ = MakeNdkAccelerator();
+ EXPECT_CALL(*this, OnRequireBuffer()).WillRepeatedly(Return(true));
+ EXPECT_CALL(*this, OnBufferReady()).WillRepeatedly([this]() {
+ if (outputs_.size() < total_frames_count)
+ return true;
+ return false;
+ });
+
+ bool result = accelerator_->Initialize(config, this, NullLog());
+ ASSERT_TRUE(result);
+
+ uint32_t color = 0x964050;
+ auto duration = base::Milliseconds(16);
+ for (auto frame_index = 0u; frame_index < total_frames_count; frame_index++) {
+ auto timestamp = frame_index * duration;
+ auto frame =
+ CreateFrame(config.input_visible_size, pixel_format_, timestamp, color);
+ color = (color << 1) + frame_index;
+ bool key_frame = (frame_index == key_frame_index);
+ accelerator_->Encode(frame, key_frame);
+ }
+
+ Run();
+ EXPECT_FALSE(error_.has_value());
+ EXPECT_GE(outputs_.size(), total_frames_count);
+ // Here we'd like to test that the output at `key_frame_index`
+ // has a keyframe flag set to true, but because MediaCodec
+ // is unreliable in inserting keyframes at our request we can't test
+ // for it. In practice it usually works, just not always.
+
+ for (auto& output : outputs_) {
+ auto& mapping = id_to_buffer_[output.id]->GetMapping();
+ EXPECT_GE(mapping.size(), output.md.payload_size_bytes);
+ EXPECT_GT(output.md.payload_size_bytes, 0u);
+ auto span = mapping.GetMemoryAsSpan<uint8_t>();
+ bool found_not_zero =
+ std::any_of(span.begin(), span.end(), [](uint8_t x) { return x != 0; });
+ EXPECT_TRUE(found_not_zero);
+ }
+}
+
+std::string PrintTestParams(const testing::TestParamInfo<VideoParams>& info) {
+ auto result = GetProfileName(info.param.profile) + "__" +
+ VideoPixelFormatToString(info.param.pixel_format);
+
+ // GTest doesn't like spaces, but profile names have spaces, so we need
+ // to replace them with underscores.
+ std::replace(result.begin(), result.end(), ' ', '_');
+ return result;
+}
+
+VideoParams kParams[] = {
+ {VP8PROFILE_MIN, PIXEL_FORMAT_I420},
+ {VP8PROFILE_MIN, PIXEL_FORMAT_NV12},
+ {H264PROFILE_BASELINE, PIXEL_FORMAT_I420},
+ {H264PROFILE_BASELINE, PIXEL_FORMAT_NV12},
+};
+
+INSTANTIATE_TEST_SUITE_P(AllNdkEncoderTests,
+ NdkVideoEncoderAcceleratorTest,
+ ::testing::ValuesIn(kParams),
+ PrintTestParams);
+
+} // namespace media \ No newline at end of file
diff --git a/chromium/media/gpu/android/video_frame_factory_impl.cc b/chromium/media/gpu/android/video_frame_factory_impl.cc
index 2b26054454c..f03da5443c4 100644
--- a/chromium/media/gpu/android/video_frame_factory_impl.cc
+++ b/chromium/media/gpu/android/video_frame_factory_impl.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
+#include "base/check_op.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/task/single_thread_task_runner.h"
@@ -33,32 +34,8 @@
namespace media {
namespace {
-// The frames must be copied when threaded texture mailboxes are in use
-// (http://crbug.com/582170). This texture copy can be avoided if
-// AImageReader/AHardwareBuffer is supported and AImageReader
-// max size is not limited to 1 (crbug.com/1091945).
-absl::optional<VideoFrameMetadata::CopyMode> GetVideoFrameCopyMode(
- bool enable_threaded_texture_mailboxes) {
- if (!enable_threaded_texture_mailboxes)
- return absl::nullopt;
-
- // If we can run thread-safe, we don't need to copy.
- if (features::NeedThreadSafeAndroidMedia())
- return absl::nullopt;
-
- return features::IsWebViewZeroCopyVideoEnabled()
- ? VideoFrameMetadata::CopyMode::kCopyMailboxesOnly
- : VideoFrameMetadata::CopyMode::kCopyToNewTexture;
-}
-
gpu::TextureOwner::Mode GetTextureOwnerMode(
- VideoFrameFactory::OverlayMode overlay_mode,
- const absl::optional<VideoFrameMetadata::CopyMode>& copy_mode) {
- if (copy_mode == VideoFrameMetadata::kCopyMailboxesOnly) {
- DCHECK(features::IsWebViewZeroCopyVideoEnabled());
- return gpu::TextureOwner::Mode::kAImageReaderInsecureMultithreaded;
- }
-
+ VideoFrameFactory::OverlayMode overlay_mode) {
switch (overlay_mode) {
case VideoFrameFactory::OverlayMode::kDontRequestPromotionHints:
case VideoFrameFactory::OverlayMode::kRequestPromotionHints:
@@ -82,7 +59,6 @@ gpu::TextureOwner::Mode GetTextureOwnerMode(
static void AllocateTextureOwnerOnGpuThread(
VideoFrameFactory::InitCB init_cb,
VideoFrameFactory::OverlayMode overlay_mode,
- const absl::optional<VideoFrameMetadata::CopyMode>& copy_mode,
scoped_refptr<gpu::RefCountedLock> drdc_lock,
scoped_refptr<gpu::SharedContextState> shared_context_state) {
if (!shared_context_state) {
@@ -92,7 +68,7 @@ static void AllocateTextureOwnerOnGpuThread(
std::move(init_cb).Run(gpu::TextureOwner::Create(
gpu::TextureOwner::CreateTexture(shared_context_state),
- GetTextureOwnerMode(overlay_mode, copy_mode), shared_context_state,
+ GetTextureOwnerMode(overlay_mode), shared_context_state,
std::move(drdc_lock)));
}
@@ -110,8 +86,9 @@ VideoFrameFactoryImpl::VideoFrameFactoryImpl(
: gpu::RefCountedLockHelperDrDc(std::move(drdc_lock)),
image_provider_(std::move(image_provider)),
gpu_task_runner_(std::move(gpu_task_runner)),
- copy_mode_(GetVideoFrameCopyMode(
- gpu_preferences.enable_threaded_texture_mailboxes)),
+ video_frame_copy_required_(
+ gpu_preferences.enable_threaded_texture_mailboxes &&
+ !features::NeedThreadSafeAndroidMedia()),
mre_manager_(std::move(mre_manager)),
frame_info_helper_(std::move(frame_info_helper)) {}
@@ -128,7 +105,7 @@ void VideoFrameFactoryImpl::Initialize(OverlayMode overlay_mode,
// call |init_cb|.
auto gpu_init_cb = base::BindOnce(&AllocateTextureOwnerOnGpuThread,
BindToCurrentLoop(std::move(init_cb)),
- overlay_mode, copy_mode_, GetDrDcLock());
+ overlay_mode, GetDrDcLock());
image_provider_->Initialize(std::move(gpu_init_cb));
}
@@ -195,11 +172,12 @@ void VideoFrameFactoryImpl::CreateVideoFrame(
return;
}
- auto image_ready_cb = base::BindOnce(
- &VideoFrameFactoryImpl::CreateVideoFrame_OnImageReady,
- weak_factory_.GetWeakPtr(), std::move(output_cb), timestamp, natural_size,
- !!codec_buffer_wait_coordinator_, std::move(promotion_hint_cb),
- pixel_format, overlay_mode_, copy_mode_, gpu_task_runner_);
+ auto image_ready_cb =
+ base::BindOnce(&VideoFrameFactoryImpl::CreateVideoFrame_OnImageReady,
+ weak_factory_.GetWeakPtr(), std::move(output_cb),
+ timestamp, natural_size, !!codec_buffer_wait_coordinator_,
+ std::move(promotion_hint_cb), pixel_format, overlay_mode_,
+ video_frame_copy_required_, gpu_task_runner_);
RequestImage(std::move(output_buffer_renderer), std::move(image_ready_cb));
}
@@ -256,7 +234,7 @@ void VideoFrameFactoryImpl::CreateVideoFrame_OnImageReady(
PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
VideoPixelFormat pixel_format,
OverlayMode overlay_mode,
- const absl::optional<VideoFrameMetadata::CopyMode>& copy_mode,
+ bool video_frame_copy_required,
scoped_refptr<base::SequencedTaskRunner> gpu_task_runner,
std::unique_ptr<CodecOutputBufferRenderer> output_buffer_renderer,
FrameInfoHelper::FrameInfo frame_info,
@@ -307,7 +285,9 @@ void VideoFrameFactoryImpl::CreateVideoFrame_OnImageReady(
std::move(output_cb).Run(nullptr);
return;
}
- frame->metadata().copy_mode = copy_mode;
+
+ frame->metadata().copy_required = video_frame_copy_required;
+
const bool is_surface_control =
overlay_mode == OverlayMode::kSurfaceControlSecure ||
overlay_mode == OverlayMode::kSurfaceControlInsecure;
diff --git a/chromium/media/gpu/android/video_frame_factory_impl.h b/chromium/media/gpu/android/video_frame_factory_impl.h
index cd30f4b3d28..42109782db7 100644
--- a/chromium/media/gpu/android/video_frame_factory_impl.h
+++ b/chromium/media/gpu/android/video_frame_factory_impl.h
@@ -101,7 +101,7 @@ class MEDIA_GPU_EXPORT VideoFrameFactoryImpl
PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
VideoPixelFormat pixel_format,
OverlayMode overlay_mode,
- const absl::optional<VideoFrameMetadata::CopyMode>& copy_mode,
+ bool video_frame_copy_required,
scoped_refptr<base::SequencedTaskRunner> gpu_task_runner,
std::unique_ptr<CodecOutputBufferRenderer> output_buffer_renderer,
FrameInfoHelper::FrameInfo frame_info,
@@ -122,8 +122,8 @@ class MEDIA_GPU_EXPORT VideoFrameFactoryImpl
OverlayMode overlay_mode_ = OverlayMode::kDontRequestPromotionHints;
- // Indicates how video frame needs to be copied when required.
- absl::optional<VideoFrameMetadata::CopyMode> copy_mode_;
+ // Is the video frame copy required?
+ bool video_frame_copy_required_ = false;
// Current group that new CodecImages should belong to. Do not use this on
// our thread; everything must be posted to the gpu main thread, including
diff --git a/chromium/media/gpu/android/video_frame_factory_impl_unittest.cc b/chromium/media/gpu/android/video_frame_factory_impl_unittest.cc
index ce63719394b..168f404521e 100644
--- a/chromium/media/gpu/android/video_frame_factory_impl_unittest.cc
+++ b/chromium/media/gpu/android/video_frame_factory_impl_unittest.cc
@@ -87,7 +87,7 @@ class VideoFrameFactoryImplTest : public testing::Test {
gfx::Size coded_size{100, 100};
gfx::Rect visible_rect{coded_size};
gfx::Size natural_size{coded_size};
- gfx::ColorSpace color_space{gfx::ColorSpace::CreateSCRGBLinear()};
+ gfx::ColorSpace color_space{gfx::ColorSpace::CreateSRGBLinear()};
} video_frame_params_;
void RequestVideoFrame() {
diff --git a/chromium/media/gpu/chromeos/dmabuf_video_frame_pool.h b/chromium/media/gpu/chromeos/dmabuf_video_frame_pool.h
index 87840b21c85..74b0b79e770 100644
--- a/chromium/media/gpu/chromeos/dmabuf_video_frame_pool.h
+++ b/chromium/media/gpu/chromeos/dmabuf_video_frame_pool.h
@@ -18,10 +18,6 @@
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
-namespace gpu {
-class GpuMemoryBufferFactory;
-} // namespace gpu
-
namespace media {
// Forward declare for use in AsPlatformVideoFramePool.
@@ -38,7 +34,6 @@ class MEDIA_GPU_EXPORT DmabufVideoFramePool {
using CreateFrameCB =
base::RepeatingCallback<CroStatus::Or<scoped_refptr<VideoFrame>>(
- gpu::GpuMemoryBufferFactory*,
VideoPixelFormat,
const gfx::Size&,
const gfx::Rect&,
diff --git a/chromium/media/gpu/chromeos/image_processor_test.cc b/chromium/media/gpu/chromeos/image_processor_test.cc
index 1077a4089bf..bbf844b886a 100644
--- a/chromium/media/gpu/chromeos/image_processor_test.cc
+++ b/chromium/media/gpu/chromeos/image_processor_test.cc
@@ -6,6 +6,7 @@
#include <string>
#include <tuple>
+#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/hash/md5.h"
diff --git a/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc b/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc
index 65903efb1a4..7f691e77f55 100644
--- a/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc
+++ b/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc
@@ -246,8 +246,11 @@ void MailboxVideoFrameConverter::WrapMailboxAndVideoFrameAndOutput(
mailbox_frame->set_metadata(frame->metadata());
mailbox_frame->set_ycbcr_info(frame->ycbcr_info());
mailbox_frame->metadata().read_lock_fences_enabled = true;
+ // We use origin_frame->metadata().is_webgpu_compatible instead of
+ // frame->metadata().is_webgpu_compatible because the PlatformVideoFramePool
+ // clears the metadata of the outer frame.
mailbox_frame->metadata().is_webgpu_compatible =
- enable_unsafe_webgpu_ && frame->metadata().is_webgpu_compatible;
+ enable_unsafe_webgpu_ && origin_frame->metadata().is_webgpu_compatible;
output_cb_.Run(mailbox_frame);
}
diff --git a/chromium/media/gpu/chromeos/oop_video_decoder.cc b/chromium/media/gpu/chromeos/oop_video_decoder.cc
index 71fb453d655..6f44e6b4833 100644
--- a/chromium/media/gpu/chromeos/oop_video_decoder.cc
+++ b/chromium/media/gpu/chromeos/oop_video_decoder.cc
@@ -69,11 +69,11 @@ namespace media {
// static
std::unique_ptr<VideoDecoderMixin> OOPVideoDecoder::Create(
- std::unique_ptr<MediaLog> media_log,
- scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
- base::WeakPtr<VideoDecoderMixin::Client> client,
mojo::PendingRemote<stable::mojom::StableVideoDecoder>
- pending_remote_decoder) {
+ pending_remote_decoder,
+ std::unique_ptr<media::MediaLog> media_log,
+ scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
+ base::WeakPtr<VideoDecoderMixin::Client> client) {
// TODO(b/171813538): make the destructor of this class (as well as the
// destructor of sister class VaapiVideoDecoder) public so the explicit
// argument can be removed from this call to base::WrapUnique().
@@ -83,7 +83,7 @@ std::unique_ptr<VideoDecoderMixin> OOPVideoDecoder::Create(
}
OOPVideoDecoder::OOPVideoDecoder(
- std::unique_ptr<MediaLog> media_log,
+ std::unique_ptr<media::MediaLog> media_log,
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
base::WeakPtr<VideoDecoderMixin::Client> client,
mojo::PendingRemote<stable::mojom::StableVideoDecoder>
@@ -129,11 +129,13 @@ OOPVideoDecoder::OOPVideoDecoder(
stable_video_frame_handle_releaser_remote_.set_disconnect_handler(
base::BindOnce(&OOPVideoDecoder::Stop, base::Unretained(this)));
+ DCHECK(!stable_media_log_receiver_.is_bound());
+
CHECK(!has_error_);
// TODO(b/171813538): plumb the remaining parameters.
remote_decoder_->Construct(
client_receiver_.BindNewEndpointAndPassRemote(),
- mojo::PendingRemote<stable::mojom::MediaLog>(),
+ stable_media_log_receiver_.BindNewPipeAndPassRemote(),
std::move(stable_video_frame_handle_releaser_receiver),
std::move(remote_consumer_handle), gfx::ColorSpace());
}
@@ -329,6 +331,7 @@ void OOPVideoDecoder::Stop() {
base::WeakPtr<OOPVideoDecoder> weak_this = weak_this_factory_.GetWeakPtr();
client_receiver_.reset();
+ stable_media_log_receiver_.reset();
remote_decoder_.reset();
mojo_decoder_buffer_writer_.reset();
stable_video_frame_handle_releaser_remote_.reset();
@@ -428,4 +431,12 @@ void OOPVideoDecoder::OnWaiting(WaitingReason reason) {
waiting_cb_.Run(reason);
}
+void OOPVideoDecoder::AddLogRecord(const MediaLogRecord& event) {
+ VLOGF(2);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ if (media_log_)
+ media_log_->AddLogRecord(std::make_unique<media::MediaLogRecord>(event));
+}
+
} // namespace media
diff --git a/chromium/media/gpu/chromeos/oop_video_decoder.h b/chromium/media/gpu/chromeos/oop_video_decoder.h
index d8214170f95..d61f3039544 100644
--- a/chromium/media/gpu/chromeos/oop_video_decoder.h
+++ b/chromium/media/gpu/chromeos/oop_video_decoder.h
@@ -21,17 +21,18 @@ class MojoDecoderBufferWriter;
// video decoder via Mojo. This class should be operated and
// destroyed on |decoder_task_runner_|.
class OOPVideoDecoder : public VideoDecoderMixin,
- public stable::mojom::VideoDecoderClient {
+ public stable::mojom::VideoDecoderClient,
+ public stable::mojom::MediaLog {
public:
OOPVideoDecoder(const OOPVideoDecoder&) = delete;
OOPVideoDecoder& operator=(const OOPVideoDecoder&) = delete;
static std::unique_ptr<VideoDecoderMixin> Create(
- std::unique_ptr<MediaLog> media_log,
- scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
- base::WeakPtr<VideoDecoderMixin::Client> client,
mojo::PendingRemote<stable::mojom::StableVideoDecoder>
- pending_remote_decoder);
+ pending_remote_decoder,
+ std::unique_ptr<media::MediaLog> media_log,
+ scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
+ base::WeakPtr<VideoDecoderMixin::Client> client);
// VideoDecoderMixin implementation, VideoDecoder part.
void Initialize(const VideoDecoderConfig& config,
@@ -57,8 +58,11 @@ class OOPVideoDecoder : public VideoDecoderMixin,
const base::UnguessableToken& release_token) final;
void OnWaiting(WaitingReason reason) final;
+ // stable::mojom::MediaLog implementation.
+ void AddLogRecord(const MediaLogRecord& event) final;
+
private:
- OOPVideoDecoder(std::unique_ptr<MediaLog> media_log,
+ OOPVideoDecoder(std::unique_ptr<media::MediaLog> media_log,
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
base::WeakPtr<VideoDecoderMixin::Client> client,
mojo::PendingRemote<stable::mojom::StableVideoDecoder>
@@ -93,6 +97,9 @@ class OOPVideoDecoder : public VideoDecoderMixin,
mojo::AssociatedReceiver<stable::mojom::VideoDecoderClient> client_receiver_
GUARDED_BY_CONTEXT(sequence_checker_){this};
+ mojo::Receiver<stable::mojom::MediaLog> stable_media_log_receiver_
+ GUARDED_BY_CONTEXT(sequence_checker_){this};
+
VideoDecoderType decoder_type_ GUARDED_BY_CONTEXT(sequence_checker_) =
VideoDecoderType::kUnknown;
diff --git a/chromium/media/gpu/chromeos/platform_video_frame_pool.cc b/chromium/media/gpu/chromeos/platform_video_frame_pool.cc
index 019bf5a82a0..1f077cdaf67 100644
--- a/chromium/media/gpu/chromeos/platform_video_frame_pool.cc
+++ b/chromium/media/gpu/chromeos/platform_video_frame_pool.cc
@@ -20,7 +20,6 @@ namespace {
// The default method to create frames.
CroStatus::Or<scoped_refptr<VideoFrame>> DefaultCreateFrame(
- gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -34,8 +33,7 @@ CroStatus::Or<scoped_refptr<VideoFrame>> DefaultCreateFrame(
}
scoped_refptr<VideoFrame> frame = CreateGpuMemoryBufferVideoFrame(
- gpu_memory_buffer_factory, format, coded_size, visible_rect, natural_size,
- timestamp,
+ format, coded_size, visible_rect, natural_size, timestamp,
use_protected
? gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE
: (use_linear_buffers ? gfx::BufferUsage::SCANOUT_CPU_READ_WRITE
@@ -54,10 +52,8 @@ CroStatus::Or<scoped_refptr<VideoFrame>> DefaultCreateFrame(
} // namespace
-PlatformVideoFramePool::PlatformVideoFramePool(
- gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory)
- : create_frame_cb_(base::BindRepeating(&DefaultCreateFrame)),
- gpu_memory_buffer_factory_(gpu_memory_buffer_factory) {
+PlatformVideoFramePool::PlatformVideoFramePool()
+ : create_frame_cb_(base::BindRepeating(&DefaultCreateFrame)) {
DVLOGF(4);
weak_this_ = weak_this_factory_.GetWeakPtr();
}
@@ -107,11 +103,19 @@ scoped_refptr<VideoFrame> PlatformVideoFramePool::GetFrame() {
// with (10, 20), 100x100 we cannot (even though it's contained in the
// former). Hence the use of GetRectSizeFromOrigin() to calculate the
// visible rect for |new_frame|.
+ //
+ // TODO(b/230370976): after https://crrev.com/c/3597211,
+ // PlatformVideoFramePool doesn't use a GpuMemoryBufferFactory for
+ // allocating dma-bufs which means DRM framebuffers won't be created for a
+ // dma-buf at allocation time (instead, it will be created at the moment of
+ // creating a SharedImage). That means that we probably don't need to take
+ // the |visible_rect_| into account for IsSameFormat_Locked() any more which
+ // implies that we can create |new_frame| using gfx::Rect(coded_size) as
+ // the visible rectangle.
CHECK(use_linear_buffers_.has_value());
CroStatus::Or<scoped_refptr<VideoFrame>> new_frame = create_frame_cb_.Run(
- gpu_memory_buffer_factory_, format, coded_size,
- gfx::Rect(GetRectSizeFromOrigin(visible_rect_)), coded_size,
- use_protected_, *use_linear_buffers_, base::TimeDelta());
+ format, coded_size, gfx::Rect(GetRectSizeFromOrigin(visible_rect_)),
+ coded_size, use_protected_, *use_linear_buffers_, base::TimeDelta());
if (new_frame.has_error()) {
// TODO(crbug.com/c/1103510) Push the error up instead of dropping it.
return nullptr;
@@ -195,12 +199,19 @@ CroStatus::Or<GpuBufferLayout> PlatformVideoFramePool::Initialize(
// hardware overlay purposes. The caveat is that different visible rectangles
// can map to the same framebuffer size, i.e., all the visible rectangles with
// the same bottom-right corner map to the same framebuffer size.
+ //
+ // TODO(b/230370976): after https://crrev.com/c/3597211,
+ // PlatformVideoFramePool doesn't use a GpuMemoryBufferFactory for allocating
+ // dma-bufs which means DRM framebuffers won't be created for a dma-buf at
+ // allocation time (instead, it will be created at the moment of creating a
+ // SharedImage). That means that we probably don't need to take the
+ // |visible_rect| into account for IsSameFormat_Locked() any more.
if (!IsSameFormat_Locked(format, coded_size, visible_rect, use_protected)) {
DVLOGF(4) << "The video frame format is changed. Clearing the pool.";
free_frames_.clear();
auto maybe_frame = create_frame_cb_.Run(
- gpu_memory_buffer_factory_, format, coded_size, visible_rect,
- natural_size, use_protected, *use_linear_buffers_, base::TimeDelta());
+ format, coded_size, visible_rect, natural_size, use_protected,
+ *use_linear_buffers_, base::TimeDelta());
if (maybe_frame.has_error())
return std::move(maybe_frame).error();
auto frame = std::move(maybe_frame).value();
diff --git a/chromium/media/gpu/chromeos/platform_video_frame_pool.h b/chromium/media/gpu/chromeos/platform_video_frame_pool.h
index 456a3eef798..e16e9e4419f 100644
--- a/chromium/media/gpu/chromeos/platform_video_frame_pool.h
+++ b/chromium/media/gpu/chromeos/platform_video_frame_pool.h
@@ -37,12 +37,9 @@ namespace media {
// old parameter values will be purged from the pool.
class MEDIA_GPU_EXPORT PlatformVideoFramePool : public DmabufVideoFramePool {
public:
- explicit PlatformVideoFramePool(
- gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory);
-
+ PlatformVideoFramePool();
PlatformVideoFramePool(const PlatformVideoFramePool&) = delete;
PlatformVideoFramePool& operator=(const PlatformVideoFramePool&) = delete;
-
~PlatformVideoFramePool() override;
// Returns the ID of the GpuMemoryBuffer wrapped by |frame|.
@@ -111,11 +108,6 @@ class MEDIA_GPU_EXPORT PlatformVideoFramePool : public DmabufVideoFramePool {
// The function used to allocate new frames.
CreateFrameCB create_frame_cb_ GUARDED_BY(lock_);
- // Used to allocate the video frame GpuMemoryBuffers, passed directly to
- // the callback that creates video frames. Indirectly owned by GpuChildThread;
- // therefore alive as long as the GPU process is.
- gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory_ = nullptr;
-
// The arguments of current frame. We allocate new frames only if a pixel
// format or size in |frame_layout_| is changed. When GetFrame() is
// called, we update |visible_rect_| and |natural_size_| of wrapped frames.
diff --git a/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc b/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc
index e922fbced31..2428ae9465f 100644
--- a/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc
+++ b/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc
@@ -25,7 +25,6 @@ namespace {
template <uint64_t modifier>
CroStatus::Or<scoped_refptr<VideoFrame>> CreateGpuMemoryBufferVideoFrame(
- gpu::GpuMemoryBufferFactory* factory,
VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -50,7 +49,7 @@ class PlatformVideoFramePoolTest
public:
PlatformVideoFramePoolTest()
: task_environment_(base::test::TaskEnvironment::TimeSource::MOCK_TIME),
- pool_(new PlatformVideoFramePool(nullptr)) {
+ pool_(new PlatformVideoFramePool()) {
SetCreateFrameCB(
base::BindRepeating(&CreateGpuMemoryBufferVideoFrame<
gfx::NativePixmapHandle::kNoModifier>));
@@ -294,10 +293,10 @@ TEST_P(PlatformVideoFramePoolTest, InitializeFail) {
const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam());
ASSERT_TRUE(fourcc.has_value());
SetCreateFrameCB(base::BindRepeating(
- [](gpu::GpuMemoryBufferFactory* factory, VideoPixelFormat format,
- const gfx::Size& coded_size, const gfx::Rect& visible_rect,
- const gfx::Size& natural_size, bool use_protected,
- bool use_linear_buffers, base::TimeDelta timestamp) {
+ [](VideoPixelFormat format, const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect, const gfx::Size& natural_size,
+ bool use_protected, bool use_linear_buffers,
+ base::TimeDelta timestamp) {
return CroStatus::Or<scoped_refptr<VideoFrame>>(
CroStatus::Codes::kFailedToCreateVideoFrame);
}));
diff --git a/chromium/media/gpu/chromeos/platform_video_frame_utils.cc b/chromium/media/gpu/chromeos/platform_video_frame_utils.cc
index 57a4a06452c..3b5fdac923c 100644
--- a/chromium/media/gpu/chromeos/platform_video_frame_utils.cc
+++ b/chromium/media/gpu/chromeos/platform_video_frame_utils.cc
@@ -21,9 +21,7 @@
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
-#include "gpu/ipc/common/gpu_client_ids.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
-#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "media/base/color_plane_layout.h"
#include "media/base/format_utils.h"
#include "media/base/scopedfd_helper.h"
@@ -127,7 +125,7 @@ class GbmDeviceWrapper {
version->name,
base::checked_cast<std::string::size_type>(version->name_len));
drmFreeVersion(version);
- if (base::LowerCaseEqualsASCII(version_name, "vgem"))
+ if (base::EqualsCaseInsensitiveASCII(version_name, "vgem"))
continue;
gbm_device_ = ui::CreateGbmDevice(render_node_file_.GetPlatformFile());
if (gbm_device_)
@@ -168,38 +166,15 @@ class GbmDeviceWrapper {
};
gfx::GpuMemoryBufferHandle AllocateGpuMemoryBufferHandle(
- gpu::GpuMemoryBufferFactory* factory,
VideoPixelFormat pixel_format,
const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- gfx::BufferUsage buffer_usage,
- base::ScopedClosureRunner& destroy_cb) {
+ gfx::BufferUsage buffer_usage) {
gfx::GpuMemoryBufferHandle gmb_handle;
auto buffer_format = VideoPixelFormatToGfxBufferFormat(pixel_format);
if (!buffer_format)
return gmb_handle;
-
- if (!factory) {
- return GbmDeviceWrapper::Get()->CreateGpuMemoryBuffer(
- *buffer_format, coded_size, buffer_usage);
- }
-
- gmb_handle = factory->CreateGpuMemoryBuffer(
- GetNextGpuMemoryBufferId(), coded_size,
- /*framebuffer_size=*/GetRectSizeFromOrigin(visible_rect), *buffer_format,
- buffer_usage, gpu::kPlatformVideoFramePoolClientId,
- gfx::kNullAcceleratedWidget);
- DCHECK(gmb_handle.is_null() || gmb_handle.type != gfx::NATIVE_PIXMAP ||
- VideoFrame::NumPlanes(pixel_format) ==
- gmb_handle.native_pixmap_handle.planes.size());
- if (gmb_handle.is_null())
- return gmb_handle;
- destroy_cb.ReplaceClosure(
- base::BindOnce(&gpu::GpuMemoryBufferFactory::DestroyGpuMemoryBuffer,
- base::Unretained(factory), gmb_handle.id,
- gpu::kPlatformVideoFramePoolClientId));
-
- return gmb_handle;
+ return GbmDeviceWrapper::Get()->CreateGpuMemoryBuffer(
+ *buffer_format, coded_size, buffer_usage);
}
} // namespace
@@ -212,17 +187,14 @@ gfx::GpuMemoryBufferId GetNextGpuMemoryBufferId() {
}
scoped_refptr<VideoFrame> CreateGpuMemoryBufferVideoFrame(
- gpu::GpuMemoryBufferFactory* factory,
VideoPixelFormat pixel_format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp,
gfx::BufferUsage buffer_usage) {
- base::ScopedClosureRunner destroy_cb((base::DoNothing()));
auto gmb_handle =
- AllocateGpuMemoryBufferHandle(factory, pixel_format, coded_size,
- visible_rect, buffer_usage, destroy_cb);
+ AllocateGpuMemoryBufferHandle(pixel_format, coded_size, buffer_usage);
if (gmb_handle.is_null() || gmb_handle.type != gfx::NATIVE_PIXMAP)
return nullptr;
@@ -244,9 +216,8 @@ scoped_refptr<VideoFrame> CreateGpuMemoryBufferVideoFrame(
auto frame = VideoFrame::WrapExternalGpuMemoryBuffer(
visible_rect, natural_size, std::move(gpu_memory_buffer), mailbox_holders,
base::NullCallback(), timestamp);
-
- if (frame)
- frame->AddDestructionObserver(destroy_cb.Release());
+ if (!frame)
+ return nullptr;
// We only support importing non-DISJOINT multi-planar GbmBuffer right now.
// TODO(crbug.com/1258986): Add DISJOINT support.
@@ -256,17 +227,14 @@ scoped_refptr<VideoFrame> CreateGpuMemoryBufferVideoFrame(
}
scoped_refptr<VideoFrame> CreatePlatformVideoFrame(
- gpu::GpuMemoryBufferFactory* factory,
VideoPixelFormat pixel_format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp,
gfx::BufferUsage buffer_usage) {
- base::ScopedClosureRunner destroy_cb((base::DoNothing()));
auto gmb_handle =
- AllocateGpuMemoryBufferHandle(factory, pixel_format, coded_size,
- visible_rect, buffer_usage, destroy_cb);
+ AllocateGpuMemoryBufferHandle(pixel_format, coded_size, buffer_usage);
if (gmb_handle.is_null() || gmb_handle.type != gfx::NATIVE_PIXMAP)
return nullptr;
@@ -291,20 +259,18 @@ scoped_refptr<VideoFrame> CreatePlatformVideoFrame(
if (!frame)
return nullptr;
- frame->AddDestructionObserver(destroy_cb.Release());
return frame;
}
absl::optional<VideoFrameLayout> GetPlatformVideoFrameLayout(
- gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat pixel_format,
const gfx::Size& coded_size,
gfx::BufferUsage buffer_usage) {
// |visible_rect| and |natural_size| do not matter here. |coded_size| is set
// as a dummy variable.
- auto frame = CreatePlatformVideoFrame(
- gpu_memory_buffer_factory, pixel_format, coded_size,
- gfx::Rect(coded_size), coded_size, base::TimeDelta(), buffer_usage);
+ auto frame =
+ CreatePlatformVideoFrame(pixel_format, coded_size, gfx::Rect(coded_size),
+ coded_size, base::TimeDelta(), buffer_usage);
return frame ? absl::make_optional<VideoFrameLayout>(frame->layout())
: absl::nullopt;
}
diff --git a/chromium/media/gpu/chromeos/platform_video_frame_utils.h b/chromium/media/gpu/chromeos/platform_video_frame_utils.h
index 89833bb8130..af278c8ce3d 100644
--- a/chromium/media/gpu/chromeos/platform_video_frame_utils.h
+++ b/chromium/media/gpu/chromeos/platform_video_frame_utils.h
@@ -13,27 +13,16 @@
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/linux/native_pixmap_dmabuf.h"
-namespace gpu {
-class GpuMemoryBufferFactory;
-} // namespace gpu
-
namespace media {
// Returns a GpuMemoryBufferId that's guaranteed to be different from those
// returned by previous calls. This function is thread safe.
MEDIA_GPU_EXPORT gfx::GpuMemoryBufferId GetNextGpuMemoryBufferId();
-// Create GpuMemoryBuffer-based media::VideoFrame with |buffer_usage|.
-// See //media/base/video_frame.h for other parameters.
-// If |gpu_memory_buffer_factory| is not null, it's used to allocate the
-// GpuMemoryBuffer and it must outlive the returned VideoFrame. If it's null,
-// the buffer is allocated using the render node (this is intended to be used
-// only for the internals of video encoding when the usage is
-// VEA_READ_CAMERA_AND_CPU_READ_WRITE). It's safe to call this function
-// concurrently from multiple threads (as long as either
-// |gpu_memory_buffer_factory| is thread-safe or nullptr).
+// Creates a STORAGE_GPU_MEMORY_BUFFER VideoFrame backed by a NATIVE_PIXMAP
+// GpuMemoryBuffer allocated with |buffer_usage|. See //media/base/video_frame.h
+// for the other parameters. This function is thread-safe.
MEDIA_GPU_EXPORT scoped_refptr<VideoFrame> CreateGpuMemoryBufferVideoFrame(
- gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat pixel_format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -41,17 +30,10 @@ MEDIA_GPU_EXPORT scoped_refptr<VideoFrame> CreateGpuMemoryBufferVideoFrame(
base::TimeDelta timestamp,
gfx::BufferUsage buffer_usage);
-// Create platform dependent media::VideoFrame with |buffer_usage|.
-// See //media/base/video_frame.h for other parameters.
-// If |gpu_memory_buffer_factory| is not null, it's used to allocate the
-// video frame's storage and it must outlive the returned VideoFrame. If it's
-// null, the buffer is allocated using the render node (this is intended to be
-// used only for the internals of video encoding when the usage is
-// VEA_READ_CAMERA_AND_CPU_READ_WRITE). It's safe to call this function
-// concurrently from multiple threads (as long as either
-// |gpu_memory_buffer_factory| is thread-safe or nullptr).
+// Creates a STORAGE_DMABUFS VideoFrame whose buffer is allocated with
+// |buffer_usage|. See //media/base/video_frame.h for the other parameters. This
+// function is thread-safe.
MEDIA_GPU_EXPORT scoped_refptr<VideoFrame> CreatePlatformVideoFrame(
- gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat pixel_format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -59,17 +41,13 @@ MEDIA_GPU_EXPORT scoped_refptr<VideoFrame> CreatePlatformVideoFrame(
base::TimeDelta timestamp,
gfx::BufferUsage buffer_usage);
-// Get VideoFrameLayout of platform dependent video frame with |pixel_format|,
-// |coded_size| and |buffer_usage|. This function is not cost-free as this
-// allocates a platform dependent video frame.
-// If |gpu_memory_buffer_factory| is not null, it's used to allocate the
-// video frame's storage. If it's null, the storage is allocated using the
-// render node (this is intended to be used only for the internals of video
-// encoding when the usage is VEA_READ_CAMERA_AND_CPU_READ_WRITE). It's
-// safe to call this function concurrently from multiple threads (as long as
-// either |gpu_memory_buffer_factory| is thread-safe or nullptr).
+// Returns the VideoFrameLayout of a VideoFrame allocated with
+// CreatePlatformVideoFrame(), i.e., all parameters are forwarded to that
+// function (|visible_rect| is set to gfx::Rect(|coded_size|), |natural_size| is
+// set to |coded_size|, and |timestamp| is set to base::TimeDelta()). This
+// function is not cheap as it allocates a buffer. Returns absl::nullopt if the
+// buffer allocation fails. This function is thread-safe.
MEDIA_GPU_EXPORT absl::optional<VideoFrameLayout> GetPlatformVideoFrameLayout(
- gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat pixel_format,
const gfx::Size& coded_size,
gfx::BufferUsage buffer_usage);
diff --git a/chromium/media/gpu/chromeos/platform_video_frame_utils_unittest.cc b/chromium/media/gpu/chromeos/platform_video_frame_utils_unittest.cc
index f525ad9bfa1..fe3ff628261 100644
--- a/chromium/media/gpu/chromeos/platform_video_frame_utils_unittest.cc
+++ b/chromium/media/gpu/chromeos/platform_video_frame_utils_unittest.cc
@@ -17,7 +17,6 @@
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/time/time.h"
-#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "media/base/color_plane_layout.h"
#include "media/base/format_utils.h"
#include "media/base/video_frame.h"
@@ -66,63 +65,6 @@ scoped_refptr<VideoFrame> CreateMockDmaBufVideoFrame(
std::move(dmabuf_fds),
base::TimeDelta());
}
-
-class FakeGpuMemoryBufferFactory : public gpu::GpuMemoryBufferFactory {
- public:
- FakeGpuMemoryBufferFactory() = default;
- ~FakeGpuMemoryBufferFactory() override {
- for (const auto& buffers : gpu_memory_buffers_) {
- if (!buffers.second.empty()) {
- LOG(ERROR) << "client_id=" << buffers.first
- << ", the number of unreleased buffers="
- << buffers.second.size();
- ADD_FAILURE();
- }
- }
- }
-
- // gpu::GpuMemoryBufferFactory implementation.
- gfx::GpuMemoryBufferHandle CreateGpuMemoryBuffer(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- const gfx::Size& framebuffer_size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- int client_id,
- gpu::SurfaceHandle surface_handle) override {
- if (base::Contains(gpu_memory_buffers_[client_id], id))
- return gfx::GpuMemoryBufferHandle();
-
- FakeGpuMemoryBuffer fake_gmb(size, format);
- gfx::GpuMemoryBufferHandle handle = fake_gmb.CloneHandle();
- handle.id = id;
- gpu_memory_buffers_[client_id].insert(id);
- return handle;
- }
-
- void DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
- int client_id) override {
- ASSERT_TRUE(base::Contains(gpu_memory_buffers_, client_id));
- ASSERT_TRUE(base::Contains(gpu_memory_buffers_[client_id], id));
- gpu_memory_buffers_[client_id].erase(id);
- }
-
- bool FillSharedMemoryRegionWithBufferContents(
- gfx::GpuMemoryBufferHandle buffer_handle,
- base::UnsafeSharedMemoryRegion shared_memory) override {
- NOTIMPLEMENTED();
- return false;
- }
-
- // Type-checking downcast routine.
- gpu::ImageFactory* AsImageFactory() override {
- NOTIMPLEMENTED();
- return nullptr;
- }
-
- private:
- std::map<int, std::set<gfx::GpuMemoryBufferId>> gpu_memory_buffers_;
-};
} // namespace
TEST(PlatformVideoFrameUtilsTest, CreateNativePixmapDmaBuf) {
@@ -159,6 +101,12 @@ TEST(PlatformVideoFrameUtilsTest, CreateNativePixmapDmaBuf) {
}
}
+// TODO(b/230370976): remove this #if/#endif guard. To do so, we need to be able
+// to mock/fake the allocator used by CreatePlatformVideoFrame() and
+// CreateGpuMemoryBufferVideoFrame() so that those functions return a
+// non-nullptr frame on platforms where allocating NV12 buffers is not
+// supported.
+#if BUILDFLAG(IS_CHROMEOS_ASH)
TEST(PlatformVideoFrameUtilsTest, CreateVideoFrame) {
constexpr VideoPixelFormat kPixelFormat = PIXEL_FORMAT_NV12;
constexpr gfx::Size kCodedSize(320, 240);
@@ -168,9 +116,6 @@ TEST(PlatformVideoFrameUtilsTest, CreateVideoFrame) {
constexpr gfx::BufferUsage kBufferUsage =
gfx::BufferUsage::VEA_READ_CAMERA_AND_CPU_READ_WRITE;
- auto gpu_memory_buffer_factory =
- std::make_unique<FakeGpuMemoryBufferFactory>();
-
const VideoFrame::StorageType storage_types[] = {
VideoFrame::STORAGE_DMABUFS,
VideoFrame::STORAGE_GPU_MEMORY_BUFFER,
@@ -179,14 +124,14 @@ TEST(PlatformVideoFrameUtilsTest, CreateVideoFrame) {
scoped_refptr<VideoFrame> frame;
switch (storage_type) {
case VideoFrame::STORAGE_DMABUFS:
- frame = CreatePlatformVideoFrame(
- gpu_memory_buffer_factory.get(), kPixelFormat, kCodedSize,
- kVisibleRect, kNaturalSize, kTimeStamp, kBufferUsage);
+ frame =
+ CreatePlatformVideoFrame(kPixelFormat, kCodedSize, kVisibleRect,
+ kNaturalSize, kTimeStamp, kBufferUsage);
break;
case VideoFrame::STORAGE_GPU_MEMORY_BUFFER:
- frame = CreateGpuMemoryBufferVideoFrame(
- gpu_memory_buffer_factory.get(), kPixelFormat, kCodedSize,
- kVisibleRect, kNaturalSize, kTimeStamp, kBufferUsage);
+ frame = CreateGpuMemoryBufferVideoFrame(kPixelFormat, kCodedSize,
+ kVisibleRect, kNaturalSize,
+ kTimeStamp, kBufferUsage);
break;
default:
NOTREACHED();
@@ -214,4 +159,5 @@ TEST(PlatformVideoFrameUtilsTest, CreateVideoFrame) {
};
}
}
+#endif // BUILDFLAG(IS_CHROMEOS_ASH)
} // namespace media
diff --git a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc
index f777cadc961..e4dcec8be32 100644
--- a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc
+++ b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/location.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
#include "media/base/format_utils.h"
#include "media/base/media_util.h"
@@ -81,8 +82,7 @@ scoped_refptr<DecoderBuffer> DecryptBitstreamBuffer(
BitstreamBuffer bitstream_buffer) {
// Check to see if we have our secure buffer tag and then extract the
// decrypt parameters.
- auto mem_region = base::UnsafeSharedMemoryRegion::Deserialize(
- bitstream_buffer.DuplicateRegion());
+ auto mem_region = bitstream_buffer.DuplicateRegion();
if (!mem_region.IsValid()) {
DVLOG(2) << "Invalid shared memory region";
return nullptr;
@@ -242,7 +242,8 @@ bool VdVideoDecodeAccelerator::Initialize(const Config& config,
std::make_unique<VdaVideoFramePool>(weak_this_, client_task_runner_);
vd_ = create_vd_cb_.Run(client_task_runner_, std::move(frame_pool),
std::make_unique<VideoFrameConverter>(),
- std::make_unique<NullMediaLog>());
+ std::make_unique<NullMediaLog>(),
+ /*oop_video_decoder=*/{});
if (!vd_)
return false;
diff --git a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h
index 79f0da6458a..517cd550f56 100644
--- a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h
+++ b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h
@@ -22,6 +22,7 @@
#include "media/gpu/chromeos/vda_video_frame_pool.h"
#include "media/gpu/chromeos/video_frame_converter.h"
#include "media/gpu/media_gpu_export.h"
+#include "media/mojo/mojom/stable/stable_video_decoder.mojom.h"
#include "media/video/video_decode_accelerator.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "ui/gfx/gpu_memory_buffer.h"
@@ -51,7 +52,8 @@ class MEDIA_GPU_EXPORT VdVideoDecodeAccelerator
scoped_refptr<base::SequencedTaskRunner>,
std::unique_ptr<DmabufVideoFramePool>,
std::unique_ptr<VideoFrameConverter>,
- std::unique_ptr<MediaLog>)>;
+ std::unique_ptr<MediaLog>,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder>)>;
// Create VdVideoDecodeAccelerator instance, and call Initialize().
// Return nullptr if Initialize() failed.
diff --git a/chromium/media/gpu/chromeos/video_decoder_pipeline.cc b/chromium/media/gpu/chromeos/video_decoder_pipeline.cc
index 3ec81996dac..79263cd0edc 100644
--- a/chromium/media/gpu/chromeos/video_decoder_pipeline.cc
+++ b/chromium/media/gpu/chromeos/video_decoder_pipeline.cc
@@ -21,6 +21,7 @@
#include "media/gpu/chromeos/dmabuf_video_frame_pool.h"
#include "media/gpu/chromeos/image_processor.h"
#include "media/gpu/chromeos/image_processor_factory.h"
+#include "media/gpu/chromeos/oop_video_decoder.h"
#include "media/gpu/chromeos/platform_video_frame_pool.h"
#include "media/gpu/macros.h"
#include "media/media_buildflags.h"
@@ -78,7 +79,8 @@ VideoDecoderMixin::VideoDecoderMixin(
std::unique_ptr<MediaLog> media_log,
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
base::WeakPtr<VideoDecoderMixin::Client> client)
- : decoder_task_runner_(std::move(decoder_task_runner)),
+ : media_log_(std::move(media_log)),
+ decoder_task_runner_(std::move(decoder_task_runner)),
client_(std::move(client)) {}
VideoDecoderMixin::~VideoDecoderMixin() = default;
@@ -92,17 +94,24 @@ std::unique_ptr<VideoDecoder> VideoDecoderPipeline::Create(
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
std::unique_ptr<DmabufVideoFramePool> frame_pool,
std::unique_ptr<VideoFrameConverter> frame_converter,
- std::unique_ptr<MediaLog> media_log) {
+ std::unique_ptr<MediaLog> media_log,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder> oop_video_decoder) {
DCHECK(client_task_runner);
DCHECK(frame_pool);
DCHECK(frame_converter);
- CreateDecoderFunctionCB create_decoder_function_cb =
+ CreateDecoderFunctionCB create_decoder_function_cb;
+ if (oop_video_decoder) {
+ create_decoder_function_cb =
+ base::BindOnce(&OOPVideoDecoder::Create, std::move(oop_video_decoder));
+ } else {
+ create_decoder_function_cb =
#if BUILDFLAG(USE_VAAPI)
- base::BindOnce(&VaapiVideoDecoder::Create);
+ base::BindOnce(&VaapiVideoDecoder::Create);
#elif BUILDFLAG(USE_V4L2_CODEC)
- base::BindOnce(&V4L2VideoDecoder::Create);
+ base::BindOnce(&V4L2VideoDecoder::Create);
#endif
+ }
auto* pipeline = new VideoDecoderPipeline(
std::move(client_task_runner), std::move(frame_pool),
@@ -669,9 +678,9 @@ VideoDecoderPipeline::PickDecoderOutputFormat(
#error "Unsupported platform"
#endif
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#if BUILDFLAG(IS_LINUX)
// viable_candidate should always be set unless using L1 protected content,
- // which isn't an option on linux or lacros.
+ // which isn't an option on linux.
CHECK(viable_candidate);
#endif
@@ -732,13 +741,8 @@ VideoDecoderPipeline::PickDecoderOutputFormat(
if (need_aux_frame_pool) {
// Initialize the auxiliary frame pool with the input format of the image
- // processor. Note that we pass nullptr as the GpuMemoryBufferFactory. That
- // way, the pool will allocate buffers using minigbm directly instead of
- // going through Ozone which means it won't create DRM/KMS framebuffers for
- // those buffers. This is good because these buffers don't end up as
- // overlays anyway.
- auxiliary_frame_pool_ = std::make_unique<PlatformVideoFramePool>(
- /*gpu_memory_buffer_factory=*/nullptr);
+ // processor.
+ auxiliary_frame_pool_ = std::make_unique<PlatformVideoFramePool>();
auxiliary_frame_pool_->set_parent_task_runner(decoder_task_runner_);
CroStatus::Or<GpuBufferLayout> status_or_layout =
diff --git a/chromium/media/gpu/chromeos/video_decoder_pipeline.h b/chromium/media/gpu/chromeos/video_decoder_pipeline.h
index 61560d32e99..24588db7bd9 100644
--- a/chromium/media/gpu/chromeos/video_decoder_pipeline.h
+++ b/chromium/media/gpu/chromeos/video_decoder_pipeline.h
@@ -21,6 +21,7 @@
#include "media/gpu/chromeos/image_processor_with_pool.h"
#include "media/gpu/chromeos/video_frame_converter.h"
#include "media/gpu/media_gpu_export.h"
+#include "media/mojo/mojom/stable/stable_video_decoder.mojom.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/native_pixmap.h"
@@ -140,7 +141,8 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
std::unique_ptr<DmabufVideoFramePool> frame_pool,
std::unique_ptr<VideoFrameConverter> frame_converter,
- std::unique_ptr<MediaLog> media_log);
+ std::unique_ptr<MediaLog> media_log,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder> oop_video_decoder);
static absl::optional<SupportedVideoDecoderConfigs> GetSupportedConfigs(
const gpu::GpuDriverBugWorkarounds& workarounds);
diff --git a/chromium/media/gpu/gpu_video_accelerator_util.cc b/chromium/media/gpu/gpu_video_accelerator_util.cc
index ae601a6d2c3..eee356bba5c 100644
--- a/chromium/media/gpu/gpu_video_accelerator_util.cc
+++ b/chromium/media/gpu/gpu_video_accelerator_util.cc
@@ -33,6 +33,20 @@ STATIC_ASSERT_ENUM_MATCH(VP9PROFILE_PROFILE3);
STATIC_ASSERT_ENUM_MATCH(HEVCPROFILE_MAIN);
STATIC_ASSERT_ENUM_MATCH(HEVCPROFILE_MAIN10);
STATIC_ASSERT_ENUM_MATCH(HEVCPROFILE_MAIN_STILL_PICTURE);
+STATIC_ASSERT_ENUM_MATCH(HEVCPROFILE_REXT);
+STATIC_ASSERT_ENUM_MATCH(HEVCPROFILE_HIGH_THROUGHPUT);
+STATIC_ASSERT_ENUM_MATCH(HEVCPROFILE_MULTIVIEW_MAIN);
+STATIC_ASSERT_ENUM_MATCH(HEVCPROFILE_SCALABLE_MAIN);
+STATIC_ASSERT_ENUM_MATCH(HEVCPROFILE_3D_MAIN);
+STATIC_ASSERT_ENUM_MATCH(HEVCPROFILE_SCREEN_EXTENDED);
+STATIC_ASSERT_ENUM_MATCH(HEVCPROFILE_SCALABLE_REXT);
+STATIC_ASSERT_ENUM_MATCH(HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED);
+STATIC_ASSERT_ENUM_MATCH(DOLBYVISION_PROFILE0);
+STATIC_ASSERT_ENUM_MATCH(DOLBYVISION_PROFILE4);
+STATIC_ASSERT_ENUM_MATCH(DOLBYVISION_PROFILE5);
+STATIC_ASSERT_ENUM_MATCH(DOLBYVISION_PROFILE7);
+STATIC_ASSERT_ENUM_MATCH(DOLBYVISION_PROFILE8);
+STATIC_ASSERT_ENUM_MATCH(DOLBYVISION_PROFILE9);
STATIC_ASSERT_ENUM_MATCH(AV1PROFILE_PROFILE_MAIN);
STATIC_ASSERT_ENUM_MATCH(AV1PROFILE_PROFILE_HIGH);
STATIC_ASSERT_ENUM_MATCH(AV1PROFILE_PROFILE_PRO);
@@ -123,6 +137,8 @@ GpuVideoAcceleratorUtil::ConvertGpuToMediaEncodeProfiles(
profile.max_resolution = gpu_profile.max_resolution;
profile.max_framerate_numerator = gpu_profile.max_framerate_numerator;
profile.max_framerate_denominator = gpu_profile.max_framerate_denominator;
+ // If VBR is supported in the future, remove this hard-coding of CBR.
+ profile.rate_control_modes = media::VideoEncodeAccelerator::kConstantMode;
profiles.push_back(profile);
}
return profiles;
diff --git a/chromium/media/gpu/gpu_video_decode_accelerator_factory.cc b/chromium/media/gpu/gpu_video_decode_accelerator_factory.cc
index 6687b11865f..98f1e2ee734 100644
--- a/chromium/media/gpu/gpu_video_decode_accelerator_factory.cc
+++ b/chromium/media/gpu/gpu_video_decode_accelerator_factory.cc
@@ -201,8 +201,8 @@ GpuVideoDecodeAcceleratorFactory::CreateV4L2VDA(
scoped_refptr<V4L2Device> device = V4L2Device::Create();
if (device.get()) {
decoder.reset(new V4L2VideoDecodeAccelerator(
- gl::GLSurfaceEGL::GetHardwareDisplay(), gl_client_.get_context,
- gl_client_.make_context_current, device));
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay(),
+ gl_client_.get_context, gl_client_.make_context_current, device));
}
return decoder;
}
@@ -216,8 +216,8 @@ GpuVideoDecodeAcceleratorFactory::CreateV4L2SliceVDA(
scoped_refptr<V4L2Device> device = V4L2Device::Create();
if (device.get()) {
decoder.reset(new V4L2SliceVideoDecodeAccelerator(
- device, gl::GLSurfaceEGL::GetHardwareDisplay(), gl_client_.bind_image,
- gl_client_.make_context_current));
+ device, gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay(),
+ gl_client_.bind_image, gl_client_.make_context_current));
}
return decoder;
}
diff --git a/chromium/media/gpu/gpu_video_encode_accelerator_factory.cc b/chromium/media/gpu/gpu_video_encode_accelerator_factory.cc
index 094645d5578..604c7dae131 100644
--- a/chromium/media/gpu/gpu_video_encode_accelerator_factory.cc
+++ b/chromium/media/gpu/gpu_video_encode_accelerator_factory.cc
@@ -8,6 +8,7 @@
#include "base/containers/cxx20_erase.h"
#include "base/feature_list.h"
#include "base/memory/ptr_util.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/config/gpu_preferences.h"
@@ -23,6 +24,7 @@
#endif
#if BUILDFLAG(IS_ANDROID)
#include "media/gpu/android/android_video_encode_accelerator.h"
+#include "media/gpu/android/ndk_video_encode_accelerator.h"
#endif
#if BUILDFLAG(IS_MAC)
#include "media/gpu/mac/vt_video_encode_accelerator_mac.h"
@@ -56,8 +58,13 @@ std::unique_ptr<VideoEncodeAccelerator> CreateVaapiVEA() {
#if BUILDFLAG(IS_ANDROID)
std::unique_ptr<VideoEncodeAccelerator> CreateAndroidVEA() {
- return base::WrapUnique<VideoEncodeAccelerator>(
- new AndroidVideoEncodeAccelerator());
+ if (NdkVideoEncodeAccelerator::IsSupported()) {
+ return base::WrapUnique<VideoEncodeAccelerator>(
+ new NdkVideoEncodeAccelerator(base::ThreadTaskRunnerHandle::Get()));
+ } else {
+ return base::WrapUnique<VideoEncodeAccelerator>(
+ new AndroidVideoEncodeAccelerator());
+ }
}
#endif
diff --git a/chromium/media/gpu/gpu_video_encode_accelerator_helpers.cc b/chromium/media/gpu/gpu_video_encode_accelerator_helpers.cc
index c731dff83d2..2f057cf736c 100644
--- a/chromium/media/gpu/gpu_video_encode_accelerator_helpers.cc
+++ b/chromium/media/gpu/gpu_video_encode_accelerator_helpers.cc
@@ -58,9 +58,12 @@ size_t GetMaxEncodeBitstreamBufferSize(const gfx::Size& size) {
return kMaxBitstreamBufferSizeInBytes;
}
+// This function sets the peak equal to the target. The peak can then be
+// updated by callers.
VideoBitrateAllocation AllocateBitrateForDefaultEncodingWithBitrates(
const std::vector<uint32_t>& sl_bitrates,
- const size_t num_temporal_layers) {
+ const size_t num_temporal_layers,
+ const bool uses_vbr) {
CHECK(!sl_bitrates.empty());
CHECK_LE(sl_bitrates.size(), kMaxSpatialLayers);
@@ -77,6 +80,8 @@ VideoBitrateAllocation AllocateBitrateForDefaultEncodingWithBitrates(
DCHECK_EQ(std::size(kTemporalLayersBitrateScaleFactors), kMaxTemporalLayers);
VideoBitrateAllocation bitrate_allocation;
+ bitrate_allocation = VideoBitrateAllocation(
+ uses_vbr ? Bitrate::Mode::kVariable : Bitrate::Mode::kConstant);
for (size_t spatial_id = 0; spatial_id < sl_bitrates.size(); ++spatial_id) {
const uint32_t bitrate_bps = sl_bitrates[spatial_id];
for (size_t temporal_id = 0; temporal_id < num_temporal_layers;
@@ -156,10 +161,16 @@ std::vector<uint8_t> GetFpsAllocation(size_t num_temporal_layers) {
VideoBitrateAllocation AllocateBitrateForDefaultEncoding(
const VideoEncodeAccelerator::Config& config) {
+ VideoBitrateAllocation allocation;
+ const bool use_vbr = config.bitrate.mode() == Bitrate::Mode::kVariable;
if (config.spatial_layers.empty()) {
- return AllocateBitrateForDefaultEncodingWithBitrates(
+ allocation = AllocateBitrateForDefaultEncodingWithBitrates(
{config.bitrate.target_bps()},
- /*num_temporal_layers=*/1u);
+ /*num_temporal_layers=*/1u, use_vbr);
+ if (use_vbr) {
+ allocation.SetPeakBps(config.bitrate.peak_bps());
+ }
+ return allocation;
}
const size_t num_temporal_layers =
@@ -171,14 +182,19 @@ VideoBitrateAllocation AllocateBitrateForDefaultEncoding(
bitrates.push_back(spatial_layer.bitrate_bps);
}
- return AllocateBitrateForDefaultEncodingWithBitrates(bitrates,
- num_temporal_layers);
+ allocation = AllocateBitrateForDefaultEncodingWithBitrates(
+ bitrates, num_temporal_layers, use_vbr);
+ if (use_vbr) {
+ allocation.SetPeakBps(config.bitrate.peak_bps());
+ }
+ return allocation;
}
VideoBitrateAllocation AllocateDefaultBitrateForTesting(
const size_t num_spatial_layers,
const size_t num_temporal_layers,
- const uint32_t bitrate) {
+ const uint32_t bitrate,
+ const bool uses_vbr) {
// Higher spatial layers (those to the right) get more bitrate.
constexpr double kSpatialLayersBitrateScaleFactors[][kMaxSpatialLayers] = {
{1.00, 0.00, 0.00}, // For one spatial layer.
@@ -197,8 +213,8 @@ VideoBitrateAllocation AllocateDefaultBitrateForTesting(
bitrates[sid] = bitrate * bitrate_factor;
}
- return AllocateBitrateForDefaultEncodingWithBitrates(bitrates,
- num_temporal_layers);
+ return AllocateBitrateForDefaultEncodingWithBitrates(
+ bitrates, num_temporal_layers, uses_vbr);
}
} // namespace media
diff --git a/chromium/media/gpu/gpu_video_encode_accelerator_helpers.h b/chromium/media/gpu/gpu_video_encode_accelerator_helpers.h
index 79f9b8c0637..66d1b95875b 100644
--- a/chromium/media/gpu/gpu_video_encode_accelerator_helpers.h
+++ b/chromium/media/gpu/gpu_video_encode_accelerator_helpers.h
@@ -43,17 +43,21 @@ MEDIA_GPU_EXPORT VideoBitrateAllocation
AllocateBitrateForDefaultEncoding(const VideoEncodeAccelerator::Config& config);
// Create VideoBitrateAllocation with |num_spatial_layers|,
-// |num_temporal_layers| and |bitrate|. |bitrate| is the bitrate of the entire
+// |num_temporal_layers| and |bitrate|, additionally indicating if the
+// constructed bitrate should |use_vbr|. |bitrate| is the bitrate of the entire
// stream. |num_temporal_layers| is the number of temporal layers in each
-// spatial layer.
+// spatial layer. |use_vbr| indicates whether the bitrate should have
+// |Bitrate::Mode::kVariable.|
// First, |bitrate| is distributed to spatial layers based on libwebrtc bitrate
// division. Then the bitrate of each spatial layer is distributed to temporal
// layers in the spatial layer based on the same bitrate division ratio as a
-// software encoder.
+// software encoder. If a variable bitrate is requested, the peak will be set
+// equal to the target.
MEDIA_GPU_EXPORT VideoBitrateAllocation
AllocateDefaultBitrateForTesting(const size_t num_spatial_layers,
const size_t num_temporal_layers,
- const uint32_t bitrate);
+ const uint32_t bitrate,
+ const bool uses_vbr);
} // namespace media
#endif // MEDIA_GPU_GPU_VIDEO_ENCODE_ACCELERATOR_HELPERS_H_
diff --git a/chromium/media/gpu/h264_decoder.cc b/chromium/media/gpu/h264_decoder.cc
index 259c76784c0..4754a1e974b 100644
--- a/chromium/media/gpu/h264_decoder.cc
+++ b/chromium/media/gpu/h264_decoder.cc
@@ -90,22 +90,28 @@ H264Decoder::H264Accelerator::H264Accelerator() = default;
H264Decoder::H264Accelerator::~H264Accelerator() = default;
-H264Decoder::H264Accelerator::Status H264Decoder::H264Accelerator::SetStream(
- base::span<const uint8_t> stream,
- const DecryptConfig* decrypt_config) {
- return H264Decoder::H264Accelerator::Status::kNotSupported;
-}
+void H264Decoder::H264Accelerator::ProcessSPS(
+ const H264SPS* sps,
+ base::span<const uint8_t> sps_nalu_data) {}
+
+void H264Decoder::H264Accelerator::ProcessPPS(
+ const H264PPS* pps,
+ base::span<const uint8_t> pps_nalu_data) {}
H264Decoder::H264Accelerator::Status
H264Decoder::H264Accelerator::ParseEncryptedSliceHeader(
const std::vector<base::span<const uint8_t>>& data,
const std::vector<SubsampleEntry>& subsamples,
- const std::vector<uint8_t>& sps_nalu_data,
- const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_header_out) {
return H264Decoder::H264Accelerator::Status::kNotSupported;
}
+H264Decoder::H264Accelerator::Status H264Decoder::H264Accelerator::SetStream(
+ base::span<const uint8_t> stream,
+ const DecryptConfig* decrypt_config) {
+ return H264Decoder::H264Accelerator::Status::kNotSupported;
+}
+
H264Decoder::H264Decoder(std::unique_ptr<H264Accelerator> accelerator,
VideoCodecProfile profile,
const VideoColorSpace& container_color_space)
@@ -1292,7 +1298,6 @@ H264Decoder::H264Accelerator::Status H264Decoder::ProcessEncryptedSliceHeader(
all_subsamples.insert(all_subsamples.end(), subsamples.begin(),
subsamples.end());
return accelerator_->ParseEncryptedSliceHeader(spans, all_subsamples,
- last_sps_nalu_, last_pps_nalu_,
curr_slice_hdr_.get());
}
@@ -1553,9 +1558,10 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
bool need_new_buffers = false;
if (!ProcessSPS(sps_id, &need_new_buffers))
SET_ERROR_AND_RETURN();
+ accelerator_->ProcessSPS(
+ parser_.GetSPS(sps_id),
+ base::span<const uint8_t>(curr_nalu_->data, curr_nalu_->size));
- last_sps_nalu_.assign(curr_nalu_->data,
- curr_nalu_->data + curr_nalu_->size);
if (state_ == State::kNeedStreamMetadata)
state_ = State::kAfterReset;
@@ -1576,9 +1582,9 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
par_res = parser_.ParsePPS(&last_parsed_pps_id_);
if (par_res != H264Parser::kOk)
SET_ERROR_AND_RETURN();
-
- last_pps_nalu_.assign(curr_nalu_->data,
- curr_nalu_->data + curr_nalu_->size);
+ accelerator_->ProcessPPS(
+ parser_.GetPPS(last_parsed_pps_id_),
+ base::span<const uint8_t>(curr_nalu_->data, curr_nalu_->size));
break;
}
diff --git a/chromium/media/gpu/h264_decoder.h b/chromium/media/gpu/h264_decoder.h
index e07526ff996..716e8a10c85 100644
--- a/chromium/media/gpu/h264_decoder.h
+++ b/chromium/media/gpu/h264_decoder.h
@@ -74,6 +74,18 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
// this situation as normal and return from Decode() with kRanOutOfSurfaces.
virtual scoped_refptr<H264Picture> CreateH264Picture() = 0;
+ // Provides the raw NALU data for an SPS. The |sps| passed to
+ // SubmitFrameMetadata() is always the most recent SPS passed to
+ // ProcessSPS() with the same |seq_parameter_set_id|.
+ virtual void ProcessSPS(const H264SPS* sps,
+ base::span<const uint8_t> sps_nalu_data);
+
+ // Provides the raw NALU data for a PPS. The |pps| passed to
+ // SubmitFrameMetadata() is always the most recent PPS passed to
+ // ProcessPPS() with the same |pic_parameter_set_id|.
+ virtual void ProcessPPS(const H264PPS* pps,
+ base::span<const uint8_t> pps_nalu_data);
+
// Submit metadata for the current frame, providing the current |sps| and
// |pps| for it, |dpb| has to contain all the pictures in DPB for current
// frame, and |ref_pic_p0/b0/b1| as specified in the H264 spec. Note that
@@ -106,8 +118,6 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
virtual Status ParseEncryptedSliceHeader(
const std::vector<base::span<const uint8_t>>& data,
const std::vector<SubsampleEntry>& subsamples,
- const std::vector<uint8_t>& sps_nalu_data,
- const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_header_out);
// Submit one slice for the current frame, passing the current |pps| and
@@ -374,10 +384,6 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
// the stream).
int last_parsed_pps_id_;
- // Copies of the last SPS and PPS NALUs, used for full sample encryption.
- std::vector<uint8_t> last_sps_nalu_;
- std::vector<uint8_t> last_pps_nalu_;
-
// Current NALU and slice header being processed.
std::unique_ptr<H264NALU> curr_nalu_;
std::unique_ptr<H264SliceHeader> curr_slice_hdr_;
diff --git a/chromium/media/gpu/h264_decoder_unittest.cc b/chromium/media/gpu/h264_decoder_unittest.cc
index 18a2fad58c5..823e8780d7f 100644
--- a/chromium/media/gpu/h264_decoder_unittest.cc
+++ b/chromium/media/gpu/h264_decoder_unittest.cc
@@ -24,7 +24,6 @@ using ::testing::_;
using ::testing::Args;
using ::testing::Expectation;
using ::testing::InSequence;
-using ::testing::Invoke;
using ::testing::MakeMatcher;
using ::testing::Matcher;
using ::testing::MatcherInterface;
@@ -74,8 +73,6 @@ H264Decoder::H264Accelerator::Status ParseSliceHeader(
const std::vector<uint8_t>& sps_nalu_data,
const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_hdr_out) {
- EXPECT_TRUE(!sps_nalu_data.empty());
- EXPECT_TRUE(!pps_nalu_data.empty());
// Construct the bitstream for parsing.
std::vector<uint8_t> full_data;
const std::vector<uint8_t> start_code = {0u, 0u, 1u};
@@ -120,12 +117,11 @@ class MockH264Accelerator : public H264Decoder::H264Accelerator {
MockH264Accelerator() = default;
MOCK_METHOD0(CreateH264Picture, scoped_refptr<H264Picture>());
+
MOCK_METHOD1(SubmitDecode, Status(scoped_refptr<H264Picture> pic));
- MOCK_METHOD5(ParseEncryptedSliceHeader,
+ MOCK_METHOD3(ParseEncryptedSliceHeader,
Status(const std::vector<base::span<const uint8_t>>& data,
const std::vector<SubsampleEntry>& subsamples,
- const std::vector<uint8_t>& sps_nalu_data,
- const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_hdr_out));
MOCK_METHOD7(SubmitFrameMetadata,
Status(const H264SPS* sps,
@@ -150,6 +146,19 @@ class MockH264Accelerator : public H264Decoder::H264Accelerator {
const DecryptConfig* decrypt_config));
void Reset() override {}
+
+ void ProcessSPS(const H264SPS* sps,
+ base::span<const uint8_t> sps_nalu_data) override {
+ last_sps_nalu_data.assign(sps_nalu_data.begin(), sps_nalu_data.end());
+ }
+
+ void ProcessPPS(const H264PPS* pps,
+ base::span<const uint8_t> pps_nalu_data) override {
+ last_pps_nalu_data.assign(pps_nalu_data.begin(), pps_nalu_data.end());
+ }
+
+ std::vector<uint8_t> last_sps_nalu_data;
+ std::vector<uint8_t> last_pps_nalu_data;
};
// Test H264Decoder by feeding different of h264 frame sequences and make
@@ -189,9 +198,9 @@ void H264DecoderTest::SetUp() {
VIDEO_CODEC_PROFILE_UNKNOWN);
// Sets default behaviors for mock methods for convenience.
- ON_CALL(*accelerator_, CreateH264Picture()).WillByDefault(Invoke([]() {
+ ON_CALL(*accelerator_, CreateH264Picture()).WillByDefault([]() {
return new H264Picture();
- }));
+ });
ON_CALL(*accelerator_, SubmitFrameMetadata(_, _, _, _, _, _, _))
.WillByDefault(Return(H264Decoder::H264Accelerator::Status::kOk));
ON_CALL(*accelerator_, SubmitDecode(_))
@@ -299,8 +308,14 @@ TEST_F(H264DecoderTest, DecodeSingleEncryptedFrame) {
{
InSequence sequence;
- EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _, _, _))
- .WillOnce(Invoke(&ParseSliceHeader));
+ EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _))
+ .WillOnce([this](const std::vector<base::span<const uint8_t>>& data,
+ const std::vector<SubsampleEntry>& subsamples,
+ H264SliceHeader* slice_hdr_out) {
+ return ParseSliceHeader(
+ data, subsamples, accelerator_->last_sps_nalu_data,
+ accelerator_->last_pps_nalu_data, slice_hdr_out);
+ });
EXPECT_CALL(*accelerator_, CreateH264Picture());
EXPECT_CALL(*accelerator_, SubmitFrameMetadata(_, _, _, _, _, _, _));
EXPECT_CALL(*accelerator_, SubmitSlice(_, _, _, _, _, _, _, _));
@@ -608,21 +623,27 @@ TEST_F(H264DecoderTest, ParseEncryptedSliceHeaderRetry) {
EXPECT_EQ(H264PROFILE_BASELINE, decoder_->GetProfile());
EXPECT_LE(9u, decoder_->GetRequiredNumOfPictures());
- EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _, _, _))
+ EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _))
.WillOnce(Return(H264Decoder::H264Accelerator::Status::kTryAgain));
ASSERT_EQ(AcceleratedVideoDecoder::kTryAgain, Decode(true));
// Try again, assuming key still not set. Only ParseEncryptedSliceHeader()
// should be called again.
- EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _, _, _))
+ EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _))
.WillOnce(Return(H264Decoder::H264Accelerator::Status::kTryAgain));
ASSERT_EQ(AcceleratedVideoDecoder::kTryAgain, Decode(true));
// Assume key has been provided now, next call to Decode() should proceed.
{
InSequence sequence;
- EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _, _, _))
- .WillOnce(Invoke(&ParseSliceHeader));
+ EXPECT_CALL(*accelerator_, ParseEncryptedSliceHeader(_, _, _))
+ .WillOnce([this](const std::vector<base::span<const uint8_t>>& data,
+ const std::vector<SubsampleEntry>& subsamples,
+ H264SliceHeader* slice_hdr_out) {
+ return ParseSliceHeader(
+ data, subsamples, accelerator_->last_sps_nalu_data,
+ accelerator_->last_pps_nalu_data, slice_hdr_out);
+ });
EXPECT_CALL(*accelerator_, CreateH264Picture());
EXPECT_CALL(*accelerator_, SubmitFrameMetadata(_, _, _, _, _, _, _));
EXPECT_CALL(*accelerator_, SubmitSlice(_, _, _, _, _, _, _, _));
diff --git a/chromium/media/gpu/h265_decoder.cc b/chromium/media/gpu/h265_decoder.cc
index 9d75e0d737e..36a186e5d74 100644
--- a/chromium/media/gpu/h265_decoder.cc
+++ b/chromium/media/gpu/h265_decoder.cc
@@ -31,14 +31,42 @@ bool ParseBitDepth(const H265SPS& sps, uint8_t& bit_depth) {
}
bool IsValidBitDepth(uint8_t bit_depth, VideoCodecProfile profile) {
- // Spec A.3.
switch (profile) {
+ // Spec A.3.2
case HEVCPROFILE_MAIN:
return bit_depth == 8u;
+ // Spec A.3.3
case HEVCPROFILE_MAIN10:
return bit_depth == 8u || bit_depth == 10u;
+ // Spec A.3.4
case HEVCPROFILE_MAIN_STILL_PICTURE:
return bit_depth == 8u;
+ // Spec A.3.5
+ case HEVCPROFILE_REXT:
+ return bit_depth == 8u || bit_depth == 10u || bit_depth == 12u ||
+ bit_depth == 14u || bit_depth == 16u;
+ // Spec A.3.6
+ case HEVCPROFILE_HIGH_THROUGHPUT:
+ return bit_depth == 8u || bit_depth == 10u || bit_depth == 14u ||
+ bit_depth == 16u;
+ // Spec G.11.1.1
+ case HEVCPROFILE_MULTIVIEW_MAIN:
+ return bit_depth == 8u;
+ // Spec H.11.1.1
+ case HEVCPROFILE_SCALABLE_MAIN:
+ return bit_depth == 8u || bit_depth == 10u;
+ // Spec I.11.1.1
+ case HEVCPROFILE_3D_MAIN:
+ return bit_depth == 8u;
+ // Spec A.3.7
+ case HEVCPROFILE_SCREEN_EXTENDED:
+ return bit_depth == 8u || bit_depth == 10u;
+ // Spec H.11.1.2
+ case HEVCPROFILE_SCALABLE_REXT:
+ return bit_depth == 8u || bit_depth == 12u || bit_depth == 16u;
+ // Spec A.3.8
+ case HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED:
+ return bit_depth == 8u || bit_depth == 10u || bit_depth == 14u;
default:
DVLOG(1) << "Invalid profile specified for H265";
return false;
diff --git a/chromium/media/gpu/h265_decoder_unittest.cc b/chromium/media/gpu/h265_decoder_unittest.cc
index 70ef1638533..5b2c6ec5adb 100644
--- a/chromium/media/gpu/h265_decoder_unittest.cc
+++ b/chromium/media/gpu/h265_decoder_unittest.cc
@@ -10,6 +10,7 @@
#include "base/containers/queue.h"
#include "base/containers/span.h"
#include "base/files/file_util.h"
+#include "base/memory/raw_ptr.h"
#include "media/base/test_data_util.h"
#include "media/gpu/h265_decoder.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -141,7 +142,7 @@ class H265DecoderTest : public ::testing::Test {
protected:
std::unique_ptr<H265Decoder> decoder_;
- MockH265Accelerator* accelerator_;
+ raw_ptr<MockH265Accelerator> accelerator_;
private:
base::queue<std::string> input_frame_files_;
diff --git a/chromium/media/gpu/ipc/common/media_param_traits.cc b/chromium/media/gpu/ipc/common/media_param_traits.cc
index 334d9d4596c..e7f057acf8a 100644
--- a/chromium/media/gpu/ipc/common/media_param_traits.cc
+++ b/chromium/media/gpu/ipc/common/media_param_traits.cc
@@ -16,8 +16,7 @@ void ParamTraits<media::BitstreamBuffer>::Write(base::Pickle* m,
const param_type& p) {
WriteParam(m, p.id());
WriteParam(m, static_cast<uint64_t>(p.size()));
- DCHECK_GE(p.offset(), 0);
- WriteParam(m, static_cast<uint64_t>(p.offset()));
+ WriteParam(m, p.offset());
WriteParam(m, p.presentation_timestamp());
WriteParam(m, p.key_id());
if (!p.key_id().empty()) {
@@ -32,9 +31,8 @@ bool ParamTraits<media::BitstreamBuffer>::Read(const base::Pickle* m,
param_type* r) {
DCHECK(r);
uint64_t size = 0;
- uint64_t offset = 0;
if (!(ReadParam(m, iter, &r->id_) && ReadParam(m, iter, &size) &&
- ReadParam(m, iter, &offset) &&
+ ReadParam(m, iter, &r->offset_) &&
ReadParam(m, iter, &r->presentation_timestamp_) &&
ReadParam(m, iter, &r->key_id_)))
return false;
@@ -46,13 +44,6 @@ bool ParamTraits<media::BitstreamBuffer>::Read(const base::Pickle* m,
}
r->size_ = checked_size.ValueOrDie();
- base::CheckedNumeric<off_t> checked_offset(offset);
- if (!checked_offset.IsValid()) {
- DLOG(ERROR) << "Invalid offset: " << offset;
- return false;
- }
- r->offset_ = checked_offset.ValueOrDie();
-
if (!r->key_id_.empty()) {
if (!(ReadParam(m, iter, &r->iv_) && ReadParam(m, iter, &r->subsamples_)))
return false;
diff --git a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h
index 683939e3fca..f45fa83f8e5 100644
--- a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h
+++ b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h
@@ -112,7 +112,7 @@ class GpuVideoDecodeAccelerator
GpuVideoDecodeGLClient gl_client_;
// Unowned pointer to the underlying gpu::CommandBufferStub. |this| is
- // registered as a DestuctionObserver of |stub_| and will self-delete when
+ // registered as a DestructionObserver of |stub_| and will self-delete when
// |stub_| is destroyed.
const raw_ptr<gpu::CommandBufferStub> stub_;
diff --git a/chromium/media/gpu/ipc/service/media_gpu_channel.cc b/chromium/media/gpu/ipc/service/media_gpu_channel.cc
index 563b8d5119b..885d9db8d36 100644
--- a/chromium/media/gpu/ipc/service/media_gpu_channel.cc
+++ b/chromium/media/gpu/ipc/service/media_gpu_channel.cc
@@ -26,10 +26,17 @@ class DecoderProviderImpl : public mojom::GpuAcceleratedVideoDecoderProvider,
public:
DecoderProviderImpl(gpu::CommandBufferStub* stub,
const AndroidOverlayMojoFactoryCB& overlay_factory_cb)
- : stub_(stub), overlay_factory_cb_(overlay_factory_cb) {}
+ : stub_(stub), overlay_factory_cb_(overlay_factory_cb) {
+ stub_->AddDestructionObserver(this);
+ }
+
DecoderProviderImpl(const DecoderProviderImpl&) = delete;
DecoderProviderImpl& operator=(const DecoderProviderImpl&) = delete;
- ~DecoderProviderImpl() override = default;
+ ~DecoderProviderImpl() override {
+ if (stub_) {
+ stub_->RemoveDestructionObserver(this);
+ }
+ }
// mojom::GpuAcceleratedVideoDecoderProvider:
void CreateAcceleratedVideoDecoder(
diff --git a/chromium/media/gpu/ipc/service/picture_buffer_manager.cc b/chromium/media/gpu/ipc/service/picture_buffer_manager.cc
index 7c02ce15976..47229a7365a 100644
--- a/chromium/media/gpu/ipc/service/picture_buffer_manager.cc
+++ b/chromium/media/gpu/ipc/service/picture_buffer_manager.cc
@@ -252,6 +252,7 @@ class PictureBufferManagerImpl : public PictureBufferManager {
frame->metadata().allow_overlay = picture.allow_overlay();
frame->metadata().read_lock_fences_enabled =
picture.read_lock_fences_enabled();
+ frame->metadata().is_webgpu_compatible = picture.is_webgpu_compatible();
// TODO(sandersd): Provide an API for VDAs to control this.
frame->metadata().power_efficient = true;
diff --git a/chromium/media/gpu/mac/vt_config_util.mm b/chromium/media/gpu/mac/vt_config_util.mm
index f426ff8dcd9..5977ce5bc62 100644
--- a/chromium/media/gpu/mac/vt_config_util.mm
+++ b/chromium/media/gpu/mac/vt_config_util.mm
@@ -78,11 +78,7 @@ CFStringRef GetTransferFunction(
return kCMFormatDescriptionTransferFunction_UseGamma;
case media::VideoColorSpace::TransferID::IEC61966_2_1:
- if (@available(macos 10.13, *))
- return kCVImageBufferTransferFunction_sRGB;
- DLOG(WARNING)
- << "kCVImageBufferTransferFunction_sRGB unsupported prior to 10.13";
- return nil;
+ return kCVImageBufferTransferFunction_sRGB;
case media::VideoColorSpace::TransferID::SMPTE170M:
case media::VideoColorSpace::TransferID::BT709:
@@ -95,28 +91,16 @@ CFStringRef GetTransferFunction(
return kCMFormatDescriptionTransferFunction_ITU_R_2020;
case media::VideoColorSpace::TransferID::SMPTEST2084:
- if (@available(macos 10.13, *))
- return kCMFormatDescriptionTransferFunction_SMPTE_ST_2084_PQ;
- DLOG(WARNING) << "kCMFormatDescriptionTransferFunction_SMPTE_ST_2084_PQ "
- "unsupported prior to 10.13";
- return nil;
+ return kCMFormatDescriptionTransferFunction_SMPTE_ST_2084_PQ;
case media::VideoColorSpace::TransferID::ARIB_STD_B67:
- if (@available(macos 10.13, *))
- return kCMFormatDescriptionTransferFunction_ITU_R_2100_HLG;
- DLOG(WARNING) << "kCMFormatDescriptionTransferFunction_ITU_R_2100_HLG "
- "unsupported prior to 10.13";
- return nil;
+ return kCMFormatDescriptionTransferFunction_ITU_R_2100_HLG;
case media::VideoColorSpace::TransferID::SMPTE240M:
return kCMFormatDescriptionTransferFunction_SMPTE_240M_1995;
case media::VideoColorSpace::TransferID::SMPTEST428_1:
- if (@available(macos 10.12, *))
- return kCMFormatDescriptionTransferFunction_SMPTE_ST_428_1;
- DLOG(WARNING) << "kCMFormatDescriptionTransferFunction_SMPTE_ST_428_1 "
- "unsupported prior to 10.12";
- return nil;
+ return kCMFormatDescriptionTransferFunction_SMPTE_ST_428_1;
default:
DLOG(ERROR) << "Unsupported transfer function: "
@@ -153,28 +137,18 @@ CFStringRef GetMatrix(media::VideoColorSpace::MatrixID matrix_id) {
void SetContentLightLevelInfo(const gfx::HDRMetadata& hdr_metadata,
NSMutableDictionary<NSString*, id>* extensions) {
- if (@available(macos 10.13, *)) {
- SetDictionaryValue(extensions,
- kCMFormatDescriptionExtension_ContentLightLevelInfo,
- base::mac::CFToNSCast(
- media::GenerateContentLightLevelInfo(hdr_metadata)));
- } else {
- DLOG(WARNING) << "kCMFormatDescriptionExtension_ContentLightLevelInfo "
- "unsupported prior to 10.13";
- }
+ SetDictionaryValue(extensions,
+ kCMFormatDescriptionExtension_ContentLightLevelInfo,
+ base::mac::CFToNSCast(
+ media::GenerateContentLightLevelInfo(hdr_metadata)));
}
void SetColorVolumeMetadata(const gfx::HDRMetadata& hdr_metadata,
NSMutableDictionary<NSString*, id>* extensions) {
- if (@available(macos 10.13, *)) {
- SetDictionaryValue(
- extensions, kCMFormatDescriptionExtension_MasteringDisplayColorVolume,
- base::mac::CFToNSCast(
- media::GenerateMasteringDisplayColorVolume(hdr_metadata)));
- } else {
- DLOG(WARNING) << "kCMFormatDescriptionExtension_"
- "MasteringDisplayColorVolume unsupported prior to 10.13";
- }
+ SetDictionaryValue(
+ extensions, kCMFormatDescriptionExtension_MasteringDisplayColorVolume,
+ base::mac::CFToNSCast(
+ media::GenerateMasteringDisplayColorVolume(hdr_metadata)));
}
void SetVp9CodecConfigurationBox(
diff --git a/chromium/media/gpu/mac/vt_config_util_unittest.cc b/chromium/media/gpu/mac/vt_config_util_unittest.cc
index dcc045c3239..f56c45267d0 100644
--- a/chromium/media/gpu/mac/vt_config_util_unittest.cc
+++ b/chromium/media/gpu/mac/vt_config_util_unittest.cc
@@ -110,20 +110,18 @@ gfx::ColorSpace ToBT709_APPLE(gfx::ColorSpace cs) {
}
void AssertHasEmptyHDRMetadata(CFDictionaryRef fmt) {
- if (__builtin_available(macos 10.13, *)) {
- // We constructed with an empty HDRMetadata, so all values should be zero.
- auto mdcv = GetDataValue(
- fmt, kCMFormatDescriptionExtension_MasteringDisplayColorVolume);
- ASSERT_EQ(24u, mdcv.size());
- for (size_t i = 0; i < mdcv.size(); ++i)
- EXPECT_EQ(0u, mdcv[i]);
-
- auto clli =
- GetDataValue(fmt, kCMFormatDescriptionExtension_ContentLightLevelInfo);
- ASSERT_EQ(4u, clli.size());
- for (size_t i = 0; i < clli.size(); ++i)
- EXPECT_EQ(0u, clli[i]);
- }
+ // We constructed with an empty HDRMetadata, so all values should be zero.
+ auto mdcv = GetDataValue(
+ fmt, kCMFormatDescriptionExtension_MasteringDisplayColorVolume);
+ ASSERT_EQ(24u, mdcv.size());
+ for (size_t i = 0; i < mdcv.size(); ++i)
+ EXPECT_EQ(0u, mdcv[i]);
+
+ auto clli =
+ GetDataValue(fmt, kCMFormatDescriptionExtension_ContentLightLevelInfo);
+ ASSERT_EQ(4u, clli.size());
+ for (size_t i = 0; i < clli.size(); ++i)
+ EXPECT_EQ(0u, clli[i]);
}
constexpr char kBitDepthKey[] = "BitsPerComponent";
@@ -137,6 +135,7 @@ TEST(VTConfigUtil, CreateFormatExtensions_H264_BT709) {
base::ScopedCFTypeRef<CFDictionaryRef> fmt(
CreateFormatExtensions(kCMVideoCodecType_H264, H264PROFILE_MAIN,
VideoColorSpace::REC709(), absl::nullopt));
+
EXPECT_EQ("avc1", GetStrValue(fmt, kCMFormatDescriptionExtension_FormatName));
EXPECT_EQ(24, GetIntValue(fmt, kCMFormatDescriptionExtension_Depth));
EXPECT_EQ(kCMFormatDescriptionColorPrimaries_ITU_R_709_2,
@@ -146,16 +145,13 @@ TEST(VTConfigUtil, CreateFormatExtensions_H264_BT709) {
EXPECT_EQ(kCMFormatDescriptionYCbCrMatrix_ITU_R_709_2,
GetCFStrValue(fmt, kCMFormatDescriptionExtension_YCbCrMatrix));
EXPECT_FALSE(GetBoolValue(fmt, kCMFormatDescriptionExtension_FullRangeVideo));
-
- if (__builtin_available(macos 10.13, *)) {
- EXPECT_TRUE(
- GetDataValue(fmt,
- kCMFormatDescriptionExtension_MasteringDisplayColorVolume)
- .empty());
- EXPECT_TRUE(
- GetDataValue(fmt, kCMFormatDescriptionExtension_ContentLightLevelInfo)
- .empty());
- }
+ EXPECT_TRUE(
+ GetDataValue(fmt,
+ kCMFormatDescriptionExtension_MasteringDisplayColorVolume)
+ .empty());
+ EXPECT_TRUE(
+ GetDataValue(fmt, kCMFormatDescriptionExtension_ContentLightLevelInfo)
+ .empty());
}
TEST(VTConfigUtil, CreateFormatExtensions_H264_BT2020_PQ) {
@@ -166,18 +162,15 @@ TEST(VTConfigUtil, CreateFormatExtensions_H264_BT2020_PQ) {
VideoColorSpace::MatrixID::BT2020_NCL,
gfx::ColorSpace::RangeID::FULL),
gfx::HDRMetadata()));
+
EXPECT_EQ("avc1", GetStrValue(fmt, kCMFormatDescriptionExtension_FormatName));
EXPECT_EQ(24, GetIntValue(fmt, kCMFormatDescriptionExtension_Depth));
-
- if (__builtin_available(macos 10.13, *)) {
- EXPECT_EQ(kCMFormatDescriptionColorPrimaries_ITU_R_2020,
- GetCFStrValue(fmt, kCMFormatDescriptionExtension_ColorPrimaries));
- EXPECT_EQ(
- kCMFormatDescriptionTransferFunction_SMPTE_ST_2084_PQ,
- GetCFStrValue(fmt, kCMFormatDescriptionExtension_TransferFunction));
- EXPECT_EQ(kCMFormatDescriptionYCbCrMatrix_ITU_R_2020,
- GetCFStrValue(fmt, kCMFormatDescriptionExtension_YCbCrMatrix));
- }
+ EXPECT_EQ(kCMFormatDescriptionColorPrimaries_ITU_R_2020,
+ GetCFStrValue(fmt, kCMFormatDescriptionExtension_ColorPrimaries));
+ EXPECT_EQ(kCMFormatDescriptionTransferFunction_SMPTE_ST_2084_PQ,
+ GetCFStrValue(fmt, kCMFormatDescriptionExtension_TransferFunction));
+ EXPECT_EQ(kCMFormatDescriptionYCbCrMatrix_ITU_R_2020,
+ GetCFStrValue(fmt, kCMFormatDescriptionExtension_YCbCrMatrix));
EXPECT_TRUE(GetBoolValue(fmt, kCMFormatDescriptionExtension_FullRangeVideo));
AssertHasEmptyHDRMetadata(fmt);
}
@@ -190,18 +183,15 @@ TEST(VTConfigUtil, CreateFormatExtensions_H264_BT2020_HLG) {
VideoColorSpace::MatrixID::BT2020_NCL,
gfx::ColorSpace::RangeID::FULL),
gfx::HDRMetadata()));
+
EXPECT_EQ("avc1", GetStrValue(fmt, kCMFormatDescriptionExtension_FormatName));
EXPECT_EQ(24, GetIntValue(fmt, kCMFormatDescriptionExtension_Depth));
-
- if (__builtin_available(macos 10.13, *)) {
- EXPECT_EQ(kCMFormatDescriptionColorPrimaries_ITU_R_2020,
- GetCFStrValue(fmt, kCMFormatDescriptionExtension_ColorPrimaries));
- EXPECT_EQ(
- kCMFormatDescriptionTransferFunction_ITU_R_2100_HLG,
- GetCFStrValue(fmt, kCMFormatDescriptionExtension_TransferFunction));
- EXPECT_EQ(kCMFormatDescriptionYCbCrMatrix_ITU_R_2020,
- GetCFStrValue(fmt, kCMFormatDescriptionExtension_YCbCrMatrix));
- }
+ EXPECT_EQ(kCMFormatDescriptionColorPrimaries_ITU_R_2020,
+ GetCFStrValue(fmt, kCMFormatDescriptionExtension_ColorPrimaries));
+ EXPECT_EQ(kCMFormatDescriptionTransferFunction_ITU_R_2100_HLG,
+ GetCFStrValue(fmt, kCMFormatDescriptionExtension_TransferFunction));
+ EXPECT_EQ(kCMFormatDescriptionYCbCrMatrix_ITU_R_2020,
+ GetCFStrValue(fmt, kCMFormatDescriptionExtension_YCbCrMatrix));
EXPECT_TRUE(GetBoolValue(fmt, kCMFormatDescriptionExtension_FullRangeVideo));
AssertHasEmptyHDRMetadata(fmt);
}
@@ -226,44 +216,43 @@ TEST(VTConfigUtil, CreateFormatExtensions_HDRMetadata) {
VideoColorSpace::MatrixID::BT2020_NCL,
gfx::ColorSpace::RangeID::FULL),
hdr_meta));
- if (__builtin_available(macos 10.13, *)) {
- {
- auto mdcv = GetDataValue(
- fmt, kCMFormatDescriptionExtension_MasteringDisplayColorVolume);
- ASSERT_EQ(24u, mdcv.size());
- std::unique_ptr<mp4::BoxReader> box_reader(
- mp4::BoxReader::ReadConcatentatedBoxes(mdcv.data(), mdcv.size(),
- nullptr));
- mp4::MasteringDisplayColorVolume mdcv_box;
- ASSERT_TRUE(mdcv_box.Parse(box_reader.get()));
- EXPECT_EQ(mdcv_box.display_primaries_gx, cv_metadata.primary_g.x());
- EXPECT_EQ(mdcv_box.display_primaries_gy, cv_metadata.primary_g.y());
- EXPECT_EQ(mdcv_box.display_primaries_bx, cv_metadata.primary_b.x());
- EXPECT_EQ(mdcv_box.display_primaries_by, cv_metadata.primary_b.y());
- EXPECT_EQ(mdcv_box.display_primaries_rx, cv_metadata.primary_r.x());
- EXPECT_EQ(mdcv_box.display_primaries_ry, cv_metadata.primary_r.y());
- EXPECT_EQ(mdcv_box.white_point_x, cv_metadata.white_point.x());
- EXPECT_EQ(mdcv_box.white_point_y, cv_metadata.white_point.y());
- EXPECT_EQ(mdcv_box.max_display_mastering_luminance,
- cv_metadata.luminance_max);
- EXPECT_EQ(mdcv_box.min_display_mastering_luminance,
- cv_metadata.luminance_min);
- }
-
- {
- auto clli = GetDataValue(
- fmt, kCMFormatDescriptionExtension_ContentLightLevelInfo);
- ASSERT_EQ(4u, clli.size());
- std::unique_ptr<mp4::BoxReader> box_reader(
- mp4::BoxReader::ReadConcatentatedBoxes(clli.data(), clli.size(),
- nullptr));
- mp4::ContentLightLevelInformation clli_box;
- ASSERT_TRUE(clli_box.Parse(box_reader.get()));
- EXPECT_EQ(clli_box.max_content_light_level,
- hdr_meta.max_content_light_level);
- EXPECT_EQ(clli_box.max_pic_average_light_level,
- hdr_meta.max_frame_average_light_level);
- }
+
+ {
+ auto mdcv = GetDataValue(
+ fmt, kCMFormatDescriptionExtension_MasteringDisplayColorVolume);
+ ASSERT_EQ(24u, mdcv.size());
+ std::unique_ptr<mp4::BoxReader> box_reader(
+ mp4::BoxReader::ReadConcatentatedBoxes(mdcv.data(), mdcv.size(),
+ nullptr));
+ mp4::MasteringDisplayColorVolume mdcv_box;
+ ASSERT_TRUE(mdcv_box.Parse(box_reader.get()));
+ EXPECT_EQ(mdcv_box.display_primaries_gx, cv_metadata.primary_g.x());
+ EXPECT_EQ(mdcv_box.display_primaries_gy, cv_metadata.primary_g.y());
+ EXPECT_EQ(mdcv_box.display_primaries_bx, cv_metadata.primary_b.x());
+ EXPECT_EQ(mdcv_box.display_primaries_by, cv_metadata.primary_b.y());
+ EXPECT_EQ(mdcv_box.display_primaries_rx, cv_metadata.primary_r.x());
+ EXPECT_EQ(mdcv_box.display_primaries_ry, cv_metadata.primary_r.y());
+ EXPECT_EQ(mdcv_box.white_point_x, cv_metadata.white_point.x());
+ EXPECT_EQ(mdcv_box.white_point_y, cv_metadata.white_point.y());
+ EXPECT_EQ(mdcv_box.max_display_mastering_luminance,
+ cv_metadata.luminance_max);
+ EXPECT_EQ(mdcv_box.min_display_mastering_luminance,
+ cv_metadata.luminance_min);
+ }
+
+ {
+ auto clli =
+ GetDataValue(fmt, kCMFormatDescriptionExtension_ContentLightLevelInfo);
+ ASSERT_EQ(4u, clli.size());
+ std::unique_ptr<mp4::BoxReader> box_reader(
+ mp4::BoxReader::ReadConcatentatedBoxes(clli.data(), clli.size(),
+ nullptr));
+ mp4::ContentLightLevelInformation clli_box;
+ ASSERT_TRUE(clli_box.Parse(box_reader.get()));
+ EXPECT_EQ(clli_box.max_content_light_level,
+ hdr_meta.max_content_light_level);
+ EXPECT_EQ(clli_box.max_pic_average_light_level,
+ hdr_meta.max_frame_average_light_level);
}
}
@@ -361,13 +350,7 @@ TEST(VTConfigUtil, GetImageBufferColorSpace_BT2020_PQ) {
auto image_buffer_cs = GetImageBufferColorSpace(image_buffer);
// When BT.2020 is unavailable the default should be BT.709.
- if (base::mac::IsAtLeastOS10_13()) {
- EXPECT_EQ(cs.ToGfxColorSpace(), image_buffer_cs);
- } else {
- // 10.12 doesn't have HDR transfer functions.
- cs.transfer = VideoColorSpace::TransferID::BT709;
- EXPECT_EQ(cs.ToGfxColorSpace(), image_buffer_cs);
- }
+ EXPECT_EQ(cs.ToGfxColorSpace(), image_buffer_cs);
}
TEST(VTConfigUtil, GetImageBufferColorSpace_BT2020_HLG) {
@@ -380,13 +363,7 @@ TEST(VTConfigUtil, GetImageBufferColorSpace_BT2020_HLG) {
auto image_buffer_cs = GetImageBufferColorSpace(image_buffer);
// When BT.2020 is unavailable the default should be BT.709.
- if (base::mac::IsAtLeastOS10_13()) {
- EXPECT_EQ(cs.ToGfxColorSpace(), image_buffer_cs);
- } else {
- // 10.12 doesn't have HDR transfer functions.
- cs.transfer = VideoColorSpace::TransferID::BT709;
- EXPECT_EQ(cs.ToGfxColorSpace(), image_buffer_cs);
- }
+ EXPECT_EQ(cs.ToGfxColorSpace(), image_buffer_cs);
}
TEST(VTConfigUtil, FormatDescriptionInvalid) {
diff --git a/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.cc b/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.cc
index 52cb5929402..45395a62cd1 100644
--- a/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.cc
+++ b/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.cc
@@ -16,6 +16,7 @@
#include "base/atomic_sequence_num.h"
#include "base/bind.h"
+#include "base/cxx17_backports.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "base/mac/mac_util.h"
@@ -82,9 +83,8 @@ constexpr VideoCodecProfile kSupportedProfiles[] = {
VP9PROFILE_PROFILE0, VP9PROFILE_PROFILE2,
// These are only supported on macOS 11+.
- // TODO(crbug.com/1300786): add HEVCPROFILE_MAIN_STILL_PICTURE,
- // and HEVCPROFILE_REXT since VT has already supported these profiles
- HEVCPROFILE_MAIN, HEVCPROFILE_MAIN10,
+ HEVCPROFILE_MAIN, HEVCPROFILE_MAIN10, HEVCPROFILE_MAIN_STILL_PICTURE,
+ HEVCPROFILE_REXT,
// TODO(sandersd): Hi10p fails during
// CMVideoFormatDescriptionCreateFromH264ParameterSets with
@@ -130,12 +130,12 @@ constexpr int kNumPictureBuffers = limits::kMaxVideoFrames * 4;
// minimum safe (static) size of the reorder queue.
constexpr int kMaxReorderQueueSize = 17;
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
// If videotoolbox total output picture count is lower than
// kMinOutputsBeforeRASL, then we should skip the RASL frames
// to avoid kVTVideoDecoderBadDataErr
constexpr int kMinOutputsBeforeRASL = 5;
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
// Build an |image_config| dictionary for VideoToolbox initialization.
base::ScopedCFTypeRef<CFMutableDictionaryRef> BuildImageConfig(
@@ -171,7 +171,7 @@ base::ScopedCFTypeRef<CFMutableDictionaryRef> BuildImageConfig(
return image_config;
}
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
// Create a CMFormatDescription using the provided |pps|, |sps| and |vps|.
base::ScopedCFTypeRef<CMFormatDescriptionRef> CreateVideoFormatHEVC(
const std::vector<uint8_t>& vps,
@@ -212,7 +212,7 @@ base::ScopedCFTypeRef<CMFormatDescriptionRef> CreateVideoFormatHEVC(
}
return format;
}
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
// Create a CMFormatDescription using the provided |pps| and |sps|.
base::ScopedCFTypeRef<CMFormatDescriptionRef> CreateVideoFormatH264(
@@ -394,8 +394,8 @@ bool InitializeVideoToolboxInternal() {
}
}
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
- if (base::FeatureList::IsEnabled(media::kVideoToolboxHEVCDecoding)) {
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
+ if (base::FeatureList::IsEnabled(media::kPlatformHEVCDecoderSupport)) {
// Only macOS >= 11.0 will support hevc if we use
// CMVideoFormatDescriptionCreateFromHEVCParameterSets
// API to create video format
@@ -459,7 +459,7 @@ bool InitializeVideoToolboxInternal() {
}
}
}
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
return true;
}
@@ -476,7 +476,7 @@ int32_t ComputeH264ReorderWindow(const H264SPS* sps) {
int max_dpb_frames =
max_dpb_mbs / ((sps->pic_width_in_mbs_minus1 + 1) *
(sps->pic_height_in_map_units_minus1 + 1));
- max_dpb_frames = std::clamp(max_dpb_frames, 0, 16);
+ max_dpb_frames = base::clamp(max_dpb_frames, 0, 16);
// See AVC spec section E.2.1 definition of |max_num_reorder_frames|.
if (sps->vui_parameters_present_flag && sps->bitstream_restriction_flag) {
@@ -491,12 +491,12 @@ int32_t ComputeH264ReorderWindow(const H264SPS* sps) {
return max_dpb_frames;
}
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
int32_t ComputeHEVCReorderWindow(const H265VPS* vps) {
int32_t vps_max_sub_layers_minus1 = vps->vps_max_sub_layers_minus1;
return vps->vps_max_num_reorder_pics[vps_max_sub_layers_minus1];
}
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
// Route decoded frame callbacks back into the VTVideoDecodeAccelerator.
void OutputThunk(void* decompression_output_refcon,
@@ -651,8 +651,7 @@ bool VTVideoDecodeAccelerator::OnMemoryDump(
// Dump output pictures (decoded frames for which PictureReady() has been
// called already).
- for (const auto& it : picture_info_map_) {
- PictureInfo* picture_info = it.second.get();
+ for (const auto& [texture_id, picture_info] : picture_info_map_) {
for (const auto& gl_image : picture_info->gl_images) {
std::string dump_name =
base::StringPrintf("media/vt_video_decode_accelerator_%d/picture_%d",
@@ -756,12 +755,14 @@ bool VTVideoDecodeAccelerator::Initialize(const Config& config,
case H264PROFILE_HIGH:
codec_ = VideoCodec::kH264;
break;
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
case HEVCPROFILE_MAIN:
case HEVCPROFILE_MAIN10:
+ case HEVCPROFILE_MAIN_STILL_PICTURE:
+ case HEVCPROFILE_REXT:
codec_ = VideoCodec::kHEVC;
break;
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
case VP9PROFILE_PROFILE0:
case VP9PROFILE_PROFILE2:
codec_ = VideoCodec::kVP9;
@@ -800,11 +801,11 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
case VideoCodec::kH264:
format = CreateVideoFormatH264(active_sps_, active_spsext_, active_pps_);
break;
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
case VideoCodec::kHEVC:
format = CreateVideoFormatHEVC(active_vps_, active_sps_, active_pps_);
break;
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
case VideoCodec::kVP9:
format = CreateVideoFormatVP9(
cc_detector_->GetColorSpace(config_.container_color_space),
@@ -841,7 +842,8 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
const bool require_hardware = config_.profile == VP9PROFILE_PROFILE0 ||
config_.profile == VP9PROFILE_PROFILE2;
const bool is_hbd = config_.profile == VP9PROFILE_PROFILE2 ||
- config_.profile == HEVCPROFILE_MAIN10;
+ config_.profile == HEVCPROFILE_MAIN10 ||
+ config_.profile == HEVCPROFILE_REXT;
if (!CreateVideoToolboxSession(format_, require_hardware, is_hbd, &callback_,
&session_, &configured_size_)) {
NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
@@ -864,9 +866,9 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
vp9_bsf_ = std::make_unique<VP9SuperFrameBitstreamFilter>();
// Record that the configuration change is complete.
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
configured_vps_ = active_vps_;
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
configured_sps_ = active_sps_;
configured_spsext_ = active_spsext_;
configured_pps_ = active_pps_;
@@ -1297,7 +1299,7 @@ void VTVideoDecodeAccelerator::DecodeTaskH264(
}
}
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
void VTVideoDecodeAccelerator::DecodeTaskHEVC(
scoped_refptr<DecoderBuffer> buffer,
Frame* frame) {
@@ -1656,7 +1658,7 @@ void VTVideoDecodeAccelerator::DecodeTaskHEVC(
return;
}
}
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
// This method may be called on any VideoToolbox thread.
void VTVideoDecodeAccelerator::Output(void* source_frame_refcon,
@@ -1785,13 +1787,13 @@ void VTVideoDecodeAccelerator::Decode(scoped_refptr<DecoderBuffer> buffer,
FROM_HERE,
base::BindOnce(&VTVideoDecodeAccelerator::DecodeTaskVp9,
decoder_weak_this_, std::move(buffer), frame));
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
} else if (codec_ == VideoCodec::kHEVC) {
decoder_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&VTVideoDecodeAccelerator::DecodeTaskHEVC,
decoder_weak_this_, std::move(buffer), frame));
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
} else {
decoder_task_runner_->PostTask(
FROM_HERE,
@@ -2051,7 +2053,8 @@ bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) {
// TODO(https://crbug.com/1233228): The UV planes of P010 frames cannot
// be represented in the current gfx::BufferFormat.
if (config_.profile != VP9PROFILE_PROFILE2 &&
- config_.profile != HEVCPROFILE_MAIN10)
+ config_.profile != HEVCPROFILE_MAIN10 &&
+ config_.profile != HEVCPROFILE_REXT)
picture_format_ = PIXEL_FORMAT_NV12;
}
@@ -2082,7 +2085,8 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
const gfx::BufferFormat buffer_format =
config_.profile == VP9PROFILE_PROFILE2 ||
- config_.profile == HEVCPROFILE_MAIN10
+ config_.profile == HEVCPROFILE_MAIN10 ||
+ config_.profile == HEVCPROFILE_REXT
? gfx::BufferFormat::P010
: gfx::BufferFormat::YUV_420_BIPLANAR;
gfx::ColorSpace color_space = GetImageBufferColorSpace(frame.image);
@@ -2220,6 +2224,9 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
picture.set_scoped_shared_image(picture_info->scoped_shared_images[plane],
plane);
}
+ if (picture_format_ == PIXEL_FORMAT_NV12)
+ picture.set_is_webgpu_compatible(true);
+
client_->PictureReady(std::move(picture));
return true;
}
@@ -2322,26 +2329,18 @@ VTVideoDecodeAccelerator::GetSupportedProfiles(
supported_profile == VP9PROFILE_PROFILE2) {
if (workarounds.disable_accelerated_vp9_decode)
continue;
- if (!base::mac::IsAtLeastOS11())
- continue;
- if (__builtin_available(macOS 10.13, *)) {
- if ((supported_profile == VP9PROFILE_PROFILE0 ||
- supported_profile == VP9PROFILE_PROFILE2) &&
- !VTIsHardwareDecodeSupported(kCMVideoCodecType_VP9)) {
- continue;
- }
-
- // Success! We have VP9 hardware decoding support.
- } else {
+ if (!VTIsHardwareDecodeSupported(kCMVideoCodecType_VP9))
continue;
- }
+ // Success! We have VP9 hardware decoding support.
}
if (supported_profile == HEVCPROFILE_MAIN ||
- supported_profile == HEVCPROFILE_MAIN10) {
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+ supported_profile == HEVCPROFILE_MAIN10 ||
+ supported_profile == HEVCPROFILE_MAIN_STILL_PICTURE ||
+ supported_profile == HEVCPROFILE_REXT) {
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
if (!workarounds.disable_accelerated_hevc_decode &&
- base::FeatureList::IsEnabled(kVideoToolboxHEVCDecoding)) {
+ base::FeatureList::IsEnabled(kPlatformHEVCDecoderSupport)) {
if (__builtin_available(macOS 11.0, *)) {
// Success! We have HEVC hardware decoding (or software
// decoding if the hardware is not good enough) support too.
@@ -2353,7 +2352,7 @@ VTVideoDecodeAccelerator::GetSupportedProfiles(
profiles.push_back(profile);
}
}
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
continue;
}
diff --git a/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.h b/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.h
index 9b18787f708..4721d6946e8 100644
--- a/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.h
+++ b/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.h
@@ -26,10 +26,10 @@
#include "media/gpu/media_gpu_export.h"
#include "media/video/h264_parser.h"
#include "media/video/h264_poc.h"
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
#include "media/video/h265_parser.h"
#include "media/video/h265_poc.h"
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
#include "media/video/video_decode_accelerator.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_bindings.h"
@@ -47,7 +47,7 @@ class VP9SuperFrameBitstreamFilter;
MEDIA_GPU_EXPORT bool InitializeVideoToolbox();
// VideoToolbox.framework implementation of the VideoDecodeAccelerator
-// interface for Mac OS X (currently limited to 10.9+).
+// interface for macOS.
class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
public base::trace_event::MemoryDumpProvider {
public:
@@ -195,9 +195,9 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
// |frame| is owned by |pending_frames_|.
void DecodeTaskH264(scoped_refptr<DecoderBuffer> buffer, Frame* frame);
void DecodeTaskVp9(scoped_refptr<DecoderBuffer> buffer, Frame* frame);
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
void DecodeTaskHEVC(scoped_refptr<DecoderBuffer> buffer, Frame* frame);
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
void DecodeDone(Frame* frame);
//
@@ -307,7 +307,7 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
std::vector<uint8_t> configured_pps_;
H264POC h264_poc_;
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
H265Parser hevc_parser_;
// VPSs seen in the bitstream.
@@ -318,7 +318,7 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
std::vector<uint8_t> configured_vps_;
H265POC hevc_poc_;
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
Config config_;
VideoCodec codec_;
diff --git a/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc b/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc
index 6a3a7771360..965e3986fb0 100644
--- a/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc
+++ b/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc
@@ -19,6 +19,7 @@
#include "media/base/mac/video_frame_mac.h"
#include "media/base/media_log.h"
#include "media/base/video_frame.h"
+#include "media/video/video_encode_accelerator.h"
// This is a min version of macOS where we want to support SVC encoding via
// EnableLowLatencyRateControl flag. The flag is actually supported since 11.3,
@@ -149,6 +150,8 @@ VTVideoEncodeAccelerator::GetSupportedProfiles() {
SupportedProfile profile;
profile.max_framerate_numerator = kMaxFrameRateNumerator;
profile.max_framerate_denominator = kMaxFrameRateDenominator;
+ profile.rate_control_modes = VideoEncodeAccelerator::kConstantMode |
+ VideoEncodeAccelerator::kVariableMode;
profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
if (__builtin_available(macOS LOW_LATENCY_FLAG_AVAILABLE_VER, *))
profile.scalability_modes.push_back(SVCScalabilityMode::kL1T2);
@@ -169,6 +172,8 @@ VTVideoEncodeAccelerator::GetSupportedProfilesLight() {
SupportedProfile profile;
profile.max_framerate_numerator = kMaxFrameRateNumerator;
profile.max_framerate_denominator = kMaxFrameRateDenominator;
+ profile.rate_control_modes = VideoEncodeAccelerator::kConstantMode |
+ VideoEncodeAccelerator::kVariableMode;
profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
for (const auto& supported_profile : kSupportedProfiles) {
profile.profile = supported_profile;
@@ -257,8 +262,7 @@ void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer(
return;
}
- auto mapping =
- base::UnsafeSharedMemoryRegion::Deserialize(buffer.TakeRegion()).Map();
+ auto mapping = buffer.TakeRegion().Map();
if (!mapping.IsValid()) {
DLOG(ERROR) << "Failed mapping shared memory.";
client_->NotifyError(kPlatformFailureError);
diff --git a/chromium/media/gpu/v4l2/BUILD.gn b/chromium/media/gpu/v4l2/BUILD.gn
index a78ef7deabd..6a635eff41f 100644
--- a/chromium/media/gpu/v4l2/BUILD.gn
+++ b/chromium/media/gpu/v4l2/BUILD.gn
@@ -67,8 +67,6 @@ source_set("v4l2") {
"v4l2_video_decoder_delegate_vp8_legacy.h",
"v4l2_video_decoder_delegate_vp9.cc",
"v4l2_video_decoder_delegate_vp9.h",
- "v4l2_video_decoder_delegate_vp9_chromium.cc",
- "v4l2_video_decoder_delegate_vp9_chromium.h",
"v4l2_video_decoder_delegate_vp9_legacy.cc",
"v4l2_video_decoder_delegate_vp9_legacy.h",
"v4l2_video_encode_accelerator.cc",
@@ -162,6 +160,7 @@ executable("v4l2_stateless_decoder") {
sources = [
"test/av1_decoder.cc",
"test/av1_decoder.h",
+ "test/av1_pix_fmt.h",
"test/v4l2_ioctl_shim.cc",
"test/v4l2_ioctl_shim.h",
"test/v4l2_stateless_decoder.cc",
diff --git a/chromium/media/gpu/v4l2/v4l2_device.cc b/chromium/media/gpu/v4l2/v4l2_device.cc
index de2800fdab3..241b328fda1 100644
--- a/chromium/media/gpu/v4l2/v4l2_device.cc
+++ b/chromium/media/gpu/v4l2/v4l2_device.cc
@@ -2060,6 +2060,8 @@ V4L2Device::EnumerateSupportedEncodeProfiles() {
VideoEncodeAccelerator::SupportedProfile profile;
profile.max_framerate_numerator = 30;
profile.max_framerate_denominator = 1;
+ // TODO(b/182240945): remove hard-coding when VBR is supported
+ profile.rate_control_modes = media::VideoEncodeAccelerator::kConstantMode;
gfx::Size min_resolution;
GetSupportedResolution(pixelformat, &min_resolution,
&profile.max_resolution);
diff --git a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc
index 368294cffe1..3dbd184252e 100644
--- a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc
+++ b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc
@@ -30,17 +30,6 @@
#include "media/gpu/macros.h"
#include "media/gpu/v4l2/v4l2_utils.h"
-#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str) \
- do { \
- if (device_->Ioctl(type, arg) != 0) { \
- VPLOGF(1) << "ioctl() failed: " << type_str; \
- return value; \
- } \
- } while (0)
-
-#define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
- IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)
-
namespace media {
namespace {
@@ -236,6 +225,9 @@ std::unique_ptr<ImageProcessorBackend> V4L2ImageProcessorBackend::Create(
VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner) {
+ VLOGF(2);
+ DCHECK_GT(num_buffers, 0u);
+
// Most of the users of this class are decoders that only want a pixel format
// conversion (with the same coded dimensions and visible rectangles). Video
// encoding, however, can try and ask for cropping (this is common for camera
@@ -249,25 +241,6 @@ std::unique_ptr<ImageProcessorBackend> V4L2ImageProcessorBackend::Create(
return nullptr;
}
- return V4L2ImageProcessorBackend::CreateWithOutputMode(
- device, num_buffers, input_config, output_config, output_mode,
- relative_rotation, error_cb, backend_task_runner);
-}
-
-// static
-std::unique_ptr<ImageProcessorBackend>
-V4L2ImageProcessorBackend::CreateWithOutputMode(
- scoped_refptr<V4L2Device> device,
- size_t num_buffers,
- const PortConfig& input_config,
- const PortConfig& output_config,
- const OutputMode& output_mode,
- VideoRotation relative_rotation,
- ErrorCB error_cb,
- scoped_refptr<base::SequencedTaskRunner> backend_task_runner) {
- VLOGF(2);
- DCHECK_GT(num_buffers, 0u);
-
if (!device) {
VLOGF(2) << "Failed creating V4L2Device";
return nullptr;
@@ -392,6 +365,42 @@ V4L2ImageProcessorBackend::CreateWithOutputMode(
output_planes[i].size = pix_mp.plane_fmt[i].sizeimage;
}
+ // Capabilities check.
+ struct v4l2_capability caps {};
+ const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ if (device->Ioctl(VIDIOC_QUERYCAP, &caps) != 0) {
+ VPLOGF(1) << "VIDIOC_QUERYCAP failed";
+ return nullptr;
+ }
+ if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
+ VLOGF(1) << "VIDIOC_QUERYCAP failed: "
+ << "caps check failed: 0x" << std::hex << caps.capabilities;
+ return nullptr;
+ }
+
+ // Set a few standard controls to default values.
+ struct v4l2_control rotation = {.id = V4L2_CID_ROTATE, .value = 0};
+ if (device->Ioctl(VIDIOC_S_CTRL, &rotation) != 0) {
+ VPLOGF(1) << "V4L2_CID_ROTATE failed";
+ return nullptr;
+ }
+
+ struct v4l2_control hflip = {.id = V4L2_CID_HFLIP, .value = 0};
+ if (device->Ioctl(VIDIOC_S_CTRL, &hflip) != 0) {
+ VPLOGF(1) << "V4L2_CID_HFLIP failed";
+ return nullptr;
+ }
+
+ struct v4l2_control vflip = {.id = V4L2_CID_VFLIP, .value = 0};
+ if (device->Ioctl(VIDIOC_S_CTRL, &vflip) != 0) {
+ VPLOGF(1) << "V4L2_CID_VFLIP failed";
+ return nullptr;
+ }
+
+ struct v4l2_control alpha = {.id = V4L2_CID_ALPHA_COMPONENT, .value = 255};
+ if (device->Ioctl(VIDIOC_S_CTRL, &alpha) != 0)
+ VPLOGF(1) << "V4L2_CID_ALPHA_COMPONENT failed";
+
const v4l2_memory output_memory_type =
output_mode == OutputMode::ALLOCATE
? V4L2_MEMORY_MMAP
@@ -407,7 +416,7 @@ V4L2ImageProcessorBackend::CreateWithOutputMode(
input_memory_type, output_memory_type, output_mode, relative_rotation,
num_buffers, std::move(error_cb)));
- // Initialize at |backend_task_runner_|.
+ // Initialize at |backend_task_runner|.
bool success = false;
base::WaitableEvent done;
auto init_cb = base::BindOnce(
@@ -435,22 +444,6 @@ void V4L2ImageProcessorBackend::Initialize(InitCB init_cb) {
DVLOGF(2);
DCHECK_CALLED_ON_VALID_SEQUENCE(backend_sequence_checker_);
- // Capabilities check.
- struct v4l2_capability caps;
- memset(&caps, 0, sizeof(caps));
- const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
- if (device_->Ioctl(VIDIOC_QUERYCAP, &caps) != 0) {
- VPLOGF(1) << "ioctl() failed: VIDIOC_QUERYCAP";
- std::move(init_cb).Run(false);
- return;
- }
- if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
- VLOGF(1) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP: "
- << "caps check failed: 0x" << std::hex << caps.capabilities;
- std::move(init_cb).Run(false);
- return;
- }
-
if (!CreateInputBuffers() || !CreateOutputBuffers()) {
std::move(init_cb).Run(false);
return;
@@ -684,28 +677,6 @@ bool V4L2ImageProcessorBackend::CreateInputBuffers() {
DCHECK_CALLED_ON_VALID_SEQUENCE(backend_sequence_checker_);
DCHECK_EQ(input_queue_, nullptr);
- struct v4l2_control control;
- memset(&control, 0, sizeof(control));
- control.id = V4L2_CID_ROTATE;
- control.value = 0;
- IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_CTRL, &control);
-
- memset(&control, 0, sizeof(control));
- control.id = V4L2_CID_HFLIP;
- control.value = 0;
- IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_CTRL, &control);
-
- memset(&control, 0, sizeof(control));
- control.id = V4L2_CID_VFLIP;
- control.value = 0;
- IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_CTRL, &control);
-
- memset(&control, 0, sizeof(control));
- control.id = V4L2_CID_ALPHA_COMPONENT;
- control.value = 255;
- if (device_->Ioctl(VIDIOC_S_CTRL, &control) != 0)
- DVLOGF(4) << "V4L2_CID_ALPHA_COMPONENT is not supported";
-
input_queue_ = device_->GetQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
return input_queue_ && AllocateV4L2Buffers(input_queue_.get(), num_buffers_,
input_memory_type_);
diff --git a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h
index a220f77ce21..b4fbaa911dd 100644
--- a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h
+++ b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h
@@ -108,16 +108,6 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessorBackend
size_t output_buffer_id;
};
- static std::unique_ptr<ImageProcessorBackend> CreateWithOutputMode(
- scoped_refptr<V4L2Device> device,
- size_t num_buffers,
- const PortConfig& input_config,
- const PortConfig& output_config,
- const OutputMode& preferred_output_modes,
- VideoRotation relative_rotation,
- ErrorCB error_cb,
- scoped_refptr<base::SequencedTaskRunner> backend_task_runner);
-
V4L2ImageProcessorBackend(
scoped_refptr<base::SequencedTaskRunner> backend_task_runner,
scoped_refptr<V4L2Device> device,
diff --git a/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.cc
index fd67d728173..6d4e96c3d81 100644
--- a/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.cc
@@ -69,37 +69,24 @@ V4L2JpegEncodeAccelerator::JobRecord::JobRecord(
scoped_refptr<VideoFrame> output_frame,
int quality,
int32_t task_id,
- BitstreamBuffer* exif_buffer)
+ base::WritableSharedMemoryMapping exif_mapping)
: input_frame(input_frame),
output_frame(output_frame),
quality(quality),
task_id(task_id),
- output_shm(base::subtle::PlatformSharedMemoryRegion(), 0, true), // dummy
- exif_shm(nullptr) {
- if (exif_buffer) {
- exif_shm.reset(new UnalignedSharedMemory(exif_buffer->TakeRegion(),
- exif_buffer->size(), false));
- exif_offset = exif_buffer->offset();
- }
-}
+ exif_mapping(std::move(exif_mapping)) {}
V4L2JpegEncodeAccelerator::JobRecord::JobRecord(
scoped_refptr<VideoFrame> input_frame,
int quality,
- BitstreamBuffer* exif_buffer,
- BitstreamBuffer output_buffer)
+ int32_t task_id,
+ base::WritableSharedMemoryMapping exif_mapping,
+ base::WritableSharedMemoryMapping output_mapping)
: input_frame(input_frame),
quality(quality),
- task_id(output_buffer.id()),
- output_shm(output_buffer.TakeRegion(), output_buffer.size(), false),
- output_offset(output_buffer.offset()),
- exif_shm(nullptr) {
- if (exif_buffer) {
- exif_shm.reset(new UnalignedSharedMemory(exif_buffer->TakeRegion(),
- exif_buffer->size(), false));
- exif_offset = exif_buffer->offset();
- }
-}
+ task_id(task_id),
+ output_mapping(std::move(output_mapping)),
+ exif_mapping(std::move(exif_mapping)) {}
V4L2JpegEncodeAccelerator::JobRecord::~JobRecord() {}
@@ -577,9 +564,9 @@ void V4L2JpegEncodeAccelerator::EncodedInstance::DestroyInputBuffers() {
input_streamon_ = false;
}
- for (const auto& input_record : input_buffer_map_) {
+ for (const auto& [address, length, at_device] : input_buffer_map_) {
for (size_t i = 0; i < input_buffer_num_planes_; ++i) {
- device_->Munmap(input_record.address[i], input_record.length[i]);
+ device_->Munmap(address[i], length[i]);
}
}
@@ -769,7 +756,7 @@ size_t V4L2JpegEncodeAccelerator::EncodedInstance::FinalizeJpegImage(
uint8_t* dst_ptr,
const JpegBufferRecord& output_buffer,
size_t buffer_size,
- std::unique_ptr<UnalignedSharedMemory> exif_shm) {
+ base::WritableSharedMemoryMapping exif_mapping) {
DCHECK(parent_->encoder_task_runner_->BelongsToCurrentThread());
size_t idx;
@@ -778,9 +765,9 @@ size_t V4L2JpegEncodeAccelerator::EncodedInstance::FinalizeJpegImage(
dst_ptr[1] = JPEG_SOI;
idx = 2;
- if (exif_shm) {
- uint8_t* exif_buffer = static_cast<uint8_t*>(exif_shm->memory());
- size_t exif_buffer_size = exif_shm->size();
+ if (exif_mapping.IsValid()) {
+ uint8_t* exif_buffer = exif_mapping.GetMemoryAs<uint8_t>();
+ size_t exif_buffer_size = exif_mapping.size();
// Application Segment for Exif data.
uint16_t exif_segment_size = static_cast<uint16_t>(exif_buffer_size + 2);
const uint8_t kAppSegment[] = {
@@ -914,8 +901,8 @@ void V4L2JpegEncodeAccelerator::EncodedInstance::Dequeue() {
}
size_t jpeg_size = FinalizeJpegImage(
- static_cast<uint8_t*>(job_record->output_shm.memory()), output_record,
- planes[0].bytesused, std::move(job_record->exif_shm));
+ job_record->output_mapping.GetMemoryAs<uint8_t>(), output_record,
+ planes[0].bytesused, std::move(job_record->exif_mapping));
if (!jpeg_size) {
NotifyError(job_record->task_id, PLATFORM_FAILURE);
return;
@@ -1575,7 +1562,7 @@ bool V4L2JpegEncodeAccelerator::EncodedInstanceDmaBuf::EnqueueOutputRecord() {
size_t V4L2JpegEncodeAccelerator::EncodedInstanceDmaBuf::FinalizeJpegImage(
scoped_refptr<VideoFrame> output_frame,
size_t buffer_size,
- std::unique_ptr<UnalignedSharedMemory> exif_shm) {
+ base::WritableSharedMemoryMapping exif_mapping) {
DCHECK(parent_->encoder_task_runner_->BelongsToCurrentThread());
size_t idx = 0;
@@ -1605,9 +1592,9 @@ size_t V4L2JpegEncodeAccelerator::EncodedInstanceDmaBuf::FinalizeJpegImage(
// Fill SOI and EXIF markers.
static const uint8_t kJpegStart[] = {0xFF, JPEG_SOI};
- if (exif_shm) {
- uint8_t* exif_buffer = static_cast<uint8_t*>(exif_shm->memory());
- size_t exif_buffer_size = exif_shm->size();
+ if (exif_mapping.IsValid()) {
+ uint8_t* exif_buffer = exif_mapping.GetMemoryAs<uint8_t>();
+ size_t exif_buffer_size = exif_mapping.size();
// Application Segment for Exif data.
uint16_t exif_segment_size = static_cast<uint16_t>(exif_buffer_size + 2);
const uint8_t kAppSegment[] = {
@@ -1795,7 +1782,7 @@ void V4L2JpegEncodeAccelerator::EncodedInstanceDmaBuf::Dequeue() {
size_t jpeg_size =
FinalizeJpegImage(job_record->output_frame, planes[0].bytesused,
- std::move(job_record->exif_shm));
+ std::move(job_record->exif_mapping));
if (!jpeg_size) {
NotifyError(job_record->task_id, PLATFORM_FAILURE);
@@ -1942,16 +1929,36 @@ void V4L2JpegEncodeAccelerator::Encode(
return;
}
+ base::WritableSharedMemoryMapping exif_mapping;
if (exif_buffer) {
VLOGF(4) << "EXIF size " << exif_buffer->size();
if (exif_buffer->size() > kMaxMarkerSizeAllowed) {
NotifyError(output_buffer.id(), INVALID_ARGUMENT);
return;
}
+
+ base::UnsafeSharedMemoryRegion exif_region = exif_buffer->TakeRegion();
+ exif_mapping =
+ exif_region.MapAt(exif_buffer->offset(), exif_buffer->size());
+ if (!exif_mapping.IsValid()) {
+ VPLOGF(1) << "could not map exif bitstream_buffer";
+ NotifyError(output_buffer.id(), PLATFORM_FAILURE);
+ return;
+ }
}
- std::unique_ptr<JobRecord> job_record(new JobRecord(
- video_frame, quality, exif_buffer, std::move(output_buffer)));
+ base::UnsafeSharedMemoryRegion output_region = output_buffer.TakeRegion();
+ base::WritableSharedMemoryMapping output_mapping =
+ output_region.MapAt(output_buffer.offset(), output_buffer.size());
+ if (!output_mapping.IsValid()) {
+ VPLOGF(1) << "could not map I420 bitstream_buffer";
+ NotifyError(output_buffer.id(), PLATFORM_FAILURE);
+ return;
+ }
+
+ std::unique_ptr<JobRecord> job_record(
+ new JobRecord(video_frame, quality, output_buffer.id(),
+ std::move(exif_mapping), std::move(output_mapping)));
encoder_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&V4L2JpegEncodeAccelerator::EncodeTaskLegacy,
@@ -1978,16 +1985,26 @@ void V4L2JpegEncodeAccelerator::EncodeWithDmaBuf(
return;
}
+ base::WritableSharedMemoryMapping exif_mapping;
if (exif_buffer) {
VLOGF(4) << "EXIF size " << exif_buffer->size();
if (exif_buffer->size() > kMaxMarkerSizeAllowed) {
NotifyError(task_id, INVALID_ARGUMENT);
return;
}
+
+ base::UnsafeSharedMemoryRegion exif_region = exif_buffer->TakeRegion();
+ exif_mapping =
+ exif_region.MapAt(exif_buffer->offset(), exif_buffer->size());
+ if (!exif_mapping.IsValid()) {
+ VPLOGF(1) << "could not map exif bitstream_buffer";
+ NotifyError(task_id, PLATFORM_FAILURE);
+ return;
+ }
}
- std::unique_ptr<JobRecord> job_record(
- new JobRecord(input_frame, output_frame, quality, task_id, exif_buffer));
+ std::unique_ptr<JobRecord> job_record(new JobRecord(
+ input_frame, output_frame, quality, task_id, std::move(exif_mapping)));
encoder_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&V4L2JpegEncodeAccelerator::EncodeTask,
@@ -1997,19 +2014,6 @@ void V4L2JpegEncodeAccelerator::EncodeWithDmaBuf(
void V4L2JpegEncodeAccelerator::EncodeTaskLegacy(
std::unique_ptr<JobRecord> job_record) {
DCHECK(encoder_task_runner_->BelongsToCurrentThread());
- if (!job_record->output_shm.MapAt(job_record->output_offset,
- job_record->output_shm.size())) {
- VPLOGF(1) << "could not map I420 bitstream_buffer";
- NotifyError(job_record->task_id, PLATFORM_FAILURE);
- return;
- }
- if (job_record->exif_shm &&
- !job_record->exif_shm->MapAt(job_record->exif_offset,
- job_record->exif_shm->size())) {
- VPLOGF(1) << "could not map exif bitstream_buffer";
- NotifyError(job_record->task_id, PLATFORM_FAILURE);
- return;
- }
// Check if the parameters of input frame changes.
// If it changes, we open a new device and put the job in it.
@@ -2034,7 +2038,7 @@ void V4L2JpegEncodeAccelerator::EncodeTaskLegacy(
}
if (!encoded_device->CreateBuffers(coded_size,
- job_record->output_shm.size())) {
+ job_record->output_mapping.size())) {
VLOGF(1) << "Create buffers failed.";
NotifyError(job_record->task_id, PLATFORM_FAILURE);
return;
@@ -2055,13 +2059,6 @@ void V4L2JpegEncodeAccelerator::EncodeTaskLegacy(
void V4L2JpegEncodeAccelerator::EncodeTask(
std::unique_ptr<JobRecord> job_record) {
DCHECK(encoder_task_runner_->BelongsToCurrentThread());
- if (job_record->exif_shm &&
- !job_record->exif_shm->MapAt(job_record->exif_offset,
- job_record->exif_shm->size())) {
- VPLOGF(1) << "could not map exif bitstream_buffer";
- NotifyError(job_record->task_id, PLATFORM_FAILURE);
- return;
- }
// Check if the parameters of input frame changes.
// If it changes, we open a new device and put the job in it.
diff --git a/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h
index 8a94b616bdc..b31ef0e2021 100644
--- a/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h
@@ -13,13 +13,13 @@
#include "base/containers/queue.h"
#include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory_mapping.h"
#include "base/memory/weak_ptr.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "components/chromeos_camera/jpeg_encode_accelerator.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
#include "media/base/bitstream_buffer.h"
-#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_frame.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/v4l2/v4l2_device.h"
@@ -113,11 +113,12 @@ class MEDIA_GPU_EXPORT V4L2JpegEncodeAccelerator
scoped_refptr<VideoFrame> output_frame,
int32_t task_id,
int quality,
- BitstreamBuffer* exif_buffer);
+ base::WritableSharedMemoryMapping exif_mapping);
JobRecord(scoped_refptr<VideoFrame> input_frame,
int quality,
- BitstreamBuffer* exif_buffer,
- BitstreamBuffer output_buffer);
+ int32_t task_id,
+ base::WritableSharedMemoryMapping exif_mapping,
+ base::WritableSharedMemoryMapping output_mapping);
~JobRecord();
// Input frame buffer.
@@ -132,16 +133,12 @@ class MEDIA_GPU_EXPORT V4L2JpegEncodeAccelerator
// Encode task ID.
int32_t task_id;
// Memory mapped from |output_buffer|.
- UnalignedSharedMemory output_shm;
- // Offset used for |output_shm|.
- off_t output_offset;
+ base::WritableSharedMemoryMapping output_mapping;
// Memory mapped from |exif_buffer|.
- // It contains EXIF data to be inserted into JPEG image. If it's nullptr,
- // the JFIF APP0 segment will be inserted.
- std::unique_ptr<UnalignedSharedMemory> exif_shm;
- // Offset used for |exif_shm|.
- off_t exif_offset;
+ // It contains EXIF data to be inserted into JPEG image. If `IsValid()` is
+ // false, the JFIF APP0 segment will be inserted.
+ base::WritableSharedMemoryMapping exif_mapping;
};
// TODO(wtlee): To be deprecated. (crbug.com/944705)
@@ -183,7 +180,7 @@ class MEDIA_GPU_EXPORT V4L2JpegEncodeAccelerator
size_t FinalizeJpegImage(uint8_t* dst_ptr,
const JpegBufferRecord& output_buffer,
size_t buffer_size,
- std::unique_ptr<UnalignedSharedMemory> exif_shm);
+ base::WritableSharedMemoryMapping exif_mapping);
bool SetInputBufferFormat(gfx::Size coded_size);
bool SetOutputBufferFormat(gfx::Size coded_size, size_t buffer_size);
@@ -300,7 +297,7 @@ class MEDIA_GPU_EXPORT V4L2JpegEncodeAccelerator
// Add JPEG Marks if needed. Add EXIF section by |exif_shm|.
size_t FinalizeJpegImage(scoped_refptr<VideoFrame> output_frame,
size_t buffer_size,
- std::unique_ptr<UnalignedSharedMemory> exif_shm);
+ base::WritableSharedMemoryMapping exif_mapping);
bool SetInputBufferFormat(gfx::Size coded_size,
const VideoFrameLayout& input_layout);
diff --git a/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.cc
index 1fe26607f67..1ee2eba47d6 100644
--- a/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.cc
@@ -18,11 +18,12 @@
#include "base/callback_helpers.h"
#include "base/files/scoped_file.h"
#include "base/memory/page_size.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/numerics/safe_conversions.h"
#include "base/threading/thread_task_runner_handle.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/bitstream_buffer.h"
-#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_frame.h"
#include "media/base/video_types.h"
#include "media/gpu/chromeos/fourcc.h"
@@ -145,7 +146,7 @@ class V4L2MjpegDecodeAccelerator::JobRecord {
// Input buffer size.
virtual size_t size() const = 0;
// Input buffer offset.
- virtual off_t offset() const = 0;
+ virtual uint64_t offset() const = 0;
// Maps input buffer.
virtual bool map() = 0;
// Pointer to the input content. Only valid if map() is already called.
@@ -164,9 +165,7 @@ class JobRecordBitstreamBuffer : public V4L2MjpegDecodeAccelerator::JobRecord {
JobRecordBitstreamBuffer(BitstreamBuffer bitstream_buffer,
scoped_refptr<VideoFrame> video_frame)
: task_id_(bitstream_buffer.id()),
- shm_(bitstream_buffer.TakeRegion(),
- bitstream_buffer.size(),
- false /* read_only */),
+ shm_region_(bitstream_buffer.TakeRegion()),
offset_(bitstream_buffer.offset()),
out_frame_(video_frame) {}
@@ -174,17 +173,21 @@ class JobRecordBitstreamBuffer : public V4L2MjpegDecodeAccelerator::JobRecord {
JobRecordBitstreamBuffer& operator=(const JobRecordBitstreamBuffer&) = delete;
int32_t task_id() const override { return task_id_; }
- size_t size() const override { return shm_.size(); }
- off_t offset() const override { return offset_; }
- bool map() override { return shm_.MapAt(offset(), size()); }
- const void* memory() const override { return shm_.memory(); }
+ size_t size() const override { return shm_region_.GetSize(); }
+ uint64_t offset() const override { return offset_; }
+ bool map() override {
+ shm_mapping_ = shm_region_.MapAt(offset(), size());
+ return shm_mapping_.IsValid();
+ }
+ const void* memory() const override { return shm_mapping_.memory(); }
const scoped_refptr<VideoFrame>& out_frame() override { return out_frame_; }
private:
int32_t task_id_;
- UnalignedSharedMemory shm_;
- off_t offset_;
+ base::UnsafeSharedMemoryRegion shm_region_;
+ uint64_t offset_;
+ base::WritableSharedMemoryMapping shm_mapping_;
scoped_refptr<VideoFrame> out_frame_;
};
@@ -215,7 +218,7 @@ class JobRecordDmaBuf : public V4L2MjpegDecodeAccelerator::JobRecord {
int32_t task_id() const override { return task_id_; }
size_t size() const override { return size_; }
- off_t offset() const override { return offset_; }
+ uint64_t offset() const override { return offset_; }
bool map() override {
if (mapped_addr_)
@@ -225,7 +228,7 @@ class JobRecordDmaBuf : public V4L2MjpegDecodeAccelerator::JobRecord {
DCHECK(dmabuf_fd_.is_valid());
DCHECK_GT(size(), 0u);
void* addr = mmap(nullptr, size(), PROT_READ, MAP_SHARED, dmabuf_fd_.get(),
- offset());
+ base::checked_cast<off_t>(offset()));
if (addr == MAP_FAILED)
return false;
mapped_addr_ = addr;
@@ -243,7 +246,7 @@ class JobRecordDmaBuf : public V4L2MjpegDecodeAccelerator::JobRecord {
int32_t task_id_;
base::ScopedFD dmabuf_fd_;
size_t size_;
- off_t offset_;
+ uint64_t offset_;
void* mapped_addr_;
scoped_refptr<VideoFrame> out_frame_;
};
@@ -710,9 +713,9 @@ void V4L2MjpegDecodeAccelerator::DestroyInputBuffers() {
input_streamon_ = false;
}
- for (const auto& input_record : input_buffer_map_) {
+ for (const auto& [address, length, at_device] : input_buffer_map_) {
for (size_t i = 0; i < kMaxInputPlanes; ++i) {
- device_->Munmap(input_record.address[i], input_record.length[i]);
+ device_->Munmap(address[i], length[i]);
}
}
@@ -740,9 +743,9 @@ void V4L2MjpegDecodeAccelerator::DestroyOutputBuffers() {
output_streamon_ = false;
}
- for (const auto& output_record : output_buffer_map_) {
+ for (const auto& [address, length, at_device] : output_buffer_map_) {
for (size_t i = 0; i < output_buffer_num_planes_; ++i) {
- device_->Munmap(output_record.address[i], output_record.length[i]);
+ device_->Munmap(address[i], length[i]);
}
}
diff --git a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
index 25df52f5015..722a4935acf 100644
--- a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
@@ -32,7 +32,6 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/media_switches.h"
#include "media/base/scopedfd_helper.h"
-#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_types.h"
#include "media/base/video_util.h"
#include "media/gpu/chromeos/fourcc.h"
@@ -47,7 +46,6 @@
#include "media/gpu/v4l2/v4l2_video_decoder_delegate_vp8.h"
#include "media/gpu/v4l2/v4l2_video_decoder_delegate_vp8_legacy.h"
#include "media/gpu/v4l2/v4l2_video_decoder_delegate_vp9.h"
-#include "media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_chromium.h"
#include "media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_legacy.h"
#include "ui/gfx/native_pixmap_handle.h"
#include "ui/gl/gl_context.h"
@@ -319,17 +317,11 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
#endif
const bool supports_stable_api =
device_->IsCtrlExposed(V4L2_CID_STATELESS_VP9_FRAME);
+ CHECK(supports_stable_api);
+ decoder_ = std::make_unique<VP9Decoder>(
+ std::make_unique<V4L2VideoDecoderDelegateVP9>(this, device_.get()),
+ video_profile_, config.container_color_space);
- if (supports_stable_api) {
- decoder_ = std::make_unique<VP9Decoder>(
- std::make_unique<V4L2VideoDecoderDelegateVP9>(this, device_.get()),
- video_profile_, config.container_color_space);
- } else {
- decoder_ = std::make_unique<VP9Decoder>(
- std::make_unique<V4L2VideoDecoderDelegateVP9Chromium>(
- this, device_.get()),
- video_profile_, config.container_color_space);
- }
} else {
decoder_ = std::make_unique<VP9Decoder>(
std::make_unique<V4L2VideoDecoderDelegateVP9Legacy>(this,
@@ -1227,8 +1219,9 @@ bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
if (output_buffer_map_.empty())
return true;
- for (auto& output_record : output_buffer_map_) {
- picture_buffers_to_dismiss.push_back(output_record.picture_id);
+ for (const auto& [output_frame, picture_id, client_texture_id, texture_id,
+ cleared, num_times_sent_to_client] : output_buffer_map_) {
+ picture_buffers_to_dismiss.push_back(picture_id);
}
if (dismiss) {
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
index c920940572f..f39cc569af9 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
@@ -28,7 +28,6 @@
#include "build/build_config.h"
#include "media/base/media_switches.h"
#include "media/base/scopedfd_helper.h"
-#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_frame_layout.h"
#include "media/base/video_types.h"
#include "media/gpu/chromeos/fourcc.h"
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder.cc b/chromium/media/gpu/v4l2/v4l2_video_decoder.cc
index 2b34a0c7da6..3f0b0fdcf34 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decoder.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder.cc
@@ -15,6 +15,7 @@
#include "base/trace_event/trace_event.h"
#include "media/base/limits.h"
#include "media/base/media_log.h"
+#include "media/base/media_switches.h"
#include "media/base/video_types.h"
#include "media/base/video_util.h"
#include "media/gpu/chromeos/chromeos_status.h"
@@ -264,7 +265,9 @@ V4L2Status V4L2VideoDecoder::InitializeBackend() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK(state_ == State::kInitialized);
- can_use_decoder_ = num_instances_.Increment() < kMaxNumOfInstances;
+ can_use_decoder_ =
+ num_instances_.Increment() < kMaxNumOfInstances ||
+ !base::FeatureList::IsEnabled(media::kLimitConcurrentDecoderInstances);
if (!can_use_decoder_) {
VLOGF(1) << "Reached maximum number of decoder instances ("
<< kMaxNumOfInstances << ")";
@@ -438,8 +441,6 @@ CroStatus V4L2VideoDecoder::SetupOutputFormat(const gfx::Size& size,
output_queue_->SetFormat(fourcc.ToV4L2PixFmt(), picked_size, 0);
DCHECK(format);
gfx::Size adjusted_size(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
- DCHECK_EQ(adjusted_size.width() % 16, 0);
- DCHECK_EQ(adjusted_size.height() % 16, 0);
if (!gfx::Rect(adjusted_size).Contains(gfx::Rect(picked_size))) {
VLOGF(1) << "The adjusted coded size (" << adjusted_size.ToString()
<< ") should contains the original coded size("
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc
index 90d3fa28356..0f2181d0c5d 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc
@@ -30,7 +30,6 @@
#include "media/gpu/v4l2/v4l2_video_decoder_delegate_vp8.h"
#include "media/gpu/v4l2/v4l2_video_decoder_delegate_vp8_legacy.h"
#include "media/gpu/v4l2/v4l2_video_decoder_delegate_vp9.h"
-#include "media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_chromium.h"
#include "media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_legacy.h"
namespace media {
@@ -718,17 +717,10 @@ bool V4L2StatelessVideoDecoderBackend::CreateAvd() {
#endif
const bool supports_stable_api =
device_->IsCtrlExposed(V4L2_CID_STATELESS_VP9_FRAME);
-
- if (supports_stable_api) {
- avd_ = std::make_unique<VP9Decoder>(
- std::make_unique<V4L2VideoDecoderDelegateVP9>(this, device_.get()),
- profile_, color_space_);
- } else {
- avd_ = std::make_unique<VP9Decoder>(
- std::make_unique<V4L2VideoDecoderDelegateVP9Chromium>(
- this, device_.get()),
- profile_, color_space_);
- }
+ CHECK(supports_stable_api);
+ avd_ = std::make_unique<VP9Decoder>(
+ std::make_unique<V4L2VideoDecoderDelegateVP9>(this, device_.get()),
+ profile_, color_space_);
} else {
avd_ = std::make_unique<VP9Decoder>(
std::make_unique<V4L2VideoDecoderDelegateVP9Legacy>(this,
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_chromium.cc b/chromium/media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_chromium.cc
deleted file mode 100644
index 54b9c40643a..00000000000
--- a/chromium/media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_chromium.cc
+++ /dev/null
@@ -1,377 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Prevent inclusion of legacy controls.
-#define __LINUX_MEDIA_VP9_CTRLS_LEGACY_H_
-
-#include <linux/media/vp9-ctrls.h>
-
-#include "base/logging.h"
-#include "media/filters/vp9_parser.h"
-#include "media/gpu/macros.h"
-#include "media/gpu/v4l2/v4l2_decode_surface.h"
-#include "media/gpu/v4l2/v4l2_decode_surface_handler.h"
-#include "media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_chromium.h"
-
-namespace media {
-
-using DecodeStatus = VP9Decoder::VP9Accelerator::Status;
-
-class V4L2VP9Picture : public VP9Picture {
- public:
- explicit V4L2VP9Picture(scoped_refptr<V4L2DecodeSurface> dec_surface)
- : dec_surface_(std::move(dec_surface)) {}
-
- V4L2VP9Picture(const V4L2VP9Picture&) = delete;
- V4L2VP9Picture& operator=(const V4L2VP9Picture&) = delete;
-
- V4L2VP9Picture* AsV4L2VP9Picture() override { return this; }
- scoped_refptr<V4L2DecodeSurface> dec_surface() { return dec_surface_; }
-
- private:
- ~V4L2VP9Picture() override = default;
-
- scoped_refptr<VP9Picture> CreateDuplicate() override {
- return new V4L2VP9Picture(dec_surface_);
- }
-
- scoped_refptr<V4L2DecodeSurface> dec_surface_;
-};
-
-namespace {
-
-scoped_refptr<V4L2DecodeSurface> VP9PictureToV4L2DecodeSurface(
- VP9Picture* pic) {
- V4L2VP9Picture* v4l2_pic = pic->AsV4L2VP9Picture();
- CHECK(v4l2_pic);
- return v4l2_pic->dec_surface();
-}
-
-void FillV4L2VP9LoopFilterParams(const Vp9LoopFilterParams& vp9_lf_params,
- struct v4l2_vp9_loop_filter* v4l2_lf) {
-#define SET_FLAG_IF(cond, flag) \
- v4l2_lf->flags |= ((vp9_lf_params.cond) ? (flag) : 0)
-
- SET_FLAG_IF(delta_enabled, V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED);
- SET_FLAG_IF(delta_update, V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE);
-#undef SET_FLAG_IF
-
- v4l2_lf->level = vp9_lf_params.level;
- v4l2_lf->sharpness = vp9_lf_params.sharpness;
- SafeArrayMemcpy(v4l2_lf->ref_deltas, vp9_lf_params.ref_deltas);
- SafeArrayMemcpy(v4l2_lf->mode_deltas, vp9_lf_params.mode_deltas);
- SafeArrayMemcpy(v4l2_lf->level_lookup, vp9_lf_params.lvl);
-}
-
-void FillV4L2VP9QuantizationParams(
- const Vp9QuantizationParams& vp9_quant_params,
- struct v4l2_vp9_quantization* v4l2_quant) {
- v4l2_quant->base_q_idx = vp9_quant_params.base_q_idx;
- v4l2_quant->delta_q_y_dc = vp9_quant_params.delta_q_y_dc;
- v4l2_quant->delta_q_uv_dc = vp9_quant_params.delta_q_uv_dc;
- v4l2_quant->delta_q_uv_ac = vp9_quant_params.delta_q_uv_ac;
-}
-
-void FillV4L2VP9SegmentationParams(const Vp9SegmentationParams& vp9_seg_params,
- struct v4l2_vp9_segmentation* v4l2_seg) {
-#define SET_FLAG_IF(cond, flag) \
- v4l2_seg->flags |= ((vp9_seg_params.cond) ? (flag) : 0)
-
- SET_FLAG_IF(enabled, V4L2_VP9_SEGMENTATION_FLAG_ENABLED);
- SET_FLAG_IF(update_map, V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP);
- SET_FLAG_IF(temporal_update, V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE);
- SET_FLAG_IF(update_data, V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA);
- SET_FLAG_IF(abs_or_delta_update,
- V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE);
-#undef SET_FLAG_IF
-
- SafeArrayMemcpy(v4l2_seg->tree_probs, vp9_seg_params.tree_probs);
- SafeArrayMemcpy(v4l2_seg->pred_probs, vp9_seg_params.pred_probs);
-
- static_assert(static_cast<size_t>(Vp9SegmentationParams::SEG_LVL_MAX) ==
- static_cast<size_t>(V4L2_VP9_SEGMENT_FEATURE_CNT),
- "mismatch in number of segmentation features");
- for (size_t j = 0; j < 8; j++) {
- for (size_t i = 0; i < V4L2_VP9_SEGMENT_FEATURE_CNT; i++) {
- if (vp9_seg_params.feature_enabled[j][i])
- v4l2_seg->feature_enabled[j] |= V4L2_VP9_SEGMENT_FEATURE_ENABLED(i);
- }
- }
-
- SafeArrayMemcpy(v4l2_seg->feature_data, vp9_seg_params.feature_data);
-}
-
-void FillV4L2VP9MvProbsParams(const Vp9FrameContext& vp9_ctx,
- struct v4l2_vp9_mv_probabilities* v4l2_mv_probs) {
- SafeArrayMemcpy(v4l2_mv_probs->joint, vp9_ctx.mv_joint_probs);
- SafeArrayMemcpy(v4l2_mv_probs->sign, vp9_ctx.mv_sign_prob);
- SafeArrayMemcpy(v4l2_mv_probs->class_, vp9_ctx.mv_class_probs);
- SafeArrayMemcpy(v4l2_mv_probs->class0_bit, vp9_ctx.mv_class0_bit_prob);
- SafeArrayMemcpy(v4l2_mv_probs->bits, vp9_ctx.mv_bits_prob);
- SafeArrayMemcpy(v4l2_mv_probs->class0_fr, vp9_ctx.mv_class0_fr_probs);
- SafeArrayMemcpy(v4l2_mv_probs->fr, vp9_ctx.mv_fr_probs);
- SafeArrayMemcpy(v4l2_mv_probs->class0_hp, vp9_ctx.mv_class0_hp_prob);
- SafeArrayMemcpy(v4l2_mv_probs->hp, vp9_ctx.mv_hp_prob);
-}
-
-void GetVP9MvProbsParams(const struct v4l2_vp9_mv_probabilities* v4l2_mv_probs,
- Vp9FrameContext* vp9_ctx) {
- SafeArrayMemcpy(vp9_ctx->mv_joint_probs, v4l2_mv_probs->joint);
- SafeArrayMemcpy(vp9_ctx->mv_sign_prob, v4l2_mv_probs->sign);
- SafeArrayMemcpy(vp9_ctx->mv_class_probs, v4l2_mv_probs->class_);
- SafeArrayMemcpy(vp9_ctx->mv_class0_bit_prob, v4l2_mv_probs->class0_bit);
- SafeArrayMemcpy(vp9_ctx->mv_bits_prob, v4l2_mv_probs->bits);
- SafeArrayMemcpy(vp9_ctx->mv_class0_fr_probs, v4l2_mv_probs->class0_fr);
- SafeArrayMemcpy(vp9_ctx->mv_fr_probs, v4l2_mv_probs->fr);
- SafeArrayMemcpy(vp9_ctx->mv_class0_hp_prob, v4l2_mv_probs->class0_hp);
- SafeArrayMemcpy(vp9_ctx->mv_hp_prob, v4l2_mv_probs->hp);
-}
-
-void FillV4L2VP9ProbsParams(const Vp9FrameContext& vp9_ctx,
- struct v4l2_vp9_probabilities* v4l2_probs) {
- SafeArrayMemcpy(v4l2_probs->tx8, vp9_ctx.tx_probs_8x8);
- SafeArrayMemcpy(v4l2_probs->tx16, vp9_ctx.tx_probs_16x16);
- SafeArrayMemcpy(v4l2_probs->tx32, vp9_ctx.tx_probs_32x32);
- SafeArrayMemcpy(v4l2_probs->coef, vp9_ctx.coef_probs);
- SafeArrayMemcpy(v4l2_probs->skip, vp9_ctx.skip_prob);
- SafeArrayMemcpy(v4l2_probs->inter_mode, vp9_ctx.inter_mode_probs);
- SafeArrayMemcpy(v4l2_probs->interp_filter, vp9_ctx.interp_filter_probs);
- SafeArrayMemcpy(v4l2_probs->is_inter, vp9_ctx.is_inter_prob);
- SafeArrayMemcpy(v4l2_probs->comp_mode, vp9_ctx.comp_mode_prob);
- SafeArrayMemcpy(v4l2_probs->single_ref, vp9_ctx.single_ref_prob);
- SafeArrayMemcpy(v4l2_probs->comp_ref, vp9_ctx.comp_ref_prob);
- SafeArrayMemcpy(v4l2_probs->y_mode, vp9_ctx.y_mode_probs);
- SafeArrayMemcpy(v4l2_probs->uv_mode, vp9_ctx.uv_mode_probs);
- SafeArrayMemcpy(v4l2_probs->partition, vp9_ctx.partition_probs);
-
- FillV4L2VP9MvProbsParams(vp9_ctx, &v4l2_probs->mv);
-}
-
-void GetVP9ProbsParams(const struct v4l2_vp9_probabilities* v4l2_probs,
- Vp9FrameContext* vp9_ctx) {
- SafeArrayMemcpy(vp9_ctx->tx_probs_8x8, v4l2_probs->tx8);
- SafeArrayMemcpy(vp9_ctx->tx_probs_16x16, v4l2_probs->tx16);
- SafeArrayMemcpy(vp9_ctx->tx_probs_32x32, v4l2_probs->tx32);
- SafeArrayMemcpy(vp9_ctx->coef_probs, v4l2_probs->coef);
- SafeArrayMemcpy(vp9_ctx->skip_prob, v4l2_probs->skip);
- SafeArrayMemcpy(vp9_ctx->inter_mode_probs, v4l2_probs->inter_mode);
- SafeArrayMemcpy(vp9_ctx->interp_filter_probs, v4l2_probs->interp_filter);
- SafeArrayMemcpy(vp9_ctx->is_inter_prob, v4l2_probs->is_inter);
- SafeArrayMemcpy(vp9_ctx->comp_mode_prob, v4l2_probs->comp_mode);
- SafeArrayMemcpy(vp9_ctx->single_ref_prob, v4l2_probs->single_ref);
- SafeArrayMemcpy(vp9_ctx->comp_ref_prob, v4l2_probs->comp_ref);
- SafeArrayMemcpy(vp9_ctx->y_mode_probs, v4l2_probs->y_mode);
- SafeArrayMemcpy(vp9_ctx->uv_mode_probs, v4l2_probs->uv_mode);
- SafeArrayMemcpy(vp9_ctx->partition_probs, v4l2_probs->partition);
-
- GetVP9MvProbsParams(&v4l2_probs->mv, vp9_ctx);
-}
-
-} // namespace
-
-V4L2VideoDecoderDelegateVP9Chromium::V4L2VideoDecoderDelegateVP9Chromium(
- V4L2DecodeSurfaceHandler* surface_handler,
- V4L2Device* device)
- : surface_handler_(surface_handler),
- device_(device),
- device_needs_compressed_header_parsed_(
- device->IsCtrlExposed(V4L2_CID_MPEG_VIDEO_VP9_FRAME_CONTEXT(0))) {
- DCHECK(surface_handler_);
-}
-
-V4L2VideoDecoderDelegateVP9Chromium::~V4L2VideoDecoderDelegateVP9Chromium() =
- default;
-
-scoped_refptr<VP9Picture>
-V4L2VideoDecoderDelegateVP9Chromium::CreateVP9Picture() {
- scoped_refptr<V4L2DecodeSurface> dec_surface =
- surface_handler_->CreateSurface();
- if (!dec_surface)
- return nullptr;
-
- return new V4L2VP9Picture(std::move(dec_surface));
-}
-
-DecodeStatus V4L2VideoDecoderDelegateVP9Chromium::SubmitDecode(
- scoped_refptr<VP9Picture> pic,
- const Vp9SegmentationParams& segm_params,
- const Vp9LoopFilterParams& lf_params,
- const Vp9ReferenceFrameVector& ref_frames,
- base::OnceClosure done_cb) {
- const Vp9FrameHeader* frame_hdr = pic->frame_hdr.get();
- DCHECK(frame_hdr);
- struct v4l2_ctrl_vp9_frame_decode_params v4l2_frame_params;
- memset(&v4l2_frame_params, 0, sizeof(v4l2_frame_params));
-
-#define SET_FLAG_IF(cond, flag) \
- v4l2_frame_params.flags |= ((frame_hdr->cond) ? (flag) : 0)
-
- SET_FLAG_IF(frame_type == Vp9FrameHeader::KEYFRAME,
- V4L2_VP9_FRAME_FLAG_KEY_FRAME);
- SET_FLAG_IF(show_frame, V4L2_VP9_FRAME_FLAG_SHOW_FRAME);
- SET_FLAG_IF(error_resilient_mode, V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT);
- SET_FLAG_IF(intra_only, V4L2_VP9_FRAME_FLAG_INTRA_ONLY);
- SET_FLAG_IF(allow_high_precision_mv, V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV);
- SET_FLAG_IF(refresh_frame_context, V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX);
- SET_FLAG_IF(frame_parallel_decoding_mode,
- V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE);
- SET_FLAG_IF(subsampling_x, V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING);
- SET_FLAG_IF(subsampling_y, V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING);
- SET_FLAG_IF(color_range, V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING);
-#undef SET_FLAG_IF
-
- v4l2_frame_params.compressed_header_size = frame_hdr->header_size_in_bytes;
- v4l2_frame_params.uncompressed_header_size =
- frame_hdr->uncompressed_header_size;
- v4l2_frame_params.profile = frame_hdr->profile;
- // As per the VP9 specification:
- switch (frame_hdr->reset_frame_context) {
- // "0 or 1 implies don’t reset."
- case 0:
- case 1:
- v4l2_frame_params.reset_frame_context = V4L2_VP9_RESET_FRAME_CTX_NONE;
- break;
- // "2 resets just the context specified in the frame header."
- case 2:
- v4l2_frame_params.reset_frame_context = V4L2_VP9_RESET_FRAME_CTX_SPEC;
- break;
- // "3 reset all contexts."
- case 3:
- v4l2_frame_params.reset_frame_context = V4L2_VP9_RESET_FRAME_CTX_ALL;
- break;
- default:
- VLOGF(1) << "Invalid reset frame context value!";
- v4l2_frame_params.reset_frame_context = V4L2_VP9_RESET_FRAME_CTX_NONE;
- break;
- }
- v4l2_frame_params.frame_context_idx =
- frame_hdr->frame_context_idx_to_save_probs;
- v4l2_frame_params.bit_depth = frame_hdr->bit_depth;
-
- v4l2_frame_params.interpolation_filter = frame_hdr->interpolation_filter;
- v4l2_frame_params.tile_cols_log2 = frame_hdr->tile_cols_log2;
- v4l2_frame_params.tile_rows_log2 = frame_hdr->tile_rows_log2;
- v4l2_frame_params.tx_mode = frame_hdr->compressed_header.tx_mode;
- v4l2_frame_params.reference_mode =
- frame_hdr->compressed_header.reference_mode;
- for (size_t i = 0; i < V4L2_REF_ID_CNT; i++) {
- v4l2_frame_params.ref_frame_sign_biases |=
- (frame_hdr->ref_frame_sign_bias[i + VP9_FRAME_LAST] ? (1 << i) : 0);
- }
- v4l2_frame_params.frame_width_minus_1 = frame_hdr->frame_width - 1;
- v4l2_frame_params.frame_height_minus_1 = frame_hdr->frame_height - 1;
- v4l2_frame_params.render_width_minus_1 = frame_hdr->render_width - 1;
- v4l2_frame_params.render_height_minus_1 = frame_hdr->render_height - 1;
-
- // Reference frames
- for (size_t i = 0; i < std::size(frame_hdr->ref_frame_idx); i++) {
- uint8_t idx = frame_hdr->ref_frame_idx[i];
- if (idx >= kVp9NumRefFrames) {
- VLOGF(1) << "Invalid reference frame index!";
- return DecodeStatus::kFail;
- }
-
- auto ref_pic = ref_frames.GetFrame(idx);
- if (ref_pic) {
- auto ref_surface = VP9PictureToV4L2DecodeSurface(ref_pic.get());
- v4l2_frame_params.refs[i] = ref_surface->GetReferenceID();
- } else {
- v4l2_frame_params.refs[i] = 0xffffffff;
- }
- }
-
- FillV4L2VP9LoopFilterParams(lf_params, &v4l2_frame_params.lf);
- FillV4L2VP9QuantizationParams(frame_hdr->quant_params,
- &v4l2_frame_params.quant);
- FillV4L2VP9SegmentationParams(segm_params, &v4l2_frame_params.seg);
- FillV4L2VP9ProbsParams(frame_hdr->frame_context, &v4l2_frame_params.probs);
-
- scoped_refptr<V4L2DecodeSurface> dec_surface =
- VP9PictureToV4L2DecodeSurface(pic.get());
-
- struct v4l2_ext_control ctrl;
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_DECODE_PARAMS;
- ctrl.size = sizeof(v4l2_frame_params);
- ctrl.ptr = &v4l2_frame_params;
-
- struct v4l2_ext_controls ctrls;
- memset(&ctrls, 0, sizeof(ctrls));
- ctrls.count = 1;
- ctrls.controls = &ctrl;
- dec_surface->PrepareSetCtrls(&ctrls);
- if (device_->Ioctl(VIDIOC_S_EXT_CTRLS, &ctrls) != 0) {
- VPLOGF(1) << "ioctl() failed: VIDIOC_S_EXT_CTRLS";
- return DecodeStatus::kFail;
- }
-
- std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
- for (size_t i = 0; i < kVp9NumRefFrames; i++) {
- auto ref_pic = ref_frames.GetFrame(i);
- if (ref_pic) {
- auto ref_surface = VP9PictureToV4L2DecodeSurface(ref_pic.get());
- ref_surfaces.emplace_back(std::move(ref_surface));
- }
- }
-
- dec_surface->SetReferenceSurfaces(std::move(ref_surfaces));
- dec_surface->SetDecodeDoneCallback(std::move(done_cb));
-
- // Copy the frame data into the V4L2 buffer.
- if (!surface_handler_->SubmitSlice(dec_surface.get(), frame_hdr->data,
- frame_hdr->frame_size))
- return DecodeStatus::kFail;
-
- // Queue the buffers to the kernel driver.
- DVLOGF(4) << "Submitting decode for surface: " << dec_surface->ToString();
- surface_handler_->DecodeSurface(dec_surface);
-
- return DecodeStatus::kOk;
-}
-
-bool V4L2VideoDecoderDelegateVP9Chromium::OutputPicture(
- scoped_refptr<VP9Picture> pic) {
- surface_handler_->SurfaceReady(VP9PictureToV4L2DecodeSurface(pic.get()),
- pic->bitstream_id(), pic->visible_rect(),
- pic->get_colorspace());
- return true;
-}
-
-bool V4L2VideoDecoderDelegateVP9Chromium::GetFrameContext(
- scoped_refptr<VP9Picture> pic,
- Vp9FrameContext* frame_ctx) {
- auto ctx_id = pic->frame_hdr->frame_context_idx_to_save_probs;
-
- struct v4l2_ctrl_vp9_frame_ctx v4l2_vp9_ctx;
- memset(&v4l2_vp9_ctx, 0, sizeof(v4l2_vp9_ctx));
-
- struct v4l2_ext_control ctrl;
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_CONTEXT(ctx_id);
- ctrl.size = sizeof(v4l2_vp9_ctx);
- ctrl.ptr = &v4l2_vp9_ctx;
-
- struct v4l2_ext_controls ctrls;
- memset(&ctrls, 0, sizeof(ctrls));
- ctrls.count = 1;
- ctrls.controls = &ctrl;
- if (device_->Ioctl(VIDIOC_G_EXT_CTRLS, &ctrls) != 0) {
- VPLOGF(1) << "ioctl() failed: VIDIOC_G_EXT_CTRLS";
- return false;
- }
-
- GetVP9ProbsParams(&v4l2_vp9_ctx.probs, frame_ctx);
- return true;
-}
-
-bool V4L2VideoDecoderDelegateVP9Chromium::NeedsCompressedHeaderParsed() const {
- return device_needs_compressed_header_parsed_;
-}
-
-bool V4L2VideoDecoderDelegateVP9Chromium::SupportsContextProbabilityReadback()
- const {
- return true;
-}
-
-} // namespace media
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_chromium.h b/chromium/media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_chromium.h
deleted file mode 100644
index 4255049becb..00000000000
--- a/chromium/media/gpu/v4l2/v4l2_video_decoder_delegate_vp9_chromium.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_DELEGATE_VP9_CHROMIUM_H_
-#define MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_DELEGATE_VP9_CHROMIUM_H_
-
-#include "media/gpu/vp9_decoder.h"
-
-namespace media {
-
-class V4L2DecodeSurface;
-class V4L2DecodeSurfaceHandler;
-class V4L2Device;
-
-class V4L2VideoDecoderDelegateVP9Chromium : public VP9Decoder::VP9Accelerator {
- public:
- explicit V4L2VideoDecoderDelegateVP9Chromium(
- V4L2DecodeSurfaceHandler* surface_handler,
- V4L2Device* device);
-
- V4L2VideoDecoderDelegateVP9Chromium(
- const V4L2VideoDecoderDelegateVP9Chromium&) = delete;
- V4L2VideoDecoderDelegateVP9Chromium& operator=(
- const V4L2VideoDecoderDelegateVP9Chromium&) = delete;
-
- ~V4L2VideoDecoderDelegateVP9Chromium() override;
-
- // VP9Decoder::VP9Accelerator implementation.
- scoped_refptr<VP9Picture> CreateVP9Picture() override;
-
- Status SubmitDecode(scoped_refptr<VP9Picture> pic,
- const Vp9SegmentationParams& segm_params,
- const Vp9LoopFilterParams& lf_params,
- const Vp9ReferenceFrameVector& reference_frames,
- base::OnceClosure done_cb) override;
-
- bool OutputPicture(scoped_refptr<VP9Picture> pic) override;
-
- bool GetFrameContext(scoped_refptr<VP9Picture> pic,
- Vp9FrameContext* frame_ctx) override;
-
- bool NeedsCompressedHeaderParsed() const override;
- bool SupportsContextProbabilityReadback() const override;
-
- private:
- V4L2DecodeSurfaceHandler* const surface_handler_;
- V4L2Device* const device_;
-
- // True if |device_| exposes the V4L2_CID_STATELESS_VP9_FRAME control
- // (indicating that the driver needs the entropy tables from the compressed
- // header).
- const bool device_needs_compressed_header_parsed_;
-};
-
-} // namespace media
-
-#endif // MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_DELEGATE_VP9_CHROMIUM_H_
diff --git a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
index 34c884049c2..87193cf6e0a 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
@@ -21,6 +21,8 @@
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/numerics/safe_conversions.h"
#include "base/task/single_thread_task_runner.h"
#include "base/task/task_traits.h"
@@ -31,7 +33,6 @@
#include "media/base/color_plane_layout.h"
#include "media/base/media_log.h"
#include "media/base/scopedfd_helper.h"
-#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_frame_layout.h"
#include "media/base/video_types.h"
#include "media/gpu/chromeos/fourcc.h"
@@ -128,10 +129,10 @@ absl::optional<VideoFrameLayout> AsMultiPlanarLayout(
} // namespace
struct V4L2VideoEncodeAccelerator::BitstreamBufferRef {
- BitstreamBufferRef(int32_t id, std::unique_ptr<UnalignedSharedMemory> shm)
- : id(id), shm(std::move(shm)) {}
+ BitstreamBufferRef(int32_t id, base::WritableSharedMemoryMapping shm_mapping)
+ : id(id), shm_mapping(std::move(shm_mapping)) {}
const int32_t id;
- const std::unique_ptr<UnalignedSharedMemory> shm;
+ base::WritableSharedMemoryMapping shm_mapping;
};
V4L2VideoEncodeAccelerator::InputRecord::InputRecord() = default;
@@ -277,24 +278,15 @@ bool V4L2VideoEncodeAccelerator::Initialize(
return false;
}
- bool result = false;
- base::WaitableEvent done;
encoder_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&V4L2VideoEncodeAccelerator::InitializeTask,
- weak_this_, config, &result, &done));
- done.Wait();
- return result;
+ weak_this_, config));
+ return true;
}
-void V4L2VideoEncodeAccelerator::InitializeTask(const Config& config,
- bool* result,
- base::WaitableEvent* done) {
+void V4L2VideoEncodeAccelerator::InitializeTask(const Config& config) {
DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
-
- // Signal the event when leaving the method.
- base::ScopedClosureRunner signal_event(
- base::BindOnce(&base::WaitableEvent::Signal, base::Unretained(done)));
- *result = false;
+ TRACE_EVENT0("media,gpu", "V4L2VEA::InitializeTask");
native_input_mode_ =
config.storage_type.value_or(Config::StorageType::kShmem) ==
@@ -310,6 +302,7 @@ void V4L2VideoEncodeAccelerator::InitializeTask(const Config& config,
if (!SetFormats(config.input_format, config.output_profile)) {
VLOGF(1) << "Failed setting up formats";
+ NOTIFY_ERROR(kPlatformFailureError);
return;
}
@@ -323,6 +316,7 @@ void V4L2VideoEncodeAccelerator::InitializeTask(const Config& config,
VideoFrame::NumPlanes(config.input_format)));
if (!input_layout) {
VLOGF(1) << "Invalid image processor input layout";
+ NOTIFY_ERROR(kPlatformFailureError);
return;
}
@@ -332,6 +326,7 @@ void V4L2VideoEncodeAccelerator::InitializeTask(const Config& config,
encoder_input_visible_rect_,
encoder_input_visible_rect_)) {
VLOGF(1) << "Failed to create image processor";
+ NOTIFY_ERROR(kPlatformFailureError);
return;
}
@@ -343,16 +338,16 @@ void V4L2VideoEncodeAccelerator::InitializeTask(const Config& config,
VLOGF(1) << "Failed to reconfigure v4l2 encoder driver with the "
<< "ImageProcessor output buffer: "
<< ip_output_buffer_size.ToString();
+ NOTIFY_ERROR(kPlatformFailureError);
return;
}
}
- if (!InitInputMemoryType(config))
- return;
- if (!InitControls(config))
- return;
- if (!CreateOutputBuffers())
+ if (!InitInputMemoryType(config) || !InitControls(config) ||
+ !CreateOutputBuffers()) {
+ NOTIFY_ERROR(kPlatformFailureError);
return;
+ }
encoder_state_ = kInitialized;
RequestEncodingParametersChangeTask(
@@ -385,9 +380,6 @@ void V4L2VideoEncodeAccelerator::InitializeTask(const Config& config,
child_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&Client::NotifyEncoderInfoChange, client_, encoder_info));
-
- // Finish initialization.
- *result = true;
}
bool V4L2VideoEncodeAccelerator::CreateImageProcessor(
@@ -417,7 +409,7 @@ bool V4L2VideoEncodeAccelerator::CreateImageProcessor(
}
auto platform_layout = GetPlatformVideoFrameLayout(
- /*gpu_memory_buffer_factory=*/nullptr, output_format, output_size,
+ output_format, output_size,
gfx::BufferUsage::VEA_READ_CAMERA_AND_CPU_READ_WRITE);
if (!platform_layout) {
VLOGF(1) << "Failed to get Platform VideoFrameLayout";
@@ -487,7 +479,6 @@ bool V4L2VideoEncodeAccelerator::AllocateImageProcessorOutputBuffers(
switch (output_config.storage_type()) {
case VideoFrame::STORAGE_GPU_MEMORY_BUFFER:
image_processor_output_buffers_[i] = CreateGpuMemoryBufferVideoFrame(
- /*gpu_memory_buffer_factory=*/nullptr,
output_config.fourcc.ToVideoPixelFormat(), output_config.size,
output_config.visible_rect, output_config.visible_rect.size(),
base::TimeDelta(),
@@ -657,8 +648,8 @@ size_t V4L2VideoEncodeAccelerator::CopyIntoOutputBuffer(
std::unique_ptr<BitstreamBufferRef> buffer_ref) {
DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
- uint8_t* dst_ptr = static_cast<uint8_t*>(buffer_ref->shm->memory());
- size_t remaining_dst_size = buffer_ref->shm->size();
+ uint8_t* dst_ptr = buffer_ref->shm_mapping.GetMemoryAs<uint8_t>();
+ size_t remaining_dst_size = buffer_ref->shm_mapping.size();
if (!inject_sps_and_pps_) {
if (bitstream_size <= remaining_dst_size) {
@@ -735,7 +726,7 @@ size_t V4L2VideoEncodeAccelerator::CopyIntoOutputBuffer(
&remaining_dst_size);
}
- return buffer_ref->shm->size() - remaining_dst_size;
+ return buffer_ref->shm_mapping.size() - remaining_dst_size;
}
void V4L2VideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
@@ -749,16 +740,29 @@ void V4L2VideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
return;
}
- if (frame && !ReconfigureFormatIfNeeded(*frame)) {
- NOTIFY_ERROR(kPlatformFailureError);
- encoder_state_ = kError;
- return;
- }
+ if (frame) {
+ // |frame| can be nullptr to indicate a flush.
+ const bool is_expected_storage_type =
+ native_input_mode_
+ ? frame->storage_type() == VideoFrame::STORAGE_GPU_MEMORY_BUFFER
+ : frame->IsMappable();
+ if (!is_expected_storage_type) {
+ VLOGF(1) << "Unexpected storage: "
+ << VideoFrame::StorageTypeToString(frame->storage_type());
+ NOTIFY_ERROR(kInvalidArgumentError);
+ return;
+ }
- // If a video frame to be encoded is fed, then call VIDIOC_REQBUFS if it has
- // not been called yet.
- if (frame && input_buffer_map_.empty() && !CreateInputBuffers())
- return;
+ if (!ReconfigureFormatIfNeeded(*frame)) {
+ NOTIFY_ERROR(kPlatformFailureError);
+ return;
+ }
+
+ // If a video frame to be encoded is fed, then call VIDIOC_REQBUFS if it has
+ // not been called yet.
+ if (input_buffer_map_.empty() && !CreateInputBuffers())
+ return;
+ }
if (image_processor_) {
image_processor_input_queue_.emplace(std::move(frame), force_keyframe);
@@ -935,15 +939,17 @@ void V4L2VideoEncodeAccelerator::UseOutputBitstreamBufferTask(
NOTIFY_ERROR(kInvalidArgumentError);
return;
}
- auto shm = std::make_unique<UnalignedSharedMemory>(buffer.TakeRegion(),
- buffer.size(), false);
- if (!shm->MapAt(buffer.offset(), buffer.size())) {
+
+ base::UnsafeSharedMemoryRegion shm_region = buffer.TakeRegion();
+ base::WritableSharedMemoryMapping shm_mapping =
+ shm_region.MapAt(buffer.offset(), buffer.size());
+ if (!shm_mapping.IsValid()) {
NOTIFY_ERROR(kPlatformFailureError);
return;
}
- bitstream_buffer_pool_.push_back(
- std::make_unique<BitstreamBufferRef>(buffer.id(), std::move(shm)));
+ bitstream_buffer_pool_.push_back(std::make_unique<BitstreamBufferRef>(
+ buffer.id(), std::move(shm_mapping)));
PumpBitstreamBuffers();
if (encoder_state_ == kInitialized) {
@@ -1419,8 +1425,8 @@ bool V4L2VideoEncodeAccelerator::StopDevicePoll() {
// Reset all our accounting info.
while (!encoder_input_queue_.empty())
encoder_input_queue_.pop();
- for (auto& input_record : input_buffer_map_) {
- input_record.frame = nullptr;
+ for (auto& [frame, ip_output_buffer_index] : input_buffer_map_) {
+ frame = nullptr;
}
bitstream_buffer_pool_.clear();
@@ -1491,6 +1497,14 @@ void V4L2VideoEncodeAccelerator::RequestEncodingParametersChangeTask(
if (bitrate.mode() != Bitrate::Mode::kConstant)
return;
+ // Set bitrate control to CBR
+ // Not all devices support multiple bitrate control algorithms,
+ // so this control can't be mandatory and therefore the return
+ // value is not checked.
+ device_->SetExtCtrls(V4L2_CID_MPEG_CLASS,
+ {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)});
+
if (current_bitrate_ == bitrate.target_bps() &&
current_framerate_ == framerate) {
return;
@@ -1667,8 +1681,7 @@ bool V4L2VideoEncodeAccelerator::SetFormats(VideoPixelFormat input_format,
gfx::Size input_size = encoder_input_visible_rect_.size();
if (native_input_mode_) {
auto input_layout = GetPlatformVideoFrameLayout(
- /*gpu_memory_buffer_factory=*/nullptr, input_format,
- encoder_input_visible_rect_.size(),
+ input_format, encoder_input_visible_rect_.size(),
gfx::BufferUsage::VEA_READ_CAMERA_AND_CPU_READ_WRITE);
if (!input_layout)
return false;
diff --git a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h
index 6475b14f5f4..793c5301267 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h
@@ -200,9 +200,7 @@ class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
uint32_t framerate);
// Do several initializations (e.g. set up format) on |encoder_task_runner_|.
- void InitializeTask(const Config& config,
- bool* result,
- base::WaitableEvent* done);
+ void InitializeTask(const Config& config);
// Set up formats and initialize the device for them.
bool SetFormats(VideoPixelFormat input_format,
diff --git a/chromium/media/gpu/vaapi/BUILD.gn b/chromium/media/gpu/vaapi/BUILD.gn
index b79b6655abe..f701a807823 100644
--- a/chromium/media/gpu/vaapi/BUILD.gn
+++ b/chromium/media/gpu/vaapi/BUILD.gn
@@ -79,7 +79,7 @@ source_set("vaapi") {
"vp9_vaapi_video_encoder_delegate.cc",
"vp9_vaapi_video_encoder_delegate.h",
]
- if (enable_platform_hevc_decoding) {
+ if (enable_hevc_parser_and_hw_decoder) {
sources += [
"h265_vaapi_video_decoder_delegate.cc",
"h265_vaapi_video_decoder_delegate.h",
diff --git a/chromium/media/gpu/vaapi/OWNERS b/chromium/media/gpu/vaapi/OWNERS
index 7e72389cbc9..4da3e460b4b 100644
--- a/chromium/media/gpu/vaapi/OWNERS
+++ b/chromium/media/gpu/vaapi/OWNERS
@@ -5,4 +5,3 @@ andrescj@chromium.org
jkardatzke@google.com
# Legacy owners.
-dstaessens@chromium.org
diff --git a/chromium/media/gpu/vaapi/av1_vaapi_video_decoder_delegate.cc b/chromium/media/gpu/vaapi/av1_vaapi_video_decoder_delegate.cc
index b2d986b71d4..7c7214d5a1a 100644
--- a/chromium/media/gpu/vaapi/av1_vaapi_video_decoder_delegate.cc
+++ b/chromium/media/gpu/vaapi/av1_vaapi_video_decoder_delegate.cc
@@ -12,6 +12,7 @@
#include "base/callback_helpers.h"
#include "base/logging.h"
#include "base/memory/scoped_refptr.h"
+#include "build/chromeos_buildflags.h"
#include "media/gpu/av1_picture.h"
#include "media/gpu/decode_surface_handler.h"
#include "media/gpu/vaapi/vaapi_common.h"
@@ -736,7 +737,6 @@ AV1VaapiVideoDecoderDelegate::AV1VaapiVideoDecoderDelegate(
AV1VaapiVideoDecoderDelegate::~AV1VaapiVideoDecoderDelegate() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!picture_params_);
- DCHECK(slice_params_.empty());
DCHECK(!crypto_params_);
}
@@ -833,26 +833,25 @@ DecodeStatus AV1VaapiVideoDecoderDelegate::SubmitDecode(
if (!picture_params_)
return DecodeStatus::kFail;
}
- if (slice_params_.size() != slice_params.size()) {
- while (slice_params_.size() < slice_params.size()) {
- slice_params_.push_back(vaapi_wrapper_->CreateVABuffer(
- VASliceParameterBufferType, sizeof(VASliceParameterBufferAV1)));
- if (!slice_params_.back()) {
- slice_params_.clear();
- return DecodeStatus::kFail;
- }
- }
- slice_params_.resize(slice_params.size());
- slice_params_.shrink_to_fit();
+
+ // TODO(b/235138734): Once the driver is fixed, re-use the
+ // VASliceParameterBufferAV1 buffers across frames instead of creating new
+ // ones every time. Alternatively, consider recreating these buffers only if
+ // |slice_params| changes from frame to frame.
+ std::vector<std::unique_ptr<ScopedVABuffer>> slice_params_va_buffers;
+ for (size_t i = 0; i < slice_params.size(); i++) {
+ slice_params_va_buffers.push_back(vaapi_wrapper_->CreateVABuffer(
+ VASliceParameterBufferType, sizeof(VASliceParameterBufferAV1)));
+ if (!slice_params_va_buffers.back())
+ return DecodeStatus::kFail;
}
// TODO(hiroh): Don't submit the entire coded data to the buffer. Instead,
// only pass the data starting from the tile list OBU to reduce the size of
// the VA buffer. When this is changed, the encrypted subsample ranges must
// also be adjusted.
- // Always re-create |encoded_data| because reusing the buffer causes horrific
- // artifacts in decoded buffers. TODO(b/177028692): This seems to be a driver
- // bug, fix it and reuse the buffer.
+ // Create VASliceData buffer |encoded_data| every frame so that decoding can
+ // be more asynchronous than reusing the buffer.
auto encoded_data =
vaapi_wrapper_->CreateVABuffer(VASliceDataBufferType, data.size_bytes());
if (!encoded_data)
@@ -864,9 +863,9 @@ DecodeStatus AV1VaapiVideoDecoderDelegate::SubmitDecode(
{encoded_data->id(),
{encoded_data->type(), encoded_data->size(), data.data()}}};
for (size_t i = 0; i < slice_params.size(); ++i) {
- buffers.push_back({slice_params_[i]->id(),
- {slice_params_[i]->type(), slice_params_[i]->size(),
- &slice_params[i]}});
+ buffers.push_back({slice_params_va_buffers[i]->id(),
+ {slice_params_va_buffers[i]->type(),
+ slice_params_va_buffers[i]->size(), &slice_params[i]}});
}
#if BUILDFLAG(IS_CHROMEOS_ASH)
if (uses_crypto) {
@@ -892,7 +891,6 @@ void AV1VaapiVideoDecoderDelegate::OnVAContextDestructionSoon() {
// Destroy the member ScopedVABuffers below since they refer to a VAContextID
// that will be destroyed soon.
picture_params_.reset();
- slice_params_.clear();
crypto_params_.reset();
}
} // namespace media
diff --git a/chromium/media/gpu/vaapi/av1_vaapi_video_decoder_delegate.h b/chromium/media/gpu/vaapi/av1_vaapi_video_decoder_delegate.h
index 301c4884513..31fd5a4f2f6 100644
--- a/chromium/media/gpu/vaapi/av1_vaapi_video_decoder_delegate.h
+++ b/chromium/media/gpu/vaapi/av1_vaapi_video_decoder_delegate.h
@@ -43,7 +43,6 @@ class AV1VaapiVideoDecoderDelegate : public AV1Decoder::AV1Accelerator,
private:
std::unique_ptr<ScopedVABuffer> picture_params_;
- std::vector<std::unique_ptr<ScopedVABuffer>> slice_params_;
std::unique_ptr<ScopedVABuffer> crypto_params_;
};
} // namespace media
diff --git a/chromium/media/gpu/vaapi/h264_vaapi_video_decoder_delegate.cc b/chromium/media/gpu/vaapi/h264_vaapi_video_decoder_delegate.cc
index 36915e221b0..5999fa5b38f 100644
--- a/chromium/media/gpu/vaapi/h264_vaapi_video_decoder_delegate.cc
+++ b/chromium/media/gpu/vaapi/h264_vaapi_video_decoder_delegate.cc
@@ -96,6 +96,18 @@ static void InitVAPicture(VAPictureH264* va_pic) {
va_pic->flags = VA_PICTURE_H264_INVALID;
}
+void H264VaapiVideoDecoderDelegate::ProcessSPS(
+ const H264SPS* sps,
+ base::span<const uint8_t> sps_nalu_data) {
+ last_sps_nalu_data_.assign(sps_nalu_data.begin(), sps_nalu_data.end());
+}
+
+void H264VaapiVideoDecoderDelegate::ProcessPPS(
+ const H264PPS* pps,
+ base::span<const uint8_t> pps_nalu_data) {
+ last_pps_nalu_data_.assign(pps_nalu_data.begin(), pps_nalu_data.end());
+}
+
DecodeStatus H264VaapiVideoDecoderDelegate::SubmitFrameMetadata(
const H264SPS* sps,
const H264PPS* pps,
@@ -218,8 +230,6 @@ DecodeStatus H264VaapiVideoDecoderDelegate::SubmitFrameMetadata(
DecodeStatus H264VaapiVideoDecoderDelegate::ParseEncryptedSliceHeader(
const std::vector<base::span<const uint8_t>>& data,
const std::vector<SubsampleEntry>& subsamples,
- const std::vector<uint8_t>& sps_nalu_data,
- const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_header_out) {
DCHECK(slice_header_out);
DCHECK(!subsamples.empty());
@@ -278,8 +288,8 @@ DecodeStatus H264VaapiVideoDecoderDelegate::ParseEncryptedSliceHeader(
// Adjust the first segment length and init length to compensate for
// inserting the SPS, PPS and 3 start codes.
- size_t size_adjustment =
- sps_nalu_data.size() + pps_nalu_data.size() + kExtraDataBytes;
+ size_t size_adjustment = last_sps_nalu_data_.size() +
+ last_pps_nalu_data_.size() + kExtraDataBytes;
size_t total_size = 0;
size_t offset_adjustment = 0;
for (auto& segment : segment_info) {
@@ -333,11 +343,11 @@ DecodeStatus H264VaapiVideoDecoderDelegate::ParseEncryptedSliceHeader(
const std::vector<uint8_t> start_code = {0u, 0u, 1u};
full_data.reserve(total_size);
full_data.insert(full_data.end(), start_code.begin(), start_code.end());
- full_data.insert(full_data.end(), sps_nalu_data.begin(),
- sps_nalu_data.end());
+ full_data.insert(full_data.end(), last_sps_nalu_data_.begin(),
+ last_sps_nalu_data_.end());
full_data.insert(full_data.end(), start_code.begin(), start_code.end());
- full_data.insert(full_data.end(), pps_nalu_data.begin(),
- pps_nalu_data.end());
+ full_data.insert(full_data.end(), last_pps_nalu_data_.begin(),
+ last_pps_nalu_data_.end());
for (auto& nalu : data) {
full_data.insert(full_data.end(), start_code.begin(), start_code.end());
full_data.insert(full_data.end(), nalu.begin(), nalu.end());
diff --git a/chromium/media/gpu/vaapi/h264_vaapi_video_decoder_delegate.h b/chromium/media/gpu/vaapi/h264_vaapi_video_decoder_delegate.h
index e623cc04700..191f609cada 100644
--- a/chromium/media/gpu/vaapi/h264_vaapi_video_decoder_delegate.h
+++ b/chromium/media/gpu/vaapi/h264_vaapi_video_decoder_delegate.h
@@ -41,6 +41,10 @@ class H264VaapiVideoDecoderDelegate : public H264Decoder::H264Accelerator,
// H264Decoder::H264Accelerator implementation.
scoped_refptr<H264Picture> CreateH264Picture() override;
+ void ProcessSPS(const H264SPS* sps,
+ base::span<const uint8_t> sps_nalu_data) override;
+ void ProcessPPS(const H264PPS* pps,
+ base::span<const uint8_t> pps_nalu_data) override;
Status SubmitFrameMetadata(const H264SPS* sps,
const H264PPS* pps,
const H264DPB& dpb,
@@ -51,8 +55,6 @@ class H264VaapiVideoDecoderDelegate : public H264Decoder::H264Accelerator,
Status ParseEncryptedSliceHeader(
const std::vector<base::span<const uint8_t>>& data,
const std::vector<SubsampleEntry>& subsamples,
- const std::vector<uint8_t>& sps_nalu_data,
- const std::vector<uint8_t>& pps_nalu_data,
H264SliceHeader* slice_header_out) override;
Status SubmitSlice(const H264PPS* pps,
const H264SliceHeader* slice_hdr,
@@ -87,6 +89,11 @@ class H264VaapiVideoDecoderDelegate : public H264Decoder::H264Accelerator,
// We need to set this so we don't resubmit crypto params on decode.
bool full_sample_;
+
+ // The most recent SPS and PPS, assumed to be active when samples are fully
+ // encrypted.
+ std::vector<uint8_t> last_sps_nalu_data_;
+ std::vector<uint8_t> last_pps_nalu_data_;
};
} // namespace media
diff --git a/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate.cc b/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate.cc
index 211456dfed7..ffc25c1e82d 100644
--- a/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate.cc
+++ b/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate.cc
@@ -7,6 +7,7 @@
#include <va/va.h>
#include <va/va_enc_h264.h>
+#include <climits>
#include <utility>
#include "base/bits.h"
@@ -57,18 +58,30 @@ constexpr int kChromaFormatIDC = 1;
constexpr uint8_t kMinSupportedH264TemporalLayers = 2;
constexpr uint8_t kMaxSupportedH264TemporalLayers = 3;
-void FillVAEncRateControlParams(
- uint32_t bps,
- uint32_t window_size,
- uint32_t initial_qp,
- uint32_t min_qp,
- uint32_t max_qp,
- uint32_t framerate,
- uint32_t buffer_size,
- VAEncMiscParameterRateControl& rate_control_param,
- VAEncMiscParameterFrameRate& framerate_param,
- VAEncMiscParameterHRD& hrd_param) {
- memset(&rate_control_param, 0, sizeof(rate_control_param));
+template <typename VAEncMiscParam>
+VAEncMiscParam& AllocateMiscParameterBuffer(
+ std::vector<uint8_t>& misc_buffer,
+ VAEncMiscParameterType misc_param_type) {
+ constexpr size_t buffer_size =
+ sizeof(VAEncMiscParameterBuffer) + sizeof(VAEncMiscParam);
+ misc_buffer.resize(buffer_size);
+ auto* va_buffer =
+ reinterpret_cast<VAEncMiscParameterBuffer*>(misc_buffer.data());
+ va_buffer->type = misc_param_type;
+ return *reinterpret_cast<VAEncMiscParam*>(va_buffer->data);
+}
+
+void CreateVAEncRateControlParams(uint32_t bps,
+ uint32_t window_size,
+ uint32_t initial_qp,
+ uint32_t min_qp,
+ uint32_t max_qp,
+ uint32_t framerate,
+ uint32_t buffer_size,
+ std::vector<uint8_t> misc_buffers[3]) {
+ auto& rate_control_param =
+ AllocateMiscParameterBuffer<VAEncMiscParameterRateControl>(
+ misc_buffers[0], VAEncMiscParameterTypeRateControl);
rate_control_param.bits_per_second = bps;
rate_control_param.window_size = window_size;
rate_control_param.initial_qp = initial_qp;
@@ -76,10 +89,13 @@ void FillVAEncRateControlParams(
rate_control_param.max_qp = max_qp;
rate_control_param.rc_flags.bits.disable_frame_skip = true;
- memset(&framerate_param, 0, sizeof(framerate_param));
+ auto& framerate_param =
+ AllocateMiscParameterBuffer<VAEncMiscParameterFrameRate>(
+ misc_buffers[1], VAEncMiscParameterTypeFrameRate);
framerate_param.framerate = framerate;
- memset(&hrd_param, 0, sizeof(hrd_param));
+ auto& hrd_param = AllocateMiscParameterBuffer<VAEncMiscParameterHRD>(
+ misc_buffers[2], VAEncMiscParameterTypeHRD);
hrd_param.buffer_size = buffer_size;
hrd_param.initial_buffer_fullness = buffer_size / 2;
}
@@ -210,24 +226,6 @@ bool H264VaapiVideoEncoderDelegate::Initialize(
DVLOGF(1) << "Spatial layer encoding is not supported";
return false;
}
- if (config.HasTemporalLayer() && !supports_temporal_layer_for_testing_) {
- bool support_temporal_layer = false;
-#if defined(ARCH_CPU_X86_FAMILY) && BUILDFLAG(IS_CHROMEOS)
- VAImplementation implementation = VaapiWrapper::GetImplementationType();
- // TODO(b/199487660): Enable H.264 temporal layer encoding on AMD once their
- // drivers support them.
- support_temporal_layer =
- base::FeatureList::IsEnabled(kVaapiH264TemporalLayerHWEncoding) &&
- (implementation == VAImplementation::kIntelI965 ||
- implementation == VAImplementation::kIntelIHD);
-#endif
- if (!support_temporal_layer) {
- DVLOGF(1) << "Temporal layer encoding is not supported";
- return false;
- }
- }
-
- native_input_mode_ = ave_config.native_input_mode;
visible_size_ = config.input_visible_size;
// For 4:2:0, the pixel sizes have to be even.
@@ -276,15 +274,6 @@ bool H264VaapiVideoEncoderDelegate::Initialize(
<< base::strict_cast<size_t>(num_temporal_layers_);
return false;
}
-
- // |ave_config.max_num_ref_frames| represents the maximum number of
- // reference frames for both the reference picture list 0 (bottom 16 bits)
- // and the reference picture list 1 (top 16 bits) in H264 encoding.
- const size_t max_p_frame_slots = ave_config.max_num_ref_frames & 0xffff;
- if (max_p_frame_slots < num_temporal_layers_ - 1) {
- DVLOGF(1) << "P frame slots is too short: " << max_p_frame_slots;
- return false;
- }
}
curr_params_.max_ref_pic_list0_size =
@@ -407,7 +396,7 @@ bool H264VaapiVideoEncoderDelegate::PrepareEncodeJob(EncodeJob& encode_job) {
if (pic->type == H264SliceHeader::kISlice && submit_packed_headers_) {
// We always generate SPS and PPS with I(DR) frame. This will help for Seek
// operation on the generated stream.
- if (!SubmitPackedHeaders(packed_sps_, packed_pps_)) {
+ if (!SubmitPackedHeaders(*packed_sps_, *packed_pps_)) {
DVLOGF(1) << "Failed submitting keyframe headers";
return false;
}
@@ -855,34 +844,6 @@ H264VaapiVideoEncoderDelegate::GeneratePackedSliceHeader(
return packed_slice_header;
}
-bool H264VaapiVideoEncoderDelegate::SubmitH264BitstreamBuffer(
- const H264BitstreamBuffer& buffer) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-
- return vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderDataBufferType,
- buffer.BytesInBuffer(), buffer.data());
-}
-
-bool H264VaapiVideoEncoderDelegate::SubmitVAEncMiscParamBuffer(
- VAEncMiscParameterType type,
- const void* data,
- size_t size) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- // TODO(b/202337642): We don't have to allocate a misc parameter by having
- // VAEncMiscParameterBuffer + the size and filling VA enc misc data directly
- // into VAEncMiscParameterBuffer::data.
- const size_t temp_size = sizeof(VAEncMiscParameterBuffer) + size;
- std::vector<uint8_t> temp(temp_size);
-
- auto* const va_buffer =
- reinterpret_cast<VAEncMiscParameterBuffer*>(temp.data());
- va_buffer->type = type;
- memcpy(va_buffer->data, data, size);
-
- return vaapi_wrapper_->SubmitBuffer(VAEncMiscParameterBufferType, temp_size,
- temp.data());
-}
-
bool H264VaapiVideoEncoderDelegate::SubmitFrameParameters(
EncodeJob& job,
const H264VaapiVideoEncoderDelegate::EncodeParams& encode_params,
@@ -1009,86 +970,69 @@ bool H264VaapiVideoEncoderDelegate::SubmitFrameParameters(
slice_param.RefPicList0[j++] = va_pic_h264;
}
- VAEncMiscParameterRateControl rate_control_param;
- VAEncMiscParameterFrameRate framerate_param;
- VAEncMiscParameterHRD hrd_param;
- FillVAEncRateControlParams(
+ std::vector<uint8_t> misc_buffers[3];
+ CreateVAEncRateControlParams(
encode_params.bitrate_allocation.GetSumBps(),
encode_params.cpb_window_size_ms,
base::strict_cast<uint32_t>(pic_param.pic_init_qp),
base::strict_cast<uint32_t>(encode_params.min_qp),
base::strict_cast<uint32_t>(encode_params.max_qp),
encode_params.framerate,
- base::strict_cast<uint32_t>(encode_params.cpb_size_bits),
- rate_control_param, framerate_param, hrd_param);
-
- if (!vaapi_wrapper_->SubmitBuffer(VAEncSequenceParameterBufferType,
- &seq_param) ||
- !vaapi_wrapper_->SubmitBuffer(VAEncPictureParameterBufferType,
- &pic_param) ||
- !vaapi_wrapper_->SubmitBuffer(VAEncSliceParameterBufferType,
- &slice_param) ||
- !SubmitVAEncMiscParamBuffer(VAEncMiscParameterTypeRateControl,
- &rate_control_param,
- sizeof(rate_control_param)) ||
- !SubmitVAEncMiscParamBuffer(VAEncMiscParameterTypeFrameRate,
- &framerate_param, sizeof(framerate_param)) ||
- !SubmitVAEncMiscParamBuffer(VAEncMiscParameterTypeHRD, &hrd_param,
- sizeof(hrd_param))) {
- return false;
- }
-
- if (!submit_packed_headers_)
- return true;
-
- scoped_refptr<H264BitstreamBuffer> packed_slice_header =
- GeneratePackedSliceHeader(pic_param, slice_param, *pic);
+ base::strict_cast<uint32_t>(encode_params.cpb_size_bits), misc_buffers);
+
+ std::vector<VaapiWrapper::VABufferDescriptor> va_buffers = {
+ {VAEncSequenceParameterBufferType, sizeof(seq_param), &seq_param},
+ {VAEncPictureParameterBufferType, sizeof(pic_param), &pic_param},
+ {VAEncSliceParameterBufferType, sizeof(slice_param), &slice_param},
+ {VAEncMiscParameterBufferType, misc_buffers[0].size(),
+ misc_buffers[0].data()},
+ {VAEncMiscParameterBufferType, misc_buffers[1].size(),
+ misc_buffers[1].data()},
+ {VAEncMiscParameterBufferType, misc_buffers[2].size(),
+ misc_buffers[2].data()}};
+
+ scoped_refptr<H264BitstreamBuffer> packed_slice_header;
VAEncPackedHeaderParameterBuffer packed_slice_param_buffer;
- packed_slice_param_buffer.type = VAEncPackedHeaderSlice;
- packed_slice_param_buffer.bit_length = packed_slice_header->BitsInBuffer();
- packed_slice_param_buffer.has_emulation_bytes = 0;
-
- // Submit packed slice header.
- if (!vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderParameterBufferType,
- &packed_slice_param_buffer)) {
- return false;
+ if (submit_packed_headers_) {
+ packed_slice_header =
+ GeneratePackedSliceHeader(pic_param, slice_param, *pic);
+ packed_slice_param_buffer.type = VAEncPackedHeaderSlice;
+ packed_slice_param_buffer.bit_length = packed_slice_header->BitsInBuffer();
+ packed_slice_param_buffer.has_emulation_bytes = 0;
+ va_buffers.push_back({VAEncPackedHeaderParameterBufferType,
+ sizeof(packed_slice_param_buffer),
+ &packed_slice_param_buffer});
+ va_buffers.push_back({VAEncPackedHeaderDataBufferType,
+ packed_slice_header->BytesInBuffer(),
+ packed_slice_header->data()});
}
- return SubmitH264BitstreamBuffer(*packed_slice_header);
+ return vaapi_wrapper_->SubmitBuffers(va_buffers);
}
bool H264VaapiVideoEncoderDelegate::SubmitPackedHeaders(
- scoped_refptr<H264BitstreamBuffer> packed_sps,
- scoped_refptr<H264BitstreamBuffer> packed_pps) {
+ const H264BitstreamBuffer& packed_sps,
+ const H264BitstreamBuffer& packed_pps) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(submit_packed_headers_);
- DCHECK(packed_sps);
- DCHECK(packed_pps);
// Submit SPS.
- VAEncPackedHeaderParameterBuffer par_buffer = {};
- par_buffer.type = VAEncPackedHeaderSequence;
- par_buffer.bit_length = packed_sps->BytesInBuffer() * 8;
-
- if (!vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderParameterBufferType,
- &par_buffer)) {
- return false;
- }
-
- if (!SubmitH264BitstreamBuffer(*packed_sps))
- return false;
-
- // Submit PPS.
- par_buffer = {};
- par_buffer.type = VAEncPackedHeaderPicture;
- par_buffer.bit_length = packed_pps->BytesInBuffer() * 8;
-
- if (!vaapi_wrapper_->SubmitBuffer(VAEncPackedHeaderParameterBufferType,
- &par_buffer)) {
- return false;
- }
-
- return SubmitH264BitstreamBuffer(*packed_pps);
+ VAEncPackedHeaderParameterBuffer packed_sps_param = {};
+ packed_sps_param.type = VAEncPackedHeaderSequence;
+ packed_sps_param.bit_length = packed_sps.BytesInBuffer() * CHAR_BIT;
+ VAEncPackedHeaderParameterBuffer packed_pps_param = {};
+ packed_pps_param.type = VAEncPackedHeaderPicture;
+ packed_pps_param.bit_length = packed_pps.BytesInBuffer() * CHAR_BIT;
+
+ return vaapi_wrapper_->SubmitBuffers(
+ {{VAEncPackedHeaderParameterBufferType, sizeof(packed_sps_param),
+ &packed_sps_param},
+ {VAEncPackedHeaderDataBufferType, packed_sps.BytesInBuffer(),
+ packed_sps.data()},
+ {VAEncPackedHeaderParameterBufferType, sizeof(packed_pps_param),
+ &packed_pps_param},
+ {VAEncPackedHeaderDataBufferType, packed_pps.BytesInBuffer(),
+ packed_pps.data()}});
}
} // namespace media
diff --git a/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate.h b/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate.h
index ca6140ef570..c6f0badf8d3 100644
--- a/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate.h
+++ b/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate.h
@@ -98,16 +98,8 @@ class H264VaapiVideoEncoderDelegate : public VaapiVideoEncoderDelegate {
// current profile and level.
bool CheckConfigValidity(uint32_t bitrate, uint32_t framerate);
- // Submits a H264BitstreamBuffer |buffer| to the driver.
- bool SubmitH264BitstreamBuffer(const H264BitstreamBuffer& buffer);
- // Submits a VAEncMiscParameterBuffer |data| whose size and type are |size|
- // and |type| to the driver.
- bool SubmitVAEncMiscParamBuffer(VAEncMiscParameterType type,
- const void* data,
- size_t size);
-
- bool SubmitPackedHeaders(scoped_refptr<H264BitstreamBuffer> packed_sps,
- scoped_refptr<H264BitstreamBuffer> packed_pps);
+ bool SubmitPackedHeaders(const H264BitstreamBuffer& packed_sps,
+ const H264BitstreamBuffer& packed_pps);
bool SubmitFrameParameters(
EncodeJob& job,
@@ -160,10 +152,6 @@ class H264VaapiVideoEncoderDelegate : public VaapiVideoEncoderDelegate {
// RefPicList0 per spec (spec section 8.2.4.2).
base::circular_deque<scoped_refptr<H264Picture>> ref_pic_list0_;
- // Sets true if and only if testing.
- // TODO(b/199487660): Remove once all drivers support temporal layers.
- bool supports_temporal_layer_for_testing_ = false;
-
uint8_t num_temporal_layers_ = 1;
};
diff --git a/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate_unittest.cc b/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate_unittest.cc
index f5882a4b4ec..18b8a3e916a 100644
--- a/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate_unittest.cc
+++ b/chromium/media/gpu/vaapi/h264_vaapi_video_encoder_delegate_unittest.cc
@@ -23,9 +23,6 @@ namespace {
VaapiVideoEncoderDelegate::Config kDefaultVEADelegateConfig{
.max_num_ref_frames = 4,
- .native_input_mode = false,
- .bitrate_control =
- VaapiVideoEncoderDelegate::BitrateControl::kConstantBitrate,
};
VideoEncodeAccelerator::Config kDefaultVEAConfig(
@@ -164,12 +161,6 @@ class H264VaapiVideoEncoderDelegateTest
std::unique_ptr<VaapiVideoEncoderDelegate::EncodeJob>
H264VaapiVideoEncoderDelegateTest::CreateEncodeJob(bool keyframe) {
- auto input_frame = VideoFrame::CreateFrame(
- kDefaultVEAConfig.input_format, kDefaultVEAConfig.input_visible_size,
- gfx::Rect(kDefaultVEAConfig.input_visible_size),
- kDefaultVEAConfig.input_visible_size, base::TimeDelta());
- LOG_ASSERT(input_frame) << " Failed to create VideoFrame";
-
auto va_surface = base::MakeRefCounted<VASurface>(
next_surface_id_++, kDefaultVEAConfig.input_visible_size,
VA_RT_FORMAT_YUV420, base::DoNothing());
@@ -180,9 +171,11 @@ H264VaapiVideoEncoderDelegateTest::CreateEncodeJob(bool keyframe) {
kDummyVABufferID, VAEncCodedBufferType,
kDefaultVEAConfig.input_visible_size.GetArea());
+ // TODO(b/229358029): Set a valid timestamp and check the timestamp in
+ // metadata.
+ constexpr base::TimeDelta timestamp;
return std::make_unique<VaapiVideoEncoderDelegate::EncodeJob>(
- input_frame, keyframe, next_surface_id_++,
- kDefaultVEAConfig.input_visible_size, picture,
+ keyframe, timestamp, next_surface_id_++, picture,
std::move(scoped_va_buffer));
}
@@ -195,8 +188,6 @@ void H264VaapiVideoEncoderDelegateTest::SetUp() {
base::BindRepeating(&H264VaapiVideoEncoderDelegateTest::OnError,
base::Unretained(this)));
EXPECT_CALL(*this, OnError()).Times(0);
-
- encoder_->supports_temporal_layer_for_testing_ = true;
}
bool H264VaapiVideoEncoderDelegateTest::InitializeEncoder(
diff --git a/chromium/media/gpu/vaapi/vaapi_common.cc b/chromium/media/gpu/vaapi/vaapi_common.cc
index 14a5c416347..92e42b6a99c 100644
--- a/chromium/media/gpu/vaapi/vaapi_common.cc
+++ b/chromium/media/gpu/vaapi/vaapi_common.cc
@@ -17,7 +17,7 @@ VaapiH264Picture* VaapiH264Picture::AsVaapiH264Picture() {
return this;
}
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
VaapiH265Picture::VaapiH265Picture(scoped_refptr<VASurface> va_surface)
: va_surface_(va_surface) {}
@@ -27,7 +27,7 @@ VaapiH265Picture* VaapiH265Picture::AsVaapiH265Picture() {
return this;
}
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
VaapiVP8Picture::VaapiVP8Picture(scoped_refptr<VASurface> va_surface)
: va_surface_(va_surface) {}
diff --git a/chromium/media/gpu/vaapi/vaapi_common.h b/chromium/media/gpu/vaapi/vaapi_common.h
index 3c170021c9c..169eec95581 100644
--- a/chromium/media/gpu/vaapi/vaapi_common.h
+++ b/chromium/media/gpu/vaapi/vaapi_common.h
@@ -12,9 +12,9 @@
#include "media/gpu/vp9_picture.h"
#include "media/media_buildflags.h"
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
#include "media/gpu/h265_dpb.h"
-#endif
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
namespace media {
@@ -41,7 +41,7 @@ class VaapiH264Picture : public H264Picture {
scoped_refptr<VASurface> va_surface_;
};
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
class VaapiH265Picture : public H265Picture {
public:
explicit VaapiH265Picture(scoped_refptr<VASurface> va_surface);
@@ -60,7 +60,7 @@ class VaapiH265Picture : public H265Picture {
private:
scoped_refptr<VASurface> va_surface_;
};
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
class VaapiVP8Picture : public VP8Picture {
public:
diff --git a/chromium/media/gpu/vaapi/vaapi_image_processor_backend.cc b/chromium/media/gpu/vaapi/vaapi_image_processor_backend.cc
index 72f49658451..6c8cd3d38b5 100644
--- a/chromium/media/gpu/vaapi/vaapi_image_processor_backend.cc
+++ b/chromium/media/gpu/vaapi/vaapi_image_processor_backend.cc
@@ -16,6 +16,7 @@
#include "base/metrics/histogram_functions.h"
#include "base/stl_util.h"
#include "build/build_config.h"
+#include "build/chromeos_buildflags.h"
#include "media/gpu/chromeos/fourcc.h"
#include "media/gpu/chromeos/platform_video_frame_utils.h"
#include "media/gpu/macros.h"
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
index 8c2788dd896..72993a3a017 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
@@ -12,7 +12,8 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
-#include "base/memory/writable_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_conversions.h"
@@ -51,13 +52,13 @@ static void ReportToVAJEAEncodeResultUMA(VAJEAEncoderResult result) {
VaapiJpegEncodeAccelerator::EncodeRequest::EncodeRequest(
int32_t task_id,
scoped_refptr<VideoFrame> video_frame,
- std::unique_ptr<UnalignedSharedMemory> exif_shm,
- std::unique_ptr<UnalignedSharedMemory> output_shm,
+ base::WritableSharedMemoryMapping exif_mapping,
+ base::WritableSharedMemoryMapping output_mapping,
int quality)
: task_id(task_id),
video_frame(std::move(video_frame)),
- exif_shm(std::move(exif_shm)),
- output_shm(std::move(output_shm)),
+ exif_mapping(std::move(exif_mapping)),
+ output_mapping(std::move(output_mapping)),
quality(quality) {}
VaapiJpegEncodeAccelerator::EncodeRequest::~EncodeRequest() {}
@@ -75,12 +76,11 @@ class VaapiJpegEncodeAccelerator::Encoder {
~Encoder();
// Processes one encode task with DMA-buf.
- void EncodeWithDmaBufTask(
- scoped_refptr<VideoFrame> input_frame,
- scoped_refptr<VideoFrame> output_frame,
- int32_t task_id,
- int quality,
- std::unique_ptr<WritableUnalignedMapping> exif_mapping);
+ void EncodeWithDmaBufTask(scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ int32_t task_id,
+ int quality,
+ base::WritableSharedMemoryMapping exif_mapping);
// Processes one encode |request|.
void EncodeTask(std::unique_ptr<EncodeRequest> request);
@@ -137,7 +137,7 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeWithDmaBufTask(
scoped_refptr<VideoFrame> output_frame,
int32_t task_id,
int quality,
- std::unique_ptr<WritableUnalignedMapping> exif_mapping) {
+ base::WritableSharedMemoryMapping exif_mapping) {
DVLOGF(4);
TRACE_EVENT0("jpeg", "EncodeWithDmaBufTask");
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
@@ -222,9 +222,9 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeWithDmaBufTask(
// Prepare exif.
const uint8_t* exif_buffer = nullptr;
size_t exif_buffer_size = 0;
- if (exif_mapping) {
- exif_buffer = static_cast<const uint8_t*>(exif_mapping->memory());
- exif_buffer_size = exif_mapping->size();
+ if (exif_mapping.IsValid()) {
+ exif_buffer = exif_mapping.GetMemoryAs<uint8_t>();
+ exif_buffer_size = exif_mapping.size();
}
if (!jpeg_encoder_->Encode(input_size, /*exif_buffer=*/nullptr,
@@ -410,9 +410,9 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeTask(
uint8_t* exif_buffer = nullptr;
size_t exif_buffer_size = 0;
- if (request->exif_shm) {
- exif_buffer = static_cast<uint8_t*>(request->exif_shm->memory());
- exif_buffer_size = request->exif_shm->size();
+ if (request->exif_mapping.IsValid()) {
+ exif_buffer = request->exif_mapping.GetMemoryAs<uint8_t>();
+ exif_buffer_size = request->exif_mapping.size();
}
// When the exif buffer contains a thumbnail, the VAAPI encoder would
@@ -435,15 +435,15 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeTask(
size_t encoded_size = 0;
if (!vaapi_wrapper_->DownloadFromVABuffer(
cached_output_buffer_->id(), va_surface_id_,
- static_cast<uint8_t*>(request->output_shm->memory()),
- request->output_shm->size(), &encoded_size)) {
+ request->output_mapping.GetMemoryAs<uint8_t>(),
+ request->output_mapping.size(), &encoded_size)) {
VLOGF(1) << "Failed to retrieve output image from VA coded buffer";
notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
return;
}
// Copy the real exif buffer into preserved space.
- memcpy(static_cast<uint8_t*>(request->output_shm->memory()) + exif_offset,
+ memcpy(request->output_mapping.GetMemoryAs<uint8_t>() + exif_offset,
exif_buffer, exif_buffer_size);
video_frame_ready_cb_.Run(task_id, encoded_size);
@@ -613,20 +613,20 @@ void VaapiJpegEncodeAccelerator::Encode(scoped_refptr<VideoFrame> video_frame,
return;
}
- std::unique_ptr<UnalignedSharedMemory> exif_shm;
+ base::WritableSharedMemoryMapping exif_mapping;
if (exif_buffer) {
- // |exif_shm| will take ownership of the |exif_buffer->region()|.
- exif_shm = std::make_unique<UnalignedSharedMemory>(
- exif_buffer->TakeRegion(), exif_buffer->size(), false);
- if (!exif_shm->MapAt(exif_buffer->offset(), exif_buffer->size())) {
+ base::UnsafeSharedMemoryRegion exif_region = exif_buffer->TakeRegion();
+ exif_mapping =
+ exif_region.MapAt(exif_buffer->offset(), exif_buffer->size());
+ if (!exif_mapping.IsValid()) {
VLOGF(1) << "Failed to map exif buffer";
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError,
weak_this_, task_id, PLATFORM_FAILURE));
return;
}
- if (exif_shm->size() > kMaxMarkerSizeAllowed) {
- VLOGF(1) << "Exif buffer too big: " << exif_shm->size();
+ if (exif_mapping.size() > kMaxMarkerSizeAllowed) {
+ VLOGF(1) << "Exif buffer too big: " << exif_mapping.size();
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError,
weak_this_, task_id, INVALID_ARGUMENT));
@@ -634,10 +634,10 @@ void VaapiJpegEncodeAccelerator::Encode(scoped_refptr<VideoFrame> video_frame,
}
}
- // |output_shm| will take ownership of the |output_buffer.handle()|.
- auto output_shm = std::make_unique<UnalignedSharedMemory>(
- output_buffer.TakeRegion(), output_buffer.size(), false);
- if (!output_shm->MapAt(output_buffer.offset(), output_buffer.size())) {
+ base::UnsafeSharedMemoryRegion output_region = output_buffer.TakeRegion();
+ base::WritableSharedMemoryMapping output_mapping =
+ output_region.MapAt(output_buffer.offset(), output_buffer.size());
+ if (!output_mapping.IsValid()) {
VLOGF(1) << "Failed to map output buffer";
task_runner_->PostTask(
FROM_HERE,
@@ -647,8 +647,8 @@ void VaapiJpegEncodeAccelerator::Encode(scoped_refptr<VideoFrame> video_frame,
}
auto request = std::make_unique<EncodeRequest>(
- task_id, std::move(video_frame), std::move(exif_shm),
- std::move(output_shm), quality);
+ task_id, std::move(video_frame), std::move(exif_mapping),
+ std::move(output_mapping), quality);
encoder_task_runner_->PostTask(
FROM_HERE,
@@ -682,21 +682,20 @@ void VaapiJpegEncodeAccelerator::EncodeWithDmaBuf(
return;
}
- std::unique_ptr<WritableUnalignedMapping> exif_mapping;
+ base::WritableSharedMemoryMapping exif_mapping;
if (exif_buffer) {
- // |exif_mapping| will take ownership of the |exif_buffer->region()|.
- exif_mapping = std::make_unique<WritableUnalignedMapping>(
- base::UnsafeSharedMemoryRegion::Deserialize(exif_buffer->TakeRegion()),
- exif_buffer->size(), exif_buffer->offset());
- if (!exif_mapping->IsValid()) {
+ base::UnsafeSharedMemoryRegion exif_region = exif_buffer->TakeRegion();
+ exif_mapping =
+ exif_region.MapAt(exif_buffer->offset(), exif_buffer->size());
+ if (!exif_mapping.IsValid()) {
LOG(ERROR) << "Failed to map exif buffer";
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError,
weak_this_, task_id, PLATFORM_FAILURE));
return;
}
- if (exif_mapping->size() > kMaxMarkerSizeAllowed) {
- LOG(ERROR) << "Exif buffer too big: " << exif_mapping->size();
+ if (exif_mapping.size() > kMaxMarkerSizeAllowed) {
+ LOG(ERROR) << "Exif buffer too big: " << exif_mapping.size();
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError,
weak_this_, task_id, INVALID_ARGUMENT));
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.h b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.h
index 94c2af1ecab..ab8932c7df6 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.h
@@ -7,12 +7,12 @@
#include <memory>
+#include "base/memory/shared_memory_mapping.h"
#include "base/memory/weak_ptr.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "components/chromeos_camera/jpeg_encode_accelerator.h"
#include "media/base/bitstream_buffer.h"
-#include "media/base/unaligned_shared_memory.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
@@ -63,8 +63,8 @@ class MEDIA_GPU_EXPORT VaapiJpegEncodeAccelerator
struct EncodeRequest {
EncodeRequest(int32_t task_id,
scoped_refptr<VideoFrame> video_frame,
- std::unique_ptr<UnalignedSharedMemory> exif_shm,
- std::unique_ptr<UnalignedSharedMemory> output_shm,
+ base::WritableSharedMemoryMapping exif_mapping,
+ base::WritableSharedMemoryMapping output_mapping,
int quality);
EncodeRequest(const EncodeRequest&) = delete;
@@ -74,8 +74,8 @@ class MEDIA_GPU_EXPORT VaapiJpegEncodeAccelerator
int32_t task_id;
scoped_refptr<VideoFrame> video_frame;
- std::unique_ptr<UnalignedSharedMemory> exif_shm;
- std::unique_ptr<UnalignedSharedMemory> output_shm;
+ base::WritableSharedMemoryMapping exif_mapping;
+ base::WritableSharedMemoryMapping output_mapping;
int quality;
};
diff --git a/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.cc
index 4a8d771574c..e56098a8758 100644
--- a/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.cc
@@ -17,6 +17,8 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/page_size.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
@@ -27,7 +29,6 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/format_utils.h"
-#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_frame.h"
#include "media/base/video_frame_layout.h"
#include "media/base/video_util.h"
@@ -404,14 +405,13 @@ bool VaapiMjpegDecodeAccelerator::OutputPictureVppOnTaskRunner(
void VaapiMjpegDecodeAccelerator::DecodeFromShmTask(
int32_t task_id,
- std::unique_ptr<UnalignedSharedMemory> shm,
+ base::WritableSharedMemoryMapping mapping,
scoped_refptr<VideoFrame> dst_frame) {
DVLOGF(4);
DCHECK(decoder_task_runner_->BelongsToCurrentThread());
TRACE_EVENT0("jpeg", __func__);
- auto src_image =
- base::make_span(static_cast<const uint8_t*>(shm->memory()), shm->size());
+ auto src_image = mapping.GetMemoryAsSpan<uint8_t>();
DecodeImpl(task_id, src_image, std::move(dst_frame));
}
@@ -577,12 +577,10 @@ void VaapiMjpegDecodeAccelerator::Decode(
return;
}
- // UnalignedSharedMemory will take over the |bitstream_buffer.handle()|.
- auto shm = std::make_unique<UnalignedSharedMemory>(
- bitstream_buffer.TakeRegion(), bitstream_buffer.size(),
- false /* read_only */);
-
- if (!shm->MapAt(bitstream_buffer.offset(), bitstream_buffer.size())) {
+ auto region = bitstream_buffer.TakeRegion();
+ auto mapping =
+ region.MapAt(bitstream_buffer.offset(), bitstream_buffer.size());
+ if (!mapping.IsValid()) {
VLOGF(1) << "Failed to map input buffer";
NotifyError(bitstream_buffer.id(), UNREADABLE_INPUT);
return;
@@ -593,7 +591,7 @@ void VaapiMjpegDecodeAccelerator::Decode(
decoder_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VaapiMjpegDecodeAccelerator::DecodeFromShmTask,
base::Unretained(this), bitstream_buffer.id(),
- std::move(shm), std::move(video_frame)));
+ std::move(mapping), std::move(video_frame)));
}
void VaapiMjpegDecodeAccelerator::Decode(int32_t task_id,
diff --git a/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.h b/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.h
index 0821d5c8867..9d420769dde 100644
--- a/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.h
@@ -11,6 +11,7 @@
#include "base/containers/span.h"
#include "base/memory/scoped_refptr.h"
+#include "base/memory/shared_memory_mapping.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread.h"
#include "components/chromeos_camera/mjpeg_decode_accelerator.h"
@@ -26,7 +27,6 @@ namespace media {
class BitstreamBuffer;
class ScopedVAImage;
-class UnalignedSharedMemory;
class VaapiWrapper;
class VideoFrame;
@@ -75,7 +75,7 @@ class MEDIA_GPU_EXPORT VaapiMjpegDecodeAccelerator
// Processes one decode request.
void DecodeFromShmTask(int32_t task_id,
- std::unique_ptr<UnalignedSharedMemory> shm,
+ base::WritableSharedMemoryMapping mapping,
scoped_refptr<VideoFrame> dst_frame);
void DecodeFromDmaBufTask(int32_t task_id,
base::ScopedFD src_dmabuf_fd,
diff --git a/chromium/media/gpu/vaapi/vaapi_unittest.cc b/chromium/media/gpu/vaapi/vaapi_unittest.cc
index acab17cb060..0c79566591d 100644
--- a/chromium/media/gpu/vaapi/vaapi_unittest.cc
+++ b/chromium/media/gpu/vaapi/vaapi_unittest.cc
@@ -37,6 +37,7 @@
#include "media/base/media_switches.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
#include "media/media_buildflags.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "ui/gfx/linux/gbm_defines.h"
@@ -54,10 +55,10 @@ absl::optional<VAProfile> ConvertToVAProfile(VideoCodecProfile profile) {
{VP9PROFILE_PROFILE0, VAProfileVP9Profile0},
{VP9PROFILE_PROFILE2, VAProfileVP9Profile2},
{AV1PROFILE_PROFILE_MAIN, VAProfileAV1Profile0},
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
{HEVCPROFILE_MAIN, VAProfileHEVCMain},
{HEVCPROFILE_MAIN10, VAProfileHEVCMain10},
-#endif
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
};
auto it = kProfileMap.find(profile);
return it != kProfileMap.end() ? absl::make_optional<VAProfile>(it->second)
@@ -79,10 +80,10 @@ absl::optional<VAProfile> StringToVAProfile(const std::string& va_profile) {
{"VAProfileVP9Profile0", VAProfileVP9Profile0},
{"VAProfileVP9Profile2", VAProfileVP9Profile2},
{"VAProfileAV1Profile0", VAProfileAV1Profile0},
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
{"VAProfileHEVCMain", VAProfileHEVCMain},
{"VAProfileHEVCMain10", VAProfileHEVCMain10},
-#endif
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
#if BUILDFLAG(USE_CHROMEOS_PROTECTED_MEDIA)
{"VAProfileProtected", VAProfileProtected},
#endif // BUILDFLAG(USE_CHROMEOS_PROTECTED_MEDIA)
@@ -338,11 +339,19 @@ TEST_F(VaapiTest, GetSupportedEncodeProfiles) {
// Verifies that VAProfileProtected is indeed supported by the command line
// vainfo utility.
TEST_F(VaapiTest, VaapiProfileProtected) {
- const auto va_info = RetrieveVAInfoOutput();
-
- EXPECT_TRUE(base::Contains(va_info.at(VAProfileProtected),
- VAEntrypointProtectedContent))
- << ", va profile: " << vaProfileStr(VAProfileProtected);
+ VAImplementation impl = VaapiWrapper::GetImplementationType();
+ // VAProfileProtected is only used in the Intel iHD implementation. AMD does
+ // not need to support that profile (but should be the only other protected
+ // content VAAPI implementation).
+ if (impl == VAImplementation::kIntelIHD) {
+ const auto va_info = RetrieveVAInfoOutput();
+
+ EXPECT_TRUE(base::Contains(va_info.at(VAProfileProtected),
+ VAEntrypointProtectedContent))
+ << ", va profile: " << vaProfileStr(VAProfileProtected);
+ } else {
+ EXPECT_EQ(impl, VAImplementation::kMesaGallium);
+ }
}
#endif // BUILDFLAG(USE_CHROMEOS_PROTECTED_MEDIA)
@@ -506,12 +515,7 @@ TEST_F(VaapiTest, CheckSupportedSVCScalabilityModes) {
VaapiWrapper::GetSupportedScalabilityModes(
H264PROFILE_BASELINE, VAProfileH264ConstrainedBaseline);
#if BUILDFLAG(IS_CHROMEOS)
- // TODO(b/199487660): Enable H.264 temporal layer encoding on AMD once their
- // drivers support them.
- const auto implementation = VaapiWrapper::GetImplementationType();
- if (base::FeatureList::IsEnabled(kVaapiH264TemporalLayerHWEncoding) &&
- (implementation == VAImplementation::kIntelI965 ||
- implementation == VAImplementation::kIntelIHD)) {
+ if (base::FeatureList::IsEnabled(kVaapiH264TemporalLayerHWEncoding)) {
EXPECT_EQ(scalability_modes_h264_baseline, kSupportedTemporalSVC);
} else {
EXPECT_TRUE(scalability_modes_h264_baseline.empty());
@@ -644,8 +648,11 @@ TEST_P(VaapiMinigbmTest, AllocateAndCompareWithMinigbm) {
const gfx::Size resolution = std::get<2>(GetParam());
// TODO(b/187852384): enable the other backends.
- if (VaapiWrapper::GetImplementationType() != VAImplementation::kIntelIHD)
+ const auto backend = VaapiWrapper::GetImplementationType();
+ if (!(backend == VAImplementation::kIntelIHD ||
+ backend == VAImplementation::kMesaGallium)) {
GTEST_SKIP() << "backend not supported";
+ }
ASSERT_NE(va_rt_format, kInvalidVaRtFormat);
if (!VaapiWrapper::IsDecodeSupported(va_profile))
@@ -655,6 +662,11 @@ TEST_P(VaapiMinigbmTest, AllocateAndCompareWithMinigbm) {
va_rt_format)) {
GTEST_SKIP() << VARTFormatToString(va_rt_format) << " not supported.";
}
+ // TODO(b/200817282): Fix high-bit depth formats on AMD Gallium impl.
+ if (backend == VAImplementation::kMesaGallium &&
+ va_rt_format == VA_RT_FORMAT_YUV420_10) {
+ GTEST_SKIP() << vaProfileStr(va_profile) << " fails on AMD, skipping.";
+ }
gfx::Size minimum_supported_size;
ASSERT_TRUE(VaapiWrapper::GetDecodeMinResolution(va_profile,
@@ -704,8 +716,8 @@ TEST_P(VaapiMinigbmTest, AllocateAndCompareWithMinigbm) {
ASSERT_EQ(va_res, VA_STATUS_SUCCESS);
}
- // Verify some expected properties of the allocated VASurface. We expect a
- // single |object|, with a number of |layers| of the same |pitch|.
+ // Verify some expected properties of the allocated VASurface. We expect one
+ // or two |object|s, with a number of |layers| of the same |pitch|.
EXPECT_EQ(scoped_va_surface->size(),
gfx::Size(base::checked_cast<int>(va_descriptor.width),
base::checked_cast<int>(va_descriptor.height)));
@@ -715,18 +727,28 @@ TEST_P(VaapiMinigbmTest, AllocateAndCompareWithMinigbm) {
EXPECT_EQ(va_descriptor.fourcc, va_fourcc)
<< FourccToString(va_descriptor.fourcc)
<< " != " << FourccToString(va_fourcc);
- EXPECT_EQ(va_descriptor.num_objects, 1u);
+ EXPECT_THAT(va_descriptor.num_objects, ::testing::AnyOf(1, 2));
// TODO(mcasas): consider comparing |size| with a better estimate of the
// |scoped_va_surface| memory footprint (e.g. including planes and format).
EXPECT_GE(va_descriptor.objects[0].size,
base::checked_cast<uint32_t>(scoped_va_surface->size().GetArea()));
+ if (va_descriptor.num_objects == 2) {
+ const int uv_width = (scoped_va_surface->size().width() + 1) / 2;
+ const int uv_height = (scoped_va_surface->size().height() + 1) / 2;
+ EXPECT_GE(va_descriptor.objects[1].size,
+ base::checked_cast<uint32_t>(2 * uv_width * uv_height));
+ }
+ const auto expected_drm_modifier =
+ backend == VAImplementation::kIntelIHD ? I915_FORMAT_MOD_Y_TILED : 0x0;
EXPECT_EQ(va_descriptor.objects[0].drm_format_modifier,
- I915_FORMAT_MOD_Y_TILED);
+ expected_drm_modifier);
// TODO(mcasas): |num_layers| actually depends on |va_descriptor.va_fourcc|.
EXPECT_EQ(va_descriptor.num_layers, 2u);
for (uint32_t i = 0; i < va_descriptor.num_layers; ++i) {
EXPECT_EQ(va_descriptor.layers[i].num_planes, 1u);
- EXPECT_EQ(va_descriptor.layers[i].object_index[0], 0u);
+ const uint32_t expected_object_index =
+ (va_descriptor.num_objects == 1) ? 0 : i;
+ EXPECT_EQ(va_descriptor.layers[i].object_index[0], expected_object_index);
DVLOG(2) << "plane " << i
<< ", pitch: " << va_descriptor.layers[i].pitch[0];
@@ -766,7 +788,7 @@ TEST_P(VaapiMinigbmTest, AllocateAndCompareWithMinigbm) {
version->name,
base::checked_cast<std::string::size_type>(version->name_len));
drmFreeVersion(version);
- if (base::LowerCaseEqualsASCII(version_name, "vgem"))
+ if (base::EqualsCaseInsensitiveASCII(version_name, "vgem"))
continue;
gbm = gbm_create_device(drm_fd.GetPlatformFile());
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
index baaf2ae468d..3a07fa21be8 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
@@ -31,7 +31,6 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/format_utils.h"
#include "media/base/media_log.h"
-#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_util.h"
#include "media/gpu/accelerated_video_decoder.h"
#include "media/gpu/h264_decoder.h"
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
index 4d229d6fec4..0cd77859cbe 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
@@ -278,9 +278,7 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<TestParams>,
1, picture_size, _))
.WillOnce(RunClosure(run_loop.QuitClosure()));
- auto region = base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- in_shm_.Duplicate());
- BitstreamBuffer bitstream_buffer(bitstream_id, std::move(region),
+ BitstreamBuffer bitstream_buffer(bitstream_id, in_shm_.Duplicate(),
kInputSize);
QueueInputBuffer(std::move(bitstream_buffer));
@@ -372,10 +370,8 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<TestParams>,
EXPECT_CALL(*this, NotifyEndOfBitstreamBuffer(bitstream_id))
.WillOnce(RunClosure(run_loop.QuitClosure()));
- auto region = base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- in_shm_.Duplicate());
QueueInputBuffer(
- BitstreamBuffer(bitstream_id, std::move(region), kInputSize));
+ BitstreamBuffer(bitstream_id, in_shm_.Duplicate(), kInputSize));
run_loop.Run();
}
@@ -441,9 +437,8 @@ TEST_P(VaapiVideoDecodeAcceleratorTest,
QueueInputBufferAndErrorWhenVDAUninitialized) {
SetVdaStateToUnitialized();
- auto region = base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- in_shm_.Duplicate());
- BitstreamBuffer bitstream_buffer(kBitstreamId, std::move(region), kInputSize);
+ BitstreamBuffer bitstream_buffer(kBitstreamId, in_shm_.Duplicate(),
+ kInputSize);
EXPECT_CALL(*this,
NotifyError(VaapiVideoDecodeAccelerator::PLATFORM_FAILURE));
@@ -452,9 +447,8 @@ TEST_P(VaapiVideoDecodeAcceleratorTest,
// Verifies that Decode() returning kDecodeError ends up pinging NotifyError().
TEST_P(VaapiVideoDecodeAcceleratorTest, QueueInputBufferAndDecodeError) {
- auto region = base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- in_shm_.Duplicate());
- BitstreamBuffer bitstream_buffer(kBitstreamId, std::move(region), kInputSize);
+ BitstreamBuffer bitstream_buffer(kBitstreamId, in_shm_.Duplicate(),
+ kInputSize);
base::RunLoop run_loop;
EXPECT_CALL(*mock_decoder_,
@@ -473,9 +467,8 @@ TEST_P(VaapiVideoDecodeAcceleratorTest, QueueVP9Profile2AndError) {
if (GetParam().video_codec != VP9PROFILE_PROFILE2)
GTEST_SKIP() << "The test parameter is not vp9 profile 2";
- auto region = base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- in_shm_.Duplicate());
- BitstreamBuffer bitstream_buffer(kBitstreamId, std::move(region), kInputSize);
+ BitstreamBuffer bitstream_buffer(kBitstreamId, in_shm_.Duplicate(),
+ kInputSize);
base::RunLoop run_loop;
EXPECT_CALL(*mock_decoder_,
SetStream(_, IsExpectedDecoderBuffer(kInputSize, nullptr)))
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decoder.cc b/chromium/media/gpu/vaapi/vaapi_video_decoder.cc
index 47c682edbc4..0ef047f707d 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decoder.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_decoder.cc
@@ -41,9 +41,9 @@
#include "media/media_buildflags.h"
#include "ui/gfx/buffer_format_util.h"
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
#include "media/gpu/vaapi/h265_vaapi_video_decoder_delegate.h"
-#endif
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
#if BUILDFLAG(IS_CHROMEOS_ASH)
// gn check does not account for BUILDFLAG(), so including these headers will
@@ -110,7 +110,8 @@ std::unique_ptr<VideoDecoderMixin> VaapiVideoDecoder::Create(
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
base::WeakPtr<VideoDecoderMixin::Client> client) {
const bool can_create_decoder =
- num_instances_.Increment() < kMaxNumOfInstances;
+ num_instances_.Increment() < kMaxNumOfInstances ||
+ !base::FeatureList::IsEnabled(media::kLimitConcurrentDecoderInstances);
if (!can_create_decoder) {
num_instances_.Decrement();
return nullptr;
@@ -273,14 +274,6 @@ void VaapiVideoDecoder::Initialize(const VideoDecoderConfig& config,
transcryption_ = (VaapiWrapper::GetImplementationType() ==
VAImplementation::kMesaGallium);
#endif
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
- } else if (config.codec() == VideoCodec::kHEVC &&
- !base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableClearHevcForTesting)) {
- SetErrorState("clear HEVC content is not supported");
- std::move(init_cb).Run(DecoderStatus::Codes::kUnsupportedEncryptionMode);
- return;
-#endif
}
// Initialize VAAPI wrapper.
@@ -795,8 +788,7 @@ void VaapiVideoDecoder::ApplyResolutionChangeWithScreenSizes(
// TODO(b/203240043): Create a GMB directly instead of allocating a
// VideoFrame.
scoped_refptr<VideoFrame> dummy_frame = CreateGpuMemoryBufferVideoFrame(
- /*gpu_memory_buffer_factory=*/nullptr, *format, decoder_pic_size,
- decoder_visible_rect, decoder_natural_size,
+ *format, decoder_pic_size, decoder_visible_rect, decoder_natural_size,
/*timestamp=*/base::TimeDelta(),
cdm_context_ref_ ? gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE
: gfx::BufferUsage::SCANOUT_VDA_WRITE);
@@ -840,7 +832,6 @@ void VaapiVideoDecoder::ApplyResolutionChangeWithScreenSizes(
CroStatus::Or<scoped_refptr<VideoFrame>>
VaapiVideoDecoder::AllocateCustomFrameProxy(
base::WeakPtr<VaapiVideoDecoder> decoder,
- gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -850,13 +841,12 @@ VaapiVideoDecoder::AllocateCustomFrameProxy(
base::TimeDelta timestamp) {
if (!decoder)
return CroStatus::Codes::kFailedToCreateVideoFrame;
- return decoder->AllocateCustomFrame(
- gpu_memory_buffer_factory, format, coded_size, visible_rect, natural_size,
- use_protected, use_linear_buffers, timestamp);
+ return decoder->AllocateCustomFrame(format, coded_size, visible_rect,
+ natural_size, use_protected,
+ use_linear_buffers, timestamp);
}
CroStatus::Or<scoped_refptr<VideoFrame>> VaapiVideoDecoder::AllocateCustomFrame(
- gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -881,11 +871,8 @@ CroStatus::Or<scoped_refptr<VideoFrame>> VaapiVideoDecoder::AllocateCustomFrame(
if (!pixmap_and_info)
return CroStatus::Codes::kFailedToCreateVideoFrame;
- // Increase this one every time this method is called.
- static int gmb_id = 0;
- CHECK_LT(gmb_id, std::numeric_limits<int>::max());
gfx::GpuMemoryBufferHandle gmb_handle;
- auto handle_id = gfx::GpuMemoryBufferId(gmb_id++);
+ auto handle_id = GetNextGpuMemoryBufferId();
gmb_handle.id = handle_id;
gmb_handle.type = gfx::GpuMemoryBufferType::NATIVE_PIXMAP;
gmb_handle.native_pixmap_handle = pixmap_and_info->pixmap->ExportHandle();
@@ -1123,7 +1110,7 @@ VaapiStatus VaapiVideoDecoder::CreateAcceleratedVideoDecoder() {
decoder_ = std::make_unique<VP9Decoder>(std::move(accelerator), profile_,
color_space_);
}
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
else if (profile_ >= HEVCPROFILE_MIN && profile_ <= HEVCPROFILE_MAX) {
auto accelerator = std::make_unique<H265VaapiVideoDecoderDelegate>(
this, vaapi_wrapper_, std::move(protected_update_cb),
@@ -1134,7 +1121,7 @@ VaapiStatus VaapiVideoDecoder::CreateAcceleratedVideoDecoder() {
decoder_ = std::make_unique<H265Decoder>(std::move(accelerator), profile_,
color_space_);
}
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
else if (profile_ >= AV1PROFILE_MIN && profile_ <= AV1PROFILE_MAX) {
auto accelerator = std::make_unique<AV1VaapiVideoDecoderDelegate>(
this, vaapi_wrapper_, std::move(protected_update_cb),
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decoder.h b/chromium/media/gpu/vaapi/vaapi_video_decoder.h
index f7462189db4..7c2e40a4578 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decoder.h
+++ b/chromium/media/gpu/vaapi/vaapi_video_decoder.h
@@ -36,10 +36,6 @@
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/hdr_metadata.h"
-namespace gpu {
-class GpuMemoryBufferFactory;
-} // namespace gpu
-
namespace media {
class AcceleratedVideoDecoder;
@@ -176,7 +172,6 @@ class VaapiVideoDecoder : public VideoDecoderMixin,
// Private static helper to allow using weak ptr instead of an unretained ptr.
static CroStatus::Or<scoped_refptr<VideoFrame>> AllocateCustomFrameProxy(
base::WeakPtr<VaapiVideoDecoder> decoder,
- gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -189,7 +184,6 @@ class VaapiVideoDecoder : public VideoDecoderMixin,
// only used on linux, it also sets the required YCbCr information for the
// frame it creates.
CroStatus::Or<scoped_refptr<VideoFrame>> AllocateCustomFrame(
- gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
index 5d25b14694b..0d571e9c985 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
@@ -22,6 +22,8 @@
#include "base/cxx17_backports.h"
#include "base/feature_list.h"
#include "base/memory/ptr_util.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/task/task_traits.h"
@@ -34,7 +36,6 @@
#include "media/base/format_utils.h"
#include "media/base/media_log.h"
#include "media/base/media_switches.h"
-#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_bitrate_allocation.h"
#include "media/gpu/chromeos/platform_video_frame_utils.h"
#include "media/gpu/gpu_video_encode_accelerator_helpers.h"
@@ -101,19 +102,8 @@ std::unique_ptr<ScopedVASurface> CreateScopedSurface(
VaapiWrapper& vaapi_wrapper,
const gfx::Size& encode_size,
const std::vector<VaapiWrapper::SurfaceUsageHint>& surface_usage_hints) {
- // iHD driver doesn't align a resolution for encoding properly. Align it only
- // with encoder driver.
- // TODO(https://github.com/intel/media-driver/issues/1232): Remove this
- // workaround of aligning |encode_size|.
- gfx::Size surface_size = encode_size;
- if (!base::Contains(surface_usage_hints,
- VaapiWrapper::SurfaceUsageHint::kVideoProcessWrite)) {
- surface_size = gfx::Size(base::bits::AlignUp(encode_size.width(), 16u),
- base::bits::AlignUp(encode_size.height(), 16u));
- }
-
auto surfaces = vaapi_wrapper.CreateScopedVASurfaces(
- kVaSurfaceFormat, surface_size, surface_usage_hints, 1u,
+ kVaSurfaceFormat, encode_size, surface_usage_hints, 1u,
/*visible_size=*/absl::nullopt,
/*va_fourcc=*/absl::nullopt);
return surfaces.empty() ? nullptr : std::move(surfaces.front());
@@ -129,13 +119,10 @@ struct VaapiVideoEncodeAccelerator::InputFrameRef {
struct VaapiVideoEncodeAccelerator::BitstreamBufferRef {
BitstreamBufferRef(int32_t id, BitstreamBuffer buffer)
- : id(id),
- shm(std::make_unique<UnalignedSharedMemory>(buffer.TakeRegion(),
- buffer.size(),
- false)),
- offset(buffer.offset()) {}
+ : id(id), shm_region(buffer.TakeRegion()), offset(buffer.offset()) {}
const int32_t id;
- const std::unique_ptr<UnalignedSharedMemory> shm;
+ base::UnsafeSharedMemoryRegion shm_region;
+ base::WritableSharedMemoryMapping shm_mapping;
const off_t offset;
};
@@ -358,35 +345,34 @@ void VaapiVideoEncodeAccelerator::InitializeTask(const Config& config) {
},
base::Unretained(this));
- VaapiVideoEncoderDelegate::Config ave_config{.native_input_mode =
- native_input_mode_};
+ VaapiVideoEncoderDelegate::Config ave_config{};
switch (output_codec_) {
case VideoCodec::kH264:
if (!IsConfiguredForTesting()) {
encoder_ = std::make_unique<H264VaapiVideoEncoderDelegate>(
vaapi_wrapper_, error_cb);
+ // HW encoders on Intel GPUs will not put the average QP in the slice/tile
+ // header when not operating in CQP mode. Currently only H264 operates in
+ // non-CQP mode.
+ if (VaapiWrapper::GetImplementationType() ==
+ VAImplementation::kIntelI965 ||
+ VaapiWrapper::GetImplementationType() ==
+ VAImplementation::kIntelIHD) {
+ encoder_info_.reports_average_qp = false;
+ }
}
-
- DCHECK_EQ(ave_config.bitrate_control,
- VaapiVideoEncoderDelegate::BitrateControl::kConstantBitrate);
break;
case VideoCodec::kVP8:
if (!IsConfiguredForTesting()) {
encoder_ = std::make_unique<VP8VaapiVideoEncoderDelegate>(
vaapi_wrapper_, error_cb);
}
-
- ave_config.bitrate_control = VaapiVideoEncoderDelegate::BitrateControl::
- kConstantQuantizationParameter;
break;
case VideoCodec::kVP9:
if (!IsConfiguredForTesting()) {
encoder_ = std::make_unique<VP9VaapiVideoEncoderDelegate>(
vaapi_wrapper_, error_cb);
}
-
- ave_config.bitrate_control = VaapiVideoEncoderDelegate::BitrateControl::
- kConstantQuantizationParameter;
break;
default:
NOTREACHED() << "Unsupported codec type " << GetCodecName(output_codec_);
@@ -472,7 +458,8 @@ void VaapiVideoEncodeAccelerator::RecycleVASurface(
return;
}
- EncodePendingInputs();
+ if (!input_queue_.empty())
+ EncodePendingInputs();
}
void VaapiVideoEncodeAccelerator::TryToReturnBitstreamBuffers() {
@@ -481,8 +468,10 @@ void VaapiVideoEncodeAccelerator::TryToReturnBitstreamBuffers() {
if (state_ != kEncoding)
return;
- TRACE_EVENT1("media,gpu", "VAVEA::TryToReturnBitstreamBuffers",
- "pending encode results", pending_encode_results_.size());
+ TRACE_EVENT2("media,gpu", "VAVEA::TryToReturnBitstreamBuffers",
+ "pending encode results", pending_encode_results_.size(),
+ "available bitstream buffers",
+ available_bitstream_buffers_.size());
while (!pending_encode_results_.empty()) {
if (pending_encode_results_.front() == nullptr) {
// A null job indicates a flush command.
@@ -510,13 +499,13 @@ void VaapiVideoEncodeAccelerator::ReturnBitstreamBuffer(
std::unique_ptr<EncodeResult> encode_result,
std::unique_ptr<BitstreamBufferRef> buffer) {
DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
- uint8_t* target_data = static_cast<uint8_t*>(buffer->shm->memory());
+ uint8_t* target_data = buffer->shm_mapping.GetMemoryAs<uint8_t>();
size_t data_size = 0;
// vaSyncSurface() is not necessary because GetEncodedChunkSize() has been
// called in VaapiVideoEncoderDelegate::Encode().
if (!vaapi_wrapper_->DownloadFromVABuffer(
encode_result->coded_buffer_id(), /*sync_surface_id=*/absl::nullopt,
- target_data, buffer->shm->size(), &data_size)) {
+ target_data, buffer->shm_region.GetSize(), &data_size)) {
NOTIFY_ERROR(kPlatformFailureError, "Failed downloading coded buffer");
return;
}
@@ -551,6 +540,20 @@ void VaapiVideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
DCHECK_NE(state_, kUninitialized);
+ if (frame) {
+ // A null |frame| indicates a flush; only validate storage for real frames.
+ const bool is_expected_storage_type =
+ native_input_mode_
+ ? frame->storage_type() == VideoFrame::STORAGE_GPU_MEMORY_BUFFER
+ : frame->IsMappable();
+ if (!is_expected_storage_type) {
+ NOTIFY_ERROR(kInvalidArgumentError,
+ "Unexpected storage: " << VideoFrame::StorageTypeToString(
+ frame->storage_type()));
+ return;
+ }
+ }
+
input_queue_.push(
std::make_unique<InputFrameRef>(std::move(frame), force_keyframe));
EncodePendingInputs();
@@ -563,14 +566,9 @@ bool VaapiVideoEncodeAccelerator::CreateSurfacesForGpuMemoryBufferEncoding(
std::vector<scoped_refptr<VASurface>>* reconstructed_surfaces) {
DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
DCHECK(native_input_mode_);
+ DCHECK_EQ(frame.storage_type(), VideoFrame::STORAGE_GPU_MEMORY_BUFFER);
TRACE_EVENT0("media,gpu", "VAVEA::CreateSurfacesForGpuMemoryBuffer");
- if (frame.storage_type() != VideoFrame::STORAGE_GPU_MEMORY_BUFFER) {
- NOTIFY_ERROR(kPlatformFailureError,
- "Unexpected storage: "
- << VideoFrame::StorageTypeToString(frame.storage_type()));
- return false;
- }
if (frame.format() != PIXEL_FORMAT_NV12) {
NOTIFY_ERROR(
kPlatformFailureError,
@@ -668,6 +666,12 @@ bool VaapiVideoEncodeAccelerator::CreateSurfacesForShmemEncoding(
return false;
}
+ if (!vaapi_wrapper_->UploadVideoFrameToSurface(frame, (*input_surface)->id(),
+ (*input_surface)->size())) {
+ NOTIFY_ERROR(kPlatformFailureError, "Failed to upload frame");
+ return false;
+ }
+
*reconstructed_surface = CreateEncodeSurface(encode_size);
return !!*reconstructed_surface;
}
@@ -792,12 +796,11 @@ scoped_refptr<VASurface> VaapiVideoEncodeAccelerator::ExecuteBlitSurface(
std::unique_ptr<VaapiVideoEncoderDelegate::EncodeJob>
VaapiVideoEncodeAccelerator::CreateEncodeJob(
- scoped_refptr<VideoFrame> frame,
bool force_keyframe,
+ base::TimeDelta frame_timestamp,
const VASurface& input_surface,
scoped_refptr<VASurface> reconstructed_surface) {
DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
- DCHECK(frame);
DCHECK_NE(input_surface.id(), VA_INVALID_ID);
DCHECK(!input_surface.size().IsEmpty());
DCHECK(reconstructed_surface);
@@ -829,8 +832,8 @@ VaapiVideoEncodeAccelerator::CreateEncodeJob(
return nullptr;
}
- return std::make_unique<EncodeJob>(frame, force_keyframe, input_surface.id(),
- input_surface.size(), std::move(picture),
+ return std::make_unique<EncodeJob>(force_keyframe, frame_timestamp,
+ input_surface.id(), std::move(picture),
std::move(coded_buffer));
}
@@ -845,8 +848,10 @@ void VaapiVideoEncodeAccelerator::EncodePendingInputs() {
return;
}
+ TRACE_EVENT1("media,gpu", "VAVEA::EncodePendingInputs",
+ "pending input frames", input_queue_.size());
while (state_ == kEncoding && !input_queue_.empty()) {
- const std::unique_ptr<InputFrameRef>& input_frame = input_queue_.front();
+ std::unique_ptr<InputFrameRef>& input_frame = input_queue_.front();
if (!input_frame) {
// If this is a flush (null) frame, don't create/submit a new encode
// result for it, but forward a null result to the
@@ -887,7 +892,7 @@ void VaapiVideoEncodeAccelerator::EncodePendingInputs() {
TRACE_EVENT0("media,gpu", "VAVEA::FromCreateEncodeJobToReturn");
const bool force_key =
(spatial_idx == 0 ? input_frame->force_keyframe : false);
- job = CreateEncodeJob(input_frame->frame, force_key,
+ job = CreateEncodeJob(force_key, input_frame->frame->timestamp(),
*input_surfaces[spatial_idx],
std::move(reconstructed_surfaces[spatial_idx]));
if (!job)
@@ -904,6 +909,17 @@ void VaapiVideoEncodeAccelerator::EncodePendingInputs() {
}
}
+ // Invalidate |input_frame| here; this notifies the client that
+ // |input_frame->frame| can be reused for future encoding.
+ // If the frame is copied (|native_input_mode_| == false), releasing
+ // |input_frame| is clearly safe. If the frame is imported
+ // (|native_input_mode_| == true), write operations to the frame are
+ // blocked on DMA_BUF_IOCTL_SYNC because the VA-API driver protects the
+ // buffer through the DRM driver until encoding is complete, i.e., until
+ // vaMapBuffer() on the coded buffer returns.
+ input_frame.reset();
+ input_queue_.pop();
+
for (auto&& job : jobs) {
TRACE_EVENT0("media,gpu", "VAVEA::GetEncodeResult");
std::unique_ptr<EncodeResult> result =
@@ -917,7 +933,6 @@ void VaapiVideoEncodeAccelerator::EncodePendingInputs() {
}
TryToReturnBitstreamBuffers();
- input_queue_.pop();
}
}
@@ -945,7 +960,9 @@ void VaapiVideoEncodeAccelerator::UseOutputBitstreamBufferTask(
DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
DCHECK_NE(state_, kUninitialized);
- if (!buffer_ref->shm->MapAt(buffer_ref->offset, buffer_ref->shm->size())) {
+ buffer_ref->shm_mapping = buffer_ref->shm_region.MapAt(
+ buffer_ref->offset, buffer_ref->shm_region.GetSize());
+ if (!buffer_ref->shm_mapping.IsValid()) {
NOTIFY_ERROR(kPlatformFailureError, "Failed mapping shared memory.");
return;
}
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
index 5354619192b..a00720a170f 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
@@ -178,8 +178,8 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
// are available, and if so, claims them by associating them with
// a EncodeJob, and returns the newly-created job, nullptr otherwise.
std::unique_ptr<EncodeJob> CreateEncodeJob(
- scoped_refptr<VideoFrame> frame,
bool force_keyframe,
+ base::TimeDelta frame_timestamp,
const VASurface& input_surface,
scoped_refptr<VASurface> reconstructed_surface);
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator_unittest.cc b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator_unittest.cc
index b4b78925c0a..71a48b750fc 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator_unittest.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator_unittest.cc
@@ -12,9 +12,11 @@
#include "base/run_loop.h"
#include "base/test/gmock_callback_support.h"
#include "base/test/task_environment.h"
+#include "build/chromeos_buildflags.h"
#include "media/base/media_util.h"
#include "media/gpu/gpu_video_encode_accelerator_helpers.h"
#include "media/gpu/vaapi/vaapi_utils.h"
+#include "media/gpu/vaapi/vaapi_video_encoder_delegate.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
#include "media/gpu/vaapi/vp9_vaapi_video_encoder_delegate.h"
#include "media/gpu/vp9_picture.h"
@@ -40,6 +42,7 @@ constexpr Bitrate kDefaultBitrate =
Bitrate::ConstantBitrate(kDefaultBitrateBps);
constexpr uint32_t kDefaultFramerate = 30;
constexpr size_t kMaxNumOfRefFrames = 3u;
+
constexpr int kSpatialLayersResolutionDenom[][3] = {
{1, 0, 0}, // For one spatial layer.
{2, 1, 0}, // For two spatial layers.
@@ -96,16 +99,6 @@ bool IsSVCSupported(const VideoEncodeAccelerator::Config& config) {
config.output_profile == VP9PROFILE_PROFILE0;
}
-MATCHER_P3(MatchesVaapiVideoEncoderDelegateConfig,
- max_ref_frames,
- native_input_mode,
- bitrate_control,
- "") {
- return arg.max_num_ref_frames == max_ref_frames &&
- arg.native_input_mode == native_input_mode &&
- arg.bitrate_control == bitrate_control;
-}
-
MATCHER_P3(MatchesBitstreamBufferMetadata,
payload_size_bytes,
key_frame,
@@ -221,9 +214,6 @@ class MockVP9VaapiVideoEncoderDelegate : public VP9VaapiVideoEncoderDelegate {
bool UpdateRates(const VideoBitrateAllocation&, uint32_t) override {
return false;
}
- void set_native_input_mode(bool native_input_mode) {
- native_input_mode_ = native_input_mode;
- }
};
} // namespace
@@ -308,12 +298,6 @@ class VaapiVideoEncodeAcceleratorTest
NO_THREAD_SAFETY_ANALYSIS {
base::RunLoop run_loop;
::testing::InSequence s;
- constexpr auto kBitrateControl = VaapiVideoEncoderDelegate::BitrateControl::
- kConstantQuantizationParameter;
- const bool native_input_mode =
- config.storage_type.value_or(
- VideoEncodeAccelerator::Config::StorageType::kShmem) ==
- VideoEncodeAccelerator::Config::StorageType::kGpuMemoryBuffer;
const size_t num_spatial_layers = config.spatial_layers.size();
// Scaling is needed only for non highest spatial layer, so here the vpp
// number is |num_spatial_layers - 1|.
@@ -331,16 +315,13 @@ class VaapiVideoEncodeAcceleratorTest
reinterpret_cast<VaapiVideoEncodeAccelerator*>(encoder_.get());
vaapi_encoder->vpp_vaapi_wrapper_ = mock_vpp_vaapi_wrapper_;
- EXPECT_CALL(*mock_encoder_,
- Initialize(_, MatchesVaapiVideoEncoderDelegateConfig(
- kMaxNumOfRefFrames, native_input_mode,
- kBitrateControl)))
- .WillOnce(WithArgs<1>(
- [mock_encoder = mock_encoder_](
- const VaapiVideoEncoderDelegate::Config& ave_config) {
- mock_encoder->set_native_input_mode(ave_config.native_input_mode);
- return true;
- }));
+ EXPECT_CALL(
+ *mock_encoder_,
+ Initialize(_,
+ testing::Field(
+ &VaapiVideoEncoderDelegate::Config::max_num_ref_frames,
+ kMaxNumOfRefFrames)))
+ .WillOnce(Return(true));
EXPECT_CALL(*mock_vaapi_wrapper_, CreateContext(kDefaultEncodeSize))
.WillOnce(Return(true));
EXPECT_CALL(client_, RequireBitstreamBuffers(_, kDefaultEncodeSize, _))
@@ -376,6 +357,11 @@ class VaapiVideoEncodeAcceleratorTest
return va_surfaces;
}));
+ EXPECT_CALL(
+ *mock_vaapi_wrapper_,
+ UploadVideoFrameToSurface(_, kInputSurfaceId, kDefaultEncodeSize))
+ .WillOnce(Return(true));
+
constexpr VASurfaceID kEncodeSurfaceId = 1234;
EXPECT_CALL(*mock_vaapi_wrapper_,
CreateScopedVASurfaces(
@@ -419,10 +405,6 @@ class VaapiVideoEncodeAcceleratorTest
}
return true;
}));
- EXPECT_CALL(
- *mock_vaapi_wrapper_,
- UploadVideoFrameToSurface(_, kInputSurfaceId, kDefaultEncodeSize))
- .WillOnce(Return(true));
EXPECT_CALL(*mock_vaapi_wrapper_,
ExecuteAndDestroyPendingBuffers(kInputSurfaceId))
.WillOnce(Return(true));
@@ -556,16 +538,10 @@ class VaapiVideoEncodeAcceleratorTest
// For reconstructed surface.
if (va_encode_surface_ids_[i].empty()) {
- // TODO(https://github.com/intel/media-driver/issues/1232): Remove this
- // workaround of aligning |encode_size|.
- gfx::Size aligned_size(
- base::bits::AlignUp(svc_resolutions[i].width(), 16),
- base::bits::AlignUp(svc_resolutions[i].height(), 16));
-
EXPECT_CALL(
*mock_vaapi_wrapper_,
CreateScopedVASurfaces(
- VA_RT_FORMAT_YUV420, aligned_size,
+ VA_RT_FORMAT_YUV420, svc_resolutions[i],
std::vector<VaapiWrapper::SurfaceUsageHint>{
VaapiWrapper::SurfaceUsageHint::kVideoEncoder},
_, absl::optional<gfx::Size>(), absl::optional<uint32_t>()))
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encoder_delegate.cc b/chromium/media/gpu/vaapi/vaapi_video_encoder_delegate.cc
index 7826dbad7d5..21e8ca9e82a 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encoder_delegate.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_encoder_delegate.cc
@@ -19,28 +19,26 @@
namespace media {
VaapiVideoEncoderDelegate::EncodeJob::EncodeJob(
- scoped_refptr<VideoFrame> input_frame,
bool keyframe,
+ base::TimeDelta timestamp,
VASurfaceID input_surface_id,
- const gfx::Size& input_surface_size,
scoped_refptr<CodecPicture> picture,
std::unique_ptr<ScopedVABuffer> coded_buffer)
- : input_frame_(input_frame),
- keyframe_(keyframe),
+ : keyframe_(keyframe),
+ timestamp_(timestamp),
input_surface_id_(input_surface_id),
- input_surface_size_(input_surface_size),
picture_(std::move(picture)),
coded_buffer_(std::move(coded_buffer)) {
DCHECK(picture_);
DCHECK(coded_buffer_);
}
-VaapiVideoEncoderDelegate::EncodeJob::EncodeJob(
- scoped_refptr<VideoFrame> input_frame,
- bool keyframe)
- : input_frame_(input_frame),
- keyframe_(keyframe),
- input_surface_id_(VA_INVALID_ID) {}
+VaapiVideoEncoderDelegate::EncodeJob::EncodeJob(bool keyframe,
+ base::TimeDelta timestamp,
+ VASurfaceID input_surface_id)
+ : keyframe_(keyframe),
+ timestamp_(timestamp),
+ input_surface_id_(input_surface_id) {}
VaapiVideoEncoderDelegate::EncodeJob::~EncodeJob() = default;
@@ -51,12 +49,7 @@ VaapiVideoEncoderDelegate::EncodeJob::CreateEncodeResult(
}
base::TimeDelta VaapiVideoEncoderDelegate::EncodeJob::timestamp() const {
- return input_frame_->timestamp();
-}
-
-const scoped_refptr<VideoFrame>&
-VaapiVideoEncoderDelegate::EncodeJob::input_frame() const {
- return input_frame_;
+ return timestamp_;
}
VABufferID VaapiVideoEncoderDelegate::EncodeJob::coded_buffer_id() const {
@@ -67,11 +60,6 @@ VASurfaceID VaapiVideoEncoderDelegate::EncodeJob::input_surface_id() const {
return input_surface_id_;
}
-const gfx::Size& VaapiVideoEncoderDelegate::EncodeJob::input_surface_size()
- const {
- return input_surface_size_;
-}
-
const scoped_refptr<CodecPicture>&
VaapiVideoEncoderDelegate::EncodeJob::picture() const {
return picture_;
@@ -128,15 +116,8 @@ bool VaapiVideoEncoderDelegate::Encode(EncodeJob& encode_job) {
return false;
}
- const VASurfaceID va_surface_id = encode_job.input_surface_id();
- if (!native_input_mode_ && !vaapi_wrapper_->UploadVideoFrameToSurface(
- *encode_job.input_frame(), va_surface_id,
- encode_job.input_surface_size())) {
- VLOGF(1) << "Failed to upload frame";
- return false;
- }
-
- if (!vaapi_wrapper_->ExecuteAndDestroyPendingBuffers(va_surface_id)) {
+ if (!vaapi_wrapper_->ExecuteAndDestroyPendingBuffers(
+ encode_job.input_surface_id())) {
VLOGF(1) << "Failed to execute encode";
return false;
}
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encoder_delegate.h b/chromium/media/gpu/vaapi/vaapi_video_encoder_delegate.h
index 5a7975e1104..995347c55a5 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encoder_delegate.h
+++ b/chromium/media/gpu/vaapi/vaapi_video_encoder_delegate.h
@@ -42,25 +42,12 @@ class VaapiVideoEncoderDelegate {
base::RepeatingClosure error_cb);
virtual ~VaapiVideoEncoderDelegate();
- enum class BitrateControl {
- kConstantBitrate, // Constant Bitrate mode. This class relies on other
- // parts (e.g. driver) to achieve the specified bitrate.
- kConstantQuantizationParameter // Constant Quantization Parameter mode.
- // This class needs to compute a proper
- // quantization parameter and give other
- // parts (e.g. the driver) the value.
- };
-
struct Config {
- // Maxium number of reference frames.
+ // Maximum number of reference frames.
// For H.264 encoding, the value represents the maximum number of reference
// frames for both the reference picture list 0 (bottom 16 bits) and the
// reference picture list 1 (top 16 bits).
size_t max_num_ref_frames;
-
- bool native_input_mode = false;
-
- BitrateControl bitrate_control = BitrateControl::kConstantBitrate;
};
// EncodeResult owns the necessary resource to keep the encoded buffer. The
@@ -91,12 +78,13 @@ class VaapiVideoEncoderDelegate {
// Creates an EncodeJob to encode |input_frame|, which will be executed by
// calling ExecuteSetupCallbacks() in VaapiVideoEncoderDelegate::Encode().
// If |keyframe| is true, requests this job to produce a keyframe.
- EncodeJob(scoped_refptr<VideoFrame> input_frame, bool keyframe);
+ EncodeJob(bool keyframe,
+ base::TimeDelta timestamp,
+ VASurfaceID input_surface_id);
// Constructor for VA-API.
- EncodeJob(scoped_refptr<VideoFrame> input_frame,
- bool keyframe,
+ EncodeJob(bool keyframe,
+ base::TimeDelta timestamp,
VASurfaceID input_surface_id,
- const gfx::Size& input_surface_size,
scoped_refptr<CodecPicture> picture,
std::unique_ptr<ScopedVABuffer> coded_buffer);
@@ -121,25 +109,20 @@ class VaapiVideoEncoderDelegate {
base::TimeDelta timestamp() const;
- const scoped_refptr<VideoFrame>& input_frame() const;
-
// VA-API specific methods.
VABufferID coded_buffer_id() const;
VASurfaceID input_surface_id() const;
- const gfx::Size& input_surface_size() const;
const scoped_refptr<CodecPicture>& picture() const;
private:
- // Input VideoFrame to be encoded.
- const scoped_refptr<VideoFrame> input_frame_;
-
// True if this job is to produce a keyframe.
bool keyframe_;
+ // |timestamp_| to be added to the produced encoded chunk.
+ const base::TimeDelta timestamp_;
// VA-API specific members.
// Input surface ID and size for video frame data or scaled data.
const VASurfaceID input_surface_id_;
- const gfx::Size input_surface_size_;
const scoped_refptr<CodecPicture> picture_;
// Buffer that will contain the output bitstream data for this frame.
std::unique_ptr<ScopedVABuffer> coded_buffer_;
@@ -192,8 +175,6 @@ class VaapiVideoEncoderDelegate {
base::RepeatingClosure error_cb_;
- bool native_input_mode_ = false;
-
SEQUENCE_CHECKER(sequence_checker_);
private:
@@ -203,9 +184,8 @@ class VaapiVideoEncoderDelegate {
virtual bool PrepareEncodeJob(EncodeJob& encode_job) = 0;
// Notifies the encoded chunk size in bytes to update a bitrate controller in
- // VaapiVideoEncoderDelegate. This should be called only if
- // VaapiVideoEncoderDelegate is configured with
- // BitrateControl::kConstantQuantizationParameter.
+ // VaapiVideoEncoderDelegate. This should be called only if constant
+ // quantization encoding is used, which currently is true for VP8 and VP9.
virtual void BitrateControlUpdate(uint64_t encoded_chunk_size_bytes);
};
} // namespace media
diff --git a/chromium/media/gpu/vaapi/vaapi_wrapper.cc b/chromium/media/gpu/vaapi/vaapi_wrapper.cc
index 1fd9df78082..cdbd4fb2c73 100644
--- a/chromium/media/gpu/vaapi/vaapi_wrapper.cc
+++ b/chromium/media/gpu/vaapi/vaapi_wrapper.cc
@@ -347,9 +347,19 @@ bool IsLowPowerIntelProcessor() {
return is_low_power_intel;
}
+bool IsModeDecoding(VaapiWrapper::CodecMode mode) {
+ return mode == VaapiWrapper::CodecMode::kDecode
+#if BUILDFLAG(IS_CHROMEOS_ASH)
+ || mode == VaapiWrapper::CodecMode::kDecodeProtected
+#endif
+ ;
+}
+
bool IsModeEncoding(VaapiWrapper::CodecMode mode) {
return mode == VaapiWrapper::CodecMode::kEncodeConstantBitrate ||
- mode == VaapiWrapper::CodecMode::kEncodeConstantQuantizationParameter;
+ mode ==
+ VaapiWrapper::CodecMode::kEncodeConstantQuantizationParameter ||
+ mode == VaapiWrapper::CodecMode::kEncodeVariableBitrate;
}
bool GetNV12VisibleWidthBytes(int visible_width,
@@ -431,15 +441,15 @@ const ProfileCodecMap& GetProfileCodecMap() {
// VaapiWrapper does not support VP9 Profile 1, see b/153680337.
// {VP9PROFILE_PROFILE1, VAProfileVP9Profile1},
{VP9PROFILE_PROFILE2, VAProfileVP9Profile2},
- // VaapiWrapper does not support Profile 3.
- //{VP9PROFILE_PROFILE3, VAProfileVP9Profile3},
+ // VaapiWrapper does not support Profile 3.
+ //{VP9PROFILE_PROFILE3, VAProfileVP9Profile3},
{AV1PROFILE_PROFILE_MAIN, VAProfileAV1Profile0},
// VaapiWrapper does not support AV1 Profile 1.
// {AV1PROFILE_PROFILE_HIGH, VAProfileAV1Profile1},
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
{HEVCPROFILE_MAIN, VAProfileHEVCMain},
{HEVCPROFILE_MAIN10, VAProfileHEVCMain10},
-#endif
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
});
return *kMediaToVAProfileMap;
}
@@ -569,7 +579,7 @@ void VADisplayState::PreSandboxInitialization() {
version->name,
base::checked_cast<std::string::size_type>(version->name_len));
drmFreeVersion(version);
- if (base::LowerCaseEqualsASCII(version_name, "vgem"))
+ if (base::EqualsCaseInsensitiveASCII(version_name, "vgem"))
continue;
VADisplayState::Get()->SetDrmFd(drm_file.GetPlatformFile());
return;
@@ -808,7 +818,8 @@ std::vector<VAEntrypoint> GetEntryPointsForProfile(const base::Lock* va_lock,
VAEntrypointEncSliceLP}, // kEncodeConstantBitrate.
{VAEntrypointEncSlice,
VAEntrypointEncSliceLP}, // kEncodeConstantQuantizationParameter.
- {VAEntrypointVideoProc} // kVideoProcess.
+ {VAEntrypointEncSlice, VAEntrypointEncSliceLP}, // kEncodeVariableBitrate.
+ {VAEntrypointVideoProc} // kVideoProcess.
};
static_assert(std::size(kAllowedEntryPoints) == VaapiWrapper::kCodecModeMax,
"");
@@ -870,6 +881,8 @@ bool GetRequiredAttribs(const base::Lock* va_lock,
required_attribs->push_back({VAConfigAttribRateControl, VA_RC_CBR});
if (mode == VaapiWrapper::kEncodeConstantQuantizationParameter)
required_attribs->push_back({VAConfigAttribRateControl, VA_RC_CQP});
+ if (mode == VaapiWrapper::kEncodeVariableBitrate)
+ required_attribs->push_back({VAConfigAttribRateControl, VA_RC_VBR});
constexpr VAProfile kSupportedH264VaProfilesForEncoding[] = {
VAProfileH264ConstrainedBaseline, VAProfileH264Main, VAProfileH264High};
@@ -1043,6 +1056,7 @@ void VASupportedProfiles::FillSupportedProfileInfos(base::Lock* va_lock,
#endif
VaapiWrapper::kEncodeConstantBitrate,
VaapiWrapper::kEncodeConstantQuantizationParameter,
+ VaapiWrapper::kEncodeVariableBitrate,
VaapiWrapper::kVideoProcess
};
static_assert(std::size(kWrapperModes) == VaapiWrapper::kCodecModeMax, "");
@@ -1504,12 +1518,7 @@ std::vector<SVCScalabilityMode> VaapiWrapper::GetSupportedScalabilityModes(
}
if (media_profile >= H264PROFILE_MIN && media_profile <= H264PROFILE_MAX) {
- // TODO(b/199487660): Enable H.264 temporal layer encoding on AMD once their
- // drivers support them.
- VAImplementation implementation = VaapiWrapper::GetImplementationType();
- if (base::FeatureList::IsEnabled(kVaapiH264TemporalLayerHWEncoding) &&
- (implementation == VAImplementation::kIntelI965 ||
- implementation == VAImplementation::kIntelIHD)) {
+ if (base::FeatureList::IsEnabled(kVaapiH264TemporalLayerHWEncoding)) {
scalability_modes.push_back(SVCScalabilityMode::kL1T2);
scalability_modes.push_back(SVCScalabilityMode::kL1T3);
}
@@ -1523,9 +1532,7 @@ VideoEncodeAccelerator::SupportedProfiles
VaapiWrapper::GetSupportedEncodeProfiles() {
VideoEncodeAccelerator::SupportedProfiles profiles;
- for (const auto& media_to_va_profile_map_entry : GetProfileCodecMap()) {
- const VideoCodecProfile media_profile = media_to_va_profile_map_entry.first;
- const VAProfile va_profile = media_to_va_profile_map_entry.second;
+ for (const auto& [media_profile, va_profile] : GetProfileCodecMap()) {
DCHECK(va_profile != VAProfileNone);
const VASupportedProfiles::ProfileInfo* profile_info =
@@ -1543,6 +1550,8 @@ VaapiWrapper::GetSupportedEncodeProfiles() {
constexpr int kMaxEncoderFramerate = 30;
profile.max_framerate_numerator = kMaxEncoderFramerate;
profile.max_framerate_denominator = 1;
+ // TODO(b/193680666): remove hard-coding when VBR is supported
+ profile.rate_control_modes = media::VideoEncodeAccelerator::kConstantMode;
profile.scalability_modes =
GetSupportedScalabilityModes(media_profile, va_profile);
profiles.push_back(profile);
@@ -1555,9 +1564,7 @@ VideoDecodeAccelerator::SupportedProfiles
VaapiWrapper::GetSupportedDecodeProfiles() {
VideoDecodeAccelerator::SupportedProfiles profiles;
- for (const auto& media_to_va_profile_map_entry : GetProfileCodecMap()) {
- const VideoCodecProfile media_profile = media_to_va_profile_map_entry.first;
- const VAProfile va_profile = media_to_va_profile_map_entry.second;
+ for (const auto& [media_profile, va_profile] : GetProfileCodecMap()) {
DCHECK(va_profile != VAProfileNone);
const VASupportedProfiles::ProfileInfo* profile_info =
@@ -1803,6 +1810,7 @@ VAEntrypoint VaapiWrapper::GetDefaultVaEntryPoint(CodecMode mode,
#endif
case VaapiWrapper::kEncodeConstantBitrate:
case VaapiWrapper::kEncodeConstantQuantizationParameter:
+ case VaapiWrapper::kEncodeVariableBitrate:
if (profile == VAProfileJPEGBaseline)
return VAEntrypointEncPicture;
DCHECK(IsModeEncoding(mode));
@@ -2140,21 +2148,37 @@ scoped_refptr<VASurface> VaapiWrapper::CreateVASurfaceForPixmap(
sequence_checker_.CalledOnValidSequence());
const gfx::BufferFormat buffer_format = pixmap->GetBufferFormat();
+ const uint32_t va_fourcc = BufferFormatToVAFourCC(buffer_format);
+ if (!va_fourcc) {
+ LOG(ERROR) << "Failed to get the VA fourcc from the buffer format";
+ return nullptr;
+ }
+
+ const size_t num_planes = pixmap->GetNumberOfPlanes();
+
// Create a VASurface for a NativePixmap by importing the underlying dmabufs.
const gfx::Size size = pixmap->GetBufferSize();
VASurfaceAttribExternalBuffers va_attrib_extbuf{};
- va_attrib_extbuf.pixel_format = BufferFormatToVAFourCC(buffer_format);
- va_attrib_extbuf.width = size.width();
- va_attrib_extbuf.height = size.height();
+ va_attrib_extbuf.pixel_format = va_fourcc;
+ va_attrib_extbuf.width = base::checked_cast<uint32_t>(size.width());
+ va_attrib_extbuf.height = base::checked_cast<uint32_t>(size.height());
- const size_t num_planes = pixmap->GetNumberOfPlanes();
+ static_assert(std::size(va_attrib_extbuf.pitches) ==
+ std::size(va_attrib_extbuf.offsets));
+ if (num_planes > std::size(va_attrib_extbuf.pitches)) {
+ LOG(ERROR) << "Too many planes in the NativePixmap; got " << num_planes
+ << " but the maximum number is "
+ << std::size(va_attrib_extbuf.pitches);
+ return nullptr;
+ }
for (size_t i = 0; i < num_planes; ++i) {
va_attrib_extbuf.pitches[i] = pixmap->GetDmaBufPitch(i);
- va_attrib_extbuf.offsets[i] = pixmap->GetDmaBufOffset(i);
+ va_attrib_extbuf.offsets[i] =
+ base::checked_cast<uint32_t>(pixmap->GetDmaBufOffset(i));
DVLOG(4) << "plane " << i << ": pitch: " << va_attrib_extbuf.pitches[i]
<< " offset: " << va_attrib_extbuf.offsets[i];
}
- va_attrib_extbuf.num_planes = num_planes;
+ va_attrib_extbuf.num_planes = base::checked_cast<uint32_t>(num_planes);
const int dma_buf_fd = pixmap->GetDmaBufFd(0);
if (dma_buf_fd < 0) {
@@ -2183,7 +2207,12 @@ scoped_refptr<VASurface> VaapiWrapper::CreateVASurfaceForPixmap(
DCHECK_EQ(va_attrib_extbuf.flags, 0u);
DCHECK_EQ(va_attrib_extbuf.private_data, nullptr);
- uint32_t va_format = BufferFormatToVARTFormat(buffer_format);
+ unsigned int va_format =
+ base::strict_cast<unsigned int>(BufferFormatToVARTFormat(buffer_format));
+ if (!va_format) {
+ LOG(ERROR) << "Failed to get the VA RT format from the buffer format";
+ return nullptr;
+ }
if (protected_content) {
if (GetImplementationType() == VAImplementation::kMesaGallium)
@@ -2658,7 +2687,8 @@ std::unique_ptr<ScopedVABuffer> VaapiWrapper::CreateVABuffer(VABufferType type,
sequence_checker_.CalledOnValidSequence());
TRACE_EVENT0("media,gpu", "VaapiWrapper::CreateVABuffer");
base::AutoLockMaybe auto_lock(va_lock_);
- TRACE_EVENT0("media,gpu", "VaapiWrapper::CreateVABufferLocked");
+ TRACE_EVENT2("media,gpu", "VaapiWrapper::CreateVABufferLocked", "type", type,
+ "size", size);
#if BUILDFLAG(IS_CHROMEOS_ASH)
VAContextID context_id = type == VAProtectedSessionExecuteBufferType
? va_protected_session_id_
@@ -2897,9 +2927,6 @@ bool VaapiWrapper::BlitSurface(const VASurface& va_surface_src,
pipeline_param->rotation_state = VA_ROTATION_270;
break;
}
-
- const VAStatus va_res = mapping.Unmap();
- VA_SUCCESS_OR_RETURN(va_res, VaapiFunctions::kVAUnmapBuffer, false);
}
#if BUILDFLAG(IS_CHROMEOS_ASH)
@@ -3003,6 +3030,10 @@ bool VaapiWrapper::Initialize(VAProfile va_profile,
DCHECK_NE(va_profile, VAProfileJPEGBaseline)
<< "JPEG Encoding doesn't support CQP bitrate control";
}
+ if (mode_ == kEncodeVariableBitrate) {
+ DCHECK_NE(va_profile, VAProfileJPEGBaseline)
+ << "JPEG Encoding doesn't support VBR bitrate control";
+ }
#endif // DCHECK_IS_ON()
#if BUILDFLAG(IS_CHROMEOS_ASH)
@@ -3279,8 +3310,11 @@ bool VaapiWrapper::Execute_Locked(VASurfaceID va_surface_id,
va_res = vaEndPicture(va_display_, va_context_id_);
VA_SUCCESS_OR_RETURN(va_res, VaapiFunctions::kVAEndPicture, false);
- UMA_HISTOGRAM_TIMES("Media.PlatformVideoDecoding.Decode",
- base::TimeTicks::Now() - decode_start_time);
+ if (IsModeDecoding(mode_) && va_profile_ != VAProfileNone &&
+ va_profile_ != VAProfileJPEGBaseline) {
+ UMA_HISTOGRAM_TIMES("Media.PlatformVideoDecoding.Decode",
+ base::TimeTicks::Now() - decode_start_time);
+ }
return true;
}
@@ -3304,17 +3338,18 @@ bool VaapiWrapper::SubmitBuffer_Locked(const VABufferDescriptor& va_buffer) {
VABufferID buffer_id;
{
- TRACE_EVENT0("media,gpu",
- "VaapiWrapper::SubmitBuffer_Locked_vaCreateBuffer");
- const VAStatus va_res =
- vaCreateBuffer(va_display_, va_context_id_, va_buffer.type,
- va_buffer_size, 1, nullptr, &buffer_id);
+ TRACE_EVENT2("media,gpu",
+ "VaapiWrapper::SubmitBuffer_Locked_vaCreateBuffer", "type",
+ va_buffer.type, "size", va_buffer_size);
+ // The type of |data| in vaCreateBuffer() is void*, though a driver must not
+ // change the |data| buffer. We execute const_cast to limit the type
+ // mismatch. https://github.com/intel/libva/issues/597
+ const VAStatus va_res = vaCreateBuffer(
+ va_display_, va_context_id_, va_buffer.type, va_buffer_size, 1,
+ const_cast<void*>(va_buffer.data), &buffer_id);
VA_SUCCESS_OR_RETURN(va_res, VaapiFunctions::kVACreateBuffer, false);
}
- if (!MapAndCopy_Locked(buffer_id, va_buffer))
- return false;
-
pending_va_buffers_.push_back(buffer_id);
pending_buffers_destroyer_on_failure.ReplaceClosure(base::DoNothing());
return true;
diff --git a/chromium/media/gpu/vaapi/vaapi_wrapper.h b/chromium/media/gpu/vaapi/vaapi_wrapper.h
index 00e9e9a974f..d6feb85cb35 100644
--- a/chromium/media/gpu/vaapi/vaapi_wrapper.h
+++ b/chromium/media/gpu/vaapi/vaapi_wrapper.h
@@ -138,6 +138,7 @@ class MEDIA_GPU_EXPORT VaapiWrapper
kEncodeConstantBitrate, // Encode with Constant Bitrate algorithm.
kEncodeConstantQuantizationParameter, // Encode with Constant Quantization
// Parameter algorithm.
+ kEncodeVariableBitrate, // Encode with variable bitrate algorithm.
kVideoProcess,
kCodecModeMax,
};
diff --git a/chromium/media/gpu/vaapi/vp8_vaapi_video_decoder_delegate.cc b/chromium/media/gpu/vaapi/vp8_vaapi_video_decoder_delegate.cc
index 8816c5d83ae..e9433bedcf3 100644
--- a/chromium/media/gpu/vaapi/vp8_vaapi_video_decoder_delegate.cc
+++ b/chromium/media/gpu/vaapi/vp8_vaapi_video_decoder_delegate.cc
@@ -82,8 +82,9 @@ bool VP8VaapiVideoDecoderDelegate::SubmitDecode(
if (!slice_params_)
return false;
}
- // |encoded_data| cannot be reused even when it's of the appropriate size, due
- // to strange stutterings in e.g. Gen 9.5.
+
+ // Create VASliceData buffer |encoded_data| every frame so that decoding can
+ // be more asynchronous than reusing the buffer.
std::unique_ptr<ScopedVABuffer> encoded_data =
vaapi_wrapper_->CreateVABuffer(VASliceDataBufferType, header->frame_size);
if (!encoded_data)
diff --git a/chromium/media/gpu/vaapi/vp8_vaapi_video_encoder_delegate.cc b/chromium/media/gpu/vaapi/vp8_vaapi_video_encoder_delegate.cc
index 85811972169..2979fa60363 100644
--- a/chromium/media/gpu/vaapi/vp8_vaapi_video_encoder_delegate.cc
+++ b/chromium/media/gpu/vaapi/vp8_vaapi_video_encoder_delegate.cc
@@ -274,14 +274,6 @@ bool VP8VaapiVideoEncoderDelegate::Initialize(
return false;
}
- // Even though VP8VaapiVideoEncoderDelegate might support other bitrate
- // control modes, only the kConstantQuantizationParameter is used.
- if (ave_config.bitrate_control != VaapiVideoEncoderDelegate::BitrateControl::
- kConstantQuantizationParameter) {
- DVLOGF(1) << "Only CQ bitrate control is supported";
- return false;
- }
-
if (config.HasSpatialLayer()) {
DVLOGF(1) << "Invalid configuration. Spatial layers not supported in VP8";
return false;
@@ -310,8 +302,6 @@ bool VP8VaapiVideoEncoderDelegate::Initialize(
}
}
- native_input_mode_ = ave_config.native_input_mode;
-
visible_size_ = config.input_visible_size;
coded_size_ = gfx::Size(base::bits::AlignUp(visible_size_.width(), 16),
base::bits::AlignUp(visible_size_.height(), 16));
@@ -624,10 +614,9 @@ bool VP8VaapiVideoEncoderDelegate::SubmitFrameParameters(
qmatrix_buf.quantization_index_delta[4] =
frame_header->quantization_hdr.uv_ac_delta;
- return vaapi_wrapper_->SubmitBuffer(VAEncSequenceParameterBufferType,
- &seq_param) &&
- vaapi_wrapper_->SubmitBuffer(VAEncPictureParameterBufferType,
- &pic_param) &&
- vaapi_wrapper_->SubmitBuffer(VAQMatrixBufferType, &qmatrix_buf);
+ return vaapi_wrapper_->SubmitBuffers(
+ {{VAEncSequenceParameterBufferType, sizeof(seq_param), &seq_param},
+ {VAEncPictureParameterBufferType, sizeof(pic_param), &pic_param},
+ {VAQMatrixBufferType, sizeof(qmatrix_buf), &qmatrix_buf}});
}
} // namespace media
diff --git a/chromium/media/gpu/vaapi/vp9_vaapi_video_decoder_delegate.cc b/chromium/media/gpu/vaapi/vp9_vaapi_video_decoder_delegate.cc
index 4c1ecc3ba60..a1ea410276f 100644
--- a/chromium/media/gpu/vaapi/vp9_vaapi_video_decoder_delegate.cc
+++ b/chromium/media/gpu/vaapi/vp9_vaapi_video_decoder_delegate.cc
@@ -197,9 +197,8 @@ DecodeStatus VP9VaapiVideoDecoderDelegate::SubmitDecode(
seg_param.chroma_ac_quant_scale = seg.uv_dequant[i][1];
}
- // Always re-create |encoded_data| because reusing the buffer causes horrific
- // artifacts in decoded buffers. TODO(b/169725321): This seems to be a driver
- // bug, fix it and reuse the buffer.
+ // Create VASliceData buffer |encoded_data| every frame so that decoding can
+ // be more asynchronous than reusing the buffer.
std::unique_ptr<ScopedVABuffer> encoded_data;
std::vector<std::pair<VABufferID, VaapiWrapper::VABufferDescriptor>> buffers =
diff --git a/chromium/media/gpu/vaapi/vp9_vaapi_video_encoder_delegate.cc b/chromium/media/gpu/vaapi/vp9_vaapi_video_encoder_delegate.cc
index 31bdfbc99b6..f2ab04a6105 100644
--- a/chromium/media/gpu/vaapi/vp9_vaapi_video_encoder_delegate.cc
+++ b/chromium/media/gpu/vaapi/vp9_vaapi_video_encoder_delegate.cc
@@ -185,16 +185,6 @@ bool VP9VaapiVideoEncoderDelegate::Initialize(
return false;
}
- // Even though VP9VaapiVideoEncoderDelegate might support other bitrate
- // control modes, only the kConstantQuantizationParameter is used.
- if (ave_config.bitrate_control != VaapiVideoEncoderDelegate::BitrateControl::
- kConstantQuantizationParameter) {
- DVLOGF(1) << "Only CQ bitrate control is supported";
- return false;
- }
-
- native_input_mode_ = ave_config.native_input_mode;
-
visible_size_ = config.input_visible_size;
coded_size_ = gfx::Size(base::bits::AlignUp(visible_size_.width(), 16),
base::bits::AlignUp(visible_size_.height(), 16));
@@ -577,10 +567,9 @@ bool VP9VaapiVideoEncoderDelegate::SubmitFrameParameters(
pic_param.log2_tile_rows = frame_header->tile_rows_log2;
pic_param.log2_tile_columns = frame_header->tile_cols_log2;
- return vaapi_wrapper_->SubmitBuffer(VAEncSequenceParameterBufferType,
- &seq_param) &&
- vaapi_wrapper_->SubmitBuffer(VAEncPictureParameterBufferType,
- &pic_param);
+ return vaapi_wrapper_->SubmitBuffers(
+ {{VAEncSequenceParameterBufferType, sizeof(seq_param), &seq_param},
+ {VAEncPictureParameterBufferType, sizeof(pic_param), &pic_param}});
}
} // namespace media
diff --git a/chromium/media/gpu/vaapi/vp9_vaapi_video_encoder_delegate_unittest.cc b/chromium/media/gpu/vaapi/vp9_vaapi_video_encoder_delegate_unittest.cc
index f8665b5bdd2..8f07a9f1285 100644
--- a/chromium/media/gpu/vaapi/vp9_vaapi_video_encoder_delegate_unittest.cc
+++ b/chromium/media/gpu/vaapi/vp9_vaapi_video_encoder_delegate_unittest.cc
@@ -49,10 +49,7 @@ constexpr uint8_t kTemporalLayerPattern[][4] = {
};
VaapiVideoEncoderDelegate::Config kDefaultVaapiVideoEncoderDelegateConfig{
- .max_num_ref_frames = kDefaultMaxNumRefFrames,
- .native_input_mode = false,
- .bitrate_control = VaapiVideoEncoderDelegate::BitrateControl::
- kConstantQuantizationParameter};
+ .max_num_ref_frames = kDefaultMaxNumRefFrames};
VideoEncodeAccelerator::Config kDefaultVideoEncodeAcceleratorConfig(
PIXEL_FORMAT_I420,
@@ -262,8 +259,6 @@ struct VP9VaapiVideoEncoderDelegateTestParam;
class VP9VaapiVideoEncoderDelegateTest
: public ::testing::TestWithParam<VP9VaapiVideoEncoderDelegateTestParam> {
public:
- using BitrateControl = VaapiVideoEncoderDelegate::BitrateControl;
-
VP9VaapiVideoEncoderDelegateTest() = default;
~VP9VaapiVideoEncoderDelegateTest() override = default;
@@ -324,21 +319,16 @@ VP9VaapiVideoEncoderDelegateTest::CreateEncodeJob(
bool keyframe,
const scoped_refptr<VASurface>& va_surface,
const scoped_refptr<VP9Picture>& picture) {
- auto input_frame = VideoFrame::CreateFrame(
- kDefaultVideoEncodeAcceleratorConfig.input_format,
- kDefaultVideoEncodeAcceleratorConfig.input_visible_size,
- gfx::Rect(kDefaultVideoEncodeAcceleratorConfig.input_visible_size),
- kDefaultVideoEncodeAcceleratorConfig.input_visible_size,
- base::TimeDelta());
- LOG_ASSERT(input_frame) << " Failed to create VideoFrame";
-
constexpr VABufferID kDummyVABufferID = 12;
auto scoped_va_buffer = ScopedVABuffer::CreateForTesting(
kDummyVABufferID, VAEncCodedBufferType,
kDefaultVideoEncodeAcceleratorConfig.input_visible_size.GetArea());
+ // TODO(b/229358029): Set a valid timestamp and check the timestamp in
+ // metadata.
+ constexpr base::TimeDelta timestamp;
return std::make_unique<VaapiVideoEncoderDelegate::EncodeJob>(
- input_frame, keyframe, va_surface->id(), va_surface->size(), picture,
+ keyframe, timestamp, va_surface->id(), picture,
std::move(scoped_va_buffer));
}
@@ -354,7 +344,7 @@ void VP9VaapiVideoEncoderDelegateTest::InitializeVP9VaapiVideoEncoderDelegate(
auto initial_bitrate_allocation = AllocateDefaultBitrateForTesting(
num_spatial_layers, num_temporal_layers,
- kDefaultVideoEncodeAcceleratorConfig.bitrate.target_bps());
+ kDefaultVideoEncodeAcceleratorConfig.bitrate.target_bps(), false);
std::vector<gfx::Size> svc_layer_size =
GetDefaultSpatialLayerResolutions(num_spatial_layers);
if (num_spatial_layers > 1u || num_temporal_layers > 1u) {
@@ -378,7 +368,7 @@ void VP9VaapiVideoEncoderDelegateTest::InitializeVP9VaapiVideoEncoderDelegate(
EXPECT_CALL(*mock_rate_ctrl_, UpdateRateControl(MatchRtcConfigWithRates(
AllocateDefaultBitrateForTesting(
num_spatial_layers, num_temporal_layers,
- config.bitrate.target_bps()),
+ config.bitrate.target_bps(), false),
VideoEncodeAccelerator::kDefaultFramerate,
num_temporal_layers, svc_layer_size)))
.Times(1)
@@ -506,7 +496,7 @@ void VP9VaapiVideoEncoderDelegateTest::UpdateRatesTest(
uint8_t expected_temporal_layer_id,
uint32_t bitrate, uint32_t framerate) {
auto bitrate_allocation = AllocateDefaultBitrateForTesting(
- num_spatial_layers, num_temporal_layers, bitrate);
+ num_spatial_layers, num_temporal_layers, bitrate, false);
UpdateRatesAndEncode(bitrate_allocation, framerate,
/*valid_rates_request=*/true, is_key_pic,
spatial_layer_resolutions, num_temporal_layers,
@@ -637,7 +627,7 @@ TEST_P(VP9VaapiVideoEncoderDelegateTest, DeactivateActivateSpatialLayers) {
const VideoBitrateAllocation kDefaultBitrateAllocation =
AllocateDefaultBitrateForTesting(
num_spatial_layers, num_temporal_layers,
- kDefaultVideoEncodeAcceleratorConfig.bitrate.target_bps());
+ kDefaultVideoEncodeAcceleratorConfig.bitrate.target_bps(), false);
const std::vector<gfx::Size> kDefaultSpatialLayers =
GetDefaultSpatialLayerResolutions(num_spatial_layers);
const uint32_t kFramerate =
@@ -667,7 +657,7 @@ TEST_P(VP9VaapiVideoEncoderDelegateTest, FailsWithInvalidSpatialLayers) {
const VideoBitrateAllocation kDefaultBitrateAllocation =
AllocateDefaultBitrateForTesting(
num_spatial_layers, num_temporal_layers,
- kDefaultVideoEncodeAcceleratorConfig.bitrate.target_bps());
+ kDefaultVideoEncodeAcceleratorConfig.bitrate.target_bps(), false);
std::vector<VideoBitrateAllocation> invalid_bitrate_allocations;
constexpr uint32_t kBitrate = 1234u;
auto bitrate_allocation = kDefaultBitrateAllocation;
diff --git a/chromium/media/gpu/video_decode_accelerator_perf_tests.cc b/chromium/media/gpu/video_decode_accelerator_perf_tests.cc
index 1757dedc50f..fb2a3dff31a 100644
--- a/chromium/media/gpu/video_decode_accelerator_perf_tests.cc
+++ b/chromium/media/gpu/video_decode_accelerator_perf_tests.cc
@@ -356,9 +356,8 @@ class VideoDecoderTest : public ::testing::Test {
config.implementation = g_env->GetDecoderImplementation();
config.linear_output = g_env->ShouldOutputLinearBuffers();
- auto video_player = VideoPlayer::Create(
- config, g_env->GetGpuMemoryBufferFactory(), std::move(frame_renderer),
- std::move(frame_processors));
+ auto video_player = VideoPlayer::Create(config, std::move(frame_renderer),
+ std::move(frame_processors));
LOG_ASSERT(video_player);
LOG_ASSERT(video_player->Initialize(video));
diff --git a/chromium/media/gpu/video_decode_accelerator_tests.cc b/chromium/media/gpu/video_decode_accelerator_tests.cc
index de895d54700..0286cbbf083 100644
--- a/chromium/media/gpu/video_decode_accelerator_tests.cc
+++ b/chromium/media/gpu/video_decode_accelerator_tests.cc
@@ -183,9 +183,8 @@ class VideoDecoderTest : public ::testing::Test {
config.implementation = g_env->GetDecoderImplementation();
config.linear_output = g_env->ShouldOutputLinearBuffers();
- auto video_player = VideoPlayer::Create(
- config, g_env->GetGpuMemoryBufferFactory(), std::move(frame_renderer),
- std::move(frame_processors));
+ auto video_player = VideoPlayer::Create(config, std::move(frame_renderer),
+ std::move(frame_processors));
LOG_ASSERT(video_player);
LOG_ASSERT(video_player->Initialize(video));
@@ -510,8 +509,7 @@ TEST_F(VideoDecoderTest, Reinitialize) {
TEST_F(VideoDecoderTest, DestroyBeforeInitialize) {
VideoDecoderClientConfig config = VideoDecoderClientConfig();
config.implementation = g_env->GetDecoderImplementation();
- auto tvp = VideoPlayer::Create(config, g_env->GetGpuMemoryBufferFactory(),
- FrameRendererDummy::Create());
+ auto tvp = VideoPlayer::Create(config, FrameRendererDummy::Create());
EXPECT_NE(tvp, nullptr);
}
diff --git a/chromium/media/gpu/video_encode_accelerator_perf_tests.cc b/chromium/media/gpu/video_encode_accelerator_perf_tests.cc
index d92634ee276..5d3a8cad54f 100644
--- a/chromium/media/gpu/video_encode_accelerator_perf_tests.cc
+++ b/chromium/media/gpu/video_encode_accelerator_perf_tests.cc
@@ -535,8 +535,7 @@ class VideoEncoderTest : public ::testing::Test {
}
auto video_encoder =
- VideoEncoder::Create(config, g_env->GetGpuMemoryBufferFactory(),
- std::move(bitstream_processors));
+ VideoEncoder::Create(config, std::move(bitstream_processors));
LOG_ASSERT(video_encoder);
LOG_ASSERT(video_encoder->Initialize(video));
diff --git a/chromium/media/gpu/video_encode_accelerator_tests.cc b/chromium/media/gpu/video_encode_accelerator_tests.cc
index cd865eb7d1f..d4c87a91f49 100644
--- a/chromium/media/gpu/video_encode_accelerator_tests.cc
+++ b/chromium/media/gpu/video_encode_accelerator_tests.cc
@@ -139,8 +139,7 @@ class VideoEncoderTest : public ::testing::Test {
LOG_ASSERT(video);
auto video_encoder =
- VideoEncoder::Create(config, g_env->GetGpuMemoryBufferFactory(),
- CreateBitstreamProcessors(video, config));
+ VideoEncoder::Create(config, CreateBitstreamProcessors(video, config));
LOG_ASSERT(video_encoder);
if (!video_encoder->Initialize(video))
@@ -407,8 +406,7 @@ TEST_F(VideoEncoderTest, DestroyBeforeInitialize) {
if (g_env->SpatialLayers().size() > 1)
GTEST_SKIP() << "Skip SHMEM input test cases in spatial SVC encoding";
- auto video_encoder = VideoEncoder::Create(GetDefaultConfig(),
- g_env->GetGpuMemoryBufferFactory());
+ auto video_encoder = VideoEncoder::Create(GetDefaultConfig());
EXPECT_NE(video_encoder, nullptr);
}
@@ -509,8 +507,9 @@ TEST_F(VideoEncoderTest, BitrateCheck) {
EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
EXPECT_EQ(encoder->GetFrameReleasedCount(), config.num_frames_to_encode);
EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
- EXPECT_NEAR(encoder->GetStats().Bitrate(), config.bitrate.GetSumBps(),
- kBitrateTolerance * config.bitrate.GetSumBps());
+ EXPECT_NEAR(encoder->GetStats().Bitrate(),
+ config.bitrate_allocation.GetSumBps(),
+ kBitrateTolerance * config.bitrate_allocation.GetSumBps());
}
TEST_F(VideoEncoderTest, BitrateCheck_DynamicBitrate) {
@@ -525,7 +524,7 @@ TEST_F(VideoEncoderTest, BitrateCheck_DynamicBitrate) {
encoder->SetEventWaitTimeout(kBitrateCheckEventTimeout);
// Encode the video with the first bitrate.
- const uint32_t first_bitrate = config.bitrate.GetSumBps();
+ const uint32_t first_bitrate = config.bitrate_allocation.GetSumBps();
encoder->EncodeUntil(VideoEncoder::kFrameReleased,
kNumFramesToEncodeForBitrateCheck);
EXPECT_TRUE(encoder->WaitUntilIdle());
@@ -537,7 +536,7 @@ TEST_F(VideoEncoderTest, BitrateCheck_DynamicBitrate) {
encoder->ResetStats();
encoder->UpdateBitrate(AllocateDefaultBitrateForTesting(
config.num_spatial_layers,
- config.num_temporal_layers, second_bitrate),
+ config.num_temporal_layers, second_bitrate, false),
config.framerate);
encoder->Encode();
EXPECT_TRUE(encoder->WaitForFlushDone());
@@ -568,17 +567,19 @@ TEST_F(VideoEncoderTest, BitrateCheck_DynamicFramerate) {
encoder->EncodeUntil(VideoEncoder::kFrameReleased,
kNumFramesToEncodeForBitrateCheck);
EXPECT_TRUE(encoder->WaitUntilIdle());
- EXPECT_NEAR(encoder->GetStats().Bitrate(), config.bitrate.GetSumBps(),
- kBitrateTolerance * config.bitrate.GetSumBps());
+ EXPECT_NEAR(encoder->GetStats().Bitrate(),
+ config.bitrate_allocation.GetSumBps(),
+ kBitrateTolerance * config.bitrate_allocation.GetSumBps());
// Encode the video with the second framerate.
const uint32_t second_framerate = first_framerate * 3 / 2;
encoder->ResetStats();
- encoder->UpdateBitrate(config.bitrate, second_framerate);
+ encoder->UpdateBitrate(config.bitrate_allocation, second_framerate);
encoder->Encode();
EXPECT_TRUE(encoder->WaitForFlushDone());
- EXPECT_NEAR(encoder->GetStats().Bitrate(), config.bitrate.GetSumBps(),
- kBitrateTolerance * config.bitrate.GetSumBps());
+ EXPECT_NEAR(encoder->GetStats().Bitrate(),
+ config.bitrate_allocation.GetSumBps(),
+ kBitrateTolerance * config.bitrate_allocation.GetSumBps());
EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
EXPECT_EQ(encoder->GetFrameReleasedCount(), config.num_frames_to_encode);
@@ -642,7 +643,7 @@ TEST_F(VideoEncoderTest, FlushAtEndOfStream_NV12DmabufScaling) {
VideoEncoderClientConfig config(
nv12_video, g_env->Profile(), spatial_layers,
AllocateDefaultBitrateForTesting(/*num_spatial_layers=*/1u,
- num_temporal_layers, new_bitrate),
+ num_temporal_layers, new_bitrate, false),
g_env->Reverse());
config.output_resolution = output_resolution;
config.input_storage_type =
diff --git a/chromium/media/gpu/vp9_decoder.cc b/chromium/media/gpu/vp9_decoder.cc
index e07e1c70f46..f396e03f44b 100644
--- a/chromium/media/gpu/vp9_decoder.cc
+++ b/chromium/media/gpu/vp9_decoder.cc
@@ -33,7 +33,7 @@ bool GetSpatialLayerFrameSize(const DecoderBuffer& decoder_buffer,
// due to we want keep returning false to MediaCapability.
#if BUILDFLAG(IS_WIN)
base::FeatureList::IsEnabled(media::kD3D11Vp9kSVCHWDecoding);
-#elif defined(IS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
+#elif BUILDFLAG(IS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
// V4L2 stateless API decoder is not capable of decoding VP9 k-SVC stream.
false;
#else
diff --git a/chromium/media/gpu/windows/d3d11_copying_texture_wrapper_unittest.cc b/chromium/media/gpu/windows/d3d11_copying_texture_wrapper_unittest.cc
index 3a35a43d76c..db1a84af106 100644
--- a/chromium/media/gpu/windows/d3d11_copying_texture_wrapper_unittest.cc
+++ b/chromium/media/gpu/windows/d3d11_copying_texture_wrapper_unittest.cc
@@ -239,7 +239,7 @@ TEST_P(D3D11CopyingTexture2DWrapperTest,
// TODO: check |gpu_task_runner_|.
MailboxHolderArray mailboxes;
- gfx::ColorSpace input_color_space = gfx::ColorSpace::CreateSCRGBLinear();
+ gfx::ColorSpace input_color_space = gfx::ColorSpace::CreateSRGBLinear();
gfx::ColorSpace output_color_space;
EXPECT_EQ(wrapper
->Init(gpu_task_runner_, CreateMockHelperCB(),
@@ -292,7 +292,7 @@ TEST_P(D3D11CopyingTexture2DWrapperTest, HDRMetadataIsSentToVideoProcessor) {
MockVideoProcessorProxy* processor_raw = processor.get();
auto wrapper = std::make_unique<CopyingTexture2DWrapper>(
gfx::Size(100, 200), ExpectTextureWrapper(), std::move(processor),
- nullptr, gfx::ColorSpace::CreateSCRGBLinear());
+ nullptr, gfx::ColorSpace::CreateSRGBLinear());
const DXGI_HDR_METADATA_HDR10 dxgi_metadata =
gl::HDRMetadataHelperWin::HDRMetadataToDXGI(metadata);
diff --git a/chromium/media/gpu/windows/d3d11_decoder_configurator.cc b/chromium/media/gpu/windows/d3d11_decoder_configurator.cc
index 5967559120e..795193da518 100644
--- a/chromium/media/gpu/windows/d3d11_decoder_configurator.cc
+++ b/chromium/media/gpu/windows/d3d11_decoder_configurator.cc
@@ -64,13 +64,13 @@ std::unique_ptr<D3D11DecoderConfigurator> D3D11DecoderConfigurator::Create(
} else if (config.profile() == AV1PROFILE_PROFILE_PRO) {
decoder_guid = DXVA_ModeAV1_VLD_Profile2;
}
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
else if (config.profile() == HEVCPROFILE_MAIN) {
decoder_guid = D3D11_DECODER_PROFILE_HEVC_VLD_MAIN;
} else if (config.profile() == HEVCPROFILE_MAIN10) {
decoder_guid = D3D11_DECODER_PROFILE_HEVC_VLD_MAIN10;
}
-#endif
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
else {
MEDIA_LOG(INFO, media_log)
<< "D3D11VideoDecoder does not support codec " << config.codec();
diff --git a/chromium/media/gpu/windows/d3d11_texture_selector.cc b/chromium/media/gpu/windows/d3d11_texture_selector.cc
index 40bc3db9dd8..ac560044876 100644
--- a/chromium/media/gpu/windows/d3d11_texture_selector.cc
+++ b/chromium/media/gpu/windows/d3d11_texture_selector.cc
@@ -103,7 +103,7 @@ std::unique_ptr<TextureSelector> TextureSelector::Create(
} else if (supports_fmt(DXGI_FORMAT_R16G16B16A16_FLOAT)) {
output_dxgi_format = DXGI_FORMAT_R16G16B16A16_FLOAT;
output_pixel_format = PIXEL_FORMAT_RGBAF16;
- output_color_space = gfx::ColorSpace::CreateSCRGBLinear();
+ output_color_space = gfx::ColorSpace::CreateSCRGBLinear80Nits();
MEDIA_LOG(INFO, media_log) << "D3D11VideoDecoder: Selected RGBAF16";
} else if (supports_fmt(DXGI_FORMAT_R10G10B10A2_UNORM)) {
output_dxgi_format = DXGI_FORMAT_R10G10B10A2_UNORM;
diff --git a/chromium/media/gpu/windows/d3d11_texture_wrapper_unittest.cc b/chromium/media/gpu/windows/d3d11_texture_wrapper_unittest.cc
index 3a182d04831..e5b4543dbf6 100644
--- a/chromium/media/gpu/windows/d3d11_texture_wrapper_unittest.cc
+++ b/chromium/media/gpu/windows/d3d11_texture_wrapper_unittest.cc
@@ -8,6 +8,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
+#include "base/memory/raw_ptr.h"
#include "base/task/single_thread_task_runner.h"
#include "base/test/task_environment.h"
#include "base/win/windows_version.h"
@@ -44,7 +45,7 @@ class D3D11TextureWrapperUnittest : public ::testing::Test {
task_runner_ = task_environment_.GetMainThreadTaskRunner();
- gl::GLSurfaceTestSupport::InitializeOneOffImplementation(
+ display_ = gl::GLSurfaceTestSupport::InitializeOneOffImplementation(
gl::GLImplementationParts(gl::ANGLEImplementation::kD3D11), false);
surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
share_group_ = new gl::GLShareGroup();
@@ -66,7 +67,7 @@ class D3D11TextureWrapperUnittest : public ::testing::Test {
context_ = nullptr;
share_group_ = nullptr;
surface_ = nullptr;
- gl::init::ShutdownGL(false);
+ gl::GLSurfaceTestSupport::ShutdownGL(display_);
}
base::test::TaskEnvironment task_environment_;
@@ -83,6 +84,8 @@ class D3D11TextureWrapperUnittest : public ::testing::Test {
// a wrapper.
scoped_refptr<FakeCommandBufferHelper> fake_command_buffer_helper_;
GetCommandBufferHelperCB get_helper_cb_;
+
+ raw_ptr<gl::GLDisplay> display_ = nullptr;
};
TEST_F(D3D11TextureWrapperUnittest, NV12InitSucceeds) {
diff --git a/chromium/media/gpu/windows/d3d11_video_decoder.cc b/chromium/media/gpu/windows/d3d11_video_decoder.cc
index ad84677300f..6ccddbc83c4 100644
--- a/chromium/media/gpu/windows/d3d11_video_decoder.cc
+++ b/chromium/media/gpu/windows/d3d11_video_decoder.cc
@@ -29,9 +29,9 @@
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
#include "media/gpu/windows/d3d11_av1_accelerator.h"
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
#include "media/gpu/windows/d3d11_h265_accelerator.h"
-#endif
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
#include "media/gpu/windows/d3d11_picture_buffer.h"
#include "media/gpu/windows/d3d11_status.h"
#include "media/gpu/windows/d3d11_video_context_wrapper.h"
@@ -190,15 +190,15 @@ HRESULT D3D11VideoDecoder::InitializeAcceleratedDecoder(
this, media_log_.get(), video_device_, std::move(video_context)),
profile_, config.color_space_info());
} else if (config.codec() == VideoCodec::kHEVC) {
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
- DCHECK(base::FeatureList::IsEnabled(kD3D11HEVCDecoding));
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
+ DCHECK(base::FeatureList::IsEnabled(kPlatformHEVCDecoderSupport));
accelerated_video_decoder_ = std::make_unique<H265Decoder>(
std::make_unique<D3D11H265Accelerator>(
this, media_log_.get(), video_device_, std::move(video_context)),
profile_, config.color_space_info());
#else
return E_FAIL;
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
} else {
return E_FAIL;
}
@@ -212,13 +212,14 @@ HRESULT D3D11VideoDecoder::InitializeAcceleratedDecoder(
D3D11Status::Or<ComD3D11VideoDecoder> D3D11VideoDecoder::CreateD3D11Decoder() {
// By default we assume outputs are 8-bit for SDR color spaces and 10 bit for
- // HDR color spaces (or VP9.2) with HBD capable codecs (the decoder doesn't
- // support H264PROFILE_HIGH10PROFILE). We'll get a config change once we know
- // the real bit depth if this turns out to be wrong.
+ // HDR color spaces (or VP9.2, or HEVC Main10) with HBD capable codecs (the
+ // decoder doesn't support H264PROFILE_HIGH10PROFILE). We'll get a config
+ // change once we know the real bit depth if this turns out to be wrong.
bit_depth_ =
accelerated_video_decoder_
? accelerated_video_decoder_->GetBitDepth()
: (config_.profile() == VP9PROFILE_PROFILE2 ||
+ config_.profile() == HEVCPROFILE_MAIN10 ||
(config_.color_space_info().ToGfxColorSpace().IsHDR() &&
config_.codec() != VideoCodec::kH264)
? 10
diff --git a/chromium/media/gpu/windows/dxva_picture_buffer_win.cc b/chromium/media/gpu/windows/dxva_picture_buffer_win.cc
index 51299c6b68f..d2a422431e1 100644
--- a/chromium/media/gpu/windows/dxva_picture_buffer_win.cc
+++ b/chromium/media/gpu/windows/dxva_picture_buffer_win.cc
@@ -65,7 +65,8 @@ class GLImagePbuffer : public DummyGLImage {
private:
~GLImagePbuffer() override {
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
eglReleaseTexImage(egl_display, surface_, EGL_BACK_BUFFER);
@@ -171,7 +172,8 @@ bool PbufferPictureBuffer::Initialize(const DXVAVideoDecodeAccelerator& decoder,
RETURN_ON_FAILURE(!picture_buffer_.service_texture_ids().empty(),
"No service texture ids provided", false);
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
EGLint use_rgb = 1;
eglGetConfigAttrib(egl_display, egl_config, EGL_BIND_TO_TEXTURE_RGB,
&use_rgb);
@@ -368,7 +370,8 @@ bool PbufferPictureBuffer::CopySurfaceComplete(
RETURN_ON_FAILURE(result == S_OK, "Could not acquire sync mutex", false);
}
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
eglBindTexImage(egl_display, decoding_surface_, EGL_BACK_BUFFER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
@@ -398,7 +401,8 @@ PbufferPictureBuffer::~PbufferPictureBuffer() {
bool PbufferPictureBuffer::ReusePictureBuffer() {
DCHECK_NE(UNUSED, state_);
DCHECK(decoding_surface_);
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
eglReleaseTexImage(egl_display, decoding_surface_, EGL_BACK_BUFFER);
decoder_surface_.Reset();
@@ -423,7 +427,8 @@ bool EGLStreamPictureBuffer::Initialize() {
RETURN_ON_FAILURE(picture_buffer_.service_texture_ids().size() >= 2,
"Not enough texture ids provided", false);
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
const EGLint stream_attributes[] = {
EGL_CONSUMER_LATENCY_USEC_KHR,
0,
@@ -468,7 +473,8 @@ bool EGLStreamPictureBuffer::Initialize() {
bool EGLStreamPictureBuffer::ReusePictureBuffer() {
DCHECK_NE(UNUSED, state_);
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
if (stream_) {
EGLBoolean result = eglStreamConsumerReleaseKHR(egl_display, stream_);
@@ -491,7 +497,8 @@ bool EGLStreamPictureBuffer::BindSampleToTexture(
shared_images_.resize(picture_buffer_.service_texture_ids().size());
current_d3d_sample_ = sample;
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
Microsoft::WRL::ComPtr<IMFMediaBuffer> output_buffer;
HRESULT hr = current_d3d_sample_->GetBufferByIndex(0, &output_buffer);
@@ -547,7 +554,8 @@ bool EGLStreamDelayedCopyPictureBuffer::Initialize(
RETURN_ON_FAILURE(picture_buffer_.service_texture_ids().size() >= 2,
"Not enough texture ids provided", false);
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
const EGLint stream_attributes[] = {
EGL_CONSUMER_LATENCY_USEC_KHR,
0,
@@ -662,7 +670,8 @@ bool EGLStreamCopyPictureBuffer::Initialize(
RETURN_ON_FAILURE(picture_buffer_.service_texture_ids().size() >= 2,
"Not enough texture ids provided", false);
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
const EGLint stream_attributes[] = {
EGL_CONSUMER_LATENCY_USEC_KHR,
0,
@@ -784,7 +793,8 @@ bool EGLStreamCopyPictureBuffer::CopySurfaceComplete(
EGL_D3D_TEXTURE_SUBRESOURCE_ID_ANGLE, 0, EGL_NONE,
};
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
EGLBoolean result = eglStreamPostD3DTextureANGLE(
egl_display, stream_, static_cast<void*>(angle_copy_texture_.Get()),
@@ -803,7 +813,8 @@ bool EGLStreamCopyPictureBuffer::CopySurfaceComplete(
bool EGLStreamCopyPictureBuffer::ReusePictureBuffer() {
DCHECK_NE(UNUSED, state_);
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
if (state_ == IN_CLIENT) {
HRESULT hr = egl_keyed_mutex_->ReleaseSync(++keyed_mutex_value_);
diff --git a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
index 4a533175fb7..c3edfc630ed 100644
--- a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
+++ b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
@@ -699,9 +699,6 @@ bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
if (media_log_)
MEDIA_LOG(INFO, media_log_) << "Starting Initialization of DXVAVDA";
- AddPlaybackSucceededLifetimeStageIfNeeded();
- AddLifetimeProgressionStage(DXVALifetimeProgression::kInitializeStarted);
-
if (!get_gl_context_cb_ || !make_context_current_cb_) {
NOTREACHED() << "GL callbacks are required for this VDA";
return false;
@@ -839,11 +836,6 @@ bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
UMA_HISTOGRAM_ENUMERATION("Media.DXVAVDA.PictureBufferMechanism",
GetPictureBufferMechanism());
-
- AddLifetimeProgressionStage(
- use_dx11_ ? DXVALifetimeProgression::kDX11InitializeSucceeded
- : DXVALifetimeProgression::kDX9InitializeSucceeded);
-
return StartDecoderThread();
}
@@ -1389,9 +1381,6 @@ void DXVAVideoDecodeAccelerator::Reset() {
void DXVAVideoDecodeAccelerator::Destroy() {
DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
-
- AddPlaybackSucceededLifetimeStageIfNeeded();
-
Invalidate();
delete this;
}
@@ -1563,10 +1552,11 @@ bool DXVAVideoDecodeAccelerator::InitDecoder(VideoCodecProfile profile) {
RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D manager to decoder", false);
}
- if (!gl::GLSurfaceEGL::IsPixelFormatFloatSupported())
+ if (!gl::GLSurfaceEGL::GetGLDisplayEGL()->IsPixelFormatFloatSupported())
use_fp16_ = false;
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ EGLDisplay egl_display =
+ gl::GLSurfaceEGL::GetGLDisplayEGL()->GetHardwareDisplay();
while (true) {
std::vector<EGLint> config_attribs = {EGL_BUFFER_SIZE, 32,
@@ -1672,7 +1662,8 @@ bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
}
use_keyed_mutex_ =
- use_dx11_ && gl::GLSurfaceEGL::HasEGLExtension("EGL_ANGLE_keyed_mutex");
+ use_dx11_ && gl::GLSurfaceEGL::GetGLDisplayEGL()->HasEGLExtension(
+ "EGL_ANGLE_keyed_mutex");
if (!use_dx11_ ||
!gl::g_driver_egl.ext.b_EGL_ANGLE_stream_producer_d3d_texture ||
@@ -2044,20 +2035,6 @@ void DXVAVideoDecodeAccelerator::StopOnError(
return;
}
- DXVALifetimeProgression result;
- if (use_dx11_) {
- if (decoded_any_frames_)
- result = DXVALifetimeProgression::kDX11PlaybackFailedAfterFirstFrame;
- else
- result = DXVALifetimeProgression::kDX11PlaybackFailedBeforeFirstFrame;
- } else {
- if (decoded_any_frames_)
- result = DXVALifetimeProgression::kDX9PlaybackFailedAfterFirstFrame;
- else
- result = DXVALifetimeProgression::kDX9PlaybackFailedBeforeFirstFrame;
- }
- AddLifetimeProgressionStage(result);
-
if (client_)
client_->NotifyError(error);
client_ = nullptr;
@@ -3055,7 +3032,8 @@ bool DXVAVideoDecodeAccelerator::InitializeID3D11VideoProcessor(
if (use_fp16_ && config_.target_color_space.IsHDR() && color_space.IsHDR()) {
// Note, we only use the SCRGBLinear output color space when the input is
// PQ, because nvidia drivers will not convert G22 to G10 for some reason.
- dx11_converter_output_color_space_ = gfx::ColorSpace::CreateSCRGBLinear();
+ dx11_converter_output_color_space_ =
+ gfx::ColorSpace::CreateSCRGBLinear80Nits();
} else {
dx11_converter_output_color_space_ = gfx::ColorSpace::CreateSRGB();
}
@@ -3349,32 +3327,4 @@ ID3D11Device* DXVAVideoDecodeAccelerator::D3D11Device() const {
return ShouldUseANGLEDevice() ? angle_device_.Get() : d3d11_device_.Get();
}
-void DXVAVideoDecodeAccelerator::AddLifetimeProgressionStage(
- DXVALifetimeProgression stage) {
- // If we're starting init, then forget about any previously output frames.
- if (stage == DXVALifetimeProgression::kInitializeStarted)
- decoded_any_frames_ = false;
-
- // If init has succeeded, then we can output a playback success / failure when
- // we fail / re-init / are destroyed, as needed.
- already_initialized_ =
- (stage == DXVALifetimeProgression::kDX11InitializeSucceeded ||
- stage == DXVALifetimeProgression::kDX9InitializeSucceeded);
-
- base::UmaHistogramEnumeration("Media.DXVAVDA.DecoderLifetimeProgression",
- stage);
-}
-
-void DXVAVideoDecodeAccelerator::AddPlaybackSucceededLifetimeStageIfNeeded() {
- // If we didn't complete initialization, then we didn't complete playback.
- // This will also prevent us from sending "playback succeeded" more than once
- // per init, or after a playback error.
- if (!already_initialized_)
- return;
-
- AddLifetimeProgressionStage(
- use_dx11_ ? DXVALifetimeProgression::kDX11PlaybackSucceeded
- : DXVALifetimeProgression::kDX9PlaybackSucceeded);
-}
-
} // namespace media
diff --git a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
index 3bf7d99109d..3f8c140cdde 100644
--- a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
+++ b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
@@ -161,41 +161,6 @@ class MEDIA_GPU_EXPORT DXVAVideoDecodeAccelerator
kMaxValue = BIND
};
- // These values are persisted to logs. Entries should not be renumbered and
- // numeric values should never be reused.
- enum class DXVALifetimeProgression {
- kInitializeStarted = 0,
-
- // DX11 init completed successfully.
- kDX11InitializeSucceeded = 1,
-
- // An error occurred after successful init, split up by whether a frame was
- // delivered to the client yet or not.
- kDX11PlaybackFailedBeforeFirstFrame = 2,
- kDX11PlaybackFailedAfterFirstFrame = 3,
-
- // Playback succeeded, which requires successful init.
- kDX11PlaybackSucceeded = 4,
-
- // DX9 variants of the above.
- kDX9InitializeSucceeded = 5,
- kDX9PlaybackFailedBeforeFirstFrame = 6,
- kDX9PlaybackFailedAfterFirstFrame = 7,
- kDX9PlaybackSucceeded = 8,
-
- // For UMA. Must be the last entry. It should be initialized to the
- // numerically largest value above; if you add more entries, then please
- // update this to the last one.
- kMaxValue = kDX9PlaybackSucceeded
- };
-
- // Log UMA progression state.
- void AddLifetimeProgressionStage(DXVALifetimeProgression stage);
-
- // Logs the appropriate PlaybackSucceeded lifetime stage, if we've completed
- // init successfully and not logged an error or playback success since then.
- void AddPlaybackSucceededLifetimeStageIfNeeded();
-
// Creates and initializes an instance of the D3D device and the
// corresponding device manager. The device manager instance is eventually
// passed to the IMFTransform interface implemented by the decoder.
diff --git a/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.cc b/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.cc
index cb4aeecb86e..890b86c9fc5 100644
--- a/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.cc
+++ b/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.cc
@@ -34,6 +34,7 @@
#include "media/base/media_log.h"
#include "media/base/media_switches.h"
#include "media/base/video_frame.h"
+#include "media/base/video_util.h"
#include "media/base/win/mf_helpers.h"
#include "media/base/win/mf_initializer.h"
#include "media/gpu/gpu_video_encode_accelerator_helpers.h"
@@ -46,6 +47,9 @@ namespace media {
namespace {
const uint32_t kDefaultGOPLength = 3000;
const uint32_t kDefaultTargetBitrate = 5000000u;
+const VideoEncodeAccelerator::SupportedRateControlMode kSupportedProfileModes =
+ VideoEncodeAccelerator::kConstantMode |
+ VideoEncodeAccelerator::kVariableMode;
const size_t kMaxFrameRateNumerator = 30;
const size_t kMaxFrameRateDenominator = 1;
const size_t kMaxResolutionWidth = 1920;
@@ -81,6 +85,16 @@ eAVEncH264VProfile GetH264VProfile(VideoCodecProfile profile,
}
}
+// Only eAVEncVP9VProfile_420_8 is supported on Intel graphics.
+eAVEncVP9VProfile GetVP9VProfile(VideoCodecProfile profile) {
+ switch (profile) {
+ case VP9PROFILE_PROFILE0:
+ return eAVEncVP9VProfile_420_8;
+ default:
+ return eAVEncVP9VProfile_unknown;
+ }
+}
+
bool IsSvcSupported(IMFActivate* activate) {
#if defined(ARCH_CPU_X86)
// x86 systems sometimes crash in video drivers here.
@@ -213,8 +227,9 @@ MediaFoundationVideoEncodeAccelerator::MediaFoundationVideoEncodeAccelerator(
bitrate_(Bitrate::ConstantBitrate(kDefaultTargetBitrate)),
input_required_(false),
main_client_task_runner_(base::SequencedTaskRunnerHandle::Get()),
- encoder_thread_task_runner_(
- base::ThreadPool::CreateCOMSTATaskRunner({})) {
+ encoder_thread_task_runner_(base::ThreadPool::CreateCOMSTATaskRunner(
+ {},
+ base::SingleThreadTaskRunnerThreadMode::DEDICATED)) {
encoder_weak_ptr_ = encoder_task_weak_factory_.GetWeakPtr();
}
@@ -235,7 +250,7 @@ MediaFoundationVideoEncodeAccelerator::GetSupportedProfiles() {
SupportedProfiles profiles;
- for (auto codec : {VideoCodec::kH264, VideoCodec::kAV1}) {
+ for (auto codec : {VideoCodec::kH264, VideoCodec::kVP9, VideoCodec::kAV1}) {
auto codec_profiles = GetSupportedProfilesForCodec(codec, true);
profiles.insert(profiles.end(), codec_profiles.begin(),
codec_profiles.end());
@@ -254,7 +269,7 @@ MediaFoundationVideoEncodeAccelerator::GetSupportedProfilesLight() {
SupportedProfiles profiles;
- for (auto codec : {VideoCodec::kH264, VideoCodec::kAV1}) {
+ for (auto codec : {VideoCodec::kH264, VideoCodec::kVP9, VideoCodec::kAV1}) {
auto codec_profiles = GetSupportedProfilesForCodec(codec, false);
profiles.insert(profiles.end(), codec_profiles.begin(),
codec_profiles.end());
@@ -269,9 +284,12 @@ MediaFoundationVideoEncodeAccelerator::GetSupportedProfilesForCodec(
VideoCodec codec,
bool populate_svc_info) {
SupportedProfiles profiles;
- if (codec == VideoCodec::kAV1 &&
- !base::FeatureList::IsEnabled(kMediaFoundationAV1Encoding))
+ if ((codec == VideoCodec::kVP9 &&
+ !base::FeatureList::IsEnabled(kMediaFoundationVP9Encoding)) ||
+ (codec == VideoCodec::kAV1 &&
+ !base::FeatureList::IsEnabled(kMediaFoundationAV1Encoding))) {
return profiles;
+ }
IMFActivate** pp_activate = nullptr;
uint32_t encoder_count = EnumerateHardwareEncoders(codec, &pp_activate);
@@ -306,6 +324,7 @@ MediaFoundationVideoEncodeAccelerator::GetSupportedProfilesForCodec(
// fallback as well.
profile.max_framerate_numerator = kMaxFrameRateNumerator;
profile.max_framerate_denominator = kMaxFrameRateDenominator;
+ profile.rate_control_modes = kSupportedProfileModes;
profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
if (svc_supported) {
profile.scalability_modes.push_back(SVCScalabilityMode::kL1T2);
@@ -320,6 +339,9 @@ MediaFoundationVideoEncodeAccelerator::GetSupportedProfilesForCodec(
profile.profile = H264PROFILE_HIGH;
profiles.push_back(profile);
+ } else if (codec == VideoCodec::kVP9) {
+ profile.profile = VP9PROFILE_PROFILE0;
+ profiles.push_back(profile);
} else if (codec == VideoCodec::kAV1) {
profile.profile = AV1PROFILE_PROFILE_MAIN;
profiles.push_back(profile);
@@ -347,17 +369,25 @@ bool MediaFoundationVideoEncodeAccelerator::Initialize(
if (GetH264VProfile(config.output_profile, config.is_constrained_h264) ==
eAVEncH264VProfile_unknown) {
MEDIA_LOG(ERROR, media_log.get())
- << "Output profile not supported= " << config.output_profile;
+ << "Output profile not supported = " << config.output_profile;
return false;
}
codec_ = VideoCodec::kH264;
+ } else if (config.output_profile >= VP9PROFILE_MIN &&
+ config.output_profile <= VP9PROFILE_MAX) {
+ if (GetVP9VProfile(config.output_profile) == eAVEncVP9VProfile_unknown) {
+ MEDIA_LOG(ERROR, media_log.get())
+ << "Output profile not supported = " << config.output_profile;
+ return false;
+ }
+ codec_ = VideoCodec::kVP9;
} else if (config.output_profile == AV1PROFILE_PROFILE_MAIN) {
codec_ = VideoCodec::kAV1;
}
if (codec_ == VideoCodec::kUnknown) {
MEDIA_LOG(ERROR, media_log.get())
- << "Output profile not supported= " << config.output_profile;
+ << "Output profile not supported = " << config.output_profile;
return false;
}
@@ -435,17 +465,12 @@ bool MediaFoundationVideoEncodeAccelerator::Initialize(
HRESULT hr = MFCreateSample(&input_sample_);
RETURN_ON_HR_FAILURE(hr, "Failed to create sample", false);
- if (config.input_format == PIXEL_FORMAT_NV12 &&
- media::IsMediaFoundationD3D11VideoCaptureEnabled()) {
+ if (media::IsMediaFoundationD3D11VideoCaptureEnabled()) {
dxgi_device_manager_ = DXGIDeviceManager::Create();
if (!dxgi_device_manager_) {
MEDIA_LOG(ERROR, media_log.get()) << "Failed to create DXGIDeviceManager";
return false;
}
- }
-
- // Start the asynchronous processing model
- if (dxgi_device_manager_) {
auto mf_dxgi_device_manager =
dxgi_device_manager_->GetMFDXGIDeviceManager();
hr = encoder_->ProcessMessage(
@@ -454,6 +479,8 @@ bool MediaFoundationVideoEncodeAccelerator::Initialize(
RETURN_ON_HR_FAILURE(
hr, "Couldn't set ProcessMessage MFT_MESSAGE_SET_D3D_MANAGER", false);
}
+
+ // Start the asynchronous processing model
hr = encoder_->ProcessMessage(MFT_MESSAGE_COMMAND_FLUSH, 0);
RETURN_ON_HR_FAILURE(
hr, "Couldn't set ProcessMessage MFT_MESSAGE_COMMAND_FLUSH", false);
@@ -522,10 +549,9 @@ void MediaFoundationVideoEncodeAccelerator::UseOutputBitstreamBuffer(
return;
}
- auto region =
- base::UnsafeSharedMemoryRegion::Deserialize(buffer.TakeRegion());
+ auto region = buffer.TakeRegion();
auto mapping = region.Map();
- if (!region.IsValid() || !mapping.IsValid()) {
+ if (!mapping.IsValid()) {
DLOG(ERROR) << "Failed mapping shared memory.";
NotifyError(kPlatformFailureError);
return;
@@ -602,7 +628,8 @@ uint32_t MediaFoundationVideoEncodeAccelerator::EnumerateHardwareEncoders(
return 0;
}
- if (codec != VideoCodec::kH264 && codec != VideoCodec::kAV1) {
+ if (codec != VideoCodec::kH264 && codec != VideoCodec::kVP9 &&
+ codec != VideoCodec::kAV1) {
DVLOG(ERROR) << "Enumerating unsupported hardware encoders.";
return 0;
}
@@ -784,6 +811,9 @@ bool MediaFoundationVideoEncodeAccelerator::InitializeInputOutputParameters(
hr = imf_output_media_type_->SetUINT32(
MF_MT_MPEG2_PROFILE,
GetH264VProfile(output_profile, is_constrained_h264));
+ } else if (codec_ == VideoCodec::kVP9) {
+ hr = imf_output_media_type_->SetUINT32(MF_MT_MPEG2_PROFILE,
+ GetVP9VProfile(output_profile));
}
RETURN_ON_HR_FAILURE(hr, "Couldn't set codec profile", false);
hr = encoder_->SetOutputType(output_stream_id_, imf_output_media_type_.Get(),
@@ -839,9 +869,11 @@ bool MediaFoundationVideoEncodeAccelerator::SetEncoderModes() {
RETURN_ON_HR_FAILURE(hr, "Couldn't set CommonRateControlMode", false);
}
- // Intel drivers want the layer count to be set explicitly, even if it's one.
+ // Intel drivers want the layer count to be set explicitly for H.264, even if
+ // it's one.
const bool set_svc_layer_count =
- (num_temporal_layers_ > 1) || (vendor_ == DriverVendor::kIntel);
+ (num_temporal_layers_ > 1) ||
+ (vendor_ == DriverVendor::kIntel && codec_ == VideoCodec::kH264);
if (set_svc_layer_count) {
var.ulVal = num_temporal_layers_;
hr = codec_api_->SetValue(&CODECAPI_AVEncVideoTemporalLayerCount, &var);
@@ -999,17 +1031,8 @@ HRESULT MediaFoundationVideoEncodeAccelerator::PopulateInputSampleBuffer(
return MF_E_INVALID_STREAM_DATA;
}
- const uint8_t* src_y = nullptr;
- const uint8_t* src_uv = nullptr;
- base::ScopedClosureRunner scoped_unmap_gmb;
-
if (frame->storage_type() ==
VideoFrame::StorageType::STORAGE_GPU_MEMORY_BUFFER) {
- if (frame->format() != PIXEL_FORMAT_NV12) {
- DLOG(ERROR) << "GMB video frame is not NV12";
- return MF_E_INVALID_STREAM_DATA;
- }
-
gfx::GpuMemoryBuffer* gmb = frame->GetGpuMemoryBuffer();
if (!gmb) {
DLOG(ERROR) << "Failed to get GMB for input frame";
@@ -1027,20 +1050,17 @@ HRESULT MediaFoundationVideoEncodeAccelerator::PopulateInputSampleBuffer(
return PopulateInputSampleBufferGpu(std::move(frame));
}
- // Shared memory GMB case.
- if (!gmb->Map()) {
+ // ConvertToMemoryMappedFrame() doesn't copy pixel data,
+ // it just maps GPU buffer owned by |frame| and presents it as mapped
+ // view in CPU memory. |frame| will unmap the buffer when destructed.
+ frame = ConvertToMemoryMappedFrame(std::move(frame));
+ if (!frame) {
DLOG(ERROR) << "Failed to map shared memory GMB";
return E_FAIL;
}
-
- scoped_unmap_gmb.ReplaceClosure(
- base::BindOnce([](gfx::GpuMemoryBuffer* gmb) { gmb->Unmap(); }, gmb));
-
- src_y = reinterpret_cast<const uint8_t*>(gmb->memory(VideoFrame::kYPlane));
- src_uv =
- reinterpret_cast<const uint8_t*>(gmb->memory(VideoFrame::kUVPlane));
}
+ const auto kTargetPixelFormat = PIXEL_FORMAT_NV12;
Microsoft::WRL::ComPtr<IMFMediaBuffer> input_buffer;
HRESULT hr = input_sample_->GetBufferByIndex(0, &input_buffer);
if (FAILED(hr)) {
@@ -1048,11 +1068,10 @@ HRESULT MediaFoundationVideoEncodeAccelerator::PopulateInputSampleBuffer(
MFT_INPUT_STREAM_INFO input_stream_info;
hr = encoder_->GetInputStreamInfo(input_stream_id_, &input_stream_info);
RETURN_ON_HR_FAILURE(hr, "Couldn't get input stream info", hr);
-
hr = MFCreateAlignedMemoryBuffer(
input_stream_info.cbSize ? input_stream_info.cbSize
: VideoFrame::AllocationSize(
- PIXEL_FORMAT_NV12, input_visible_size_),
+ kTargetPixelFormat, input_visible_size_),
input_stream_info.cbAlignment == 0 ? input_stream_info.cbAlignment
: input_stream_info.cbAlignment - 1,
&input_buffer);
@@ -1061,54 +1080,36 @@ HRESULT MediaFoundationVideoEncodeAccelerator::PopulateInputSampleBuffer(
RETURN_ON_HR_FAILURE(hr, "Failed to add buffer to sample", hr);
}
+ // Establish plain pointers into the input buffer, where we will copy pixel
+ // data to.
MediaBufferScopedPointer scoped_buffer(input_buffer.Get());
DCHECK(scoped_buffer.get());
uint8_t* dst_y = scoped_buffer.get();
+ size_t dst_y_stride = VideoFrame::RowBytes(
+ VideoFrame::kYPlane, kTargetPixelFormat, input_visible_size_.width());
uint8_t* dst_uv =
scoped_buffer.get() +
- frame->row_bytes(VideoFrame::kYPlane) * frame->rows(VideoFrame::kYPlane);
- uint8_t* end = dst_uv + frame->row_bytes(VideoFrame::kUVPlane) *
- frame->rows(VideoFrame::kUVPlane);
+ dst_y_stride * VideoFrame::Rows(VideoFrame::kYPlane, kTargetPixelFormat,
+ input_visible_size_.height());
+ size_t dst_uv_stride = VideoFrame::RowBytes(
+ VideoFrame::kUVPlane, kTargetPixelFormat, input_visible_size_.width());
+ uint8_t* end = dst_uv + dst_uv_stride * frame->rows(VideoFrame::kUVPlane);
DCHECK_GE(static_cast<ptrdiff_t>(scoped_buffer.max_length()),
end - scoped_buffer.get());
- if (frame->format() == PIXEL_FORMAT_NV12) {
- // Copy NV12 pixel data from |frame| to |input_buffer|.
- if (frame->IsMappable()) {
- src_y = frame->visible_data(VideoFrame::kYPlane);
- src_uv = frame->visible_data(VideoFrame::kUVPlane);
- }
- int error = libyuv::NV12Copy(src_y, frame->stride(VideoFrame::kYPlane),
- src_uv, frame->stride(VideoFrame::kUVPlane),
- dst_y, frame->row_bytes(VideoFrame::kYPlane),
- dst_uv, frame->row_bytes(VideoFrame::kUVPlane),
- input_visible_size_.width(),
- input_visible_size_.height());
- if (error) {
- DLOG(ERROR) << "NV12Copy failed";
- return E_FAIL;
- }
- } else if (frame->format() == PIXEL_FORMAT_I420) {
- DCHECK(frame->IsMappable());
- // Convert I420 to NV12 as input.
- int error = libyuv::I420ToNV12(
- frame->visible_data(VideoFrame::kYPlane),
- frame->stride(VideoFrame::kYPlane),
- frame->visible_data(VideoFrame::kUPlane),
- frame->stride(VideoFrame::kUPlane),
- frame->visible_data(VideoFrame::kVPlane),
- frame->stride(VideoFrame::kVPlane), dst_y,
- frame->row_bytes(VideoFrame::kYPlane), dst_uv,
- frame->row_bytes(VideoFrame::kUPlane) * 2, input_visible_size_.width(),
- input_visible_size_.height());
- if (error) {
- DLOG(ERROR) << "I420ToNV12 failed";
- return E_FAIL;
- }
- } else {
- NOTREACHED();
+ // Set up a VideoFrame with the data pointing into the input buffer.
+ // We need it to ease copying and scaling by reusing ConvertAndScaleFrame()
+ auto frame_in_buffer = VideoFrame::WrapExternalYuvData(
+ kTargetPixelFormat, input_visible_size_, gfx::Rect(input_visible_size_),
+ input_visible_size_, dst_y_stride, dst_uv_stride, dst_y, dst_uv,
+ frame->timestamp());
+
+ auto status = ConvertAndScaleFrame(*frame, *frame_in_buffer, resize_buffer_);
+ if (!status.is_ok()) {
+ DLOG(ERROR) << "ConvertAndScaleFrame failed with error code: "
+ << static_cast<uint32_t>(status.code());
+ return E_FAIL;
}
-
return S_OK;
}
diff --git a/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.h b/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.h
index 2182336a199..a2004ca79e7 100644
--- a/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.h
+++ b/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.h
@@ -105,7 +105,7 @@ class MEDIA_GPU_EXPORT MediaFoundationVideoEncodeAccelerator
// |main_client_task_runner_|.
void NotifyError(VideoEncodeAccelerator::Error error);
- // Encoding task to be run on |encoder_thread_|.
+ // Encoding task to be run on |encoder_thread_task_runner_|.
void EncodeTask(scoped_refptr<VideoFrame> frame, bool force_keyframe);
// Processes the input video frame for the encoder.
@@ -126,10 +126,10 @@ class MEDIA_GPU_EXPORT MediaFoundationVideoEncodeAccelerator
bool temporalScalableCoding() { return num_temporal_layers_ > 1; }
- // Checks for and copies encoded output on |encoder_thread_|.
+ // Checks for and copies encoded output on |encoder_thread_task_runner_|.
void ProcessOutput();
- // Drains pending output samples on |encoder_thread_|.
+ // Drains pending output samples on |encoder_thread_task_runner_|.
void DrainPendingOutputs();
// Tries to deliver the input frame to the encoder.
@@ -139,15 +139,15 @@ class MEDIA_GPU_EXPORT MediaFoundationVideoEncodeAccelerator
// Tries to return a bitstream buffer to the client.
void TryToReturnBitstreamBuffer();
- // Inserts the output buffers for reuse on |encoder_thread_|.
+ // Inserts the output buffers for reuse on |encoder_thread_task_runner_|.
void UseOutputBitstreamBufferTask(
std::unique_ptr<BitstreamBufferRef> buffer_ref);
- // Changes encode parameters on |encoder_thread_|.
+ // Changes encode parameters on |encoder_thread_task_runner_|.
void RequestEncodingParametersChangeTask(const Bitrate& bitrate,
uint32_t framerate);
- // Destroys encode session on |encoder_thread_|.
+ // Destroys encode session on |encoder_thread_task_runner_|.
void DestroyTask();
// Releases resources encoder holds.
@@ -231,6 +231,9 @@ class MEDIA_GPU_EXPORT MediaFoundationVideoEncodeAccelerator
// DXGI device manager for handling hardware input textures
scoped_refptr<DXGIDeviceManager> dxgi_device_manager_;
+  // A scratch buffer used by ConvertAndScaleFrame() for pixel-format
+ std::vector<uint8_t> resize_buffer_;
+
// Declared last to ensure that all weak pointers are invalidated before
// other destructors run.
base::WeakPtr<MediaFoundationVideoEncodeAccelerator> encoder_weak_ptr_;
diff --git a/chromium/media/gpu/windows/mf_audio_encoder.cc b/chromium/media/gpu/windows/mf_audio_encoder.cc
index 2fc1dba0a08..2f356862432 100644
--- a/chromium/media/gpu/windows/mf_audio_encoder.cc
+++ b/chromium/media/gpu/windows/mf_audio_encoder.cc
@@ -510,6 +510,7 @@ void MFAudioEncoder::Encode(std::unique_ptr<AudioBus> audio_bus,
void MFAudioEncoder::Flush(EncoderStatusCB done_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(done_cb);
+ done_cb = BindToCurrentLoop(std::move(done_cb));
if (!initialized_) {
std::move(done_cb).Run(
diff --git a/chromium/media/gpu/windows/supported_profile_helpers.cc b/chromium/media/gpu/windows/supported_profile_helpers.cc
index d395ab03d83..b8c799d1c08 100644
--- a/chromium/media/gpu/windows/supported_profile_helpers.cc
+++ b/chromium/media/gpu/windows/supported_profile_helpers.cc
@@ -222,9 +222,9 @@ SupportedResolutionRangeMap GetSupportedD3D11VideoDecoderResolutions(
continue;
}
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
if (!workarounds.disable_accelerated_hevc_decode &&
- base::FeatureList::IsEnabled(kD3D11HEVCDecoding)) {
+ base::FeatureList::IsEnabled(kPlatformHEVCDecoderSupport)) {
if (profile_id == D3D11_DECODER_PROFILE_HEVC_VLD_MAIN) {
supported_resolutions[HEVCPROFILE_MAIN] = GetResolutionsForGUID(
video_device.Get(), profile_id, kModernResolutions);
@@ -237,7 +237,7 @@ SupportedResolutionRangeMap GetSupportedD3D11VideoDecoderResolutions(
continue;
}
}
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
}
return supported_resolutions;
diff --git a/chromium/media/gpu/windows/supported_profile_helpers_unittest.cc b/chromium/media/gpu/windows/supported_profile_helpers_unittest.cc
index a8e9655b538..87adc2cdcbb 100644
--- a/chromium/media/gpu/windows/supported_profile_helpers_unittest.cc
+++ b/chromium/media/gpu/windows/supported_profile_helpers_unittest.cc
@@ -329,10 +329,10 @@ TEST_F(SupportedResolutionResolverTest, AV1ProfileProSupports8k) {
kSquare8k, kSquare8k, kSquare8k);
}
-TEST_F(SupportedResolutionResolverTest, H265Supports4kIfEnabled) {
+#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
+TEST_F(SupportedResolutionResolverTest, H265Supports8kIfEnabled) {
DONT_RUN_ON_WIN_7();
-#if BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
EnableDecoders({D3D11_DECODER_PROFILE_HEVC_VLD_MAIN});
base::test::ScopedFeatureList scoped_feature_list;
@@ -343,27 +343,16 @@ TEST_F(SupportedResolutionResolverTest, H265Supports4kIfEnabled) {
ASSERT_EQ(3u, no_feature_resolutions.size());
// enable the feature and try again
- scoped_feature_list.InitAndEnableFeature(kD3D11HEVCDecoding);
+ SetMaxResolution(D3D11_DECODER_PROFILE_HEVC_VLD_MAIN, kSquare8k);
+ scoped_feature_list.InitAndEnableFeature(kPlatformHEVCDecoderSupport);
const auto resolutions_for_feature = GetSupportedD3D11VideoDecoderResolutions(
mock_d3d11_device_, gpu_workarounds_);
- ASSERT_EQ(4u, no_feature_resolutions.size());
+ ASSERT_EQ(4u, resolutions_for_feature.size());
const auto it = resolutions_for_feature.find(HEVCPROFILE_MAIN);
ASSERT_NE(it, resolutions_for_feature.end());
ASSERT_EQ(it->second.max_landscape_resolution, kSquare8k);
ASSERT_EQ(it->second.max_portrait_resolution, kSquare8k);
-#else
- {
- // Even with the flag enabled and decoder supported, we shouldn't support
- // HEVC unless the buildflag is enabled.
- base::test::ScopedFeatureList scoped_feature_list;
- scoped_feature_list.InitAndEnableFeature(kD3D11HEVCDecoding);
- EnableDecoders({D3D11_DECODER_PROFILE_HEVC_VLD_MAIN});
- const auto supported_resolutions = GetSupportedD3D11VideoDecoderResolutions(
- mock_d3d11_device_, gpu_workarounds_);
- // H264 always is supported, and it adds three profile entries.
- ASSERT_EQ(3u, supported_resolutions.size());
- }
-#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC_DECODING)
}
+#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
} // namespace media
diff --git a/chromium/media/learning/common/target_histogram.h b/chromium/media/learning/common/target_histogram.h
index 6b99ad478b0..b173f44bba6 100644
--- a/chromium/media/learning/common/target_histogram.h
+++ b/chromium/media/learning/common/target_histogram.h
@@ -27,6 +27,11 @@ class TargetHistogramDataView;
struct COMPONENT_EXPORT(LEARNING_COMMON) TargetHistogramPair {
TargetValue target_value;
double count;
+
+ TargetHistogramPair() = default;
+
+ TargetHistogramPair(const TargetValue& value, double count)
+ : target_value(value), count(count) {}
};
// Histogram of target values that allows fractional counts.
diff --git a/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.h b/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.h
index 92033347501..262a6f16ec0 100644
--- a/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.h
+++ b/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.h
@@ -93,8 +93,8 @@ struct COMPONENT_EXPORT(MEDIA_LEARNING_SHARED_TYPEMAP_TRAITS)
static std::vector<media::learning::TargetHistogramPair> pairs(
const media::learning::TargetHistogram& e) {
std::vector<media::learning::TargetHistogramPair> pairs;
- for (auto const& entry : e.counts_) {
- pairs.push_back({entry.first, entry.second});
+ for (auto const& [target_val, count] : e.counts_) {
+ pairs.emplace_back(target_val, count);
}
return pairs;
diff --git a/chromium/media/media_options.gni b/chromium/media/media_options.gni
index 5667cac61f3..b3727203d40 100644
--- a/chromium/media/media_options.gni
+++ b/chromium/media/media_options.gni
@@ -57,10 +57,6 @@ declare_args() {
enable_mse_mpeg2ts_stream_parser =
proprietary_codecs && (is_chromecast || is_fuchsia || use_fuzzing_engine)
- enable_platform_encrypted_hevc =
- proprietary_codecs &&
- (use_chromeos_protected_media || (is_win && is_chrome_branded))
-
# Enable Dolby Vision demuxing. Enable by default for Chromecast. Actual
# decoding must be provided by the platform. Note some Dolby Vision profiles
# which are encoded using HEVC require |enable_platform_hevc| to be enabled.
@@ -82,24 +78,26 @@ declare_args() {
# Enable HLS manifest parser and demuxer.
enable_hls_demuxer = false
+
+ # Enable inclusion of the HEVC/H265 parser and also enable HEVC/H265 decoding
+ # with hardware acceleration assist. Enabled by default for fuzzer builds,
+ # ChromeOS builds with protected content support, Windows, Mac, and Android.
+ enable_hevc_parser_and_hw_decoder =
+ proprietary_codecs &&
+ (use_fuzzing_engine || use_chromeos_protected_media || is_win || is_mac ||
+ is_android)
}
+# Use another declare_args() to allow dependence on
+# |enable_hevc_parser_and_hw_decoder|.
declare_args() {
enable_av1_decoder = enable_dav1d_decoder || enable_libgav1_decoder
# Enable HEVC/H265 demuxing. Actual decoding must be provided by the
- # platform. Enabled by default for Chromecast, fuzzer builds and protected
- # video on ChromeOS and Windows.
+ # platform.
+ # TODO(b/194429120): Enable this for Lacros builds.
enable_platform_hevc =
- proprietary_codecs &&
- (is_chromecast || use_fuzzing_engine || enable_platform_encrypted_hevc)
-
- # Enable HEVC/H265 decoding with hardware acceleration assist. Enabled by
- # default for fuzzer builds and protected video on ChromeOS. It is also
- # enabled for Chromecast by default so the unit tests get run in Chrome CQ.
- enable_platform_hevc_decoding =
- proprietary_codecs &&
- (is_chromecast || use_fuzzing_engine || use_chromeos_protected_media)
+ proprietary_codecs && (is_chromecast || enable_hevc_parser_and_hw_decoder)
}
assert(
@@ -114,10 +112,10 @@ assert(!enable_platform_hevc || proprietary_codecs,
"proprietary_codecs=true is required for enable_platform_hevc=true.")
assert(
- !enable_platform_encrypted_hevc || enable_platform_hevc,
- "enable_platform_hevc=true is required for enable_platform_encrypted_hevc=true.")
+ !enable_hevc_parser_and_hw_decoder || enable_platform_hevc,
+ "enable_platform_hevc=true is required for enable_hevc_parser_and_hw_decoder=true.")
-# Use a second declare_args() to pick up possible overrides of |use_cras|.
+# Use another declare_args() to pick up possible overrides of |use_cras|.
declare_args() {
# Enables runtime selection of PulseAudio library.
use_pulseaudio = false
@@ -125,10 +123,10 @@ declare_args() {
# Enables runtime selection of ALSA library for audio.
use_alsa = false
- # Alsa should be used on non-Android, non-Mac POSIX systems.
- # Alsa should be used on desktop Chromecast and audio-only Chromecast builds.
+ # Alsa should be used on non-Android, non-Mac POSIX systems (excluding CastOS
+ # video builds).
if (is_posix && !is_android && !is_mac &&
- (!is_chromecast || is_cast_desktop_build || is_cast_audio_only)) {
+ (!is_castos || is_cast_audio_only)) {
use_alsa = true
# Pulse is not supported on Chromecast platforms.
@@ -164,14 +162,6 @@ declare_args() {
}
declare_args() {
- # When enabled, this feature allows developers to use a runtime flag to
- # choose the implementation of the renderer that is used. On a build which
- # enables the mojo renderer, if --disable-mojo-renderer is passed at start-up,
- # the "default" renderer will be used instead. Both renderer implementations
- # will be linked if this feature is enabled, increasing the binary size. This
- # feature does not make sense if the mojo renderer is not enabled.
- enable_runtime_media_renderer_selection = is_cast_desktop_build
-
# Enables host verification for CDMs.
# Windows and Mac.
enable_cdm_host_verification =
@@ -208,14 +198,21 @@ if (is_chromecast) {
"video_decoder",
]
_default_mojo_media_host = "gpu"
-} else if (is_mac) {
+} else if (is_mac || (is_win && enable_platform_dts_audio)) {
_default_mojo_media_services = [
"audio_decoder",
+ "audio_encoder",
"video_decoder",
]
_default_mojo_media_host = "gpu"
-} else if (is_chromeos_ash || is_win || ((is_linux || is_chromeos_lacros) &&
- (use_vaapi || use_v4l2_codec))) {
+} else if (is_win) {
+ _default_mojo_media_services = [
+ "audio_encoder",
+ "video_decoder",
+ ]
+ _default_mojo_media_host = "gpu"
+} else if (is_chromeos_ash || ((is_linux || is_chromeos_lacros) &&
+ (use_vaapi || use_v4l2_codec))) {
_default_mojo_media_services = [ "video_decoder" ]
_default_mojo_media_host = "gpu"
}
@@ -227,10 +224,6 @@ if (enable_library_cdms) {
_default_mojo_media_services += [ "cdm" ]
}
-if (is_win) {
- _default_mojo_media_services += [ "audio_encoder" ]
-}
-
declare_args() {
# A list of mojo media services that should be used in the media pipeline.
# Valid entries in the list are:
diff --git a/chromium/media/midi/BUILD.gn b/chromium/media/midi/BUILD.gn
index abefcea38dc..e64097cdeab 100644
--- a/chromium/media/midi/BUILD.gn
+++ b/chromium/media/midi/BUILD.gn
@@ -50,6 +50,8 @@ if (is_android) {
android_library("midi_java") {
deps = [
"//base:base_java",
+ "//base:jni_java",
+ "//build/android:build_java",
"//third_party/androidx:androidx_annotation_annotation_java",
]
annotation_processor_deps = [ "//base/android/jni_generator:jni_processor" ]
@@ -158,10 +160,7 @@ component("midi") {
}
if (use_alsa && use_udev) {
- deps += [
- "//crypto",
- "//crypto:platform",
- ]
+ deps += [ "//crypto" ]
libs += [ "asound" ]
sources += [
"midi_manager_alsa.cc",
diff --git a/chromium/media/mojo/clients/mojo_video_encode_accelerator.cc b/chromium/media/mojo/clients/mojo_video_encode_accelerator.cc
index f2412a448af..2e418bed1bc 100644
--- a/chromium/media/mojo/clients/mojo_video_encode_accelerator.cc
+++ b/chromium/media/mojo/clients/mojo_video_encode_accelerator.cc
@@ -22,7 +22,6 @@
#include "mojo/public/cpp/bindings/pending_receiver.h"
#include "mojo/public/cpp/bindings/receiver.h"
#include "mojo/public/cpp/bindings/self_owned_receiver.h"
-#include "mojo/public/cpp/system/platform_handle.h"
namespace media {
@@ -107,6 +106,10 @@ MojoVideoEncodeAccelerator::MojoVideoEncodeAccelerator(
: vea_(std::move(vea)) {
DVLOG(1) << __func__;
DCHECK(vea_);
+
+ vea_.set_disconnect_handler(
+ base::BindOnce(&MojoVideoEncodeAccelerator::MojoDisconnectionHandler,
+ base::Unretained(this)));
}
VideoEncodeAccelerator::SupportedProfiles
@@ -134,10 +137,6 @@ bool MojoVideoEncodeAccelerator::Initialize(
vea_client_ = std::make_unique<VideoEncodeAcceleratorClient>(
client, vea_client_remote.InitWithNewEndpointAndPassReceiver());
- vea_.set_disconnect_handler(
- base::BindOnce(&MojoVideoEncodeAccelerator::MojoDisconnectionHandler,
- base::Unretained(this)));
-
// Use `mojo::MakeSelfOwnedReceiver` for MediaLog so logs may go through even
// after `MojoVideoEncodeAccelerator` is destructed.
mojo::PendingReceiver<mojom::MediaLog> media_log_pending_receiver;
@@ -201,10 +200,7 @@ void MojoVideoEncodeAccelerator::UseOutputBitstreamBuffer(
DCHECK(buffer.region().IsValid());
- auto buffer_handle =
- mojo::WrapPlatformSharedMemoryRegion(buffer.TakeRegion());
-
- vea_->UseOutputBitstreamBuffer(buffer.id(), std::move(buffer_handle));
+ vea_->UseOutputBitstreamBuffer(buffer.id(), buffer.TakeRegion());
}
void MojoVideoEncodeAccelerator::RequestEncodingParametersChange(
diff --git a/chromium/media/mojo/clients/mojo_video_encode_accelerator_unittest.cc b/chromium/media/mojo/clients/mojo_video_encode_accelerator_unittest.cc
index 6172a5816da..2dd1b5be9c3 100644
--- a/chromium/media/mojo/clients/mojo_video_encode_accelerator_unittest.cc
+++ b/chromium/media/mojo/clients/mojo_video_encode_accelerator_unittest.cc
@@ -93,14 +93,14 @@ class MockMojoVideoEncodeAccelerator : public mojom::VideoEncodeAccelerator {
void UseOutputBitstreamBuffer(
int32_t bitstream_buffer_id,
- mojo::ScopedSharedBufferHandle buffer) override {
+ base::UnsafeSharedMemoryRegion region) override {
EXPECT_EQ(-1, configured_bitstream_buffer_id_);
configured_bitstream_buffer_id_ = bitstream_buffer_id;
- DoUseOutputBitstreamBuffer(bitstream_buffer_id, &buffer);
+ DoUseOutputBitstreamBuffer(bitstream_buffer_id, &region);
}
MOCK_METHOD2(DoUseOutputBitstreamBuffer,
- void(int32_t, mojo::ScopedSharedBufferHandle*));
+ void(int32_t, base::UnsafeSharedMemoryRegion*));
MOCK_METHOD2(RequestEncodingParametersChangeWithLayers,
void(const media::VideoBitrateAllocation&, uint32_t));
@@ -244,11 +244,9 @@ TEST_F(MojoVideoEncodeAcceleratorTest, EncodeOneFrame) {
auto shmem = base::UnsafeSharedMemoryRegion::Create(kShMemSize);
EXPECT_CALL(*mock_mojo_vea(),
DoUseOutputBitstreamBuffer(kBitstreamBufferId, _));
- mojo_vea()->UseOutputBitstreamBuffer(BitstreamBuffer(
- kBitstreamBufferId,
- base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
- std::move(shmem)),
- kShMemSize, 0 /* offset */, base::TimeDelta()));
+ mojo_vea()->UseOutputBitstreamBuffer(
+ BitstreamBuffer(kBitstreamBufferId, std::move(shmem), kShMemSize,
+ 0 /* offset */, base::TimeDelta()));
base::RunLoop().RunUntilIdle();
}
@@ -341,8 +339,23 @@ TEST_F(MojoVideoEncodeAcceleratorTest, InitializeFailure) {
base::RunLoop().RunUntilIdle();
}
-// Test that mojo disconnect is surfaced as a platform error
-TEST_F(MojoVideoEncodeAcceleratorTest, MojoDisconnect) {
+// Test that mojo disconnect before initialize is surfaced as a platform error.
+TEST_F(MojoVideoEncodeAcceleratorTest, MojoDisconnectBeforeInitialize) {
+ std::unique_ptr<MockVideoEncodeAcceleratorClient> mock_vea_client =
+ std::make_unique<MockVideoEncodeAcceleratorClient>();
+
+ constexpr Bitrate kInitialBitrate = Bitrate::ConstantBitrate(100000u);
+ const VideoEncodeAccelerator::Config config(
+ PIXEL_FORMAT_I420, kInputVisibleSize, VIDEO_CODEC_PROFILE_UNKNOWN,
+ kInitialBitrate);
+ mojo_vea_receiver_->Close();
+ EXPECT_FALSE(mojo_vea()->Initialize(config, mock_vea_client.get(),
+ std::make_unique<media::NullMediaLog>()));
+ base::RunLoop().RunUntilIdle();
+}
+
+// Test that mojo disconnect after initialize is surfaced as a platform error.
+TEST_F(MojoVideoEncodeAcceleratorTest, MojoDisconnectAfterInitialize) {
std::unique_ptr<MockVideoEncodeAcceleratorClient> mock_vea_client =
std::make_unique<MockVideoEncodeAcceleratorClient>();
diff --git a/chromium/media/mojo/clients/win/media_foundation_renderer_client.cc b/chromium/media/mojo/clients/win/media_foundation_renderer_client.cc
index 4f79b942af8..15fdbd8c45d 100644
--- a/chromium/media/mojo/clients/win/media_foundation_renderer_client.cc
+++ b/chromium/media/mojo/clients/win/media_foundation_renderer_client.cc
@@ -9,9 +9,13 @@
#include "base/callback_helpers.h"
#include "base/task/bind_post_task.h"
#include "media/base/media_log.h"
+#include "media/base/win/mf_feature_checks.h"
#include "media/base/win/mf_helpers.h"
+#include "media/mojo/mojom/speech_recognition_service.mojom.h"
#include "media/renderers/win/media_foundation_renderer.h"
#include "mojo/public/cpp/bindings/callback_helpers.h"
+#include "mojo/public/cpp/bindings/pending_remote.h"
+#include "mojo/public/cpp/bindings/remote.h"
namespace media {
@@ -22,15 +26,21 @@ MediaFoundationRendererClient::MediaFoundationRendererClient(
mojo::PendingRemote<RendererExtension> pending_renderer_extension,
mojo::PendingReceiver<ClientExtension> client_extension_receiver,
std::unique_ptr<DCOMPTextureWrapper> dcomp_texture_wrapper,
- VideoRendererSink* sink)
+ ObserveOverlayStateCB observe_overlay_state_cb,
+ VideoRendererSink* sink,
+ mojo::PendingRemote<media::mojom::MediaFoundationRendererObserver>
+ media_foundation_renderer_observer)
: media_task_runner_(std::move(media_task_runner)),
media_log_(std::move(media_log)),
mojo_renderer_(std::move(mojo_renderer)),
pending_renderer_extension_(std::move(pending_renderer_extension)),
dcomp_texture_wrapper_(std::move(dcomp_texture_wrapper)),
+ observe_overlay_state_cb_(std::move(observe_overlay_state_cb)),
sink_(sink),
pending_client_extension_receiver_(std::move(client_extension_receiver)),
- client_extension_receiver_(this) {
+ client_extension_receiver_(this),
+ pending_media_foundation_renderer_observer_(
+ std::move(media_foundation_renderer_observer)) {
DVLOG_FUNC(1);
}
@@ -53,6 +63,9 @@ void MediaFoundationRendererClient::Initialize(MediaResource* media_resource,
renderer_extension_.Bind(std::move(pending_renderer_extension_),
media_task_runner_);
+ media_foundation_renderer_observer_.Bind(
+ std::move(pending_media_foundation_renderer_observer_),
+ media_task_runner_);
client_extension_receiver_.Bind(std::move(pending_client_extension_receiver_),
media_task_runner_);
@@ -66,28 +79,35 @@ void MediaFoundationRendererClient::Initialize(MediaResource* media_resource,
init_cb_ = std::move(init_cb);
auto media_streams = media_resource->GetAllStreams();
- bool start_in_dcomp_mode = false;
+
+ // Check the rendering strategy & whether we're operating on clear or
+ // protected content to determine the starting 'rendering_mode_'.
+ // If the Direct Composition strategy is specified or if we're operating on
+ // protected content then start in Direct Composition mode, else start in
+ // Frame Server mode. This behavior must match the logic in
+ // MediaFoundationRenderer::Initialize.
+ rendering_strategy_ = kMediaFoundationClearRenderingStrategyParam.Get();
+ rendering_mode_ =
+ rendering_strategy_ ==
+ MediaFoundationClearRenderingStrategy::kDirectComposition
+ ? MediaFoundationRenderingMode::DirectComposition
+ : MediaFoundationRenderingMode::FrameServer;
+
// Start off at 60 fps for our render interval, however it will be updated
// later in OnVideoFrameRateChange
render_interval_ = base::Microseconds(16666);
for (DemuxerStream* stream : media_streams) {
if (stream->type() == DemuxerStream::Type::VIDEO) {
if (stream->video_decoder_config().is_encrypted()) {
- // If the content is clear we'll start in frame server mode
- // and wait to be promoted to DComp.
- // This conditional must match the conditional in
- // MediaFoundationRenderer::Initialize
- start_in_dcomp_mode = true;
+ // This is protected content which only supports Direct Composition
+ // mode, update 'rendering_mode_' accordingly.
+ rendering_mode_ = MediaFoundationRenderingMode::DirectComposition;
}
has_video_ = true;
break;
}
}
- if (!start_in_dcomp_mode) {
- media_engine_in_frame_server_mode_ = true;
- }
-
mojo_renderer_->Initialize(
media_resource, this,
base::BindOnce(
@@ -118,7 +138,7 @@ void MediaFoundationRendererClient::InitializeFramePool(
}
bool MediaFoundationRendererClient::IsFrameServerMode() const {
- return media_engine_in_frame_server_mode_;
+ return rendering_mode_ == MediaFoundationRenderingMode::FrameServer;
}
void MediaFoundationRendererClient::OnFrameAvailable(
@@ -236,6 +256,16 @@ void MediaFoundationRendererClient::OnSelectedVideoTracksChanged(
std::move(change_completed_cb).Run();
}
+void MediaFoundationRendererClient::OnExternalVideoFrameRequest() {
+ // A frame read back signal is currently treated as a permanent signal for
+ // the session so we only need to handle it the first time it is encountered.
+ if (!has_frame_read_back_signal_) {
+ has_frame_read_back_signal_ = true;
+ MEDIA_LOG(INFO, media_log_) << "Frame read back signal";
+ UpdateRenderMode();
+ }
+}
+
// RendererClient implementation.
void MediaFoundationRendererClient::OnError(PipelineStatus status) {
@@ -247,6 +277,11 @@ void MediaFoundationRendererClient::OnError(PipelineStatus status) {
client_->OnError(status);
}
+void MediaFoundationRendererClient::OnFallback(PipelineStatus fallback) {
+ SignalMediaPlayingStateChange(false);
+ client_->OnFallback(std::move(fallback).AddHere());
+}
+
void MediaFoundationRendererClient::OnEnded() {
SignalMediaPlayingStateChange(false);
client_->OnEnded();
@@ -311,13 +346,13 @@ void MediaFoundationRendererClient::OnVideoFrameRateChange(
}
// RenderCallback implementation.
-scoped_refptr<media::VideoFrame> MediaFoundationRendererClient::Render(
+scoped_refptr<VideoFrame> MediaFoundationRendererClient::Render(
base::TimeTicks deadline_min,
base::TimeTicks deadline_max,
RenderingMode mode) {
// Sends a frame request if in frame server mode, otherwise return nothing as
// it is rendered independently by Windows Direct Composition.
- if (!media_engine_in_frame_server_mode_) {
+ if (!IsFrameServerMode()) {
return nullptr;
}
@@ -420,7 +455,7 @@ void MediaFoundationRendererClient::OnSetOutputRectDone(
if (output_size_updated_)
return;
- if (media_engine_in_frame_server_mode_) {
+ if (IsFrameServerMode()) {
return;
}
@@ -490,7 +525,8 @@ void MediaFoundationRendererClient::OnDCOMPSurfaceHandleSet(bool success) {
}
void MediaFoundationRendererClient::OnVideoFrameCreated(
- scoped_refptr<VideoFrame> video_frame) {
+ scoped_refptr<VideoFrame> video_frame,
+ const gpu::Mailbox& mailbox) {
DVLOG_FUNC(1);
DCHECK(media_task_runner_->BelongsToCurrentThread());
DCHECK(has_video_);
@@ -500,7 +536,11 @@ void MediaFoundationRendererClient::OnVideoFrameCreated(
if (cdm_context_) {
video_frame->metadata().protected_video = true;
} else {
+ DCHECK(SupportMediaFoundationClearPlayback());
+ // This video frame is for clear content: setup observation of the mailbox
+ // overlay state changes.
video_frame->metadata().wants_promotion_hint = true;
+ ObserveMailboxForOverlayState(mailbox);
}
dcomp_video_frame_ = video_frame;
@@ -529,7 +569,7 @@ void MediaFoundationRendererClient::SignalMediaPlayingStateChange(
}
// Only start the render loop if we are in frame server mode
- if (media_engine_in_frame_server_mode_) {
+ if (IsFrameServerMode()) {
if (is_playing) {
sink_->Start(this);
} else {
@@ -539,4 +579,77 @@ void MediaFoundationRendererClient::SignalMediaPlayingStateChange(
is_playing_ = is_playing;
}
+void MediaFoundationRendererClient::OnOverlayStateChanged(
+ const gpu::Mailbox& mailbox,
+ bool promoted) {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+ promoted_to_overlay_signal_ = promoted;
+ MEDIA_LOG(INFO, media_log_)
+ << "Overlay state signal, promoted = " << promoted;
+ UpdateRenderMode();
+}
+
+void MediaFoundationRendererClient::UpdateRenderMode() {
+ // We only change modes if we're using the dynamic rendering strategy and
+ // presenting clear content, so return early otherwise.
+ if (rendering_strategy_ != MediaFoundationClearRenderingStrategy::kDynamic ||
+ cdm_context_) {
+ return;
+ }
+
+ // Frame Server mode is required if we are not promoted to an overlay or if
+ // frame readback is required.
+ bool needs_frame_server =
+ has_frame_read_back_signal_ || !promoted_to_overlay_signal_;
+
+ if (!needs_frame_server && IsFrameServerMode()) {
+ MEDIA_LOG(INFO, media_log_) << "Switching to Direct Composition.";
+    // Promoted to overlay and no frame read back required: switch from
+    // Frame Server mode to Direct Composition mode.
+ rendering_mode_ = MediaFoundationRenderingMode::DirectComposition;
+ renderer_extension_->SetMediaFoundationRenderingMode(rendering_mode_);
+ if (is_playing_) {
+ sink_->Stop();
+ }
+ // If we don't have a DComp Visual then create one, otherwise paint
+ // DComp frame again.
+ if (!dcomp_video_frame_) {
+ InitializeDCOMPRenderingIfNeeded();
+ } else {
+ sink_->PaintSingleFrame(dcomp_video_frame_, true);
+ }
+ } else if (needs_frame_server && !IsFrameServerMode()) {
+ // Switch to Frame Server mode.
+ MEDIA_LOG(INFO, media_log_) << "Switching to Frame Server.";
+ rendering_mode_ = MediaFoundationRenderingMode::FrameServer;
+ renderer_extension_->SetMediaFoundationRenderingMode(rendering_mode_);
+ if (is_playing_) {
+ sink_->Start(this);
+ }
+ }
+}
+
+void MediaFoundationRendererClient::ObserveMailboxForOverlayState(
+ const gpu::Mailbox& mailbox) {
+ // If the rendering strategy is dynamic then setup an OverlayStateObserver to
+ // respond to promotion changes. If the rendering strategy is Direct
+ // Composition or Frame Server then we do not need to listen & respond to
+ // overlay state changes.
+ if (rendering_strategy_ == MediaFoundationClearRenderingStrategy::kDynamic) {
+ mailbox_ = mailbox;
+ // 'observe_overlay_state_cb_' creates a content::OverlayStateObserver to
+ // subscribe to overlay state information for the given 'mailbox' from the
+ // Viz layer in the GPU process. We hold an OverlayStateObserverSubscription
+ // since a direct dependency on a content object is not allowed. Once the
+ // OverlayStateObserverSubscription is destroyed the OnOverlayStateChanged
+ // callback will no longer be invoked, so base::Unretained(this) is safe to
+ // use.
+ observer_subscription_ = observe_overlay_state_cb_.Run(
+ mailbox, base::BindRepeating(
+ &MediaFoundationRendererClient::OnOverlayStateChanged,
+ base::Unretained(this), mailbox));
+ DCHECK(observer_subscription_);
+ }
+}
+
} // namespace media
diff --git a/chromium/media/mojo/clients/win/media_foundation_renderer_client.h b/chromium/media/mojo/clients/win/media_foundation_renderer_client.h
index 01ac28e5176..0b8f337caf3 100644
--- a/chromium/media/mojo/clients/win/media_foundation_renderer_client.h
+++ b/chromium/media/mojo/clients/win/media_foundation_renderer_client.h
@@ -10,20 +10,27 @@
#include "base/memory/weak_ptr.h"
#include "base/task/single_thread_task_runner.h"
#include "base/time/time.h"
+#include "gpu/ipc/common/gpu_channel.mojom.h"
#include "media/base/media_resource.h"
+#include "media/base/media_switches.h"
#include "media/base/renderer.h"
#include "media/base/renderer_client.h"
#include "media/base/video_renderer_sink.h"
#include "media/base/win/dcomp_texture_wrapper.h"
+#include "media/base/win/overlay_state_observer_subscription.h"
#include "media/mojo/clients/mojo_renderer.h"
#include "media/mojo/mojom/dcomp_surface_registry.mojom.h"
#include "media/mojo/mojom/renderer_extensions.mojom.h"
+#include "media/renderers/win/media_foundation_rendering_mode.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
+#include "mojo/public/cpp/bindings/pending_remote.h"
#include "mojo/public/cpp/bindings/receiver.h"
+#include "mojo/public/cpp/bindings/remote.h"
namespace media {
class MediaLog;
+class OverlayStateObserverSubscription;
// MediaFoundationRendererClient lives in Renderer process talks to the
// MediaFoundationRenderer living in the MediaFoundationService (utility)
@@ -57,7 +64,10 @@ class MediaFoundationRendererClient
mojo::PendingRemote<RendererExtension> pending_renderer_extension,
mojo::PendingReceiver<ClientExtension> client_extension_receiver,
std::unique_ptr<DCOMPTextureWrapper> dcomp_texture_wrapper,
- VideoRendererSink* sink);
+ media::ObserveOverlayStateCB observe_overlay_state_cb,
+ VideoRendererSink* sink,
+ mojo::PendingRemote<media::mojom::MediaFoundationRendererObserver>
+ media_foundation_renderer_observer);
MediaFoundationRendererClient(const MediaFoundationRendererClient&) = delete;
MediaFoundationRendererClient& operator=(
@@ -79,9 +89,11 @@ class MediaFoundationRendererClient
void OnSelectedVideoTracksChanged(
const std::vector<DemuxerStream*>& enabled_tracks,
base::OnceClosure change_completed_cb) override;
+ void OnExternalVideoFrameRequest() override;
// RendererClient implementation.
void OnError(PipelineStatus status) override;
+ void OnFallback(PipelineStatus fallback) override;
void OnEnded() override;
void OnStatisticsUpdate(const PipelineStatistics& stats) override;
void OnBufferingStateChange(BufferingState state,
@@ -119,10 +131,14 @@ class MediaFoundationRendererClient
const absl::optional<base::UnguessableToken>& token,
const std::string& error);
void OnDCOMPSurfaceHandleSet(bool success);
- void OnVideoFrameCreated(scoped_refptr<VideoFrame> video_frame);
+ void OnVideoFrameCreated(scoped_refptr<VideoFrame> video_frame,
+ const gpu::Mailbox& mailbox);
void OnCdmAttached(bool success);
void OnConnectionError();
void SignalMediaPlayingStateChange(bool is_playing);
+ void ObserveMailboxForOverlayState(const gpu::Mailbox& mailbox);
+ void OnOverlayStateChanged(const gpu::Mailbox& mailbox, bool promoted);
+ void UpdateRenderMode();
// This class is constructed on the main thread and used exclusively on the
// media thread. Hence we store PendingRemotes so we can bind the Remotes
@@ -132,6 +148,13 @@ class MediaFoundationRendererClient
std::unique_ptr<MojoRenderer> mojo_renderer_;
mojo::PendingRemote<RendererExtension> pending_renderer_extension_;
std::unique_ptr<DCOMPTextureWrapper> dcomp_texture_wrapper_;
+ // The 'observer_subscription_' is used to manage the lifetime of our current
+ // observed mailbox, when a mailbox associated with a new video frame of
+ // interest is available the existing observer_subscription_ is freed
+ // allowing the underlying content::OverlayStateObserver object to be cleaned
+ // up.
+ std::unique_ptr<OverlayStateObserverSubscription> observer_subscription_;
+ ObserveOverlayStateCB observe_overlay_state_cb_;
raw_ptr<VideoRendererSink> sink_ = nullptr;
mojo::Remote<RendererExtension> renderer_extension_;
@@ -144,9 +167,21 @@ class MediaFoundationRendererClient
bool output_size_updated_ = false;
bool is_playing_ = false;
bool has_video_ = false;
- bool media_engine_in_frame_server_mode_ = false;
+ bool has_frame_read_back_signal_ = false;
+ bool promoted_to_overlay_signal_ = false;
scoped_refptr<VideoFrame> dcomp_video_frame_;
scoped_refptr<VideoFrame> next_video_frame_;
+ gpu::Mailbox mailbox_;
+
+ // Rendering mode the Media Engine will use.
+ MediaFoundationRenderingMode rendering_mode_ =
+ MediaFoundationRenderingMode::DirectComposition;
+
+ // Rendering strategy informs whether we enforce a rendering mode or allow
+ // dynamic transitions for Clear content. (Note: Protected content will always
+ // use Direct Composition mode).
+ MediaFoundationClearRenderingStrategy rendering_strategy_ =
+ MediaFoundationClearRenderingStrategy::kDirectComposition;
PipelineStatusCallback init_cb_;
raw_ptr<CdmContext> cdm_context_ = nullptr;
@@ -164,6 +199,12 @@ class MediaFoundationRendererClient
// Used to receive calls from the MF_CMD LPAC Utility Process.
mojo::PendingReceiver<ClientExtension> pending_client_extension_receiver_;
mojo::Receiver<ClientExtension> client_extension_receiver_;
+
+ mojo::PendingRemote<media::mojom::MediaFoundationRendererObserver>
+ pending_media_foundation_renderer_observer_;
+ mojo::Remote<media::mojom::MediaFoundationRendererObserver>
+ media_foundation_renderer_observer_;
+
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<MediaFoundationRendererClient> weak_factory_{this};
};
diff --git a/chromium/media/mojo/clients/win/media_foundation_renderer_client_factory.cc b/chromium/media/mojo/clients/win/media_foundation_renderer_client_factory.cc
index 1ee87a057b2..0f3e560ef67 100644
--- a/chromium/media/mojo/clients/win/media_foundation_renderer_client_factory.cc
+++ b/chromium/media/mojo/clients/win/media_foundation_renderer_client_factory.cc
@@ -5,13 +5,13 @@
#include "media/mojo/clients/win/media_foundation_renderer_client_factory.h"
#include "media/base/win/dcomp_texture_wrapper.h"
+#include "media/base/win/mf_feature_checks.h"
#include "media/base/win/mf_helpers.h"
#include "media/mojo/clients/mojo_media_log_service.h"
#include "media/mojo/clients/mojo_renderer.h"
#include "media/mojo/clients/mojo_renderer_factory.h"
#include "media/mojo/clients/win/media_foundation_renderer_client.h"
#include "media/mojo/mojom/renderer_extensions.mojom.h"
-#include "media/mojo/mojom/speech_recognition_service.mojom.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
#include "mojo/public/cpp/bindings/pending_remote.h"
#include "mojo/public/cpp/bindings/remote.h"
@@ -21,12 +21,14 @@ namespace media {
MediaFoundationRendererClientFactory::MediaFoundationRendererClientFactory(
MediaLog* media_log,
- GetDCOMPTextureWrapperCB get_dcomp_texture_cb,
+ GetDCOMPTextureWrapperCB get_dcomp_texture_wrapper_cb,
+ ObserveOverlayStateCB observe_overlay_state_cb,
std::unique_ptr<media::MojoRendererFactory> mojo_renderer_factory,
mojo::Remote<media::mojom::MediaFoundationRendererNotifier>
media_foundation_renderer_notifier)
: media_log_(media_log),
- get_dcomp_texture_cb_(std::move(get_dcomp_texture_cb)),
+ get_dcomp_texture_wrapper_cb_(std::move(get_dcomp_texture_wrapper_cb)),
+ observe_overlay_state_cb_(std::move(observe_overlay_state_cb)),
mojo_renderer_factory_(std::move(mojo_renderer_factory)),
media_foundation_renderer_notifier_(
std::move(media_foundation_renderer_notifier)) {
@@ -74,8 +76,8 @@ MediaFoundationRendererClientFactory::CreateRenderer(
auto client_extension_receiver =
client_extension_remote.InitWithNewPipeAndPassReceiver();
- auto dcomp_texture = get_dcomp_texture_cb_.Run();
- DCHECK(dcomp_texture);
+ auto dcomp_texture_wrapper = get_dcomp_texture_wrapper_cb_.Run();
+ DCHECK(dcomp_texture_wrapper);
std::unique_ptr<media::MojoRenderer> mojo_renderer =
mojo_renderer_factory_->CreateMediaFoundationRenderer(
@@ -87,14 +89,19 @@ MediaFoundationRendererClientFactory::CreateRenderer(
// Notify the browser that a Media Foundation Renderer has been created. Live
// Caption supports muted media so this is run regardless of whether the media
// is audible.
- media_foundation_renderer_notifier_->MediaFoundationRendererCreated();
+ mojo::PendingRemote<media::mojom::MediaFoundationRendererObserver>
+ media_foundation_renderer_observer_remote;
+ media_foundation_renderer_notifier_->MediaFoundationRendererCreated(
+ media_foundation_renderer_observer_remote
+ .InitWithNewPipeAndPassReceiver());
// mojo_renderer's ownership is passed to MediaFoundationRendererClient.
return std::make_unique<MediaFoundationRendererClient>(
media_task_runner, media_log_->Clone(), std::move(mojo_renderer),
std::move(renderer_extension_remote),
- std::move(client_extension_receiver), std::move(dcomp_texture),
- video_renderer_sink);
+ std::move(client_extension_receiver), std::move(dcomp_texture_wrapper),
+ observe_overlay_state_cb_, video_renderer_sink,
+ std::move(media_foundation_renderer_observer_remote));
}
media::MediaResource::Type
@@ -102,4 +109,4 @@ MediaFoundationRendererClientFactory::GetRequiredMediaResourceType() {
return media::MediaResource::Type::STREAM;
}
-} // namespace media \ No newline at end of file
+} // namespace media
diff --git a/chromium/media/mojo/clients/win/media_foundation_renderer_client_factory.h b/chromium/media/mojo/clients/win/media_foundation_renderer_client_factory.h
index 4c6d5897341..f37be85c59c 100644
--- a/chromium/media/mojo/clients/win/media_foundation_renderer_client_factory.h
+++ b/chromium/media/mojo/clients/win/media_foundation_renderer_client_factory.h
@@ -10,8 +10,9 @@
#include "base/task/single_thread_task_runner.h"
#include "media/base/renderer_factory.h"
#include "media/base/win/dcomp_texture_wrapper.h"
+#include "media/base/win/overlay_state_observer_subscription.h"
#include "media/mojo/clients/mojo_renderer_factory.h"
-#include "media/mojo/mojom/speech_recognition_service.mojom.h"
+#include "media/mojo/mojom/renderer_extensions.mojom.h"
#include "mojo/public/cpp/bindings/remote.h"
namespace media {
@@ -27,7 +28,8 @@ class MediaFoundationRendererClientFactory : public media::RendererFactory {
MediaFoundationRendererClientFactory(
MediaLog* media_log,
- GetDCOMPTextureWrapperCB get_dcomp_texture_cb,
+ GetDCOMPTextureWrapperCB get_dcomp_texture_wrapper_cb,
+ ObserveOverlayStateCB observe_overlay_state_cb,
std::unique_ptr<media::MojoRendererFactory> mojo_renderer_factory,
mojo::Remote<media::mojom::MediaFoundationRendererNotifier>
media_foundation_renderer_notifier);
@@ -49,7 +51,8 @@ class MediaFoundationRendererClientFactory : public media::RendererFactory {
// WebMediaPlayerImpl with the correct declaration order.
raw_ptr<MediaLog> media_log_ = nullptr;
- GetDCOMPTextureWrapperCB get_dcomp_texture_cb_;
+ GetDCOMPTextureWrapperCB get_dcomp_texture_wrapper_cb_;
+ ObserveOverlayStateCB observe_overlay_state_cb_;
std::unique_ptr<media::MojoRendererFactory> mojo_renderer_factory_;
mojo::Remote<media::mojom::MediaFoundationRendererNotifier>
media_foundation_renderer_notifier_;
diff --git a/chromium/media/mojo/common/audio_data_s16_converter.cc b/chromium/media/mojo/common/audio_data_s16_converter.cc
index add32e1e055..7d1a07b284f 100644
--- a/chromium/media/mojo/common/audio_data_s16_converter.cc
+++ b/chromium/media/mojo/common/audio_data_s16_converter.cc
@@ -10,6 +10,7 @@
#include "media/base/audio_bus.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/channel_mixer.h"
+#include "media/mojo/mojom/audio_data.mojom.h"
#include "media/mojo/mojom/media_types.mojom.h"
namespace media {
@@ -117,4 +118,4 @@ void AudioDataS16Converter::ResetChannelMixerIfNeeded(
}
}
-} // namespace media \ No newline at end of file
+} // namespace media
diff --git a/chromium/media/mojo/mojom/BUILD.gn b/chromium/media/mojo/mojom/BUILD.gn
index 0ff87ecca0f..d36f580cbeb 100644
--- a/chromium/media/mojo/mojom/BUILD.gn
+++ b/chromium/media/mojo/mojom/BUILD.gn
@@ -73,6 +73,7 @@ mojom("mojom") {
}
public_deps = [
+ ":audio_data",
":encryption_pattern",
"//gpu/ipc/common:interfaces",
"//media/learning/mojo/public/mojom",
@@ -96,6 +97,10 @@ mojom("mojom") {
public_deps += [ "//sandbox/mac/mojom" ]
}
+ if (!is_android) {
+ public_deps += [ ":speech_recognition" ]
+ }
+
enabled_features = []
# Help select ServiceSandbox for media_service.mojom.
@@ -260,15 +265,6 @@ mojom("mojom") {
{
types = [
{
- mojom = "media.mojom.CopyMode"
- cpp = "::media::VideoFrameMetadata::CopyMode"
- },
- ]
- traits_headers = [ "media_types_enum_mojom_traits.h" ]
- },
- {
- types = [
- {
mojom = "media.mojom.VideoRotation"
cpp = "::media::VideoRotation"
},
@@ -665,36 +661,6 @@ mojom("mojom") {
},
]
- if (!is_android) {
- cpp_typemaps += [
- {
- types = [
- {
- mojom = "media.mojom.HypothesisParts"
- cpp = "::media::HypothesisParts"
- },
- {
- mojom = "media.mojom.TimingInformation"
- cpp = "::media::TimingInformation"
- },
- {
- mojom = "media.mojom.SpeechRecognitionResult"
- cpp = "::media::SpeechRecognitionResult"
- },
- ]
- traits_headers = [
- "speech_recognition_result_mojom_traits.h",
- "speech_recognition_result.h",
- ]
- traits_sources = [
- "speech_recognition_result.cc",
- "speech_recognition_result_mojom_traits.cc",
- ]
- traits_public_deps = [ "//base" ]
- },
- ]
- }
-
if (is_win) {
cpp_typemaps += [
{
@@ -721,8 +687,8 @@ mojom("mojom") {
{
types = [
{
- mojom = "media.mojom.RenderingMode"
- cpp = "::media::RenderingMode"
+ mojom = "media.mojom.MediaFoundationRenderingMode"
+ cpp = "::media::MediaFoundationRenderingMode"
},
]
traits_headers = [ "media_foundation_rendering_mode_mojom_traits.h" ]
@@ -739,6 +705,11 @@ mojom("mojom") {
export_header_blink = "third_party/blink/public/platform/web_common.h"
}
+mojom("audio_data") {
+ generate_java = true
+ sources = [ "audio_data.mojom" ]
+}
+
mojom("encryption_pattern") {
generate_java = true
sources = [ "encryption_pattern.mojom" ]
@@ -760,6 +731,44 @@ mojom("encryption_pattern") {
]
}
+# Speech recognition API used between renderer/browser/service and between
+# LaCrOS/Ash.
+mojom("speech_recognition") {
+ sources = [ "speech_recognition.mojom" ]
+ cpp_typemaps = [
+ {
+ types = [
+ {
+ mojom = "media.mojom.HypothesisParts"
+ cpp = "::media::HypothesisParts"
+ },
+ {
+ mojom = "media.mojom.TimingInformation"
+ cpp = "::media::TimingInformation"
+ },
+ {
+ mojom = "media.mojom.SpeechRecognitionResult"
+ cpp = "::media::SpeechRecognitionResult"
+ },
+ ]
+ traits_headers = [
+ "speech_recognition_result_mojom_traits.h",
+ "speech_recognition_result.h",
+ ]
+ traits_sources = [
+ "speech_recognition_result.cc",
+ "speech_recognition_result_mojom_traits.cc",
+ ]
+ traits_public_deps = [ "//base" ]
+ },
+ ]
+ public_deps = [
+ ":audio_data",
+ "//mojo/public/mojom/base",
+ "//sandbox/policy/mojom",
+ ]
+}
+
source_set("shared_mojom_traits") {
sources = [
"audio_processing_mojom_traits.cc",
@@ -824,4 +833,8 @@ source_set("unit_tests") {
"//testing/gtest",
"//ui/gfx:test_support",
]
+
+ if (!is_android) {
+ deps += [ ":speech_recognition" ]
+ }
}
diff --git a/chromium/media/mojo/mojom/audio_data.mojom b/chromium/media/mojo/mojom/audio_data.mojom
new file mode 100644
index 00000000000..8d48cfc27ed
--- /dev/null
+++ b/chromium/media/mojo/mojom/audio_data.mojom
@@ -0,0 +1,24 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module media.mojom;
+
+// This defines a mojo transport format for an interleaved, signed
+// 16-bit audio buffer.
+// Marked [Stable], enabling its use across the LaCrOS/Ash boundary as part of
+// the speech recognition API.
+[Stable]
+struct AudioDataS16 {
+ // Number of channels.
+ int32 channel_count;
+
+ // Sample rate of the buffer.
+ int32 sample_rate;
+
+ // Number of frames in the buffer.
+ int32 frame_count;
+
+ // Channel data.
+ array<int16> data;
+};
diff --git a/chromium/media/mojo/mojom/media_foundation_rendering_mode_mojom_traits.h b/chromium/media/mojo/mojom/media_foundation_rendering_mode_mojom_traits.h
index 95f9352b0a1..c6d0ce26588 100644
--- a/chromium/media/mojo/mojom/media_foundation_rendering_mode_mojom_traits.h
+++ b/chromium/media/mojo/mojom/media_foundation_rendering_mode_mojom_traits.h
@@ -6,37 +6,38 @@
#define MEDIA_MOJO_MOJOM_MEDIA_FOUNDATION_RENDERING_MODE_MOJOM_TRAITS_H_
#include "media/mojo/mojom/renderer_extensions.mojom-shared.h"
-#include "media/renderers/win/media_foundation_renderer_extension.h"
+#include "media/renderers/win/media_foundation_rendering_mode.h"
template <>
-struct mojo::EnumTraits<media::mojom::RenderingMode, media::RenderingMode> {
+struct mojo::EnumTraits<media::mojom::MediaFoundationRenderingMode,
+ media::MediaFoundationRenderingMode> {
public:
- static bool FromMojom(media::mojom::RenderingMode data,
- media::RenderingMode* output) {
+ static bool FromMojom(media::mojom::MediaFoundationRenderingMode data,
+ media::MediaFoundationRenderingMode* output) {
switch (data) {
- case media::mojom::RenderingMode::DirectComposition:
- *output = media::RenderingMode::DirectComposition;
- break;
- case media::mojom::RenderingMode::FrameServer:
- *output = media::RenderingMode::FrameServer;
- break;
+ case media::mojom::MediaFoundationRenderingMode::DirectComposition:
+ *output = media::MediaFoundationRenderingMode::DirectComposition;
+ return true;
+ case media::mojom::MediaFoundationRenderingMode::FrameServer:
+ *output = media::MediaFoundationRenderingMode::FrameServer;
+ return true;
}
NOTREACHED();
return false;
}
- static media::mojom::RenderingMode ToMojom(media::RenderingMode data) {
+ static media::mojom::MediaFoundationRenderingMode ToMojom(
+ media::MediaFoundationRenderingMode data) {
switch (data) {
- case media::RenderingMode::DirectComposition:
- return media::mojom::RenderingMode::DirectComposition;
- break;
- case media::RenderingMode::FrameServer:
- return media::mojom::RenderingMode::FrameServer;
+ case media::MediaFoundationRenderingMode::DirectComposition:
+ return media::mojom::MediaFoundationRenderingMode::DirectComposition;
+ case media::MediaFoundationRenderingMode::FrameServer:
+ return media::mojom::MediaFoundationRenderingMode::FrameServer;
break;
}
NOTREACHED();
- return media::mojom::RenderingMode::DirectComposition;
+ return media::mojom::MediaFoundationRenderingMode::DirectComposition;
}
};
-#endif // MEDIA_MOJO_MOJOM_MEDIA_FOUNDATION_RENDERING_MODE_MOJOM_TRAITS_H_ \ No newline at end of file
+#endif // MEDIA_MOJO_MOJOM_MEDIA_FOUNDATION_RENDERING_MODE_MOJOM_TRAITS_H_
diff --git a/chromium/media/mojo/mojom/media_metrics_provider.mojom b/chromium/media/mojo/mojom/media_metrics_provider.mojom
index 71eb9886575..d388e4a9c82 100644
--- a/chromium/media/mojo/mojom/media_metrics_provider.mojom
+++ b/chromium/media/mojo/mojom/media_metrics_provider.mojom
@@ -47,6 +47,12 @@ interface MediaMetricsProvider {
// the provider is destructed.
OnError(PipelineStatus status);
+ // Called when some portion of the pipeline wants to report a non-fatal error,
+ // such as hardware decode failure that falls back to a successful software
+ // decoded playback session, or a failed hardware renderer path which falls
+ // back to a successful software one.
+ OnFallback(PipelineStatus status);
+
// Setters for various one-time lazily generated metrics or properties.
SetHasPlayed();
SetHaveEnough();
diff --git a/chromium/media/mojo/mojom/media_types.mojom b/chromium/media/mojo/mojom/media_types.mojom
index 34434907c9a..ba8b55be7c3 100644
--- a/chromium/media/mojo/mojom/media_types.mojom
+++ b/chromium/media/mojo/mojom/media_types.mojom
@@ -6,6 +6,7 @@ module media.mojom;
import "gpu/ipc/common/mailbox_holder.mojom";
import "gpu/ipc/common/vulkan_ycbcr_info.mojom";
+import "media/mojo/mojom/audio_data.mojom";
import "media/mojo/mojom/encryption_pattern.mojom";
import "mojo/public/mojom/base/shared_memory.mojom";
import "mojo/public/mojom/base/time.mojom";
@@ -76,12 +77,6 @@ enum VideoRotation {
kVideoRotation270,
};
-// see media/base/video_frame_metadata.h for descriptions.
-enum CopyMode {
- kCopyToNewTexture,
- kCopyMailboxesOnly,
-};
-
// see third_party/blink/public/platform/web_fullscreen_video_status.h for
// descriptions.
[Native]
@@ -279,22 +274,6 @@ struct AudioBuffer {
array<uint8> data;
};
-// This defines a mojo transport format for an interleaved, signed
-// 16-bit audio buffer.
-struct AudioDataS16 {
- // Number of channels.
- int32 channel_count;
-
- // Sample rate of the buffer.
- int32 sample_rate;
-
- // Number of frames in the buffer.
- int32 frame_count;
-
- // Channel data.
- array<int16> data;
-};
-
// See media/base/video_frame_metadata.h for a description of fields.
// TODO(crbug.com/657632): Remove |has_*| values and use nullable types.
struct VideoFrameMetadata {
@@ -311,8 +290,7 @@ struct VideoFrameMetadata {
gfx.mojom.Rect? region_capture_rect;
uint32 crop_version;
- bool has_copy_mode;
- CopyMode copy_mode;
+ bool copy_required;
bool end_of_stream;
@@ -492,6 +470,7 @@ struct StatusData {
array<mojo_base.mojom.Value> frames;
StatusData? cause;
mojo_base.mojom.Value data;
+ uint64 packed_root_cause;
};
struct EncoderStatus {
diff --git a/chromium/media/mojo/mojom/media_types_enum_mojom_traits.h b/chromium/media/mojo/mojom/media_types_enum_mojom_traits.h
index 18924b45d81..21053131c3d 100644
--- a/chromium/media/mojo/mojom/media_types_enum_mojom_traits.h
+++ b/chromium/media/mojo/mojom/media_types_enum_mojom_traits.h
@@ -9,7 +9,6 @@
#include "build/build_config.h"
#include "media/base/renderer_factory_selector.h"
#include "media/base/svc_scalability_mode.h"
-#include "media/base/video_frame_metadata.h"
#include "media/base/video_transformation.h"
#include "media/cdm/cdm_document_service.h"
#include "media/mojo/mojom/media_types.mojom-shared.h"
@@ -271,40 +270,6 @@ struct EnumTraits<media::mojom::VideoRotation, ::media::VideoRotation> {
};
template <>
-struct EnumTraits<media::mojom::CopyMode,
- ::media::VideoFrameMetadata::CopyMode> {
- static media::mojom::CopyMode ToMojom(
- ::media::VideoFrameMetadata::CopyMode input) {
- switch (input) {
- case ::media::VideoFrameMetadata::CopyMode::kCopyToNewTexture:
- return media::mojom::CopyMode::kCopyToNewTexture;
- case ::media::VideoFrameMetadata::CopyMode::kCopyMailboxesOnly:
- return media::mojom::CopyMode::kCopyMailboxesOnly;
- }
-
- NOTREACHED();
- return static_cast<media::mojom::CopyMode>(input);
- }
-
- // Returning false results in deserialization failure and causes the
- // message pipe receiving it to be disconnected.
- static bool FromMojom(media::mojom::CopyMode input,
- media::VideoFrameMetadata::CopyMode* output) {
- switch (input) {
- case media::mojom::CopyMode::kCopyToNewTexture:
- *output = ::media::VideoFrameMetadata::CopyMode::kCopyToNewTexture;
- return true;
- case media::mojom::CopyMode::kCopyMailboxesOnly:
- *output = ::media::VideoFrameMetadata::CopyMode::kCopyMailboxesOnly;
- return true;
- }
-
- NOTREACHED();
- return false;
- }
-};
-
-template <>
struct EnumTraits<media::mojom::RendererType, ::media::RendererType> {
static media::mojom::RendererType ToMojom(::media::RendererType input) {
switch (input) {
diff --git a/chromium/media/mojo/mojom/renderer_extensions.mojom b/chromium/media/mojo/mojom/renderer_extensions.mojom
index 34990cd2d65..293ed504679 100644
--- a/chromium/media/mojo/mojom/renderer_extensions.mojom
+++ b/chromium/media/mojo/mojom/renderer_extensions.mojom
@@ -91,7 +91,7 @@ interface MediaFoundationRendererClientExtension {
// direct composition visual, skipping the Chromium compositor.
// FrameServer is when the media engine will render to a texture and
// that textured is provided to the Chromium compositor.
-enum RenderingMode {
+enum MediaFoundationRenderingMode {
DirectComposition,
FrameServer
};
@@ -133,5 +133,26 @@ interface MediaFoundationRendererExtension {
mojo_base.mojom.TimeTicks deadline_max);
// Notify which rendering mode to be using for future video frames.
- SetRenderingMode(RenderingMode mode);
+ SetMediaFoundationRenderingMode(MediaFoundationRenderingMode mode);
+};
+
+// This interface is used by the browser to determine if there are any renderers
+// actively using the Media Foundation Renderer. The number of Media Foundation
+// Renderers in use is determined by the number of active connections. The
+// remote lives in the renderer process and the receiver lives in the browser
+// process.
+interface MediaFoundationRendererObserver {
+};
+
+// This interface is used to notify the browser that the renderer is using the
+// Media Foundation Renderer which uses MediaFoundation to render audio
+// directly. Live Caption will not work in this case because Chrome is unable
+// to tap into the audio rendering pipeline. The remote lives in the renderer
+// process and the receiver lives in the browser process.
+interface MediaFoundationRendererNotifier {
+ // Notify the browser that a Media Foundation Renderer has been created. The
+ // browser will use this event to notify the user that some features
+ // incompatible with the Media Foundation Renderer may not work.
+ MediaFoundationRendererCreated(
+ pending_receiver<MediaFoundationRendererObserver> observer);
};
diff --git a/chromium/media/mojo/mojom/speech_recognition.mojom b/chromium/media/mojo/mojom/speech_recognition.mojom
new file mode 100644
index 00000000000..39a14db269e
--- /dev/null
+++ b/chromium/media/mojo/mojom/speech_recognition.mojom
@@ -0,0 +1,197 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module media.mojom;
+
+import "media/mojo/mojom/audio_data.mojom";
+import "mojo/public/mojom/base/time.mojom";
+
+// Corresponds to the LangIdEvent.ConfidenceInterval defined in
+// http://google3/speech/soda/public/soda_event.proto.
+[Stable, Extensible]
+enum ConfidenceLevel {
+ [Default] kUnknown,
+ kNotConfident,
+ kConfident,
+ kHighlyConfident,
+};
+
+// The main interface a renderer client uses to interact with a speech
+// recognition service process. In Live Caption, every renderer can own one or
+// more Remote<SpeechRecognitionContext>, with the receiver bound through the
+// BrowserInterfaceBroker. This is a stable interface that is used across the
+// LaCrOS/Ash boundary.
+[Stable]
+interface SpeechRecognitionContext {
+ // Bind the recognizers to the speech recognition service. Returns a flag
+ // indicating whether multichannel audio is supported by the speech
+ // recognition service.
+ BindRecognizer@0(pending_receiver<SpeechRecognitionRecognizer> receiver,
+ pending_remote<SpeechRecognitionRecognizerClient> client,
+ SpeechRecognitionOptions options)
+ => (bool is_multichannel_supported);
+};
+
+// The interface used to pass raw audio from the renderer to the speech
+// recognition service. The remote lives in the renderer process and the
+// receiver lives in the speech recognition process.
+[Stable]
+interface SpeechRecognitionRecognizer {
+ // Initialize the speech recognition instance. The speech recognition client
+ // will return the recognition events containing the transcribed audio back
+ // to the originating media.
+ SendAudioToSpeechRecognitionService@0(AudioDataS16 buffer);
+
+ // Mark audio stream done. This informs the speech recognition client to stop
+ // speech recognition after it finishes processing the audio it has received
+ // already. This will eventually trigger the
+ // SpeechRecognitionRecognizerClient::OnSpeechRecognitionStopped callback.
+ MarkDone@1();
+
+ // Notify the speech recognition recognizer that the language changed. Takes
+ // in the locale string (e.g. "en-US").
+ OnLanguageChanged@2(string language);
+};
+
+// The interface used to return speech recognition events from the speech
+// recognition service to the client that will display the results to the user.
+// The remote lives in the speech recognition process and the receiver lives in
+// the browser process.
+[Stable]
+interface SpeechRecognitionRecognizerClient {
+ // Triggered by speech recognition process on a speech recognition event.
+ // Returns whether the result was received successfully. Speech recognition
+ // will halt if this returns false.
+ OnSpeechRecognitionRecognitionEvent@0(SpeechRecognitionResult result)
+ => (bool success);
+
+ // Called when speech recognition stops.
+ OnSpeechRecognitionStopped@1();
+
+ // Triggered by an error within the speech recognition service.
+ OnSpeechRecognitionError@2();
+
+ // Triggered by speech recognition process on a language identification event.
+ OnLanguageIdentificationEvent@3(LanguageIdentificationEvent event);
+};
+
+// The hypothesis parts that provides timing information for each word in
+// recognized speech.
+[Stable]
+struct HypothesisParts {
+ // A section of the final transcription text. Either an entire word or single
+ // character (depending on the language) with adjacent punctuation. There will
+ // usually only be one value here. If formatting is enabled in the speech
+ // recognition, then the raw text will be included as the second element.
+ array<string> text;
+
+ // Time offset from this event's |audio_start_time| defined below. We enforce
+ // the following invariant: 0 <= hypothesis_part_offset < |audio_end_time -
+ // audio_start_time|.
+ mojo_base.mojom.TimeDelta hypothesis_part_offset;
+};
+
+// The timing information for the transcript.
+[Stable]
+struct TimingInformation {
+ // Start time in audio time from the start of the SODA session.
+ // This time measures the amount of audio input into SODA.
+ mojo_base.mojom.TimeDelta audio_start_time;
+
+ // Elapsed processed audio from first frame after preamble.
+ mojo_base.mojom.TimeDelta audio_end_time;
+
+ // The timing information for each word/letter in the transcription.
+ // HypothesisPartsInResult was introduced in min version 1 in
+ // chromeos/services/machine_learning/public/mojom/soda.mojom. Therefore, it
+ // must be optional. Hypothesis parts may be a non-empty optional containing a
+ // zero-length vector if no words were spoken during the event's time span.
+ array<HypothesisParts> ? hypothesis_parts;
+};
+
+// A speech recognition result created by the speech service and passed to the
+// browser.
+[Stable]
+struct SpeechRecognitionResult {
+ string transcription;
+
+ // A flag indicating whether the result is final. If true, the result is
+ // locked in and the next result returned will not overlap with the previous
+ // final result.
+ bool is_final;
+
+ // Timing information for the current transcription. |timing_information| is
+ // expected to be valid if:
+ // 1. speech recognition is provided by |CrosSodaClient| and
+ // 2. |is_final| is true.
+ TimingInformation? timing_information;
+};
+
+// A language identification event created by the speech recognition service
+// and passed to the browser and renderer.
+[Stable]
+struct LanguageIdentificationEvent {
+ // The locale of the language with the highest confidence.
+ string language;
+
+ // The confidence interval.
+ ConfidenceLevel confidence_level;
+};
+
+// The interface used to notify the speech recognition client of events
+// triggered by the browser. The remote lives in the browser process and the
+// receiver lives in the renderer process.
+[Stable]
+interface SpeechRecognitionBrowserObserver {
+ // Notify the speech recognition client when speech recognition availability
+ // changes.
+ SpeechRecognitionAvailabilityChanged@0(bool is_speech_recognition_available);
+
+ // Notify the speech recognition client when the speech recognition language
+ // changes.
+ SpeechRecognitionLanguageChanged@1(string language);
+};
+
+// This interface between the speech recognition client and the browser.
+// The remote lives in the renderer process and the receiver lives in the
+// browser process.
+[Stable]
+interface SpeechRecognitionClientBrowserInterface {
+ // Bind the speech recognition availability observer.
+ BindSpeechRecognitionBrowserObserver@0(
+ pending_remote<SpeechRecognitionBrowserObserver> observer);
+};
+
+// Corresponds to ExtendedSodaConfigMsg.RecognitionMode in
+// chrome/services/speech/soda/proto/soda_api.proto and
+// SodaRecognitionMode in
+// chromeos/services/machine_learning/public/mojom/soda.mojom.
+[Stable, Extensible]
+enum SpeechRecognitionMode {
+ [Default] kUnknown,
+ // Intended for voice input for keyboard usage.
+ kIme,
+ // Intended to caption a stream of audio.
+ kCaption,
+};
+
+// Options for speech recognition.
+// TODO(crbug.com/1165437): Add option to include timing metrics in the result.
+[Stable]
+struct SpeechRecognitionOptions {
+ // What kind of recognition to use.
+ // In the case of web fallback (not for launch, used for development only),
+ // this option will be ignored.
+ SpeechRecognitionMode recognition_mode;
+
+ // Whether to enable formatting and punctuation in the recognition results.
+ bool enable_formatting;
+
+ // The BCP-47 localized language code to use (e.g. "en-US").
+ // TODO(crbug.com/1161569): Language needs to be required when multiple
+ // languages are supported by SODA, so that each SpeechRecognitionRecognizer
+ // can use its own language. Right now Language is only used by Projector
+ // and Dictation via OnDeviceSpeechRecognizer in Chrome OS.
+ string? language;
+};
diff --git a/chromium/media/mojo/mojom/speech_recognition_result_mojom_traits.h b/chromium/media/mojo/mojom/speech_recognition_result_mojom_traits.h
index c3eb1509e02..addceb988da 100644
--- a/chromium/media/mojo/mojom/speech_recognition_result_mojom_traits.h
+++ b/chromium/media/mojo/mojom/speech_recognition_result_mojom_traits.h
@@ -9,8 +9,8 @@
#include <vector>
#include "base/time/time.h"
+#include "media/mojo/mojom/speech_recognition.mojom.h"
#include "media/mojo/mojom/speech_recognition_result.h"
-#include "media/mojo/mojom/speech_recognition_service.mojom.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
namespace mojo {
diff --git a/chromium/media/mojo/mojom/speech_recognition_service.mojom b/chromium/media/mojo/mojom/speech_recognition_service.mojom
index b359fab0b29..60daf9b600b 100644
--- a/chromium/media/mojo/mojom/speech_recognition_service.mojom
+++ b/chromium/media/mojo/mojom/speech_recognition_service.mojom
@@ -4,44 +4,29 @@
module media.mojom;
+import "media/mojo/mojom/audio_data.mojom";
import "media/mojo/mojom/audio_parameters.mojom";
import "media/mojo/mojom/audio_stream_factory.mojom";
import "media/mojo/mojom/media_types.mojom";
+import "media/mojo/mojom/speech_recognition.mojom";
import "mojo/public/mojom/base/file_path.mojom";
import "mojo/public/mojom/base/time.mojom";
import "sandbox/policy/mojom/sandbox.mojom";
-// Corresponds to the LangIdEvent.ConfidenceInterval defined in
-// http://google3/speech/soda/public/soda_event.proto.
-enum ConfidenceLevel {
- kUnknown,
- kNotConfident,
- kConfident,
- kHighlyConfident,
-};
-
-// The main interface a client uses to interact with a speech recognition
-// service process. In Live Caption, every renderer can own one or more
-// Remote<SpeechRecognitionContext>, with the receiver bound through the
-// BrowserInterfaceBroker. In Chrome OS features like Dictation and Projector,
-// every OnDeviceSpeechRecognizer can own a Remote<SpeechRecognitionContext>.
-interface SpeechRecognitionContext {
- // Bind the recognizers to the speech recognition service. Returns a flag
- // indicating whether multichannel audio is supported by the speech
- // recognition service.
- BindRecognizer(pending_receiver<SpeechRecognitionRecognizer> receiver,
- pending_remote<SpeechRecognitionRecognizerClient> client,
- SpeechRecognitionOptions options)
- => (bool is_multichannel_supported);
-
+// Like a SpeechRecognitionContext, except it binds AudioSourceFetcher speech
+// recognizer objects, rather than SpeechRecognitionRecognizer objects. In
+// Chrome OS features like Dictation and Projector, every
+// OnDeviceSpeechRecognizer can own a
+// Remote<AudioSourceSpeechRecognitionContext>.
+interface AudioSourceSpeechRecognitionContext {
// Prepares microphone audio to be captured from within the
// SpeechRecognitionService process, with results passed back to the
// SpeechRecognitionRecognizerClient.
BindAudioSourceFetcher(
- pending_receiver<AudioSourceFetcher> fetcher_receiver,
- pending_remote<SpeechRecognitionRecognizerClient> client,
- SpeechRecognitionOptions options)
- => (bool is_multichannel_supported);
+ pending_receiver<AudioSourceFetcher> fetcher_receiver,
+ pending_remote<SpeechRecognitionRecognizerClient> client,
+ SpeechRecognitionOptions options)
+ => (bool is_multichannel_supported);
};
// The main interface to a speech secognition service process.
@@ -49,13 +34,19 @@ interface SpeechRecognitionContext {
// acquired during process launch.
[ServiceSandbox=sandbox.mojom.Sandbox.kSpeechRecognition]
interface SpeechRecognitionService {
- // Bind the context to a new instance of the speech recognition.
- BindContext(pending_receiver<SpeechRecognitionContext> context);
+ // Binds a new SpeechRecognitionContext hosted in the service process.
+ BindSpeechRecognitionContext(
+ pending_receiver<SpeechRecognitionContext> context);
+
+ // Binds a new AudioSourceSpeechRecognitionContext hosted in the service
+ // process.
+ BindAudioSourceSpeechRecognitionContext(
+ pending_receiver<AudioSourceSpeechRecognitionContext> context);
// Sets the file path to the Speech On-Device API (SODA) binary and
// the config file for the language pack.
SetSodaPath(mojo_base.mojom.FilePath binary_path,
- mojo_base.mojom.FilePath config_path);
+ mojo_base.mojom.FilePath config_path);
};
// The interface used to start and stop fetching audio from the microphone
@@ -70,168 +61,3 @@ interface AudioSourceFetcher {
// Stops audio fetching.
Stop();
};
-
-// The interface used to pass raw audio from the renderer to the speech
-// recognition service. The remote lives in the renderer process and the
-// receiver lives in the speech recognition process.
-interface SpeechRecognitionRecognizer {
- // Initialize the speech recognition instance. The speech recognition client
- // will return the recognition events containing the transcribed audio back
- // to the originating media.
- SendAudioToSpeechRecognitionService(AudioDataS16 buffer);
-
- // Mark audio stream done. This informs the speech recognition client to stop
- // speech recognition after it finishes processing the audio it has received
- // already. This will eventually trigger the
- // SpeechRecognitionRecognizerClient::OnSpeechRecognitionStopped callback.
- MarkDone();
-
- // Notify the speech recognition recognizer that the language changed. Takes
- // in the locale string (e.g. "en-US").
- OnLanguageChanged(string language);
-};
-
-// The interface used to return speech recognition events from the speech
-// recognition service to the client that will display the results to the user.
-// The remote lives in the speech recognition process and the receiver lives in
-// the browser process.
-interface SpeechRecognitionRecognizerClient {
- // Triggered by speech recognition process on a speech recognition event.
- // Returns whether the result was received successfully. Speech recognition
- // will halt if this returns false.
- OnSpeechRecognitionRecognitionEvent(SpeechRecognitionResult result)
- => (bool success);
-
- // Called when speech recognition stops.
- OnSpeechRecognitionStopped();
-
- // Triggered by an error within the speech recognition service.
- OnSpeechRecognitionError();
-
- // Triggered by speech recognition process on a language identification event.
- OnLanguageIdentificationEvent(LanguageIdentificationEvent event);
-};
-
-// The hypothesis parts that provides timing information for each word in
-// recognized speech.
-struct HypothesisParts {
- // A section of the final transcription text. Either an entire word or single
- // character (depending on the language) with adjacent punctuation. There will
- // usually only be one value here. If formatting is enabled in the speech
- // recognition, then the raw text will be included as the second element.
- array<string> text;
-
- // Time offset from this event's |audio_start_time| defined below. We enforce
- // the following invariant: 0 <= hypothesis_part_offset < |audio_end_time -
- // audio_start_time|.
- mojo_base.mojom.TimeDelta hypothesis_part_offset;
-};
-
-// The timing information for the transcript.
-struct TimingInformation {
- // Start time in audio time from the start of the SODA session.
- // This time measures the amount of audio input into SODA.
- mojo_base.mojom.TimeDelta audio_start_time;
-
- // Elapsed processed audio from first frame after preamble.
- mojo_base.mojom.TimeDelta audio_end_time;
-
- // The timing information for each word/letter in the transription.
- // HypothesisPartsInResult was introduced in min version 1 in
- // chromeos/services/machine_learning/public/mojom/soda.mojom. Therefore, it
- // must be optional. Hypothesis parts maybe non-empty optional containing a
- // zero length vector if no words were spoken during the event's time span.
- array<HypothesisParts> ? hypothesis_parts;
-};
-
-// A speech recognition result created by the speech service and passed to the
-// browser.
-struct SpeechRecognitionResult {
- string transcription;
-
- // A flag indicating whether the result is final. If true, the result is
- // locked in and the next result returned will not overlap with the previous
- // final result.
- bool is_final;
-
- // Timing information for the current transcription. |timing_information| is
- // expected to be valid if:
- // 1. speech recognition is provided by |CrosSodaClient| and
- // 2. |is_final| is true.
- TimingInformation? timing_information;
-};
-
-// A language identification event created by the speech recognition service
-// and passed to the browser and renderer.
-struct LanguageIdentificationEvent {
- // The locale of the language with the highest confidence.
- string language;
-
- // The confidence interval.
- ConfidenceLevel confidence_level;
-};
-
-// The interface used to notify the speech recognition client of events
-// triggered by the browser. The remote lives in the browser process and the
-// receiver lives in the renderer process.
-interface SpeechRecognitionBrowserObserver {
- // Notify the speech recognition client when speech recognition availability
- // changes.
- SpeechRecognitionAvailabilityChanged(bool is_speech_recognition_available);
-
- // Notify the speech recognition client when the speech recognition language
- // changes.
- SpeechRecognitionLanguageChanged(string language);
-};
-
-// This interface between the speech recognition client and the browser.
-// The remote lives in the renderer process and the receiver lives in the
-// browser process.
-interface SpeechRecognitionClientBrowserInterface {
- // Bind the speech recognition availability observer.
- BindSpeechRecognitionBrowserObserver(
- pending_remote<SpeechRecognitionBrowserObserver> observer);
-};
-
-// Corresponds to ExtendedSodaConfigMsg.RecognitionMode in
-// chrome/services/speech/soda/proto/soda_api.proto and
-// SodaRecognitionMode in
-// chromeos/services/machine_learning/public/mojom/soda.mojom.
-enum SpeechRecognitionMode {
- kUnknown,
- // Intended for voice input for keyboard usage.
- kIme,
- // Intended to caption a stream of audio.
- kCaption,
-};
-
-// Options for speech recognition.
-// TODO(crbug.com/1165437): Add option to include timing metrics in the result.
-struct SpeechRecognitionOptions {
- // What kind of recognition to use.
- // In the case of web fallback (not for launch, used for development only),
- // this option will be ignored.
- SpeechRecognitionMode recognition_mode;
-
- // Whether to enable formatting and punctuation in the recognition results.
- bool enable_formatting;
-
- // The BCP-47 localized language code to use (e.g. "en-US").
- // TODO(crbug.com/1161569): Language needs to be required when multiple
- // languages are supported by SODA, so that each SpeechRecognitionRecognizer
- // can use its own language. Right now Language is only used by Projector
- // and Dictation via OnDeviceSpeechRecognizer in Chrome OS.
- string? language;
-};
-
-// This interface is used to notify the browser that the renderer is using the
-// Media Foundation Renderer which uses MediaFoundation to render audio
-// directly. Live Caption will not work in this case because Chrome is unable
-// to tap into the audio rendering pipeline. The remote lives in the renderer
-// process and the receiver lives in the browser process.
-interface MediaFoundationRendererNotifier {
- // Notify the browser than a Media Foundation Renderer has been created. The
- // browser will use this event to notify the user that some features
- // incompatible with the Media Foundation Renderer may not work.
- MediaFoundationRendererCreated();
-};
diff --git a/chromium/media/mojo/mojom/stable/stable_video_decoder_types.mojom b/chromium/media/mojo/mojom/stable/stable_video_decoder_types.mojom
index c7587d6bcd5..810c4fc6a8c 100644
--- a/chromium/media/mojo/mojom/stable/stable_video_decoder_types.mojom
+++ b/chromium/media/mojo/mojom/stable/stable_video_decoder_types.mojom
@@ -37,6 +37,8 @@ enum VideoCodecProfile {
// Keep the values in this enum unique, as they imply format (h.264 vs. VP8,
// for example), and keep the values for a particular format grouped
// together for clarity.
+ // Next version: 2
+ // Next value: 37
[Default] kVideoCodecProfileUnknown = -1,
kH264ProfileMin = 0,
kH264ProfileBaseline = kH264ProfileMin,
@@ -79,6 +81,16 @@ enum VideoCodecProfile {
kAV1ProfileMax = kAV1ProfilePro,
kDolbyVisionProfile8 = 27,
kDolbyVisionProfile9 = 28,
+ [MinVersion=1] kHEVCProfileExtMin = 29,
+ [MinVersion=1] kHEVCProfileRext = kHEVCProfileExtMin,
+ [MinVersion=1] kHEVCProfileHighThroughput = 30,
+ [MinVersion=1] kHEVCProfileMultiviewMain = 31,
+ [MinVersion=1] kHEVCProfileScalableMain = 32,
+ [MinVersion=1] kHEVCProfile3dMain = 33,
+ [MinVersion=1] kHEVCProfileScreenExtended = 34,
+ [MinVersion=1] kHEVCProfileScalableRext = 35,
+ [MinVersion=1] kHEVCProfileHighThroughputScreenExtended = 36,
+ [MinVersion=1] kHEVCProfileExtMax = kHEVCProfileHighThroughputScreenExtended,
};
// Based on |media.mojom.SubsampleEntry|.
@@ -278,6 +290,7 @@ enum ColorSpaceTransferID {
kCustom,
kCustomHDR,
kPiecewiseHDR,
+ [MinVersion=1] kScrgbLinear80Nits,
};
// Maps to |gfx.mojom.ColorSpaceMatrixID|.
@@ -412,7 +425,7 @@ struct GpuMemoryBufferVideoFrameData {
};
// Based on |media.mojom.VideoFrameMetadata| but does not depend on
-// |media.mojom.VideoTransformation| or |media.mojom.CopyMode|.
+// |media.mojom.VideoTransformation|.
// Next min field ID: 6
[Stable]
struct VideoFrameMetadata {
diff --git a/chromium/media/mojo/mojom/stable/stable_video_decoder_types_mojom_traits.h b/chromium/media/mojo/mojom/stable/stable_video_decoder_types_mojom_traits.h
index ae8c60f1df6..ab317b69722 100644
--- a/chromium/media/mojo/mojom/stable/stable_video_decoder_types_mojom_traits.h
+++ b/chromium/media/mojo/mojom/stable/stable_video_decoder_types_mojom_traits.h
@@ -173,6 +173,8 @@ struct EnumTraits<media::stable::mojom::ColorSpaceTransferID,
return media::stable::mojom::ColorSpaceTransferID::kCustomHDR;
case gfx::ColorSpace::TransferID::PIECEWISE_HDR:
return media::stable::mojom::ColorSpaceTransferID::kPiecewiseHDR;
+ case gfx::ColorSpace::TransferID::SCRGB_LINEAR_80_NITS:
+ return media::stable::mojom::ColorSpaceTransferID::kScrgbLinear80Nits;
}
NOTREACHED();
@@ -259,6 +261,9 @@ struct EnumTraits<media::stable::mojom::ColorSpaceTransferID,
case media::stable::mojom::ColorSpaceTransferID::kPiecewiseHDR:
*output = gfx::ColorSpace::TransferID::PIECEWISE_HDR;
return true;
+ case media::stable::mojom::ColorSpaceTransferID::kScrgbLinear80Nits:
+ *output = gfx::ColorSpace::TransferID::SCRGB_LINEAR_80_NITS;
+ return true;
}
NOTREACHED();
@@ -824,6 +829,29 @@ struct EnumTraits<media::stable::mojom::VideoCodecProfile,
case ::media::VideoCodecProfile::HEVCPROFILE_MAIN_STILL_PICTURE:
return media::stable::mojom::VideoCodecProfile::
kHEVCProfileMainStillPicture;
+ case ::media::VideoCodecProfile::HEVCPROFILE_REXT:
+ return media::stable::mojom::VideoCodecProfile::kHEVCProfileRext;
+ case ::media::VideoCodecProfile::HEVCPROFILE_HIGH_THROUGHPUT:
+ return media::stable::mojom::VideoCodecProfile::
+ kHEVCProfileHighThroughput;
+ case ::media::VideoCodecProfile::HEVCPROFILE_MULTIVIEW_MAIN:
+ return media::stable::mojom::VideoCodecProfile::
+ kHEVCProfileMultiviewMain;
+ case ::media::VideoCodecProfile::HEVCPROFILE_SCALABLE_MAIN:
+ return media::stable::mojom::VideoCodecProfile::
+ kHEVCProfileScalableMain;
+ case ::media::VideoCodecProfile::HEVCPROFILE_3D_MAIN:
+ return media::stable::mojom::VideoCodecProfile::kHEVCProfile3dMain;
+ case ::media::VideoCodecProfile::HEVCPROFILE_SCREEN_EXTENDED:
+ return media::stable::mojom::VideoCodecProfile::
+ kHEVCProfileScreenExtended;
+ case ::media::VideoCodecProfile::HEVCPROFILE_SCALABLE_REXT:
+ return media::stable::mojom::VideoCodecProfile::
+ kHEVCProfileScalableRext;
+ case ::media::VideoCodecProfile::
+ HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED:
+ return media::stable::mojom::VideoCodecProfile::
+ kHEVCProfileHighThroughputScreenExtended;
case ::media::VideoCodecProfile::DOLBYVISION_PROFILE0:
return media::stable::mojom::VideoCodecProfile::kDolbyVisionProfile0;
case ::media::VideoCodecProfile::DOLBYVISION_PROFILE4:
@@ -919,6 +947,32 @@ struct EnumTraits<media::stable::mojom::VideoCodecProfile,
kHEVCProfileMainStillPicture:
*output = ::media::VideoCodecProfile::HEVCPROFILE_MAIN_STILL_PICTURE;
return true;
+ case media::stable::mojom::VideoCodecProfile::kHEVCProfileRext:
+ *output = ::media::VideoCodecProfile::HEVCPROFILE_REXT;
+ return true;
+ case media::stable::mojom::VideoCodecProfile::kHEVCProfileHighThroughput:
+ *output = ::media::VideoCodecProfile::HEVCPROFILE_HIGH_THROUGHPUT;
+ return true;
+ case media::stable::mojom::VideoCodecProfile::kHEVCProfileMultiviewMain:
+ *output = ::media::VideoCodecProfile::HEVCPROFILE_MULTIVIEW_MAIN;
+ return true;
+ case media::stable::mojom::VideoCodecProfile::kHEVCProfileScalableMain:
+ *output = ::media::VideoCodecProfile::HEVCPROFILE_SCALABLE_MAIN;
+ return true;
+ case media::stable::mojom::VideoCodecProfile::kHEVCProfile3dMain:
+ *output = ::media::VideoCodecProfile::HEVCPROFILE_3D_MAIN;
+ return true;
+ case media::stable::mojom::VideoCodecProfile::kHEVCProfileScreenExtended:
+ *output = ::media::VideoCodecProfile::HEVCPROFILE_SCREEN_EXTENDED;
+ return true;
+ case media::stable::mojom::VideoCodecProfile::kHEVCProfileScalableRext:
+ *output = ::media::VideoCodecProfile::HEVCPROFILE_SCALABLE_REXT;
+ return true;
+ case media::stable::mojom::VideoCodecProfile::
+ kHEVCProfileHighThroughputScreenExtended:
+ *output = ::media::VideoCodecProfile::
+ HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED;
+ return true;
case media::stable::mojom::VideoCodecProfile::kDolbyVisionProfile0:
*output = ::media::VideoCodecProfile::DOLBYVISION_PROFILE0;
return true;
diff --git a/chromium/media/mojo/mojom/status_mojom_traits.cc b/chromium/media/mojo/mojom/status_mojom_traits.cc
index 7c88970eb18..3f9433476aa 100644
--- a/chromium/media/mojo/mojom/status_mojom_traits.cc
+++ b/chromium/media/mojo/mojom/status_mojom_traits.cc
@@ -15,6 +15,7 @@ bool StructTraits<
media::internal::StatusData>::Read(media::mojom::StatusDataDataView data,
media::internal::StatusData* output) {
output->code = data.code();
+ output->packed_root_cause = data.packed_root_cause();
if (!data.ReadGroup(&output->group))
return false;
diff --git a/chromium/media/mojo/mojom/status_mojom_traits.h b/chromium/media/mojo/mojom/status_mojom_traits.h
index 7180389f0b0..4e21f872de7 100644
--- a/chromium/media/mojo/mojom/status_mojom_traits.h
+++ b/chromium/media/mojo/mojom/status_mojom_traits.h
@@ -47,6 +47,11 @@ struct StructTraits<media::mojom::StatusDataDataView,
return input.data;
}
+ static media::UKMPackedType packed_root_cause(
+ const media::internal::StatusData& input) {
+ return input.packed_root_cause;
+ }
+
static bool Read(media::mojom::StatusDataDataView data,
media::internal::StatusData* output);
};
diff --git a/chromium/media/mojo/mojom/video_encode_accelerator.mojom b/chromium/media/mojo/mojom/video_encode_accelerator.mojom
index deadae6d8d0..9e720fdf94a 100644
--- a/chromium/media/mojo/mojom/video_encode_accelerator.mojom
+++ b/chromium/media/mojo/mojom/video_encode_accelerator.mojom
@@ -6,6 +6,7 @@ module media.mojom;
import "media/mojo/mojom/media_log.mojom";
import "media/mojo/mojom/media_types.mojom";
+import "mojo/public/mojom/base/shared_memory.mojom";
import "mojo/public/mojom/base/time.mojom";
import "ui/gfx/geometry/mojom/geometry.mojom";
import "media/mojo/mojom/video_encoder_info.mojom";
@@ -35,12 +36,19 @@ import "media/mojo/mojom/video_encoder_info.mojom";
// of these messages are acknowledged.
+enum VideoEncodeAcceleratorSupportedRateControlMode {
+ kNoMode,
+ kConstantMode,
+ kVariableMode
+};
+
struct VideoEncodeAcceleratorSupportedProfile {
VideoCodecProfile profile;
gfx.mojom.Size min_resolution;
gfx.mojom.Size max_resolution;
uint32 max_framerate_numerator;
uint32 max_framerate_denominator;
+ array<VideoEncodeAcceleratorSupportedRateControlMode> rate_control_modes;
array<SVCScalabilityMode> scalability_modes;
};
@@ -159,7 +167,7 @@ interface VideoEncodeAccelerator {
Encode(VideoFrame frame, bool force_keyframe) => ();
UseOutputBitstreamBuffer(int32 bitstream_buffer_id,
- handle<shared_buffer> buffer);
+ mojo_base.mojom.UnsafeSharedMemoryRegion region);
// Request a change to the encoding parameters. This is only a request,
// fulfilled on a best-effort basis. This method is intended for use with
diff --git a/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits.cc b/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits.cc
index 85145696dcf..0a098ea4eda 100644
--- a/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits.cc
+++ b/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits.cc
@@ -7,12 +7,56 @@
#include "base/notreached.h"
#include "media/base/video_bitrate_allocation.h"
#include "media/mojo/mojom/video_encode_accelerator.mojom.h"
+#include "media/video/video_encode_accelerator.h"
#include "mojo/public/cpp/base/time_mojom_traits.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
namespace mojo {
// static
+media::mojom::VideoEncodeAcceleratorSupportedRateControlMode
+EnumTraits<media::mojom::VideoEncodeAcceleratorSupportedRateControlMode,
+ media::VideoEncodeAccelerator::SupportedRateControlMode>::
+ ToMojom(media::VideoEncodeAccelerator::SupportedRateControlMode mode) {
+ switch (mode) {
+ case media::VideoEncodeAccelerator::kNoMode:
+ return media::mojom::VideoEncodeAcceleratorSupportedRateControlMode::
+ kNoMode;
+ case media::VideoEncodeAccelerator::kConstantMode:
+ return media::mojom::VideoEncodeAcceleratorSupportedRateControlMode::
+ kConstantMode;
+ case media::VideoEncodeAccelerator::kVariableMode:
+ return media::mojom::VideoEncodeAcceleratorSupportedRateControlMode::
+ kVariableMode;
+ }
+ NOTREACHED();
+ return media::mojom::VideoEncodeAcceleratorSupportedRateControlMode::
+ kConstantMode;
+}
+
+// static
+bool EnumTraits<media::mojom::VideoEncodeAcceleratorSupportedRateControlMode,
+ media::VideoEncodeAccelerator::SupportedRateControlMode>::
+ FromMojom(media::mojom::VideoEncodeAcceleratorSupportedRateControlMode mode,
+ media::VideoEncodeAccelerator::SupportedRateControlMode* out) {
+ switch (mode) {
+ case media::mojom::VideoEncodeAcceleratorSupportedRateControlMode::kNoMode:
+ *out = media::VideoEncodeAccelerator::kNoMode;
+ return true;
+ case media::mojom::VideoEncodeAcceleratorSupportedRateControlMode::
+ kConstantMode:
+ *out = media::VideoEncodeAccelerator::kConstantMode;
+ return true;
+ case media::mojom::VideoEncodeAcceleratorSupportedRateControlMode::
+ kVariableMode:
+ *out = media::VideoEncodeAccelerator::kVariableMode;
+ return true;
+ }
+ NOTREACHED();
+ return false;
+}
+
+// static
bool StructTraits<media::mojom::VideoEncodeAcceleratorSupportedProfileDataView,
media::VideoEncodeAccelerator::SupportedProfile>::
Read(media::mojom::VideoEncodeAcceleratorSupportedProfileDataView data,
@@ -25,6 +69,13 @@ bool StructTraits<media::mojom::VideoEncodeAcceleratorSupportedProfileDataView,
out->max_framerate_numerator = data.max_framerate_numerator();
out->max_framerate_denominator = data.max_framerate_denominator();
+ out->rate_control_modes = media::VideoEncodeAccelerator::kNoMode;
+ std::vector<media::VideoEncodeAccelerator::SupportedRateControlMode> modes;
+ if (!data.ReadRateControlModes(&modes))
+ return false;
+ for (const auto& mode : modes) {
+ out->rate_control_modes |= mode;
+ }
std::vector<media::SVCScalabilityMode> scalability_modes;
if (!data.ReadScalabilityModes(&scalability_modes))
@@ -151,16 +202,16 @@ bool UnionTraits<media::mojom::CodecMetadataDataView,
Read(media::mojom::CodecMetadataDataView data,
media::BitstreamBufferMetadata* out) {
switch (data.tag()) {
- case media::mojom::CodecMetadataDataView::Tag::H264: {
+ case media::mojom::CodecMetadataDataView::Tag::kH264: {
return data.ReadH264(&out->h264);
}
- case media::mojom::CodecMetadataDataView::Tag::VP8: {
+ case media::mojom::CodecMetadataDataView::Tag::kVp8: {
return data.ReadVp8(&out->vp8);
}
- case media::mojom::CodecMetadataDataView::Tag::VP9: {
+ case media::mojom::CodecMetadataDataView::Tag::kVp9: {
return data.ReadVp9(&out->vp9);
}
- case media::mojom::CodecMetadataDataView::Tag::AV1: {
+ case media::mojom::CodecMetadataDataView::Tag::kAv1: {
return data.ReadAv1(&out->av1);
}
}
diff --git a/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits.h b/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits.h
index d9d201bda20..2a8d33c81ef 100644
--- a/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits.h
+++ b/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits.h
@@ -18,6 +18,17 @@
namespace mojo {
template <>
+struct EnumTraits<media::mojom::VideoEncodeAcceleratorSupportedRateControlMode,
+ media::VideoEncodeAccelerator::SupportedRateControlMode> {
+ static media::mojom::VideoEncodeAcceleratorSupportedRateControlMode ToMojom(
+ media::VideoEncodeAccelerator::SupportedRateControlMode mode);
+
+ static bool FromMojom(
+ media::mojom::VideoEncodeAcceleratorSupportedRateControlMode input,
+ media::VideoEncodeAccelerator::SupportedRateControlMode* out);
+};
+
+template <>
struct StructTraits<
media::mojom::VideoEncodeAcceleratorSupportedProfileDataView,
media::VideoEncodeAccelerator::SupportedProfile> {
@@ -46,6 +57,22 @@ struct StructTraits<
return profile.max_framerate_denominator;
}
+ static std::vector<media::VideoEncodeAccelerator::SupportedRateControlMode>
+ rate_control_modes(
+ const media::VideoEncodeAccelerator::SupportedProfile& profile) {
+ std::vector<media::VideoEncodeAccelerator::SupportedRateControlMode> modes;
+ if (profile.rate_control_modes &
+ media::VideoEncodeAccelerator::kConstantMode) {
+ modes.push_back(media::VideoEncodeAccelerator::kConstantMode);
+ }
+ if (profile.rate_control_modes &
+ media::VideoEncodeAccelerator::kVariableMode) {
+ modes.push_back(media::VideoEncodeAccelerator::kVariableMode);
+ }
+
+ return modes;
+ }
+
static const std::vector<media::SVCScalabilityMode>& scalability_modes(
const media::VideoEncodeAccelerator::SupportedProfile& profile) {
return profile.scalability_modes;
@@ -84,8 +111,7 @@ class StructTraits<media::mojom::VideoBitrateAllocationDataView,
static absl::optional<uint32_t> variable_bitrate_peak(
const media::VideoBitrateAllocation& bitrate_allocation) {
- if (bitrate_allocation.GetSumBitrate().mode() ==
- media::Bitrate::Mode::kConstant) {
+ if (bitrate_allocation.GetMode() == media::Bitrate::Mode::kConstant) {
return absl::nullopt;
} else {
return absl::optional<uint32_t>(
@@ -103,16 +129,16 @@ struct UnionTraits<media::mojom::CodecMetadataDataView,
static media::mojom::CodecMetadataDataView::Tag GetTag(
const media::BitstreamBufferMetadata& metadata) {
if (metadata.h264) {
- return media::mojom::CodecMetadataDataView::Tag::H264;
+ return media::mojom::CodecMetadataDataView::Tag::kH264;
} else if (metadata.vp8) {
- return media::mojom::CodecMetadataDataView::Tag::VP8;
+ return media::mojom::CodecMetadataDataView::Tag::kVp8;
} else if (metadata.vp9) {
- return media::mojom::CodecMetadataDataView::Tag::VP9;
+ return media::mojom::CodecMetadataDataView::Tag::kVp9;
} else if (metadata.av1) {
- return media::mojom::CodecMetadataDataView::Tag::AV1;
+ return media::mojom::CodecMetadataDataView::Tag::kAv1;
}
NOTREACHED();
- return media::mojom::CodecMetadataDataView::Tag::VP8;
+ return media::mojom::CodecMetadataDataView::Tag::kVp8;
}
static bool IsNull(const media::BitstreamBufferMetadata& metadata) {
diff --git a/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits_unittest.cc b/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits_unittest.cc
index 85715d7dc6a..ec2add2e610 100644
--- a/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits_unittest.cc
+++ b/chromium/media/mojo/mojom/video_encode_accelerator_mojom_traits_unittest.cc
@@ -21,6 +21,8 @@ TEST(VideoEncodeAcceleratorSupportedProfile, RoundTrip) {
input.max_resolution = gfx::Size(4096, 4096);
input.max_framerate_numerator = 30;
input.max_framerate_denominator = 1;
+ input.rate_control_modes = VideoEncodeAccelerator::kConstantMode |
+ VideoEncodeAccelerator::kVariableMode;
input.scalability_modes.push_back(::media::SVCScalabilityMode::kL1T3);
input.scalability_modes.push_back(::media::SVCScalabilityMode::kL3T3Key);
diff --git a/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits.cc b/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits.cc
index 0187c8c395b..c8417a4b86d 100644
--- a/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits.cc
+++ b/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits.cc
@@ -37,6 +37,7 @@ bool StructTraits<media::mojom::VideoFrameMetadataDataView,
// bool.
output->allow_overlay = input.allow_overlay();
+ output->copy_required = input.copy_required();
output->end_of_stream = input.end_of_stream();
output->texture_owner = input.texture_owner();
output->wants_promotion_hint = input.wants_promotion_hint();
@@ -60,13 +61,6 @@ bool StructTraits<media::mojom::VideoFrameMetadataDataView,
READ_AND_ASSIGN_OPT(media::VideoTransformation, transformation,
Transformation);
- if (input.has_copy_mode()) {
- media::VideoFrameMetadata::CopyMode copy_mode;
- if (!input.ReadCopyMode(&copy_mode))
- return false;
- output->copy_mode = copy_mode;
- }
-
READ_AND_ASSIGN_OPT(base::UnguessableToken, overlay_plane_id, OverlayPlaneId);
READ_AND_ASSIGN_OPT(gfx::Rect, capture_update_rect, CaptureUpdateRect);
@@ -87,4 +81,4 @@ bool StructTraits<media::mojom::VideoFrameMetadataDataView,
return true;
}
-} // namespace mojo
+} // namespace mojo \ No newline at end of file
diff --git a/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits.h b/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits.h
index 065785533c7..1ce7fbd2d7a 100644
--- a/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits.h
+++ b/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits.h
@@ -35,6 +35,10 @@ struct StructTraits<media::mojom::VideoFrameMetadataDataView,
return input.allow_overlay;
}
+ static bool copy_required(const media::VideoFrameMetadata& input) {
+ return input.copy_required;
+ }
+
static bool end_of_stream(const media::VideoFrameMetadata& input) {
return input.end_of_stream;
}
@@ -82,11 +86,6 @@ struct StructTraits<media::mojom::VideoFrameMetadataDataView,
GENERATE_OPT_SERIALIZATION(int, capture_counter, 0)
- GENERATE_OPT_SERIALIZATION(
- media::VideoFrameMetadata::CopyMode,
- copy_mode,
- media::VideoFrameMetadata::CopyMode::kCopyToNewTexture)
-
static const absl::optional<media::VideoTransformation>& transformation(
const media::VideoFrameMetadata& input) {
return input.transformation;
diff --git a/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits_unittest.cc b/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits_unittest.cc
index 7c646430be8..cf7090d8f33 100644
--- a/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits_unittest.cc
+++ b/chromium/media/mojo/mojom/video_frame_metadata_mojom_traits_unittest.cc
@@ -66,7 +66,7 @@ TEST_F(VideoFrameMetadataStructTraitsTest, EmptyMetadata) {
EXPECT_FALSE(metadata_out.capture_update_rect.has_value());
EXPECT_FALSE(metadata_out.transformation.has_value());
EXPECT_FALSE(metadata_out.allow_overlay);
- EXPECT_FALSE(metadata_out.copy_mode.has_value());
+ EXPECT_FALSE(metadata_out.copy_required);
EXPECT_FALSE(metadata_out.end_of_stream);
EXPECT_FALSE(metadata_out.texture_owner);
EXPECT_FALSE(metadata_out.wants_promotion_hint);
@@ -109,11 +109,9 @@ TEST_F(VideoFrameMetadataStructTraitsTest, ValidMetadata) {
// VideoTransformation
metadata_in.transformation = VideoTransformation(VIDEO_ROTATION_90, true);
- // VideoFrameMetadata::CopyMode
- metadata_in.copy_mode = VideoFrameMetadata::CopyMode::kCopyToNewTexture;
-
// bools
metadata_in.allow_overlay = true;
+ metadata_in.copy_required = true;
metadata_in.end_of_stream = true;
metadata_in.texture_owner = true;
metadata_in.wants_promotion_hint = true;
@@ -158,7 +156,7 @@ TEST_F(VideoFrameMetadataStructTraitsTest, ValidMetadata) {
EXPECT_EQ(metadata_in.capture_update_rect, metadata_out.capture_update_rect);
EXPECT_EQ(metadata_in.transformation, metadata_out.transformation);
EXPECT_EQ(metadata_in.allow_overlay, metadata_out.allow_overlay);
- EXPECT_EQ(metadata_in.copy_mode, metadata_out.copy_mode);
+ EXPECT_EQ(metadata_in.copy_required, metadata_out.copy_required);
EXPECT_EQ(metadata_in.end_of_stream, metadata_out.end_of_stream);
EXPECT_EQ(metadata_in.texture_owner, metadata_out.texture_owner);
EXPECT_EQ(metadata_in.wants_promotion_hint,
diff --git a/chromium/media/mojo/services/BUILD.gn b/chromium/media/mojo/services/BUILD.gn
index 9a50c755032..4adecd5badf 100644
--- a/chromium/media/mojo/services/BUILD.gn
+++ b/chromium/media/mojo/services/BUILD.gn
@@ -4,7 +4,9 @@
import("//build/config/chromecast_build.gni")
import("//build/config/chromeos/ui_mode.gni")
+import("//media/gpu/args.gni")
import("//media/media_options.gni")
+import("//mojo/public/tools/fuzzers/mojolpm.gni")
import("//testing/test.gni")
component("services") {
@@ -177,16 +179,23 @@ component("services") {
"media_foundation_service_broker.cc",
"media_foundation_service_broker.h",
]
- deps += [ "//media/base/win:media_foundation_util" ]
+ deps += [
+ "//media/base/win:media_foundation_util",
+ "//ui/gfx/mojom:dxgi_info",
+ ]
}
- if (is_chromeos_ash || is_linux) {
+ if ((is_chromeos_ash || is_linux) && (use_vaapi || use_v4l2_codec)) {
sources += [
"stable_video_decoder_factory_service.cc",
"stable_video_decoder_factory_service.h",
+ "stable_video_decoder_service.cc",
+ "stable_video_decoder_service.h",
]
public_deps += [ "//media/mojo/mojom/stable:stable_video_decoder" ]
+
+ deps += [ "//media/gpu/chromeos" ]
}
}
@@ -249,4 +258,32 @@ source_set("unit_tests") {
"//components/chromeos_camera:mjpeg_decode_accelerator_service_unittest",
]
}
+
+ if ((is_chromeos_ash || is_linux) && (use_vaapi || use_v4l2_codec)) {
+ sources += [ "stable_video_decoder_service_unittest.cc" ]
+ }
+}
+
+mojolpm_fuzzer_test("webrtc_video_perf_mojolpm_fuzzer") {
+ sources = [ "webrtc_video_perf_mojolpm_fuzzer.cc" ]
+
+ proto_source = "webrtc_video_perf_mojolpm_fuzzer.proto"
+ testcase_proto_kind = "media.fuzzing.webrtc_video_perf.proto.Testcase"
+
+ seed_corpus_sources = [
+ "webrtc_video_perf_fuzzer_seed_corpus/update_record_and_get_perf.textproto",
+ ]
+
+ proto_deps = [ "//third_party/blink/public/mojom:mojom_platform_mojolpm" ]
+
+ deps = [
+ ":services",
+ "//base",
+ "//base/test:test_support",
+ "//components/leveldb_proto:test_support",
+ "//content/public/browser",
+ "//media",
+ "//media/capabilities:webrtc_video_stats_proto",
+ "//third_party/libprotobuf-mutator",
+ ]
}
diff --git a/chromium/media/mojo/services/DEPS b/chromium/media/mojo/services/DEPS
index 418719a3c7f..a3c15af7986 100644
--- a/chromium/media/mojo/services/DEPS
+++ b/chromium/media/mojo/services/DEPS
@@ -5,5 +5,9 @@ specific_include_rules = {
"media_manifest\.cc": [
"+chromecast/common/mojom",
],
+ "webrtc_video_perf_mojolpm_fuzzer\.cc": [
+ "+third_party/libprotobuf-mutator/src/src",
+ "+components/leveldb_proto/testing/fake_db.h",
+ ]
}
diff --git a/chromium/media/mojo/services/gpu_mojo_media_client.cc b/chromium/media/mojo/services/gpu_mojo_media_client.cc
index 4e3ba6591ea..3102883a26e 100644
--- a/chromium/media/mojo/services/gpu_mojo_media_client.cc
+++ b/chromium/media/mojo/services/gpu_mojo_media_client.cc
@@ -9,25 +9,20 @@
#include "base/bind.h"
#include "base/feature_list.h"
#include "base/memory/ptr_util.h"
-#include "base/task/thread_pool.h"
#include "build/build_config.h"
#include "build/chromeos_buildflags.h"
#include "gpu/ipc/service/gpu_channel.h"
-#include "media/audio/audio_features.h"
#include "media/base/audio_decoder.h"
+#include "media/base/audio_encoder.h"
#include "media/base/cdm_factory.h"
#include "media/base/media_switches.h"
#include "media/base/media_util.h"
-#include "media/base/offloading_audio_encoder.h"
#include "media/base/video_decoder.h"
#include "media/gpu/gpu_video_accelerator_util.h"
#include "media/gpu/gpu_video_decode_accelerator_factory.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
#include "media/gpu/ipc/service/media_gpu_channel_manager.h"
#include "media/gpu/ipc/service/vda_video_decoder.h"
-#if BUILDFLAG(IS_WIN)
-#include "media/gpu/windows/mf_audio_encoder.h"
-#endif // IS_WIN
#include "media/mojo/mojom/video_decoder.mojom.h"
#include "media/video/video_decode_accelerator.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
@@ -91,7 +86,8 @@ VideoDecoderTraits::VideoDecoderTraits(
gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
GetConfigCacheCB get_cached_configs_cb,
GetCommandBufferStubCB get_command_buffer_stub_cb,
- AndroidOverlayMojoFactoryCB android_overlay_factory_cb)
+ AndroidOverlayMojoFactoryCB android_overlay_factory_cb,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder> oop_video_decoder)
: task_runner(std::move(task_runner)),
gpu_task_runner(std::move(gpu_task_runner)),
media_log(std::move(media_log)),
@@ -104,7 +100,8 @@ VideoDecoderTraits::VideoDecoderTraits(
gpu_memory_buffer_factory(gpu_memory_buffer_factory),
get_cached_configs_cb(std::move(get_cached_configs_cb)),
get_command_buffer_stub_cb(std::move(get_command_buffer_stub_cb)),
- android_overlay_factory_cb(std::move(android_overlay_factory_cb)) {}
+ android_overlay_factory_cb(std::move(android_overlay_factory_cb)),
+ oop_video_decoder(std::move(oop_video_decoder)) {}
GpuMojoMediaClient::GpuMojoMediaClient(
const gpu::GpuPreferences& gpu_preferences,
@@ -128,23 +125,14 @@ GpuMojoMediaClient::~GpuMojoMediaClient() = default;
std::unique_ptr<AudioDecoder> GpuMojoMediaClient::CreateAudioDecoder(
scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
- return CreatePlatformAudioDecoder(task_runner);
+ return CreatePlatformAudioDecoder(std::move(task_runner));
}
std::unique_ptr<AudioEncoder> GpuMojoMediaClient::CreateAudioEncoder(
scoped_refptr<base::SequencedTaskRunner> task_runner) {
-#if BUILDFLAG(IS_WIN)
- if (!base::FeatureList::IsEnabled(features::kPlatformAudioEncoder))
- return nullptr;
-
- auto encoding_runner = base::ThreadPool::CreateCOMSTATaskRunner({});
- auto mf_encoder = std::make_unique<MFAudioEncoder>(encoding_runner);
- return std::make_unique<OffloadingAudioEncoder>(std::move(mf_encoder),
- std::move(encoding_runner),
- std::move(task_runner));
-#else
- return nullptr;
-#endif // IS_WIN
+ return base::FeatureList::IsEnabled(kPlatformAudioEncoder)
+ ? CreatePlatformAudioEncoder(std::move(task_runner))
+ : nullptr;
}
VideoDecoderType GpuMojoMediaClient::GetDecoderImplementationType() {
@@ -177,7 +165,8 @@ std::unique_ptr<VideoDecoder> GpuMojoMediaClient::CreateVideoDecoder(
MediaLog* media_log,
mojom::CommandBufferIdPtr command_buffer_id,
RequestOverlayInfoCB request_overlay_info_cb,
- const gfx::ColorSpace& target_color_space) {
+ const gfx::ColorSpace& target_color_space,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder> oop_video_decoder) {
// All implementations require a command buffer.
if (!command_buffer_id)
return nullptr;
@@ -195,7 +184,7 @@ std::unique_ptr<VideoDecoder> GpuMojoMediaClient::CreateVideoDecoder(
base::BindRepeating(
&GetCommandBufferStub, gpu_task_runner_, media_gpu_channel_manager_,
command_buffer_id->channel_token, command_buffer_id->route_id),
- android_overlay_factory_cb_);
+ android_overlay_factory_cb_, std::move(oop_video_decoder));
return CreatePlatformVideoDecoder(traits);
}
diff --git a/chromium/media/mojo/services/gpu_mojo_media_client.h b/chromium/media/mojo/services/gpu_mojo_media_client.h
index 9eb5d806f56..4fcad1f5ae4 100644
--- a/chromium/media/mojo/services/gpu_mojo_media_client.h
+++ b/chromium/media/mojo/services/gpu_mojo_media_client.h
@@ -58,6 +58,8 @@ struct VideoDecoderTraits {
AndroidOverlayMojoFactoryCB android_overlay_factory_cb;
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder> oop_video_decoder;
+
VideoDecoderTraits(
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
@@ -71,15 +73,15 @@ struct VideoDecoderTraits {
gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
GetConfigCacheCB get_cached_configs_cb,
GetCommandBufferStubCB get_command_buffer_stub_cb,
- AndroidOverlayMojoFactoryCB android_overlay_factory_cb);
+ AndroidOverlayMojoFactoryCB android_overlay_factory_cb,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder> oop_video_decoder);
~VideoDecoderTraits();
};
// Find platform specific implementations of these in
// gpu_mojo_media_client_{platform}.cc
// Creates a platform-specific media::VideoDecoder.
-std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(
- const VideoDecoderTraits&);
+std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(VideoDecoderTraits&);
// Queries the platform-specific VideoDecoder implementation for its
// supported profiles. Many platforms fall back to use the VDAVideoDecoder
@@ -97,6 +99,11 @@ GetPlatformSupportedVideoDecoderConfigs(
std::unique_ptr<AudioDecoder> CreatePlatformAudioDecoder(
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
+// Creates a platform-specific media::AudioEncoder. Most platforms don't do
+// anything here.
+std::unique_ptr<AudioEncoder> CreatePlatformAudioEncoder(
+ scoped_refptr<base::SequencedTaskRunner> task_runner);
+
// Creates a CDM factory, right now only used on android and chromeos.
std::unique_ptr<CdmFactory> CreatePlatformCdmFactory(
mojom::FrameInterfaceFactory* frame_interfaces);
@@ -141,7 +148,9 @@ class MEDIA_MOJO_EXPORT GpuMojoMediaClient final : public MojoMediaClient {
MediaLog* media_log,
mojom::CommandBufferIdPtr command_buffer_id,
RequestOverlayInfoCB request_overlay_info_cb,
- const gfx::ColorSpace& target_color_space) final;
+ const gfx::ColorSpace& target_color_space,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder> oop_video_decoder)
+ final;
std::unique_ptr<CdmFactory> CreateCdmFactory(
mojom::FrameInterfaceFactory* interface_provider) final;
diff --git a/chromium/media/mojo/services/gpu_mojo_media_client_android.cc b/chromium/media/mojo/services/gpu_mojo_media_client_android.cc
index 74916600476..3760358e136 100644
--- a/chromium/media/mojo/services/gpu_mojo_media_client_android.cc
+++ b/chromium/media/mojo/services/gpu_mojo_media_client_android.cc
@@ -30,7 +30,7 @@ using media::android_mojo_util::CreateProvisionFetcher;
namespace media {
std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(
- const VideoDecoderTraits& traits) {
+ VideoDecoderTraits& traits) {
scoped_refptr<gpu::RefCountedLock> ref_counted_lock;
// When this feature is enabled, CodecImage, CodecBufferWaitCorrdinator and
@@ -94,6 +94,11 @@ std::unique_ptr<AudioDecoder> CreatePlatformAudioDecoder(
return std::make_unique<MediaCodecAudioDecoder>(std::move(task_runner));
}
+std::unique_ptr<AudioEncoder> CreatePlatformAudioEncoder(
+ scoped_refptr<base::SequencedTaskRunner> task_runner) {
+ return nullptr;
+}
+
std::unique_ptr<CdmFactory> CreatePlatformCdmFactory(
mojom::FrameInterfaceFactory* frame_interfaces) {
return std::make_unique<AndroidCdmFactory>(
diff --git a/chromium/media/mojo/services/gpu_mojo_media_client_cros.cc b/chromium/media/mojo/services/gpu_mojo_media_client_cros.cc
index f34b6108978..ad0ea2328a2 100644
--- a/chromium/media/mojo/services/gpu_mojo_media_client_cros.cc
+++ b/chromium/media/mojo/services/gpu_mojo_media_client_cros.cc
@@ -7,6 +7,7 @@
#include "base/metrics/histogram_functions.h"
#include "build/build_config.h"
#include "media/base/audio_decoder.h"
+#include "media/base/audio_encoder.h"
#include "media/base/media_switches.h"
#include "media/gpu/chromeos/mailbox_video_frame_converter.h"
#include "media/gpu/chromeos/platform_video_frame_pool.h"
@@ -90,6 +91,8 @@ VideoDecoderType GetActualPlatformDecoderImplementation(
// NVIDIA drivers have a broken implementation of most va_* methods,
// ARM & AMD aren't tested yet, and ImgTec/Qualcomm don't have a vaapi
// driver.
+ if (base::FeatureList::IsEnabled(kVaapiIgnoreDriverChecks))
+ return VideoDecoderType::kVaapi;
return VideoDecoderType::kUnknown;
}
}
@@ -103,13 +106,29 @@ VideoDecoderType GetActualPlatformDecoderImplementation(
} // namespace
std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(
- const VideoDecoderTraits& traits) {
+ VideoDecoderTraits& traits) {
+ // TODO(b/195769334): we'll need to structure this function a bit differently
+ // to account for the following:
+ //
+ // 1) Eventually, we may turn off USE_VAAPI and USE_V4L2_CODEC on LaCrOS if we
+ // delegate all video acceleration to ash-chrome. In those cases,
+ // GetPreferredCrosDecoderImplementation() won't be able to determine the
+ // video API in LaCrOS.
+ //
+ // 2) For out-of-process video decoding, we don't need a |frame_pool| because
+ // the buffers will be allocated and managed out-of-process.
+ //
+ // 3) It's very possible that not all platforms will be able to migrate to the
+ // direct VD soon enough. In those cases, the GPU process will still need
+ // to use a VideoDecoderPipeline backed by an OOPVideoDecoder, and the
+ // video decoder process will need to run the legacy VDA code and return
+ // GpuMemoryBuffers.
+
switch (GetActualPlatformDecoderImplementation(traits.gpu_preferences,
traits.gpu_info)) {
case VideoDecoderType::kVaapi:
case VideoDecoderType::kV4L2: {
- auto frame_pool = std::make_unique<PlatformVideoFramePool>(
- traits.gpu_memory_buffer_factory);
+ auto frame_pool = std::make_unique<PlatformVideoFramePool>();
auto frame_converter = MailboxVideoFrameConverter::Create(
base::BindRepeating(&PlatformVideoFramePool::UnwrapFrame,
base::Unretained(frame_pool.get())),
@@ -117,7 +136,7 @@ std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(
traits.gpu_preferences.enable_unsafe_webgpu);
return VideoDecoderPipeline::Create(
traits.task_runner, std::move(frame_pool), std::move(frame_converter),
- traits.media_log->Clone());
+ traits.media_log->Clone(), std::move(traits.oop_video_decoder));
}
case VideoDecoderType::kVda: {
return VdaVideoDecoder::Create(
@@ -173,6 +192,11 @@ std::unique_ptr<AudioDecoder> CreatePlatformAudioDecoder(
return nullptr;
}
+std::unique_ptr<AudioEncoder> CreatePlatformAudioEncoder(
+ scoped_refptr<base::SequencedTaskRunner> task_runner) {
+ return nullptr;
+}
+
#if !BUILDFLAG(IS_CHROMEOS)
class CdmFactory {};
#endif // !BUILDFLAG(IS_CHROMEOS)
diff --git a/chromium/media/mojo/services/gpu_mojo_media_client_mac.cc b/chromium/media/mojo/services/gpu_mojo_media_client_mac.cc
index 6a19197a505..4bf6fd884b8 100644
--- a/chromium/media/mojo/services/gpu_mojo_media_client_mac.cc
+++ b/chromium/media/mojo/services/gpu_mojo_media_client_mac.cc
@@ -2,15 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/task/thread_pool.h"
#include "media/base/audio_decoder.h"
+#include "media/base/offloading_audio_encoder.h"
#include "media/filters/mac/audio_toolbox_audio_decoder.h"
+#include "media/filters/mac/audio_toolbox_audio_encoder.h"
#include "media/gpu/ipc/service/vda_video_decoder.h"
#include "media/mojo/services/gpu_mojo_media_client.h"
namespace media {
std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(
- const VideoDecoderTraits& traits) {
+ VideoDecoderTraits& traits) {
return VdaVideoDecoder::Create(
traits.task_runner, traits.gpu_task_runner, traits.media_log->Clone(),
*traits.target_color_space, traits.gpu_preferences,
@@ -31,6 +34,14 @@ std::unique_ptr<AudioDecoder> CreatePlatformAudioDecoder(
return std::make_unique<AudioToolboxAudioDecoder>();
}
+std::unique_ptr<AudioEncoder> CreatePlatformAudioEncoder(
+ scoped_refptr<base::SequencedTaskRunner> task_runner) {
+ auto encoding_runner = base::ThreadPool::CreateSequencedTaskRunner({});
+ auto encoder = std::make_unique<AudioToolboxAudioEncoder>();
+ return std::make_unique<OffloadingAudioEncoder>(
+ std::move(encoder), std::move(encoding_runner), std::move(task_runner));
+}
+
// This class doesn't exist on mac, so we need a stub for unique_ptr.
class CdmFactory {};
diff --git a/chromium/media/mojo/services/gpu_mojo_media_client_stubs.cc b/chromium/media/mojo/services/gpu_mojo_media_client_stubs.cc
index 858919d632c..b96de020c83 100644
--- a/chromium/media/mojo/services/gpu_mojo_media_client_stubs.cc
+++ b/chromium/media/mojo/services/gpu_mojo_media_client_stubs.cc
@@ -3,13 +3,14 @@
// found in the LICENSE file.
#include "media/base/audio_decoder.h"
+#include "media/base/audio_encoder.h"
#include "media/base/video_decoder.h"
#include "media/mojo/services/gpu_mojo_media_client.h"
namespace media {
std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(
- const VideoDecoderTraits& traits) {
+ VideoDecoderTraits& traits) {
return nullptr;
}
@@ -27,6 +28,11 @@ std::unique_ptr<AudioDecoder> CreatePlatformAudioDecoder(
return nullptr;
}
+std::unique_ptr<AudioEncoder> CreatePlatformAudioEncoder(
+ scoped_refptr<base::SequencedTaskRunner> task_runner) {
+ return nullptr;
+}
+
// This class doesn't exist on any of the platforms that use the stubs.
class CdmFactory {};
diff --git a/chromium/media/mojo/services/gpu_mojo_media_client_win.cc b/chromium/media/mojo/services/gpu_mojo_media_client_win.cc
index 601b4b33f54..faf9033fcdd 100644
--- a/chromium/media/mojo/services/gpu_mojo_media_client_win.cc
+++ b/chromium/media/mojo/services/gpu_mojo_media_client_win.cc
@@ -4,11 +4,18 @@
#include "media/mojo/services/gpu_mojo_media_client.h"
+#include "base/task/thread_pool.h"
#include "base/win/windows_version.h"
#include "media/base/audio_decoder.h"
#include "media/base/media_switches.h"
+#include "media/base/offloading_audio_encoder.h"
+#if BUILDFLAG(USE_PROPRIETARY_CODECS) && BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+#include "media/filters/win/media_foundation_audio_decoder.h"
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS) &&
+ // BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
#include "media/gpu/ipc/service/vda_video_decoder.h"
#include "media/gpu/windows/d3d11_video_decoder.h"
+#include "media/gpu/windows/mf_audio_encoder.h"
#include "ui/gl/direct_composition_surface_win.h"
#include "ui/gl/gl_angle_util_win.h"
@@ -30,7 +37,7 @@ bool ShouldUseD3D11VideoDecoder(
} // namespace
std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(
- const VideoDecoderTraits& traits) {
+ VideoDecoderTraits& traits) {
if (!ShouldUseD3D11VideoDecoder(*traits.gpu_workarounds)) {
if (traits.gpu_workarounds->disable_dxva_video_decoder)
return nullptr;
@@ -39,11 +46,25 @@ std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(
*traits.target_color_space, traits.gpu_preferences,
*traits.gpu_workarounds, traits.get_command_buffer_stub_cb);
}
+ // Report that HDR is enabled if any display has HDR enabled.
+ bool hdr_enabled = false;
+ auto dxgi_info = gl::DirectCompositionSurfaceWin::GetDXGIInfo();
+ for (const auto& output_desc : dxgi_info->output_descs)
+ hdr_enabled |= output_desc->hdr_enabled;
return D3D11VideoDecoder::Create(
traits.gpu_task_runner, traits.media_log->Clone(), traits.gpu_preferences,
*traits.gpu_workarounds, traits.get_command_buffer_stub_cb,
GetD3D11DeviceCallback(), traits.get_cached_configs_cb.Run(),
- gl::DirectCompositionSurfaceWin::IsHDRSupported());
+ hdr_enabled);
+}
+
+std::unique_ptr<AudioEncoder> CreatePlatformAudioEncoder(
+ scoped_refptr<base::SequencedTaskRunner> task_runner) {
+ auto encoding_runner = base::ThreadPool::CreateCOMSTATaskRunner({});
+ auto mf_encoder = std::make_unique<MFAudioEncoder>(encoding_runner);
+ return std::make_unique<OffloadingAudioEncoder>(std::move(mf_encoder),
+ std::move(encoding_runner),
+ std::move(task_runner));
}
absl::optional<SupportedVideoDecoderConfigs>
@@ -64,7 +85,12 @@ GetPlatformSupportedVideoDecoderConfigs(
std::unique_ptr<AudioDecoder> CreatePlatformAudioDecoder(
scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+#if BUILDFLAG(USE_PROPRIETARY_CODECS) && BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+ return MediaFoundationAudioDecoder::Create(std::move(task_runner));
+#else
return nullptr;
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS) &&
+ // BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
}
VideoDecoderType GetPlatformDecoderImplementationType(
diff --git a/chromium/media/mojo/services/interface_factory_impl.cc b/chromium/media/mojo/services/interface_factory_impl.cc
index 6de1aa66fb1..30bcba60448 100644
--- a/chromium/media/mojo/services/interface_factory_impl.cc
+++ b/chromium/media/mojo/services/interface_factory_impl.cc
@@ -86,11 +86,9 @@ void InterfaceFactoryImpl::CreateVideoDecoder(
dst_video_decoder) {
DVLOG(2) << __func__;
#if BUILDFLAG(ENABLE_MOJO_VIDEO_DECODER)
- // TODO(pmolinalopez): finish plumbing |dst_video_decoder| through
- // MojoVideoDecoderService so that we can use it both for out-of-process video
- // decoding and LaCrOS video decoding. See https://crrev.com/c/3094628.
video_decoder_receivers_.Add(std::make_unique<MojoVideoDecoderService>(
- mojo_media_client_, &cdm_service_context_),
+ mojo_media_client_, &cdm_service_context_,
+ std::move(dst_video_decoder)),
std::move(receiver));
#endif // BUILDFLAG(ENABLE_MOJO_VIDEO_DECODER)
}
diff --git a/chromium/media/mojo/services/media_foundation_mojo_media_client.cc b/chromium/media/mojo/services/media_foundation_mojo_media_client.cc
index 9cdd6675c0c..37197d1fcf2 100644
--- a/chromium/media/mojo/services/media_foundation_mojo_media_client.cc
+++ b/chromium/media/mojo/services/media_foundation_mojo_media_client.cc
@@ -8,6 +8,10 @@
#include "media/cdm/win/media_foundation_cdm_factory.h"
#include "media/mojo/services/media_foundation_renderer_wrapper.h"
#include "media/mojo/services/mojo_cdm_helper.h"
+#if BUILDFLAG(USE_PROPRIETARY_CODECS) && BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+#include "media/filters/win/media_foundation_audio_decoder.h"
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS) &&
+ // BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
namespace media {
@@ -19,6 +23,17 @@ MediaFoundationMojoMediaClient::~MediaFoundationMojoMediaClient() {
DVLOG_FUNC(1);
}
+std::unique_ptr<AudioDecoder>
+MediaFoundationMojoMediaClient::CreateAudioDecoder(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+#if BUILDFLAG(USE_PROPRIETARY_CODECS) && BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+ return std::make_unique<MediaFoundationAudioDecoder>(task_runner);
+#else
+ return nullptr;
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS) &&
+ // BUILDFLAG(ENABLE_PLATFORM_DTS_AUDIO)
+}
+
std::unique_ptr<Renderer>
MediaFoundationMojoMediaClient::CreateMediaFoundationRenderer(
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
diff --git a/chromium/media/mojo/services/media_foundation_mojo_media_client.h b/chromium/media/mojo/services/media_foundation_mojo_media_client.h
index 9b9a8ad9ad1..294a8e3cae7 100644
--- a/chromium/media/mojo/services/media_foundation_mojo_media_client.h
+++ b/chromium/media/mojo/services/media_foundation_mojo_media_client.h
@@ -26,6 +26,10 @@ class MediaFoundationMojoMediaClient final : public MojoMediaClient {
~MediaFoundationMojoMediaClient() override;
// MojoMediaClient implementation.
+ std::unique_ptr<AudioDecoder> CreateAudioDecoder(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) override;
+
+ // MojoMediaClient implementation.
std::unique_ptr<Renderer> CreateMediaFoundationRenderer(
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
mojom::FrameInterfaceFactory* frame_interfaces,
diff --git a/chromium/media/mojo/services/media_foundation_renderer_wrapper.cc b/chromium/media/mojo/services/media_foundation_renderer_wrapper.cc
index cc86cc0d341..ca915083821 100644
--- a/chromium/media/mojo/services/media_foundation_renderer_wrapper.cc
+++ b/chromium/media/mojo/services/media_foundation_renderer_wrapper.cc
@@ -217,9 +217,8 @@ void MediaFoundationRendererWrapper::RequestNextFrameBetweenTimestamps(
renderer_->RequestNextFrameBetweenTimestamps(deadline_min, deadline_max);
}
-void MediaFoundationRendererWrapper::SetRenderingMode(
- media::RenderingMode mode) {
- // We define the media RenderingMode enum to match the mojom.
- renderer_->SetRenderingMode(mode);
+void MediaFoundationRendererWrapper::SetMediaFoundationRenderingMode(
+ MediaFoundationRenderingMode mode) {
+ renderer_->SetMediaFoundationRenderingMode(mode);
}
} // namespace media
diff --git a/chromium/media/mojo/services/media_foundation_renderer_wrapper.h b/chromium/media/mojo/services/media_foundation_renderer_wrapper.h
index afc11e7ad3b..20d2d469183 100644
--- a/chromium/media/mojo/services/media_foundation_renderer_wrapper.h
+++ b/chromium/media/mojo/services/media_foundation_renderer_wrapper.h
@@ -64,7 +64,8 @@ class MediaFoundationRendererWrapper final
void NotifyFrameReleased(const base::UnguessableToken& frame_token) override;
void RequestNextFrameBetweenTimestamps(base::TimeTicks deadline_min,
base::TimeTicks deadline_max) override;
- void SetRenderingMode(media::RenderingMode mode) override;
+ void SetMediaFoundationRenderingMode(
+ MediaFoundationRenderingMode mode) override;
// mojom::MuteStateObserver implementation.
void OnMuteStateChange(bool muted) override;
diff --git a/chromium/media/mojo/services/media_foundation_service.cc b/chromium/media/mojo/services/media_foundation_service.cc
index 8970a224992..3c47f757eaf 100644
--- a/chromium/media/mojo/services/media_foundation_service.cc
+++ b/chromium/media/mojo/services/media_foundation_service.cc
@@ -9,12 +9,17 @@
#include "base/bind.h"
#include "base/check.h"
+#include "base/files/file_util.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
+#include "base/path_service.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
+#include "base/task/task_traits.h"
+#include "base/task/thread_pool.h"
#include "base/time/time.h"
+#include "base/unguessable_token.h"
#include "media/base/audio_codecs.h"
#include "media/base/content_decryption_module.h"
#include "media/base/encryption_scheme.h"
@@ -22,6 +27,7 @@
#include "media/base/video_codecs.h"
#include "media/cdm/cdm_capability.h"
#include "media/cdm/win/media_foundation_cdm_module.h"
+#include "media/cdm/win/media_foundation_cdm_util.h"
#include "media/media_buildflags.h"
#include "media/mojo/mojom/interface_factory.mojom.h"
#include "media/mojo/mojom/key_system_support.mojom.h"
@@ -69,15 +75,18 @@ constexpr VideoCodec kAllVideoCodecs[] = {
constexpr AudioCodec kAllAudioCodecs[] = {
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
- AudioCodec::kAAC, AudioCodec::kEAC3, AudioCodec::kAC3,
+ AudioCodec::kAAC,
+#if BUILDFLAG(ENABLE_PLATFORM_AC3_EAC3_AUDIO)
+ AudioCodec::kEAC3, AudioCodec::kAC3,
+#endif // BUILDFLAG(ENABLE_PLATFORM_AC3_EAC3_AUDIO)
+#if BUILDFLAG(ENABLE_PLATFORM_MPEG_H_AUDIO)
AudioCodec::kMpegHAudio,
-#endif
- AudioCodec::kVorbis, AudioCodec::kFLAC, AudioCodec::kOpus};
+#endif // BUILDFLAG(ENABLE_PLATFORM_MPEG_H_AUDIO)
+#endif // BUILDFLAG(USE_PROPRIETARY_CODECS)
+ AudioCodec::kVorbis, AudioCodec::kFLAC, AudioCodec::kOpus};
constexpr EncryptionScheme kAllEncryptionSchemes[] = {EncryptionScheme::kCenc,
EncryptionScheme::kCbcs};
-using IsTypeSupportedCB =
- base::RepeatingCallback<bool(const std::string& content_type)>;
bool IsTypeSupportedInternal(
ComPtr<IMFContentDecryptionModuleFactory> cdm_factory,
@@ -209,7 +218,9 @@ std::string GetTypeString(VideoCodec video_codec,
}
base::flat_set<EncryptionScheme> GetSupportedEncryptionSchemes(
- IsTypeSupportedCB callback,
+ ComPtr<IMFContentDecryptionModuleFactory> cdm_factory,
+ const std::string& key_system,
+ bool is_hw_secure,
VideoCodec video_codec,
const std::string& robustness) {
base::flat_set<EncryptionScheme> supported_schemes;
@@ -220,16 +231,68 @@ base::flat_set<EncryptionScheme> GetSupportedEncryptionSchemes(
{kEncryptionIvQueryName, base::NumberToString(GetIvSize(scheme))},
{kRobustnessQueryName, robustness.c_str()}});
- if (callback.Run(type))
+ if (IsTypeSupportedInternal(cdm_factory, key_system, is_hw_secure, type))
supported_schemes.insert(scheme);
}
return supported_schemes;
}
-absl::optional<CdmCapability> GetCdmCapability(IsTypeSupportedCB callback,
- bool is_hw_secure) {
+HRESULT CreateDummyMediaFoundationCdm(
+ ComPtr<IMFContentDecryptionModuleFactory> cdm_factory,
+ const std::string& key_system) {
+ // Set `use_hw_secure_codecs` to indicate this for hardware secure mode,
+ // which typically requires identifier and persistent storage.
+ CdmConfig cdm_config = {key_system, /*allow_distinctive_identifier=*/true,
+ /*allow_persistent_state=*/true,
+ /*use_hw_secure_codecs=*/true};
+
+ // Use a random CDM origin.
+ auto cdm_origin_id = base::UnguessableToken::Create();
+
+ // Use a dummy CDM store path root under the temp dir here. Since this code
+ // runs in the LPAC process, the temp dir will be something like:
+ // C:\Users\<user>\AppData\Local\Packages\cr.sb.cdm<...>\AC\Temp
+ // This folder is specifically for the CDM app container, so there's no need
+ // to set ACL explicitly.
+ base::FilePath temp_dir;
+ base::PathService::Get(base::DIR_TEMP, &temp_dir);
+ const char kDummyCdmStore[] = "DummyMediaFoundationCdmStore";
+ auto dummy_cdm_store_path_root = temp_dir.AppendASCII(kDummyCdmStore);
+
+ // Create the dummy CDM.
+ Microsoft::WRL::ComPtr<IMFContentDecryptionModule> mf_cdm;
+ auto hr = CreateMediaFoundationCdm(cdm_factory, cdm_config, cdm_origin_id,
+ /*cdm_client_token=*/absl::nullopt,
+ dummy_cdm_store_path_root, mf_cdm);
+ DLOG_IF(ERROR, FAILED(hr)) << __func__ << ": Failed for " << key_system;
+ mf_cdm.Reset();
+
+ // Delete the dummy CDM store folder so we don't leave files behind. This may
+ // fail since the CDM and related objects may have the files open longer than
+ // the total delete retry period or before the process terminates. This is
+ // fine since they will be cleaned next time so files will not accumulate.
+ // Ignore the `reply_callback` since nothing can be done with the result.
+ base::ThreadPool::PostTask(
+ FROM_HERE, {base::TaskPriority::BEST_EFFORT, base::MayBlock()},
+ base::GetDeletePathRecursivelyCallback(dummy_cdm_store_path_root));
+
+ return hr;
+}
+
+absl::optional<CdmCapability> GetCdmCapability(
+ ComPtr<IMFContentDecryptionModuleFactory> cdm_factory,
+ const std::string& key_system,
+ bool is_hw_secure) {
DVLOG(2) << __func__ << ", is_hw_secure=" << is_hw_secure;
+ // For hardware secure decryption, even when IsTypeSupportedInternal() says
+ // it's supported, CDM creation could fail immediately. Therefore, create a
+ // dummy CDM instance to detect this case.
+ if (is_hw_secure &&
+ FAILED(CreateDummyMediaFoundationCdm(cdm_factory, key_system))) {
+ return absl::nullopt;
+ }
+
// TODO(hmchen): make this generic for more key systems.
const std::string robustness =
is_hw_secure ? kHwSecureRobustness : kSwSecureRobustness;
@@ -241,7 +304,7 @@ absl::optional<CdmCapability> GetCdmCapability(IsTypeSupportedCB callback,
auto type = GetTypeString(video_codec, /*audio_codec=*/absl::nullopt,
{{kRobustnessQueryName, robustness}});
- if (callback.Run(type)) {
+ if (IsTypeSupportedInternal(cdm_factory, key_system, is_hw_secure, type)) {
// IsTypeSupported() does not support querying profiling, so specify {}
// to indicate all relevant profiles should be considered supported.
const std::vector<media::VideoCodecProfile> kAllProfiles = {};
@@ -264,7 +327,7 @@ absl::optional<CdmCapability> GetCdmCapability(IsTypeSupportedCB callback,
auto type = GetTypeString(video_codec, audio_codec,
{{kRobustnessQueryName, robustness}});
- if (callback.Run(type))
+ if (IsTypeSupportedInternal(cdm_factory, key_system, is_hw_secure, type))
capability.audio_codecs.push_back(audio_codec);
}
@@ -278,8 +341,8 @@ absl::optional<CdmCapability> GetCdmCapability(IsTypeSupportedCB callback,
base::flat_set<EncryptionScheme> intersection(
std::begin(kAllEncryptionSchemes), std::end(kAllEncryptionSchemes));
for (auto codec : capability.video_codecs) {
- const auto schemes =
- GetSupportedEncryptionSchemes(callback, codec.first, robustness);
+ const auto schemes = GetSupportedEncryptionSchemes(
+ cdm_factory, key_system, is_hw_secure, codec.first, robustness);
intersection = base::STLSetIntersection<base::flat_set<EncryptionScheme>>(
intersection, schemes);
}
@@ -294,6 +357,7 @@ absl::optional<CdmCapability> GetCdmCapability(IsTypeSupportedCB callback,
// IsTypeSupported does not support session type yet. So just use temporary
// session which is required by EME spec.
capability.session_types.insert(CdmSessionType::kTemporary);
+
return capability;
}
@@ -328,14 +392,10 @@ void MediaFoundationService::IsKeySystemSupported(
return;
}
- absl::optional<CdmCapability> sw_secure_capability = GetCdmCapability(
- base::BindRepeating(&IsTypeSupportedInternal, cdm_factory, key_system,
- /*is_hw_secure=*/false),
- /*is_hw_secure=*/false);
- absl::optional<CdmCapability> hw_secure_capability = GetCdmCapability(
- base::BindRepeating(&IsTypeSupportedInternal, cdm_factory, key_system,
- /*is_hw_secure=*/true),
- /*is_hw_secure=*/true);
+ absl::optional<CdmCapability> sw_secure_capability =
+ GetCdmCapability(cdm_factory, key_system, /*is_hw_secure=*/false);
+ absl::optional<CdmCapability> hw_secure_capability =
+ GetCdmCapability(cdm_factory, key_system, /*is_hw_secure=*/true);
if (!sw_secure_capability && !hw_secure_capability) {
DVLOG(2) << "Get empty CdmCapability.";
diff --git a/chromium/media/mojo/services/media_metrics_provider.cc b/chromium/media/mojo/services/media_metrics_provider.cc
index 4e7e4c5af56..62d2f6a865a 100644
--- a/chromium/media/mojo/services/media_metrics_provider.cc
+++ b/chromium/media/mojo/services/media_metrics_provider.cc
@@ -263,6 +263,16 @@ void MediaMetricsProvider::OnError(const PipelineStatus& status) {
uma_info_.last_pipeline_status = status.code();
}
+void MediaMetricsProvider::OnFallback(const PipelineStatus& status) {
+ DCHECK(initialized_);
+ if (is_shutting_down_cb_.Run()) {
+ DVLOG(1) << __func__ << ": Error " << PipelineStatusToString(status)
+ << " ignored since it is reported during shutdown.";
+ return;
+ }
+ // Do nothing for now.
+}
+
void MediaMetricsProvider::SetIsEME() {
// This may be called before Initialize().
uma_info_.is_eme = true;
diff --git a/chromium/media/mojo/services/media_metrics_provider.h b/chromium/media/mojo/services/media_metrics_provider.h
index b97d9d2e33d..5e8595f4884 100644
--- a/chromium/media/mojo/services/media_metrics_provider.h
+++ b/chromium/media/mojo/services/media_metrics_provider.h
@@ -110,6 +110,7 @@ class MEDIA_MOJO_EXPORT MediaMetricsProvider
mojom::MediaURLScheme url_scheme,
mojom::MediaStreamType media_stream_type) override;
void OnError(const PipelineStatus& status) override;
+ void OnFallback(const PipelineStatus& status) override;
void SetAudioPipelineInfo(const AudioPipelineInfo& info) override;
void SetContainerName(
container_names::MediaContainerName container_name) override;
diff --git a/chromium/media/mojo/services/media_service_factory.cc b/chromium/media/mojo/services/media_service_factory.cc
index 55ebf5a4916..1c533946eb0 100644
--- a/chromium/media/mojo/services/media_service_factory.cc
+++ b/chromium/media/mojo/services/media_service_factory.cc
@@ -28,6 +28,9 @@ std::unique_ptr<MediaService> CreateMediaService(
#if BUILDFLAG(IS_ANDROID)
return std::make_unique<MediaService>(
std::make_unique<AndroidMojoMediaClient>(), std::move(receiver));
+#elif BUILDFLAG(IS_WIN)
+ return std::make_unique<MediaService>(
+ std::make_unique<MediaFoundationMojoMediaClient>(), std::move(receiver));
#else
NOTREACHED() << "No MediaService implementation available.";
return nullptr;
diff --git a/chromium/media/mojo/services/mojo_audio_input_stream.cc b/chromium/media/mojo/services/mojo_audio_input_stream.cc
index 58c856de951..df9550d8b80 100644
--- a/chromium/media/mojo/services/mojo_audio_input_stream.cc
+++ b/chromium/media/mojo/services/mojo_audio_input_stream.cc
@@ -13,6 +13,7 @@
#include "base/sync_socket.h"
#include "base/threading/thread_task_runner_handle.h"
#include "mojo/public/cpp/system/platform_handle.h"
+#include "third_party/abseil-cpp/absl/utility/utility.h"
namespace media {
@@ -90,16 +91,11 @@ void MojoAudioInputStream::OnStreamCreated(
mojo::PlatformHandle socket_handle(foreign_socket->Take());
std::move(stream_created_callback_)
- .Run({base::in_place, std::move(shared_memory_region),
+ .Run({absl::in_place, std::move(shared_memory_region),
std::move(socket_handle)},
initially_muted);
}
-void MojoAudioInputStream::OnMuted(int stream_id, bool is_muted) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- client_->OnMutedStateChanged(is_muted);
-}
-
void MojoAudioInputStream::OnStreamError(int stream_id) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
client_->OnError(mojom::InputStreamErrorCode::kUnknown);
diff --git a/chromium/media/mojo/services/mojo_audio_input_stream.h b/chromium/media/mojo/services/mojo_audio_input_stream.h
index 922e7e92ba7..b4449d5c238 100644
--- a/chromium/media/mojo/services/mojo_audio_input_stream.h
+++ b/chromium/media/mojo/services/mojo_audio_input_stream.h
@@ -62,7 +62,6 @@ class MEDIA_MOJO_EXPORT MojoAudioInputStream
base::ReadOnlySharedMemoryRegion shared_memory_region,
std::unique_ptr<base::CancelableSyncSocket> foreign_socket,
bool initially_muted) override;
- void OnMuted(int stream_id, bool is_muted) override;
void OnStreamError(int stream_id) override;
// Closes connection to client and notifies owner.
diff --git a/chromium/media/mojo/services/mojo_audio_output_stream.cc b/chromium/media/mojo/services/mojo_audio_output_stream.cc
index 7f3023166b6..865dedecf9a 100644
--- a/chromium/media/mojo/services/mojo_audio_output_stream.cc
+++ b/chromium/media/mojo/services/mojo_audio_output_stream.cc
@@ -14,6 +14,7 @@
#include "base/threading/thread_task_runner_handle.h"
#include "media/mojo/mojom/audio_data_pipe.mojom.h"
#include "mojo/public/cpp/system/platform_handle.h"
+#include "third_party/abseil-cpp/absl/utility/utility.h"
namespace media {
@@ -91,7 +92,7 @@ void MojoAudioOutputStream::OnStreamCreated(
std::move(stream_created_callback_)
.Run(std::move(pending_stream),
- {base::in_place, std::move(shared_memory_region),
+ {absl::in_place, std::move(shared_memory_region),
std::move(socket_handle)});
}
diff --git a/chromium/media/mojo/services/mojo_media_client.cc b/chromium/media/mojo/services/mojo_media_client.cc
index 0376613e05b..348549f62a9 100644
--- a/chromium/media/mojo/services/mojo_media_client.cc
+++ b/chromium/media/mojo/services/mojo_media_client.cc
@@ -45,7 +45,8 @@ std::unique_ptr<VideoDecoder> MojoMediaClient::CreateVideoDecoder(
MediaLog* media_log,
mojom::CommandBufferIdPtr command_buffer_id,
RequestOverlayInfoCB request_overlay_info_cb,
- const gfx::ColorSpace& target_color_space) {
+ const gfx::ColorSpace& target_color_space,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder> oop_video_decoder) {
return nullptr;
}
diff --git a/chromium/media/mojo/services/mojo_media_client.h b/chromium/media/mojo/services/mojo_media_client.h
index 8de7d5d06cb..03fe64fb3b4 100644
--- a/chromium/media/mojo/services/mojo_media_client.h
+++ b/chromium/media/mojo/services/mojo_media_client.h
@@ -18,6 +18,7 @@
#include "media/mojo/buildflags.h"
#include "media/mojo/mojom/frame_interface_factory.mojom.h"
#include "media/mojo/mojom/renderer_extensions.mojom.h"
+#include "media/mojo/mojom/stable/stable_video_decoder.mojom.h"
#include "media/mojo/mojom/video_decoder.mojom.h"
#include "media/mojo/services/media_mojo_export.h"
@@ -68,7 +69,8 @@ class MEDIA_MOJO_EXPORT MojoMediaClient {
MediaLog* media_log,
mojom::CommandBufferIdPtr command_buffer_id,
RequestOverlayInfoCB request_overlay_info_cb,
- const gfx::ColorSpace& target_color_space);
+ const gfx::ColorSpace& target_color_space,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder> oop_video_decoder);
// Returns the Renderer to be used by MojoRendererService.
// TODO(hubbe): Find out whether we should pass in |target_color_space| here.
diff --git a/chromium/media/mojo/services/mojo_renderer_service.cc b/chromium/media/mojo/services/mojo_renderer_service.cc
index dee5dc5bfeb..ce26bf64cd6 100644
--- a/chromium/media/mojo/services/mojo_renderer_service.cc
+++ b/chromium/media/mojo/services/mojo_renderer_service.cc
@@ -157,6 +157,10 @@ void MojoRendererService::OnError(PipelineStatus error) {
client_->OnError(std::move(error));
}
+void MojoRendererService::OnFallback(PipelineStatus error) {
+ NOTREACHED();
+}
+
void MojoRendererService::OnEnded() {
DVLOG(1) << __func__;
CancelPeriodicMediaTimeUpdates();
diff --git a/chromium/media/mojo/services/mojo_renderer_service.h b/chromium/media/mojo/services/mojo_renderer_service.h
index 8f1d29ea499..e2701757b89 100644
--- a/chromium/media/mojo/services/mojo_renderer_service.h
+++ b/chromium/media/mojo/services/mojo_renderer_service.h
@@ -83,6 +83,7 @@ class MEDIA_MOJO_EXPORT MojoRendererService final : public mojom::Renderer,
// RendererClient implementation.
void OnError(PipelineStatus status) final;
+ void OnFallback(PipelineStatus status) final;
void OnEnded() final;
void OnStatisticsUpdate(const PipelineStatistics& stats) final;
void OnBufferingStateChange(BufferingState state,
diff --git a/chromium/media/mojo/services/mojo_video_decoder_service.cc b/chromium/media/mojo/services/mojo_video_decoder_service.cc
index 4369bc406f1..78b47d4977f 100644
--- a/chromium/media/mojo/services/mojo_video_decoder_service.cc
+++ b/chromium/media/mojo/services/mojo_video_decoder_service.cc
@@ -99,9 +99,13 @@ class VideoFrameHandleReleaserImpl final
MojoVideoDecoderService::MojoVideoDecoderService(
MojoMediaClient* mojo_media_client,
- MojoCdmServiceContext* mojo_cdm_service_context)
+ MojoCdmServiceContext* mojo_cdm_service_context,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder>
+ oop_video_decoder_pending_remote)
: mojo_media_client_(mojo_media_client),
- mojo_cdm_service_context_(mojo_cdm_service_context) {
+ mojo_cdm_service_context_(mojo_cdm_service_context),
+ oop_video_decoder_pending_remote_(
+ std::move(oop_video_decoder_pending_remote)) {
DVLOG(1) << __func__;
DCHECK(mojo_media_client_);
DCHECK(mojo_cdm_service_context_);
@@ -151,7 +155,7 @@ void MojoVideoDecoderService::Construct(
DVLOG(1) << __func__;
TRACE_EVENT0("media", "MojoVideoDecoderService::Construct");
- if (decoder_) {
+ if (media_log_) {
mojo::ReportBadMessage("Construct() already called");
return;
}
@@ -175,7 +179,7 @@ void MojoVideoDecoderService::Construct(
task_runner, media_log_.get(), std::move(command_buffer_id),
base::BindRepeating(
&MojoVideoDecoderService::OnDecoderRequestedOverlayInfo, weak_this_),
- target_color_space);
+ target_color_space, std::move(oop_video_decoder_pending_remote_));
}
void MojoVideoDecoderService::Initialize(
@@ -196,6 +200,15 @@ void MojoVideoDecoderService::Initialize(
init_cb_ = std::move(callback);
+ // Prevent creation of too many hardware decoding instances since it may lead
+ // to system instability. Note: This will break decoding entirely for codecs
+ // which don't have software fallback, so we use a conservative limit. Most
+ // platforms will self-limit and never reach this limit.
+ if (!config.is_encrypted() && g_num_active_mvd_instances >= 128) {
+ OnDecoderInitialized(DecoderStatus::Codes::kTooManyDecoders);
+ return;
+ }
+
if (!decoder_) {
OnDecoderInitialized(DecoderStatus::Codes::kFailedToCreateDecoder);
return;
diff --git a/chromium/media/mojo/services/mojo_video_decoder_service.h b/chromium/media/mojo/services/mojo_video_decoder_service.h
index df1820bf454..26d76d9c0a1 100644
--- a/chromium/media/mojo/services/mojo_video_decoder_service.h
+++ b/chromium/media/mojo/services/mojo_video_decoder_service.h
@@ -16,11 +16,13 @@
#include "media/base/decoder_status.h"
#include "media/base/overlay_info.h"
#include "media/base/video_decoder.h"
+#include "media/mojo/mojom/stable/stable_video_decoder.mojom.h"
#include "media/mojo/mojom/video_decoder.mojom.h"
#include "media/mojo/services/media_mojo_export.h"
#include "media/mojo/services/mojo_media_client.h"
#include "mojo/public/cpp/bindings/associated_remote.h"
#include "mojo/public/cpp/bindings/pending_associated_remote.h"
+#include "mojo/public/cpp/bindings/pending_remote.h"
#include "mojo/public/cpp/bindings/self_owned_receiver.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
@@ -40,7 +42,9 @@ class MEDIA_MOJO_EXPORT MojoVideoDecoderService final
public:
explicit MojoVideoDecoderService(
MojoMediaClient* mojo_media_client,
- MojoCdmServiceContext* mojo_cdm_service_context);
+ MojoCdmServiceContext* mojo_cdm_service_context,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder>
+ oop_video_decoder_remote);
MojoVideoDecoderService(const MojoVideoDecoderService&) = delete;
MojoVideoDecoderService& operator=(const MojoVideoDecoderService&) = delete;
@@ -123,6 +127,13 @@ class MEDIA_MOJO_EXPORT MojoVideoDecoderService final
std::unique_ptr<media::VideoDecoder> decoder_;
+ // An out-of-process video decoder to forward decode requests to. This member
+ // just holds the PendingRemote in between the construction of the
+ // MojoVideoDecoderService and the call to
+ // |mojo_media_client_|->CreateVideoDecoder().
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder>
+ oop_video_decoder_pending_remote_;
+
InitializeCallback init_cb_;
ResetCallback reset_cb_;
diff --git a/chromium/media/mojo/services/mojo_video_encode_accelerator_service.cc b/chromium/media/mojo/services/mojo_video_encode_accelerator_service.cc
index e26aa45974c..34b3e2522f9 100644
--- a/chromium/media/mojo/services/mojo_video_encode_accelerator_service.cc
+++ b/chromium/media/mojo/services/mojo_video_encode_accelerator_service.cc
@@ -158,14 +158,14 @@ void MojoVideoEncodeAcceleratorService::Encode(
void MojoVideoEncodeAcceleratorService::UseOutputBitstreamBuffer(
int32_t bitstream_buffer_id,
- mojo::ScopedSharedBufferHandle buffer) {
+ base::UnsafeSharedMemoryRegion region) {
DVLOG(2) << __func__ << " bitstream_buffer_id=" << bitstream_buffer_id;
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (!encoder_)
return;
- if (!buffer.is_valid()) {
- DLOG(ERROR) << __func__ << " invalid |buffer|.";
+ if (!region.IsValid()) {
+ DLOG(ERROR) << __func__ << " invalid |region|.";
NotifyError(::media::VideoEncodeAccelerator::kInvalidArgumentError);
return;
}
@@ -176,9 +176,6 @@ void MojoVideoEncodeAcceleratorService::UseOutputBitstreamBuffer(
return;
}
- base::subtle::PlatformSharedMemoryRegion region =
- mojo::UnwrapPlatformSharedMemoryRegion(std::move(buffer));
-
auto memory_size = region.GetSize();
if (memory_size < output_buffer_size_) {
DLOG(ERROR) << __func__ << " bitstream_buffer_id=" << bitstream_buffer_id
diff --git a/chromium/media/mojo/services/mojo_video_encode_accelerator_service.h b/chromium/media/mojo/services/mojo_video_encode_accelerator_service.h
index 77474a98d7c..978552d82d0 100644
--- a/chromium/media/mojo/services/mojo_video_encode_accelerator_service.h
+++ b/chromium/media/mojo/services/mojo_video_encode_accelerator_service.h
@@ -12,6 +12,7 @@
#include <vector>
#include "base/compiler_specific.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/memory/weak_ptr.h"
#include "base/sequence_checker.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
@@ -73,7 +74,7 @@ class MEDIA_MOJO_EXPORT MojoVideoEncodeAcceleratorService
bool force_keyframe,
EncodeCallback callback) override;
void UseOutputBitstreamBuffer(int32_t bitstream_buffer_id,
- mojo::ScopedSharedBufferHandle buffer) override;
+ base::UnsafeSharedMemoryRegion region) override;
void RequestEncodingParametersChangeWithBitrate(
const media::Bitrate& bitrate_allocation,
uint32_t framerate) override;
diff --git a/chromium/media/mojo/services/mojo_video_encode_accelerator_service_unittest.cc b/chromium/media/mojo/services/mojo_video_encode_accelerator_service_unittest.cc
index e3c32a16104..2c5ed85ae79 100644
--- a/chromium/media/mojo/services/mojo_video_encode_accelerator_service_unittest.cc
+++ b/chromium/media/mojo/services/mojo_video_encode_accelerator_service_unittest.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/memory/ptr_util.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/run_loop.h"
#include "base/test/task_environment.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
@@ -174,10 +175,10 @@ TEST_F(MojoVideoEncodeAcceleratorServiceTest, EncodeOneFrame) {
const int32_t kBitstreamBufferId = 17;
{
const uint64_t kShMemSize = fake_vea()->minimum_output_buffer_size();
- auto handle = mojo::SharedBufferHandle::Create(kShMemSize);
+ auto region = base::UnsafeSharedMemoryRegion::Create(kShMemSize);
mojo_vea_service()->UseOutputBitstreamBuffer(kBitstreamBufferId,
- std::move(handle));
+ std::move(region));
base::RunLoop().RunUntilIdle();
}
@@ -303,13 +304,13 @@ TEST_F(MojoVideoEncodeAcceleratorServiceTest,
const int32_t kBitstreamBufferId = 17;
const uint64_t wrong_size = fake_vea()->minimum_output_buffer_size() / 2;
- auto handle = mojo::SharedBufferHandle::Create(wrong_size);
+ auto region = base::UnsafeSharedMemoryRegion::Create(wrong_size);
EXPECT_CALL(*mock_mojo_vea_client(),
NotifyError(VideoEncodeAccelerator::kInvalidArgumentError));
mojo_vea_service()->UseOutputBitstreamBuffer(kBitstreamBufferId,
- std::move(handle));
+ std::move(region));
base::RunLoop().RunUntilIdle();
}
@@ -347,9 +348,9 @@ TEST_F(MojoVideoEncodeAcceleratorServiceTest, CallsBeforeInitializeAreIgnored) {
{
const int32_t kBitstreamBufferId = 17;
const uint64_t kShMemSize = 10;
- auto handle = mojo::SharedBufferHandle::Create(kShMemSize);
+ auto region = base::UnsafeSharedMemoryRegion::Create(kShMemSize);
mojo_vea_service()->UseOutputBitstreamBuffer(kBitstreamBufferId,
- std::move(handle));
+ std::move(region));
base::RunLoop().RunUntilIdle();
}
{
diff --git a/chromium/media/mojo/services/stable_video_decoder_factory_service.cc b/chromium/media/mojo/services/stable_video_decoder_factory_service.cc
index e9daea2b3e7..5163fae97cf 100644
--- a/chromium/media/mojo/services/stable_video_decoder_factory_service.cc
+++ b/chromium/media/mojo/services/stable_video_decoder_factory_service.cc
@@ -4,21 +4,123 @@
#include "media/mojo/services/stable_video_decoder_factory_service.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "media/base/media_log.h"
+#include "media/base/media_util.h"
+#include "media/gpu/buildflags.h"
+#include "media/gpu/chromeos/platform_video_frame_pool.h"
+#include "media/gpu/chromeos/video_decoder_pipeline.h"
+#include "media/gpu/chromeos/video_frame_converter.h"
+#include "media/mojo/services/mojo_media_client.h"
+#include "media/mojo/services/mojo_video_decoder_service.h"
+#include "media/mojo/services/stable_video_decoder_service.h"
+#include "mojo/public/cpp/bindings/self_owned_receiver.h"
+
namespace media {
-StableVideoDecoderFactoryService::StableVideoDecoderFactoryService() = default;
-StableVideoDecoderFactoryService::~StableVideoDecoderFactoryService() = default;
+namespace {
+
+// This is a lighter alternative to using a GpuMojoMediaClient. While we could
+// use a GpuMojoMediaClient, that would be abusing the abstraction a bit since
+// that class is too semantically coupled with the GPU process through things
+// like its |gpu_task_runner_| and |media_gpu_channel_manager_| members.
+class MojoMediaClientImpl : public MojoMediaClient {
+ public:
+ MojoMediaClientImpl() = default;
+ MojoMediaClientImpl(const MojoMediaClientImpl&) = delete;
+ MojoMediaClientImpl& operator=(const MojoMediaClientImpl&) = delete;
+ ~MojoMediaClientImpl() override = default;
+
+ // MojoMediaClient implementation.
+ std::vector<SupportedVideoDecoderConfig> GetSupportedVideoDecoderConfigs()
+ final {
+ // TODO(b/195769334): we should pass a meaningful
+ // gpu::GpuDriverBugWorkarounds so that we can restrict the supported
+ // configurations using that facility.
+ absl::optional<std::vector<SupportedVideoDecoderConfig>> configs =
+ VideoDecoderPipeline::GetSupportedConfigs(
+ gpu::GpuDriverBugWorkarounds());
+ return configs.value_or(std::vector<SupportedVideoDecoderConfig>{});
+ }
+ VideoDecoderType GetDecoderImplementationType() final {
+ // TODO(b/195769334): how can we keep this in sync with
+ // VideoDecoderPipeline::GetDecoderType()?
+#if BUILDFLAG(USE_VAAPI)
+ return VideoDecoderType::kVaapi;
+#elif BUILDFLAG(USE_V4L2_CODEC)
+ return VideoDecoderType::kV4L2;
+#else
+#error StableVideoDecoderFactoryService should only be built on platforms that
+#error support video decode acceleration through either VA-API or V4L2.
+#endif
+ }
+ std::unique_ptr<VideoDecoder> CreateVideoDecoder(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ MediaLog* media_log,
+ mojom::CommandBufferIdPtr command_buffer_id,
+ RequestOverlayInfoCB request_overlay_info_cb,
+ const gfx::ColorSpace& target_color_space,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder> oop_video_decoder)
+ final {
+ // TODO(b/195769334): some platforms do not support the
+ // VideoDecoderPipeline so we need to handle those (and the rest of the
+ // methods of MojoMediaClientImpl are affected as well).
+
+ // For out-of-process video decoding, |command_buffer_id| is not used and
+ // should not be supplied.
+ DCHECK(!command_buffer_id);
+
+ DCHECK(!oop_video_decoder);
+
+ std::unique_ptr<MediaLog> log =
+ media_log ? media_log->Clone()
+ : std::make_unique<media::NullMediaLog>();
+ return VideoDecoderPipeline::Create(
+ /*client_task_runner=*/std::move(task_runner),
+ std::make_unique<PlatformVideoFramePool>(),
+ std::make_unique<media::VideoFrameConverter>(), std::move(log),
+ /*oop_video_decoder=*/{});
+ }
+};
+
+} // namespace
+
+StableVideoDecoderFactoryService::StableVideoDecoderFactoryService()
+ : receiver_(this),
+ mojo_media_client_(std::make_unique<MojoMediaClientImpl>()) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ mojo_media_client_->Initialize();
+}
+
+StableVideoDecoderFactoryService::~StableVideoDecoderFactoryService() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+}
void StableVideoDecoderFactoryService::BindReceiver(
mojo::PendingReceiver<stable::mojom::StableVideoDecoderFactory> receiver) {
- receivers_.Add(this, std::move(receiver));
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ // The browser process should guarantee that BindReceiver() is only called
+ // once.
+ DCHECK(!receiver_.is_bound());
+ receiver_.Bind(std::move(receiver));
}
void StableVideoDecoderFactoryService::CreateStableVideoDecoder(
mojo::PendingReceiver<stable::mojom::StableVideoDecoder> receiver) {
- // TODO(b/171813538): connect with the ash-chrome video decoding stack.
- // TODO(b/195769334): plumb OOP-VD.
- NOTIMPLEMENTED();
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ std::unique_ptr<mojom::VideoDecoder> dst_video_decoder;
+ if (video_decoder_creation_cb_for_testing_) {
+ dst_video_decoder = video_decoder_creation_cb_for_testing_.Run(
+ mojo_media_client_.get(), &cdm_service_context_);
+ } else {
+ dst_video_decoder = std::make_unique<MojoVideoDecoderService>(
+ mojo_media_client_.get(), &cdm_service_context_,
+ mojo::PendingRemote<stable::mojom::StableVideoDecoder>());
+ }
+ video_decoders_.Add(
+ std::make_unique<StableVideoDecoderService>(std::move(dst_video_decoder)),
+ std::move(receiver));
}
} // namespace media
diff --git a/chromium/media/mojo/services/stable_video_decoder_factory_service.h b/chromium/media/mojo/services/stable_video_decoder_factory_service.h
index 17b4178da72..427fb41c148 100644
--- a/chromium/media/mojo/services/stable_video_decoder_factory_service.h
+++ b/chromium/media/mojo/services/stable_video_decoder_factory_service.h
@@ -5,13 +5,31 @@
#ifndef MEDIA_MOJO_SERVICES_STABLE_VIDEO_DECODER_FACTORY_SERVICE_H_
#define MEDIA_MOJO_SERVICES_STABLE_VIDEO_DECODER_FACTORY_SERVICE_H_
+#include "base/callback.h"
+#include "base/sequence_checker.h"
#include "media/mojo/mojom/stable/stable_video_decoder.mojom.h"
#include "media/mojo/services/media_mojo_export.h"
+#include "media/mojo/services/mojo_cdm_service_context.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
-#include "mojo/public/cpp/bindings/receiver_set.h"
+#include "mojo/public/cpp/bindings/receiver.h"
+#include "mojo/public/cpp/bindings/unique_receiver_set.h"
namespace media {
+namespace mojom {
+class VideoDecoder;
+} // namespace mojom
+class MojoMediaClient;
+
+// A StableVideoDecoderFactoryService allows a browser process to create
+// StableVideoDecoders. It's intended to live inside a video decoder process (a
+// utility process) and there should only be one such instance per process
+// because one video decoder process corresponds to a client that handles one
+// origin. For example, all the StableVideoDecoders for a video conference call
+// can live in the same process (and thus be created by the same
+// StableVideoDecoderFactoryService). However, the StableVideoDecoder for a
+// YouTube video should live in a process separate from a StableVideoDecoder for
+// a Vimeo video.
class MEDIA_MOJO_EXPORT StableVideoDecoderFactoryService
: public stable::mojom::StableVideoDecoderFactory {
public:
@@ -22,6 +40,17 @@ class MEDIA_MOJO_EXPORT StableVideoDecoderFactoryService
const StableVideoDecoderFactoryService&) = delete;
~StableVideoDecoderFactoryService() override;
+ using VideoDecoderCreationCBForTesting =
+ base::RepeatingCallback<std::unique_ptr<mojom::VideoDecoder>(
+ MojoMediaClient*,
+ MojoCdmServiceContext*)>;
+ void SetVideoDecoderCreationCallbackForTesting(
+ VideoDecoderCreationCBForTesting video_decoder_creation_cb_for_testing) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ video_decoder_creation_cb_for_testing_ =
+ video_decoder_creation_cb_for_testing;
+ }
+
void BindReceiver(
mojo::PendingReceiver<stable::mojom::StableVideoDecoderFactory> receiver);
@@ -31,7 +60,22 @@ class MEDIA_MOJO_EXPORT StableVideoDecoderFactoryService
override;
private:
- mojo::ReceiverSet<stable::mojom::StableVideoDecoderFactory> receivers_;
+ VideoDecoderCreationCBForTesting video_decoder_creation_cb_for_testing_
+ GUARDED_BY_CONTEXT(sequence_checker_);
+
+ mojo::Receiver<stable::mojom::StableVideoDecoderFactory> receiver_;
+
+ // |mojo_media_client_| and |cdm_service_context_| must be declared before
+ // |video_decoders_| because the interface implementation instances managed by
+ // that set take raw pointers to them.
+ std::unique_ptr<MojoMediaClient> mojo_media_client_
+ GUARDED_BY_CONTEXT(sequence_checker_);
+ MojoCdmServiceContext cdm_service_context_
+ GUARDED_BY_CONTEXT(sequence_checker_);
+ mojo::UniqueReceiverSet<stable::mojom::StableVideoDecoder> video_decoders_
+ GUARDED_BY_CONTEXT(sequence_checker_);
+
+ SEQUENCE_CHECKER(sequence_checker_);
};
} // namespace media
diff --git a/chromium/media/mojo/services/stable_video_decoder_service.cc b/chromium/media/mojo/services/stable_video_decoder_service.cc
new file mode 100644
index 00000000000..9a8cab03c0b
--- /dev/null
+++ b/chromium/media/mojo/services/stable_video_decoder_service.cc
@@ -0,0 +1,143 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mojo/services/stable_video_decoder_service.h"
+
+namespace media {
+
+StableVideoDecoderService::StableVideoDecoderService(
+ std::unique_ptr<mojom::VideoDecoder> dst_video_decoder)
+ : video_decoder_client_receiver_(this),
+ media_log_receiver_(this),
+ stable_video_frame_handle_releaser_receiver_(this),
+ dst_video_decoder_(std::move(dst_video_decoder)),
+ dst_video_decoder_receiver_(dst_video_decoder_.get()) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ CHECK(!!dst_video_decoder_);
+ dst_video_decoder_remote_.Bind(
+ dst_video_decoder_receiver_.BindNewPipeAndPassRemote());
+}
+
+StableVideoDecoderService::~StableVideoDecoderService() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+}
+
+void StableVideoDecoderService::GetSupportedConfigs(
+ GetSupportedConfigsCallback callback) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ NOTIMPLEMENTED();
+}
+
+void StableVideoDecoderService::Construct(
+ mojo::PendingAssociatedRemote<stable::mojom::VideoDecoderClient>
+ stable_video_decoder_client_remote,
+ mojo::PendingRemote<stable::mojom::MediaLog> stable_media_log_remote,
+ mojo::PendingReceiver<stable::mojom::VideoFrameHandleReleaser>
+ stable_video_frame_handle_releaser_receiver,
+ mojo::ScopedDataPipeConsumerHandle decoder_buffer_pipe,
+ const gfx::ColorSpace& target_color_space) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ if (video_decoder_client_receiver_.is_bound()) {
+ mojo::ReportBadMessage("Construct() already called");
+ return;
+ }
+
+ DCHECK(!video_decoder_client_receiver_.is_bound());
+ DCHECK(!stable_video_decoder_client_remote_.is_bound());
+ stable_video_decoder_client_remote_.Bind(
+ std::move(stable_video_decoder_client_remote));
+
+ DCHECK(!media_log_receiver_.is_bound());
+ DCHECK(!stable_media_log_remote_.is_bound());
+ stable_media_log_remote_.Bind(std::move(stable_media_log_remote));
+
+ DCHECK(!video_frame_handle_releaser_remote_.is_bound());
+ DCHECK(!stable_video_frame_handle_releaser_receiver_.is_bound());
+ stable_video_frame_handle_releaser_receiver_.Bind(
+ std::move(stable_video_frame_handle_releaser_receiver));
+
+ dst_video_decoder_remote_->Construct(
+ video_decoder_client_receiver_.BindNewEndpointAndPassRemote(),
+ media_log_receiver_.BindNewPipeAndPassRemote(),
+ video_frame_handle_releaser_remote_.BindNewPipeAndPassReceiver(),
+ std::move(decoder_buffer_pipe), mojom::CommandBufferIdPtr(),
+ target_color_space);
+}
+
+void StableVideoDecoderService::Initialize(
+ const VideoDecoderConfig& config,
+ bool low_delay,
+ mojo::PendingRemote<stable::mojom::StableCdmContext> cdm_context,
+ InitializeCallback callback) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ if (!video_decoder_client_receiver_.is_bound()) {
+ std::move(callback).Run(DecoderStatus::Codes::kFailedToCreateDecoder,
+ /*needs_bitstream_conversion=*/false,
+ /*max_decode_requests=*/1,
+ VideoDecoderType::kUnknown);
+ return;
+ }
+
+ // The |config| should have been validated at deserialization time.
+ DCHECK(config.IsValidConfig());
+
+ // TODO(b/195769334): implement out-of-process video decoding of hardware
+ // protected content.
+ if (config.is_encrypted()) {
+ std::move(callback).Run(DecoderStatus::Codes::kUnsupportedConfig,
+ /*needs_bitstream_conversion=*/false,
+ /*max_decode_requests=*/1,
+ VideoDecoderType::kUnknown);
+ return;
+ }
+ dst_video_decoder_remote_->Initialize(
+ config, low_delay, /*cdm_id=*/absl::nullopt, std::move(callback));
+}
+
+void StableVideoDecoderService::Decode(
+ const scoped_refptr<DecoderBuffer>& buffer,
+ DecodeCallback callback) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ NOTIMPLEMENTED();
+}
+
+void StableVideoDecoderService::Reset(ResetCallback callback) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ NOTIMPLEMENTED();
+}
+
+void StableVideoDecoderService::ReleaseVideoFrame(
+ const base::UnguessableToken& release_token) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ NOTIMPLEMENTED();
+}
+
+void StableVideoDecoderService::OnVideoFrameDecoded(
+ const scoped_refptr<VideoFrame>& frame,
+ bool can_read_without_stalling,
+ const absl::optional<base::UnguessableToken>& release_token) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(stable_video_decoder_client_remote_.is_bound());
+ DCHECK(release_token.has_value());
+ stable_video_decoder_client_remote_->OnVideoFrameDecoded(
+ frame, can_read_without_stalling, *release_token);
+}
+
+void StableVideoDecoderService::OnWaiting(WaitingReason reason) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ NOTIMPLEMENTED();
+}
+
+void StableVideoDecoderService::RequestOverlayInfo(
+ bool restart_for_transitions) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ NOTREACHED();
+}
+
+void StableVideoDecoderService::AddLogRecord(const MediaLogRecord& event) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ NOTIMPLEMENTED();
+}
+
+} // namespace media
diff --git a/chromium/media/mojo/services/stable_video_decoder_service.h b/chromium/media/mojo/services/stable_video_decoder_service.h
new file mode 100644
index 00000000000..20ee73ab5cb
--- /dev/null
+++ b/chromium/media/mojo/services/stable_video_decoder_service.h
@@ -0,0 +1,132 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MOJO_SERVICES_STABLE_VIDEO_DECODER_SERVICE_H_
+#define MEDIA_MOJO_SERVICES_STABLE_VIDEO_DECODER_SERVICE_H_
+
+#include "base/sequence_checker.h"
+#include "base/thread_annotations.h"
+#include "media/mojo/mojom/media_log.mojom.h"
+#include "media/mojo/mojom/stable/stable_video_decoder.mojom.h"
+#include "media/mojo/mojom/video_decoder.mojom.h"
+#include "media/mojo/services/media_mojo_export.h"
+#include "mojo/public/cpp/bindings/associated_receiver.h"
+#include "mojo/public/cpp/bindings/associated_remote.h"
+#include "mojo/public/cpp/bindings/receiver.h"
+#include "mojo/public/cpp/bindings/remote.h"
+
+namespace media {
+
+// A StableVideoDecoderService serves as an adapter between the
+// stable::mojom::StableVideoDecoder interface and the mojom::VideoDecoder
+// interface. This allows us to provide hardware video decoding capabilities to
+// clients that may be using a different version of the
+// stable::mojom::StableVideoDecoder interface, e.g., LaCrOS. A
+// StableVideoDecoderService is intended to live in a video decoder process.
+// This process can host multiple StableVideoDecoderServices, but the assumption
+// is that they don't distrust each other. For example, they should all be
+// serving the same renderer process.
+//
+// TODO(b/195769334): a StableVideoDecoderService should probably be responsible
+// for checking incoming data to address issues that may arise due to the stable
+// nature of the stable::mojom::StableVideoDecoder interface. For example,
+// suppose the StableVideoDecoderService implements an older version of the
+// interface relative to the one used by the client. If the client Initialize()s
+// the StableVideoDecoderService with a VideoCodecProfile that's unsupported by
+// the older version of the interface, the StableVideoDecoderService should
+// reject that initialization. Conversely, the client of the
+// StableVideoDecoderService should also check incoming data due to similar
+// concerns.
+class MEDIA_MOJO_EXPORT StableVideoDecoderService
+ : public stable::mojom::StableVideoDecoder,
+ public stable::mojom::VideoFrameHandleReleaser,
+ public mojom::VideoDecoderClient,
+ public mojom::MediaLog {
+ public:
+ explicit StableVideoDecoderService(
+ std::unique_ptr<mojom::VideoDecoder> dst_video_decoder);
+ StableVideoDecoderService(const StableVideoDecoderService&) = delete;
+ StableVideoDecoderService& operator=(const StableVideoDecoderService&) =
+ delete;
+ ~StableVideoDecoderService() override;
+
+ // stable::mojom::StableVideoDecoder implementation.
+ void GetSupportedConfigs(GetSupportedConfigsCallback callback) final;
+ void Construct(
+ mojo::PendingAssociatedRemote<stable::mojom::VideoDecoderClient>
+ stable_video_decoder_client_remote,
+ mojo::PendingRemote<stable::mojom::MediaLog> stable_media_log_remote,
+ mojo::PendingReceiver<stable::mojom::VideoFrameHandleReleaser>
+ stable_video_frame_handle_releaser_receiver,
+ mojo::ScopedDataPipeConsumerHandle decoder_buffer_pipe,
+ const gfx::ColorSpace& target_color_space) final;
+ void Initialize(
+ const VideoDecoderConfig& config,
+ bool low_delay,
+ mojo::PendingRemote<stable::mojom::StableCdmContext> cdm_context,
+ InitializeCallback callback) final;
+ void Decode(const scoped_refptr<DecoderBuffer>& buffer,
+ DecodeCallback callback) final;
+ void Reset(ResetCallback callback) final;
+
+  // stable::mojom::VideoFrameHandleReleaser implementation.
+ void ReleaseVideoFrame(const base::UnguessableToken& release_token) final;
+
+ // mojom::VideoDecoderClient implementation.
+ void OnVideoFrameDecoded(
+ const scoped_refptr<VideoFrame>& frame,
+ bool can_read_without_stalling,
+ const absl::optional<base::UnguessableToken>& release_token) final;
+ void OnWaiting(WaitingReason reason) final;
+ void RequestOverlayInfo(bool restart_for_transitions) final;
+
+ // mojom::MediaLog implementation.
+ void AddLogRecord(const MediaLogRecord& event) final;
+
+ private:
+ // Incoming calls from the |dst_video_decoder_| to
+ // |video_decoder_client_receiver_| are forwarded to
+ // |stable_video_decoder_client_remote_|.
+ mojo::AssociatedReceiver<mojom::VideoDecoderClient>
+ video_decoder_client_receiver_ GUARDED_BY_CONTEXT(sequence_checker_);
+ mojo::AssociatedRemote<stable::mojom::VideoDecoderClient>
+ stable_video_decoder_client_remote_ GUARDED_BY_CONTEXT(sequence_checker_);
+
+ // Incoming calls from the |dst_video_decoder_| to |media_log_receiver_| are
+ // forwarded to |stable_media_log_remote_|.
+ mojo::Receiver<mojom::MediaLog> media_log_receiver_
+ GUARDED_BY_CONTEXT(sequence_checker_);
+ mojo::Remote<stable::mojom::MediaLog> stable_media_log_remote_
+ GUARDED_BY_CONTEXT(sequence_checker_);
+
+ // Incoming requests from the client to
+ // |stable_video_frame_handle_releaser_receiver_| are forwarded to
+ // |video_frame_handle_releaser_remote_|.
+ mojo::Receiver<stable::mojom::VideoFrameHandleReleaser>
+ stable_video_frame_handle_releaser_receiver_
+ GUARDED_BY_CONTEXT(sequence_checker_);
+ mojo::Remote<mojom::VideoFrameHandleReleaser>
+ video_frame_handle_releaser_remote_ GUARDED_BY_CONTEXT(sequence_checker_);
+
+ // The incoming stable::mojom::StableVideoDecoder requests are forwarded to
+ // |dst_video_decoder_receiver_| through |dst_video_decoder_remote_|.
+ //
+ // Note: the implementation behind |dst_video_decoder_receiver_| (i.e.,
+ // |dst_video_decoder_|) lives in-process. The reason we don't just make calls
+ // directly to that implementation is that when we call Construct(), we need
+ // to pass a mojo::PendingAssociatedRemote which needs to be sent over an
+ // existing pipe before using it to make calls.
+ std::unique_ptr<mojom::VideoDecoder> dst_video_decoder_
+ GUARDED_BY_CONTEXT(sequence_checker_);
+ mojo::Receiver<mojom::VideoDecoder> dst_video_decoder_receiver_
+ GUARDED_BY_CONTEXT(sequence_checker_);
+ mojo::Remote<mojom::VideoDecoder> dst_video_decoder_remote_
+ GUARDED_BY_CONTEXT(sequence_checker_);
+
+ SEQUENCE_CHECKER(sequence_checker_);
+};
+
+} // namespace media
+
+#endif // MEDIA_MOJO_SERVICES_STABLE_VIDEO_DECODER_SERVICE_H_
diff --git a/chromium/media/mojo/services/stable_video_decoder_service_unittest.cc b/chromium/media/mojo/services/stable_video_decoder_service_unittest.cc
new file mode 100644
index 00000000000..ab3224c2803
--- /dev/null
+++ b/chromium/media/mojo/services/stable_video_decoder_service_unittest.cc
@@ -0,0 +1,501 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mojo/services/stable_video_decoder_service.h"
+#include "base/test/mock_callback.h"
+#include "base/test/task_environment.h"
+#include "media/mojo/common/mojo_decoder_buffer_converter.h"
+#include "media/mojo/mojom/media_log.mojom.h"
+#include "media/mojo/mojom/video_decoder.mojom.h"
+#include "media/mojo/services/stable_video_decoder_factory_service.h"
+#include "mojo/public/cpp/bindings/associated_receiver.h"
+#include "mojo/public/cpp/bindings/remote.h"
+#include "mojo/public/cpp/system/data_pipe.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::_;
+using testing::ByMove;
+using testing::Mock;
+using testing::Return;
+using testing::SaveArg;
+using testing::StrictMock;
+
+namespace media {
+
+namespace {
+
+VideoDecoderConfig CreateValidVideoDecoderConfig() {
+ const VideoDecoderConfig config(
+ VideoCodec::kH264, VideoCodecProfile::H264PROFILE_BASELINE,
+ VideoDecoderConfig::AlphaMode::kHasAlpha, VideoColorSpace::REC709(),
+ VideoTransformation(VIDEO_ROTATION_90, /*mirrored=*/true),
+ /*coded_size=*/gfx::Size(640, 368),
+ /*visible_rect=*/gfx::Rect(1, 1, 630, 360),
+ /*natural_size=*/gfx::Size(1260, 720),
+ /*extra_data=*/std::vector<uint8_t>{1, 2, 3},
+ EncryptionScheme::kUnencrypted);
+ DCHECK(config.IsValidConfig());
+ return config;
+}
+
+class MockVideoFrameHandleReleaser : public mojom::VideoFrameHandleReleaser {
+ public:
+ explicit MockVideoFrameHandleReleaser(
+ mojo::PendingReceiver<mojom::VideoFrameHandleReleaser>
+ video_frame_handle_releaser)
+ : video_frame_handle_releaser_receiver_(
+ this,
+ std::move(video_frame_handle_releaser)) {}
+ MockVideoFrameHandleReleaser(const MockVideoFrameHandleReleaser&) = delete;
+ MockVideoFrameHandleReleaser& operator=(const MockVideoFrameHandleReleaser&) =
+ delete;
+ ~MockVideoFrameHandleReleaser() override = default;
+
+ // mojom::VideoFrameHandleReleaser implementation.
+ MOCK_METHOD2(ReleaseVideoFrame,
+ void(const base::UnguessableToken& release_token,
+ const gpu::SyncToken& release_sync_token));
+
+ private:
+ mojo::Receiver<mojom::VideoFrameHandleReleaser>
+ video_frame_handle_releaser_receiver_;
+};
+
+class MockVideoDecoder : public mojom::VideoDecoder {
+ public:
+ MockVideoDecoder() = default;
+ MockVideoDecoder(const MockVideoDecoder&) = delete;
+ MockVideoDecoder& operator=(const MockVideoDecoder&) = delete;
+ ~MockVideoDecoder() override = default;
+
+ mojo::AssociatedRemote<mojom::VideoDecoderClient> TakeClientRemote() {
+ return std::move(client_remote_);
+ }
+ mojo::Remote<mojom::MediaLog> TakeMediaLogRemote() {
+ return std::move(media_log_remote_);
+ }
+ std::unique_ptr<StrictMock<MockVideoFrameHandleReleaser>>
+ TakeVideoFrameHandleReleaser() {
+ return std::move(video_frame_handle_releaser_);
+ }
+ std::unique_ptr<MojoDecoderBufferReader> TakeMojoDecoderBufferReader() {
+ return std::move(mojo_decoder_buffer_reader_);
+ };
+
+ // mojom::VideoDecoder implementation.
+ MOCK_METHOD1(GetSupportedConfigs, void(GetSupportedConfigsCallback callback));
+ void Construct(
+ mojo::PendingAssociatedRemote<mojom::VideoDecoderClient> client,
+ mojo::PendingRemote<mojom::MediaLog> media_log,
+ mojo::PendingReceiver<mojom::VideoFrameHandleReleaser>
+ video_frame_handle_releaser,
+ mojo::ScopedDataPipeConsumerHandle decoder_buffer_pipe,
+ mojom::CommandBufferIdPtr command_buffer_id,
+ const gfx::ColorSpace& target_color_space) final {
+ client_remote_.Bind(std::move(client));
+ media_log_remote_.Bind(std::move(media_log));
+ video_frame_handle_releaser_ =
+ std::make_unique<StrictMock<MockVideoFrameHandleReleaser>>(
+ std::move(video_frame_handle_releaser));
+ DoConstruct(std::move(command_buffer_id), target_color_space);
+ }
+ MOCK_METHOD2(DoConstruct,
+ void(mojom::CommandBufferIdPtr command_buffer_id,
+ const gfx::ColorSpace& target_color_space));
+ MOCK_METHOD4(Initialize,
+ void(const VideoDecoderConfig& config,
+ bool low_delay,
+ const absl::optional<base::UnguessableToken>& cdm_id,
+ InitializeCallback callback));
+ MOCK_METHOD2(Decode,
+ void(mojom::DecoderBufferPtr buffer, DecodeCallback callback));
+ MOCK_METHOD1(Reset, void(ResetCallback callback));
+ MOCK_METHOD1(OnOverlayInfoChanged, void(const OverlayInfo& overlay_info));
+
+ private:
+ mojo::AssociatedRemote<mojom::VideoDecoderClient> client_remote_;
+ mojo::Remote<mojom::MediaLog> media_log_remote_;
+ std::unique_ptr<StrictMock<MockVideoFrameHandleReleaser>>
+ video_frame_handle_releaser_;
+ std::unique_ptr<MojoDecoderBufferReader> mojo_decoder_buffer_reader_;
+};
+
+class MockStableVideoDecoderClient : public stable::mojom::VideoDecoderClient {
+ public:
+ explicit MockStableVideoDecoderClient(
+ mojo::PendingAssociatedReceiver<stable::mojom::VideoDecoderClient>
+ pending_receiver)
+ : receiver_(this, std::move(pending_receiver)) {}
+ MockStableVideoDecoderClient(const MockStableVideoDecoderClient&) = delete;
+ MockStableVideoDecoderClient& operator=(const MockStableVideoDecoderClient&) =
+ delete;
+ ~MockStableVideoDecoderClient() override = default;
+
+ // stable::mojom::VideoDecoderClient implementation.
+ MOCK_METHOD3(OnVideoFrameDecoded,
+ void(const scoped_refptr<VideoFrame>& frame,
+ bool can_read_without_stalling,
+ const base::UnguessableToken& release_token));
+ MOCK_METHOD1(OnWaiting, void(WaitingReason reason));
+
+ private:
+ mojo::AssociatedReceiver<stable::mojom::VideoDecoderClient> receiver_;
+};
+
+class MockStableMediaLog : public stable::mojom::MediaLog {
+ public:
+ explicit MockStableMediaLog(
+ mojo::PendingReceiver<stable::mojom::MediaLog> pending_receiver)
+ : receiver_(this, std::move(pending_receiver)) {}
+ MockStableMediaLog(const MockStableMediaLog&) = delete;
+ MockStableMediaLog& operator=(const MockStableMediaLog&) = delete;
+ ~MockStableMediaLog() override = default;
+
+ // stable::mojom::MediaLog implementation.
+ MOCK_METHOD1(AddLogRecord, void(const MediaLogRecord& event));
+
+ private:
+ mojo::Receiver<stable::mojom::MediaLog> receiver_;
+};
+
+// AuxiliaryEndpoints groups the endpoints that support the operation of a
+// StableVideoDecoderService and that come from the Construct() call. That way,
+// tests can easily poke at one endpoint and set expectations on the other. For
+// example, a test might want to simulate the scenario in which a frame has been
+// decoded by the underlying mojom::VideoDecoder. In this case, the test can
+// call |video_decoder_client_remote|->OnVideoFrameDecoded() and then set an
+// expectation on |mock_stable_video_decoder_client|->OnVideoFrameDecoded().
+struct AuxiliaryEndpoints {
+ // |video_decoder_client_remote| is the client that the underlying
+ // mojom::VideoDecoder receives through the Construct() call. Tests can make
+ // calls on it and those calls should ultimately be received by the
+ // |mock_stable_video_decoder_client|.
+ mojo::AssociatedRemote<mojom::VideoDecoderClient> video_decoder_client_remote;
+ std::unique_ptr<StrictMock<MockStableVideoDecoderClient>>
+ mock_stable_video_decoder_client;
+
+ // |media_log_remote| is the MediaLog that the underlying mojom::VideoDecoder
+ // receives through the Construct() call. Tests can make calls on it and those
+ // calls should ultimately be received by the |mock_stable_media_log|.
+ mojo::Remote<mojom::MediaLog> media_log_remote;
+ std::unique_ptr<StrictMock<MockStableMediaLog>> mock_stable_media_log;
+
+ // Tests can use |stable_video_frame_handle_releaser_remote| to simulate
+ // releasing a VideoFrame.
+ // |mock_video_frame_handle_releaser| is the VideoFrameHandleReleaser that's
+ // setup when the underlying mojom::VideoDecoder receives a Construct() call.
+ // Tests can make calls on |stable_video_frame_handle_releaser_remote| and
+ // they should be ultimately received by the
+ // |mock_video_frame_handle_releaser|.
+ mojo::Remote<stable::mojom::VideoFrameHandleReleaser>
+ stable_video_frame_handle_releaser_remote;
+ std::unique_ptr<StrictMock<MockVideoFrameHandleReleaser>>
+ mock_video_frame_handle_releaser;
+
+ // |mojo_decoder_buffer_reader| wraps the reading end of the data pipe that
+ // the underlying mojom::VideoDecoder receives through the Construct() call.
+ // Tests can write data using the |mojo_decoder_buffer_writer| and that data
+ // should be ultimately received by the |mojo_decoder_buffer_reader|.
+ std::unique_ptr<MojoDecoderBufferWriter> mojo_decoder_buffer_writer;
+ std::unique_ptr<MojoDecoderBufferReader> mojo_decoder_buffer_reader;
+};
+
+// Calls Construct() on |stable_video_decoder_remote| and, if
+// |expect_construct_call| is true, expects a corresponding Construct() call on
+// |mock_video_decoder| which is assumed to be the backing decoder of
+// |stable_video_decoder_remote|. Returns nullptr if the expectations on
+// |mock_video_decoder| are violated. Otherwise, returns an AuxiliaryEndpoints
+// instance that contains the supporting endpoints that tests can use to
+// interact with the auxiliary interfaces used by the
+// |stable_video_decoder_remote|.
+std::unique_ptr<AuxiliaryEndpoints> ConstructStableVideoDecoder(
+ mojo::Remote<stable::mojom::StableVideoDecoder>&
+ stable_video_decoder_remote,
+ StrictMock<MockVideoDecoder>& mock_video_decoder,
+ bool expect_construct_call) {
+ constexpr gfx::ColorSpace kTargetColorSpace = gfx::ColorSpace::CreateSRGB();
+ if (expect_construct_call) {
+ EXPECT_CALL(mock_video_decoder,
+ DoConstruct(/*command_buffer_id=*/_,
+ /*target_color_space=*/kTargetColorSpace));
+ }
+ mojo::PendingAssociatedRemote<stable::mojom::VideoDecoderClient>
+ stable_video_decoder_client_remote;
+ auto mock_stable_video_decoder_client =
+ std::make_unique<StrictMock<MockStableVideoDecoderClient>>(
+ stable_video_decoder_client_remote
+ .InitWithNewEndpointAndPassReceiver());
+
+ mojo::PendingRemote<stable::mojom::MediaLog> stable_media_log_remote;
+ auto mock_stable_media_log = std::make_unique<StrictMock<MockStableMediaLog>>(
+ stable_media_log_remote.InitWithNewPipeAndPassReceiver());
+
+ mojo::Remote<stable::mojom::VideoFrameHandleReleaser>
+ video_frame_handle_releaser_remote;
+
+ mojo::ScopedDataPipeConsumerHandle remote_consumer_handle;
+ std::unique_ptr<MojoDecoderBufferWriter> mojo_decoder_buffer_writer =
+ MojoDecoderBufferWriter::Create(
+ GetDefaultDecoderBufferConverterCapacity(DemuxerStream::VIDEO),
+ &remote_consumer_handle);
+
+ stable_video_decoder_remote->Construct(
+ std::move(stable_video_decoder_client_remote),
+ std::move(stable_media_log_remote),
+ video_frame_handle_releaser_remote.BindNewPipeAndPassReceiver(),
+ std::move(remote_consumer_handle), kTargetColorSpace);
+ stable_video_decoder_remote.FlushForTesting();
+
+ if (!Mock::VerifyAndClearExpectations(&mock_video_decoder))
+ return nullptr;
+
+ auto auxiliary_endpoints = std::make_unique<AuxiliaryEndpoints>();
+
+ auxiliary_endpoints->video_decoder_client_remote =
+ mock_video_decoder.TakeClientRemote();
+ auxiliary_endpoints->mock_stable_video_decoder_client =
+ std::move(mock_stable_video_decoder_client);
+
+ auxiliary_endpoints->media_log_remote =
+ mock_video_decoder.TakeMediaLogRemote();
+ auxiliary_endpoints->mock_stable_media_log = std::move(mock_stable_media_log);
+
+ auxiliary_endpoints->stable_video_frame_handle_releaser_remote =
+ std::move(video_frame_handle_releaser_remote);
+ auxiliary_endpoints->mock_video_frame_handle_releaser =
+ mock_video_decoder.TakeVideoFrameHandleReleaser();
+
+ auxiliary_endpoints->mojo_decoder_buffer_writer =
+ std::move(mojo_decoder_buffer_writer);
+ auxiliary_endpoints->mojo_decoder_buffer_reader =
+ mock_video_decoder.TakeMojoDecoderBufferReader();
+
+ return auxiliary_endpoints;
+}
+
+class StableVideoDecoderServiceTest : public testing::Test {
+ public:
+ StableVideoDecoderServiceTest() {
+ stable_video_decoder_factory_service_
+ .SetVideoDecoderCreationCallbackForTesting(
+ video_decoder_creation_cb_.Get());
+ }
+
+ StableVideoDecoderServiceTest(const StableVideoDecoderServiceTest&) = delete;
+ StableVideoDecoderServiceTest& operator=(
+ const StableVideoDecoderServiceTest&) = delete;
+ ~StableVideoDecoderServiceTest() override = default;
+
+ void SetUp() override {
+ mojo::PendingReceiver<stable::mojom::StableVideoDecoderFactory>
+ stable_video_decoder_factory_receiver;
+ stable_video_decoder_factory_remote_ =
+ mojo::Remote<stable::mojom::StableVideoDecoderFactory>(
+ stable_video_decoder_factory_receiver
+ .InitWithNewPipeAndPassRemote());
+ stable_video_decoder_factory_service_.BindReceiver(
+ std::move(stable_video_decoder_factory_receiver));
+ ASSERT_TRUE(stable_video_decoder_factory_remote_.is_connected());
+ }
+
+ protected:
+ mojo::Remote<stable::mojom::StableVideoDecoder> CreateStableVideoDecoder(
+ std::unique_ptr<StrictMock<MockVideoDecoder>> dst_video_decoder) {
+ // Each CreateStableVideoDecoder() should result in exactly one call to the
+ // video decoder creation callback, i.e., the
+ // StableVideoDecoderFactoryService should not re-use mojom::VideoDecoder
+ // implementation instances.
+ EXPECT_CALL(video_decoder_creation_cb_, Run(_, _))
+ .WillOnce(Return(ByMove(std::move(dst_video_decoder))));
+ mojo::PendingReceiver<stable::mojom::StableVideoDecoder>
+ stable_video_decoder_receiver;
+ mojo::Remote<stable::mojom::StableVideoDecoder> video_decoder_remote(
+ stable_video_decoder_receiver.InitWithNewPipeAndPassRemote());
+ stable_video_decoder_factory_remote_->CreateStableVideoDecoder(
+ std::move(stable_video_decoder_receiver));
+ stable_video_decoder_factory_remote_.FlushForTesting();
+ if (!Mock::VerifyAndClearExpectations(&video_decoder_creation_cb_))
+ return {};
+ return video_decoder_remote;
+ }
+
+ base::test::TaskEnvironment task_environment_;
+ StrictMock<base::MockRepeatingCallback<std::unique_ptr<
+ mojom::VideoDecoder>(MojoMediaClient*, MojoCdmServiceContext*)>>
+ video_decoder_creation_cb_;
+ StableVideoDecoderFactoryService stable_video_decoder_factory_service_;
+ mojo::Remote<stable::mojom::StableVideoDecoderFactory>
+ stable_video_decoder_factory_remote_;
+ mojo::Remote<stable::mojom::StableVideoDecoder> stable_video_decoder_remote_;
+};
+
+// Tests that we can create multiple StableVideoDecoder implementation instances
+// through the StableVideoDecoderFactory and that they can exist concurrently.
+TEST_F(StableVideoDecoderServiceTest, FactoryCanCreateStableVideoDecoders) {
+ std::vector<mojo::Remote<stable::mojom::StableVideoDecoder>>
+ stable_video_decoder_remotes;
+ constexpr size_t kNumConcurrentDecoders = 5u;
+ for (size_t i = 0u; i < kNumConcurrentDecoders; i++) {
+ auto mock_video_decoder = std::make_unique<StrictMock<MockVideoDecoder>>();
+ auto stable_video_decoder_remote =
+ CreateStableVideoDecoder(std::move(mock_video_decoder));
+ stable_video_decoder_remotes.push_back(
+ std::move(stable_video_decoder_remote));
+ }
+ for (const auto& remote : stable_video_decoder_remotes) {
+ ASSERT_TRUE(remote.is_bound());
+ ASSERT_TRUE(remote.is_connected());
+ }
+}
+
+// Tests that a call to stable::mojom::StableVideoDecoder::Construct() gets
+// routed correctly to the underlying mojom::VideoDecoder.
+TEST_F(StableVideoDecoderServiceTest, StableVideoDecoderCanBeConstructed) {
+ auto mock_video_decoder = std::make_unique<StrictMock<MockVideoDecoder>>();
+ auto* mock_video_decoder_raw = mock_video_decoder.get();
+ auto stable_video_decoder_remote =
+ CreateStableVideoDecoder(std::move(mock_video_decoder));
+ ASSERT_TRUE(stable_video_decoder_remote.is_bound());
+ ASSERT_TRUE(stable_video_decoder_remote.is_connected());
+ ASSERT_TRUE(ConstructStableVideoDecoder(stable_video_decoder_remote,
+ *mock_video_decoder_raw,
+ /*expect_construct_call=*/true));
+}
+
+// Tests that if two calls to stable::mojom::StableVideoDecoder::Construct()
+// are made, only one is routed to the underlying mojom::VideoDecoder.
+TEST_F(StableVideoDecoderServiceTest,
+ StableVideoDecoderCannotBeConstructedTwice) {
+ auto mock_video_decoder = std::make_unique<StrictMock<MockVideoDecoder>>();
+ auto* mock_video_decoder_raw = mock_video_decoder.get();
+ auto stable_video_decoder_remote =
+ CreateStableVideoDecoder(std::move(mock_video_decoder));
+ ASSERT_TRUE(stable_video_decoder_remote.is_bound());
+ ASSERT_TRUE(stable_video_decoder_remote.is_connected());
+ EXPECT_TRUE(ConstructStableVideoDecoder(stable_video_decoder_remote,
+ *mock_video_decoder_raw,
+ /*expect_construct_call=*/true));
+ EXPECT_TRUE(ConstructStableVideoDecoder(stable_video_decoder_remote,
+ *mock_video_decoder_raw,
+ /*expect_construct_call=*/false));
+}
+
+// Tests that a call to stable::mojom::StableVideoDecoder::Initialize() gets
+// routed correctly to the underlying mojom::VideoDecoder. Also tests that when
+// the underlying mojom::VideoDecoder calls the initialization callback, the
+// call gets routed to the client.
+TEST_F(StableVideoDecoderServiceTest, StableVideoDecoderCanBeInitialized) {
+ auto mock_video_decoder = std::make_unique<StrictMock<MockVideoDecoder>>();
+ auto* mock_video_decoder_raw = mock_video_decoder.get();
+ auto stable_video_decoder_remote =
+ CreateStableVideoDecoder(std::move(mock_video_decoder));
+ ASSERT_TRUE(stable_video_decoder_remote.is_bound());
+ ASSERT_TRUE(stable_video_decoder_remote.is_connected());
+ auto auxiliary_endpoints = ConstructStableVideoDecoder(
+ stable_video_decoder_remote, *mock_video_decoder_raw,
+ /*expect_construct_call=*/true);
+ ASSERT_TRUE(auxiliary_endpoints);
+
+ const VideoDecoderConfig config_to_send = CreateValidVideoDecoderConfig();
+ VideoDecoderConfig received_config;
+ constexpr bool kLowDelay = true;
+ constexpr absl::optional<base::UnguessableToken> kCdmId = absl::nullopt;
+ StrictMock<base::MockOnceCallback<void(
+ const media::DecoderStatus& status, bool needs_bitstream_conversion,
+ int32_t max_decode_requests, VideoDecoderType decoder_type)>>
+ initialize_cb_to_send;
+ mojom::VideoDecoder::InitializeCallback received_initialize_cb;
+ const DecoderStatus kDecoderStatus = DecoderStatus::Codes::kAborted;
+ constexpr bool kNeedsBitstreamConversion = true;
+ constexpr int32_t kMaxDecodeRequests = 123;
+ constexpr VideoDecoderType kDecoderType = VideoDecoderType::kVda;
+
+ EXPECT_CALL(*mock_video_decoder_raw,
+ Initialize(/*config=*/_, kLowDelay, kCdmId,
+ /*callback=*/_))
+ .WillOnce([&](const VideoDecoderConfig& config, bool low_delay,
+ const absl::optional<base::UnguessableToken>& cdm_id,
+ mojom::VideoDecoder::InitializeCallback callback) {
+ received_config = config;
+ received_initialize_cb = std::move(callback);
+ });
+ EXPECT_CALL(initialize_cb_to_send,
+ Run(kDecoderStatus, kNeedsBitstreamConversion, kMaxDecodeRequests,
+ kDecoderType));
+ stable_video_decoder_remote->Initialize(
+ config_to_send, kLowDelay,
+ mojo::PendingRemote<stable::mojom::StableCdmContext>(),
+ initialize_cb_to_send.Get());
+ stable_video_decoder_remote.FlushForTesting();
+ ASSERT_TRUE(Mock::VerifyAndClearExpectations(mock_video_decoder_raw));
+
+ std::move(received_initialize_cb)
+ .Run(kDecoderStatus, kNeedsBitstreamConversion, kMaxDecodeRequests,
+ kDecoderType);
+ task_environment_.RunUntilIdle();
+}
+
+// Tests that the StableVideoDecoderService rejects a call to
+// stable::mojom::StableVideoDecoder::Initialize() before
+// stable::mojom::StableVideoDecoder::Construct() gets called.
+TEST_F(StableVideoDecoderServiceTest,
+ StableVideoDecoderCannotBeInitializedBeforeConstruction) {
+ auto mock_video_decoder = std::make_unique<StrictMock<MockVideoDecoder>>();
+ auto stable_video_decoder_remote =
+ CreateStableVideoDecoder(std::move(mock_video_decoder));
+ ASSERT_TRUE(stable_video_decoder_remote.is_bound());
+ ASSERT_TRUE(stable_video_decoder_remote.is_connected());
+
+ const VideoDecoderConfig config_to_send = CreateValidVideoDecoderConfig();
+ constexpr bool kLowDelay = true;
+ StrictMock<base::MockOnceCallback<void(
+ const media::DecoderStatus& status, bool needs_bitstream_conversion,
+ int32_t max_decode_requests, VideoDecoderType decoder_type)>>
+ initialize_cb_to_send;
+
+ EXPECT_CALL(initialize_cb_to_send,
+ Run(DecoderStatus(DecoderStatus::Codes::kFailed),
+ /*needs_bitstream_conversion=*/false,
+ /*max_decode_requests=*/1, VideoDecoderType::kUnknown));
+ stable_video_decoder_remote->Initialize(
+ config_to_send, kLowDelay,
+ mojo::PendingRemote<stable::mojom::StableCdmContext>(),
+ initialize_cb_to_send.Get());
+ stable_video_decoder_remote.FlushForTesting();
+}
+
+TEST_F(StableVideoDecoderServiceTest,
+ StableVideoDecoderClientReceivesOnVideoFrameDecodedEvent) {
+ auto mock_video_decoder = std::make_unique<StrictMock<MockVideoDecoder>>();
+ auto* mock_video_decoder_raw = mock_video_decoder.get();
+ auto stable_video_decoder_remote =
+ CreateStableVideoDecoder(std::move(mock_video_decoder));
+ ASSERT_TRUE(stable_video_decoder_remote.is_bound());
+ ASSERT_TRUE(stable_video_decoder_remote.is_connected());
+ auto auxiliary_endpoints = ConstructStableVideoDecoder(
+ stable_video_decoder_remote, *mock_video_decoder_raw,
+ /*expect_construct_call=*/true);
+ ASSERT_TRUE(auxiliary_endpoints);
+ ASSERT_TRUE(auxiliary_endpoints->video_decoder_client_remote);
+ ASSERT_TRUE(auxiliary_endpoints->mock_stable_video_decoder_client);
+
+ const auto token_for_release = base::UnguessableToken::Create();
+ scoped_refptr<VideoFrame> video_frame_to_send = VideoFrame::CreateEOSFrame();
+ scoped_refptr<VideoFrame> video_frame_received;
+ constexpr bool kCanReadWithoutStalling = true;
+ EXPECT_CALL(
+ *auxiliary_endpoints->mock_stable_video_decoder_client,
+ OnVideoFrameDecoded(_, kCanReadWithoutStalling, token_for_release))
+ .WillOnce(SaveArg<0>(&video_frame_received));
+ auxiliary_endpoints->video_decoder_client_remote->OnVideoFrameDecoded(
+ video_frame_to_send, kCanReadWithoutStalling, token_for_release);
+ auxiliary_endpoints->video_decoder_client_remote.FlushForTesting();
+ ASSERT_TRUE(video_frame_received);
+ EXPECT_TRUE(video_frame_received->metadata().end_of_stream);
+}
+
+} // namespace
+
+} // namespace media
diff --git a/chromium/media/mojo/services/webrtc_video_perf_fuzzer_seed_corpus/update_record_and_get_perf.textproto b/chromium/media/mojo/services/webrtc_video_perf_fuzzer_seed_corpus/update_record_and_get_perf.textproto
new file mode 100644
index 00000000000..45a109a09a4
--- /dev/null
+++ b/chromium/media/mojo/services/webrtc_video_perf_fuzzer_seed_corpus/update_record_and_get_perf.textproto
@@ -0,0 +1,77 @@
+actions {
+ update_record {
+ features {
+ new {
+ id: 1
+ m_is_decode_stats: true
+ m_profile: 12
+ m_video_pixels: 2073600
+ m_hardware_accelerated: false
+ }
+ }
+ video_stats {
+ new {
+ id: 1
+ m_frames_processed: 400
+ m_key_frames_processed: 3
+ m_p99_processing_time_ms: 11.0
+ }
+ }
+ }
+}
+actions {
+ update_record {
+ features {
+ new {
+ id: 1
+ m_is_decode_stats: true
+ m_profile: 12
+ m_video_pixels: 8294400
+ m_hardware_accelerated: false
+ }
+ }
+ video_stats {
+ new {
+ id: 1
+ m_frames_processed: 600
+ m_key_frames_processed: 5
+ m_p99_processing_time_ms: 14.0
+ }
+ }
+ }
+}
+actions {
+ update_record {
+ features {
+ new {
+ id: 1
+ m_is_decode_stats: true
+ m_profile: 12
+ m_video_pixels: 0
+ m_hardware_accelerated: false
+ }
+ }
+ video_stats {
+ new {
+ id: 1
+ m_frames_processed: 0
+ m_key_frames_processed: 0
+ m_p99_processing_time_ms: 0.0
+ }
+ }
+ }
+}
+actions {
+ get_perf_info {
+ features {
+ new {
+ id: 1
+ m_is_decode_stats: true
+ m_profile: 12
+ m_video_pixels: 2073600
+ m_hardware_accelerated: false
+ }
+ }
+ frames_per_second: 60
+ }
+} \ No newline at end of file
diff --git a/chromium/media/mojo/services/webrtc_video_perf_history.cc b/chromium/media/mojo/services/webrtc_video_perf_history.cc
index a746b03c610..ab5f84db291 100644
--- a/chromium/media/mojo/services/webrtc_video_perf_history.cc
+++ b/chromium/media/mojo/services/webrtc_video_perf_history.cc
@@ -4,6 +4,8 @@
#include "media/mojo/services/webrtc_video_perf_history.h"
+#include <math.h>
+
#include "base/bind.h"
#include "base/callback.h"
#include "base/format_macros.h"
@@ -167,6 +169,7 @@ bool AreVideoStatsInvalid(const media::mojom::WebrtcVideoStats& video_stats) {
video_stats.frames_processed >
WebrtcVideoStatsDB::kFramesProcessedMaxValue ||
video_stats.key_frames_processed > video_stats.frames_processed ||
+ isnan(video_stats.p99_processing_time_ms) ||
video_stats.p99_processing_time_ms <
WebrtcVideoStatsDB::kP99ProcessingTimeMinValueMs ||
video_stats.p99_processing_time_ms >
@@ -206,9 +209,9 @@ void WebrtcVideoPerfHistory::InitDatabase() {
// initialized during their lifetime.
DCHECK_EQ(db_init_status_, UNINITIALIZED);
+ db_init_status_ = PENDING;
db_->Initialize(base::BindOnce(&WebrtcVideoPerfHistory::OnDatabaseInit,
weak_ptr_factory_.GetWeakPtr()));
- db_init_status_ = PENDING;
}
void WebrtcVideoPerfHistory::OnDatabaseInit(bool success) {
@@ -310,16 +313,16 @@ void WebrtcVideoPerfHistory::OnGotStatsCollectionForRequest(
// inserted as a placeholder.
std::vector<absl::optional<bool>> smooth_per_pixel;
absl::optional<size_t> specific_key_index;
- for (auto const& stats : *stats_collection) {
- if (stats.first >= video_key.pixels && !specific_key_index) {
+ for (auto const& [key_index, video_stats_entry] : *stats_collection) {
+ if (key_index >= video_key.pixels && !specific_key_index) {
specific_key_index = smooth_per_pixel.size();
- if (stats.first > video_key.pixels) {
+ if (key_index > video_key.pixels) {
// No exact match found, insert a nullopt.
smooth_per_pixel.push_back(absl::nullopt);
}
}
smooth_per_pixel.push_back(PredictSmooth(
- video_key.is_decode_stats, stats.second, frames_per_second));
+ video_key.is_decode_stats, video_stats_entry, frames_per_second));
}
if (!specific_key_index) {
// Pixels for the specific key is higher than any pixels number that
diff --git a/chromium/media/mojo/services/webrtc_video_perf_history_unittest.cc b/chromium/media/mojo/services/webrtc_video_perf_history_unittest.cc
index 89833cb48e1..5fc59d60f26 100644
--- a/chromium/media/mojo/services/webrtc_video_perf_history_unittest.cc
+++ b/chromium/media/mojo/services/webrtc_video_perf_history_unittest.cc
@@ -118,12 +118,11 @@ class FakeWebrtcVideoStatsDB : public WebrtcVideoStatsDB {
WebrtcVideoStatsDB::VideoStatsCollection collection;
std::string key_filter = key.SerializeWithoutPixels();
- for (auto const& entry : entries_) {
- if (entry.first.rfind(key_filter, 0) == 0) {
- absl::optional<int> pixels =
- VideoDescKey::ParsePixelsFromKey(entry.first);
+ for (auto const& [str, video_stats_entry] : entries_) {
+ if (str.rfind(key_filter, 0) == 0) {
+ absl::optional<int> pixels = VideoDescKey::ParsePixelsFromKey(str);
if (pixels) {
- collection.insert({*pixels, std::move(entry.second)});
+ collection.insert({*pixels, std::move(video_stats_entry)});
}
}
}
diff --git a/chromium/media/mojo/services/webrtc_video_perf_mojolpm_fuzzer.cc b/chromium/media/mojo/services/webrtc_video_perf_mojolpm_fuzzer.cc
new file mode 100644
index 00000000000..88839462f9a
--- /dev/null
+++ b/chromium/media/mojo/services/webrtc_video_perf_mojolpm_fuzzer.cc
@@ -0,0 +1,226 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <utility>
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/notreached.h"
+#include "base/test/task_environment.h"
+#include "base/test/test_timeouts.h"
+#include "components/leveldb_proto/testing/fake_db.h"
+#include "media/capabilities/webrtc_video_stats.pb.h"
+#include "media/capabilities/webrtc_video_stats_db_impl.h"
+#include "media/mojo/mojom/webrtc_video_perf.mojom-mojolpm.h"
+#include "media/mojo/services/webrtc_video_perf_history.h"
+#include "media/mojo/services/webrtc_video_perf_mojolpm_fuzzer.pb.h"
+#include "media/mojo/services/webrtc_video_perf_recorder.h"
+#include "third_party/libprotobuf-mutator/src/src/libfuzzer/libfuzzer_macro.h"
+
+namespace media {
+
+// Helper class to call private constructor of friend class.
+class WebrtcVideoPerfLPMFuzzerHelper {
+ public:
+ static std::unique_ptr<WebrtcVideoStatsDBImpl> CreateWebrtcVideoStatsDbImpl(
+ std::unique_ptr<leveldb_proto::ProtoDatabase<WebrtcVideoStatsEntryProto>>
+ proto_db) {
+ return base::WrapUnique(new WebrtcVideoStatsDBImpl(std::move(proto_db)));
+ }
+};
+
+namespace {
+
+struct InitGlobals {
+ InitGlobals() {
+ // The call to CommandLine::Init is needed so that TestTimeouts::Initialize
+ // does not fail.
+ bool success = base::CommandLine::Init(0, nullptr);
+ DCHECK(success);
+ // TaskEnvironment requires TestTimeouts initialization to watch for
+ // problematic long-running tasks.
+ TestTimeouts::Initialize();
+
+ // Mark this thread as an IO_THREAD with MOCK_TIME, and ensure that Now()
+ // is driven from the same mock clock.
+ task_environment = std::make_unique<base::test::TaskEnvironment>(
+ base::test::TaskEnvironment::MainThreadType::IO,
+ base::test::TaskEnvironment::TimeSource::MOCK_TIME);
+ }
+
+ // This allows us to mock time for all threads.
+ std::unique_ptr<base::test::TaskEnvironment> task_environment;
+};
+
+InitGlobals* init_globals = new InitGlobals();
+
+base::test::TaskEnvironment& GetEnvironment() {
+ return *init_globals->task_environment;
+}
+
+scoped_refptr<base::SingleThreadTaskRunner> GetFuzzerTaskRunner() {
+ return GetEnvironment().GetMainThreadTaskRunner();
+}
+
+// This in-memory database uses the FakeDB proto implementation as the
+// underlying storage. The underlying FakeDB class requires that all callbacks
+// are triggered manually. This class is used as a convenience class triggering
+// the callbacks with success=true.
+class InMemoryWebrtcVideoPerfDb
+ : public leveldb_proto::test::FakeDB<WebrtcVideoStatsEntryProto> {
+ public:
+ explicit InMemoryWebrtcVideoPerfDb(EntryMap* db) : FakeDB(db) {}
+
+ // Partial ProtoDatabase implementation.
+ void Init(leveldb_proto::Callbacks::InitStatusCallback callback) override {
+ FakeDB::Init(std::move(callback));
+ InitStatusCallback(leveldb_proto::Enums::InitStatus::kOK);
+ }
+
+ void GetEntry(
+ const std::string& key,
+ typename leveldb_proto::Callbacks::Internal<
+ WebrtcVideoStatsEntryProto>::GetCallback callback) override {
+ FakeDB::GetEntry(key, std::move(callback));
+ // Run callback.
+ GetCallback(true);
+ }
+
+ void LoadKeysAndEntriesWhile(
+ const std::string& start,
+ const leveldb_proto::KeyIteratorController& controller,
+ typename leveldb_proto::Callbacks::Internal<WebrtcVideoStatsEntryProto>::
+ LoadKeysAndEntriesCallback callback) override {
+ FakeDB::LoadKeysAndEntriesWhile(start, controller, std::move(callback));
+ // Run callback.
+ LoadCallback(true);
+ }
+
+ void UpdateEntries(
+ std::unique_ptr<typename ProtoDatabase<
+ WebrtcVideoStatsEntryProto>::KeyEntryVector> entries_to_save,
+ std::unique_ptr<std::vector<std::string>> keys_to_remove,
+ leveldb_proto::Callbacks::UpdateCallback callback) override {
+ FakeDB::UpdateEntries(std::move(entries_to_save), std::move(keys_to_remove),
+ std::move(callback));
+ // Run callback.
+ UpdateCallback(true);
+ }
+
+ void UpdateEntriesWithRemoveFilter(
+ std::unique_ptr<typename leveldb_proto::Util::Internal<
+ WebrtcVideoStatsEntryProto>::KeyEntryVector> entries_to_save,
+ const leveldb_proto::KeyFilter& filter,
+ leveldb_proto::Callbacks::UpdateCallback callback) override {
+ FakeDB::UpdateEntriesWithRemoveFilter(std::move(entries_to_save), filter,
+ std::move(callback));
+ // Run callback.
+ UpdateCallback(true);
+ }
+};
+
+class WebrtcVideoPerfLPMFuzzer {
+ public:
+ WebrtcVideoPerfLPMFuzzer(
+ const fuzzing::webrtc_video_perf::proto::Testcase& testcase)
+ : testcase_(testcase) {
+ // Create all objects that are needed and connect everything.
+ in_memory_db_ = new InMemoryWebrtcVideoPerfDb(&in_memory_db_map_);
+ std::unique_ptr<WebrtcVideoStatsDBImpl> stats_db =
+ WebrtcVideoPerfLPMFuzzerHelper::CreateWebrtcVideoStatsDbImpl(
+ std::unique_ptr<InMemoryWebrtcVideoPerfDb>(in_memory_db_));
+ perf_history_ =
+ std::make_unique<WebrtcVideoPerfHistory>(std::move(stats_db));
+ perf_recorder_ = std::make_unique<WebrtcVideoPerfRecorder>(
+ perf_history_->GetSaveCallback());
+ }
+
+ void NextAction() {
+ const auto& action = testcase_.actions(action_index_);
+ switch (action.action_case()) {
+ case fuzzing::webrtc_video_perf::proto::Action::kUpdateRecord: {
+ const auto& update_record = action.update_record();
+ auto features_ptr = media::mojom::WebrtcPredictionFeatures::New();
+ auto video_stats_ptr = media::mojom::WebrtcVideoStats::New();
+ mojolpm::FromProto(update_record.features(), features_ptr);
+ mojolpm::FromProto(update_record.video_stats(), video_stats_ptr);
+ perf_recorder_->UpdateRecord(std::move(features_ptr),
+ std::move(video_stats_ptr));
+ break;
+ }
+ case fuzzing::webrtc_video_perf::proto::Action::kGetPerfInfo: {
+ const auto& get_perf_info = action.get_perf_info();
+ auto features_ptr = media::mojom::WebrtcPredictionFeatures::New();
+ mojolpm::FromProto(get_perf_info.features(), features_ptr);
+ perf_history_->GetPerfInfo(std::move(features_ptr),
+ get_perf_info.frames_per_second(),
+ base::DoNothing());
+ break;
+ }
+ default: {
+ // Do nothing.
+ }
+ }
+ ++action_index_;
+ }
+
+ bool IsFinished() { return action_index_ >= testcase_.actions_size(); }
+
+ private:
+ const fuzzing::webrtc_video_perf::proto::Testcase& testcase_;
+ int action_index_ = 0;
+
+ // Database storage.
+ InMemoryWebrtcVideoPerfDb::EntryMap in_memory_db_map_;
+ // Proto buffer database implementation that uses `in_memory_db_map_` as
+ // storage.
+ raw_ptr<InMemoryWebrtcVideoPerfDb> in_memory_db_;
+ std::unique_ptr<WebrtcVideoPerfHistory> perf_history_;
+ std::unique_ptr<WebrtcVideoPerfRecorder> perf_recorder_;
+};
+
+void NextAction(WebrtcVideoPerfLPMFuzzer* testcase,
+ base::OnceClosure fuzzer_run_loop) {
+ if (!testcase->IsFinished()) {
+ testcase->NextAction();
+ GetFuzzerTaskRunner()->PostTask(
+ FROM_HERE, base::BindOnce(NextAction, base::Unretained(testcase),
+ std::move(fuzzer_run_loop)));
+ } else {
+ std::move(fuzzer_run_loop).Run();
+ }
+}
+
+void RunTestcase(WebrtcVideoPerfLPMFuzzer* testcase) {
+ base::RunLoop fuzzer_run_loop;
+ GetFuzzerTaskRunner()->PostTask(
+ FROM_HERE, base::BindOnce(NextAction, base::Unretained(testcase),
+ fuzzer_run_loop.QuitClosure()));
+ // Make sure that all callbacks have completed.
+ constexpr base::TimeDelta kTimeout = base::Seconds(5);
+ GetEnvironment().FastForwardBy(kTimeout);
+ fuzzer_run_loop.Run();
+}
+
+} // namespace
+
+DEFINE_BINARY_PROTO_FUZZER(
+ const fuzzing::webrtc_video_perf::proto::Testcase& testcase) {
+ if (!testcase.actions_size()) {
+ return;
+ }
+
+ WebrtcVideoPerfLPMFuzzer webtc_video_perf_fuzzer_instance(testcase);
+ base::RunLoop main_run_loop;
+
+ GetFuzzerTaskRunner()->PostTaskAndReply(
+ FROM_HERE,
+ base::BindOnce(RunTestcase,
+ base::Unretained(&webtc_video_perf_fuzzer_instance)),
+ main_run_loop.QuitClosure());
+ main_run_loop.Run();
+}
+
+} // namespace media \ No newline at end of file
diff --git a/chromium/media/mojo/services/webrtc_video_perf_mojolpm_fuzzer.proto b/chromium/media/mojo/services/webrtc_video_perf_mojolpm_fuzzer.proto
new file mode 100644
index 00000000000..1f5b264a70e
--- /dev/null
+++ b/chromium/media/mojo/services/webrtc_video_perf_mojolpm_fuzzer.proto
@@ -0,0 +1,36 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Message format for the MojoLPM fuzzer for the WebrtcVideoPerf interface.
+
+syntax = "proto2";
+
+package media.fuzzing.webrtc_video_perf.proto;
+
+import "media/mojo/mojom/webrtc_video_perf.mojom.mojolpm.proto";
+
+// Update the record for the specified features.
+message UpdateRecordAction {
+ required mojolpm.media.mojom.WebrtcPredictionFeatures features = 1;
+ required mojolpm.media.mojom.WebrtcVideoStats video_stats = 2;
+}
+
+// Get perf info for the specified features.
+message GetPerfInfoAction {
+ required mojolpm.media.mojom.WebrtcPredictionFeatures features = 1;
+ required int32 frames_per_second = 2;
+}
+
+// Actions that can be performed by the fuzzer.
+message Action {
+ oneof action {
+ UpdateRecordAction update_record = 1;
+ GetPerfInfoAction get_perf_info = 2;
+ }
+}
+
+// Testcase is the top-level message type interpreted by the fuzzer.
+message Testcase {
+ repeated Action actions = 1;
+} \ No newline at end of file
diff --git a/chromium/media/muxers/webm_muxer_unittest.cc b/chromium/media/muxers/webm_muxer_unittest.cc
index 45b8d3a2ae5..e23a386f95e 100644
--- a/chromium/media/muxers/webm_muxer_unittest.cc
+++ b/chromium/media/muxers/webm_muxer_unittest.cc
@@ -515,9 +515,7 @@ class WebmMuxerTestUnparametrized : public testing::Test {
return true;
}
bool OnNewBuffers(const media::StreamParser::BufferQueueMap& map) {
- for (const auto& kv : map) {
- int track_id = kv.first;
- const media::StreamParser::BufferQueue& queue = kv.second;
+ for (const auto& [track_id, queue] : map) {
for (const auto& stream_parser_buffer : queue) {
buffer_timestamps_ms_[track_id].push_back(
stream_parser_buffer->timestamp().InMilliseconds());
diff --git a/chromium/media/parsers/jpeg_parser.cc b/chromium/media/parsers/jpeg_parser.cc
index 9f41d04e993..a01ce87cf4e 100644
--- a/chromium/media/parsers/jpeg_parser.cc
+++ b/chromium/media/parsers/jpeg_parser.cc
@@ -7,6 +7,7 @@
#include <cstring>
#include "base/big_endian.h"
+#include "base/check_op.h"
#include "base/logging.h"
using base::BigEndianReader;
diff --git a/chromium/media/remoting/courier_renderer_unittest.cc b/chromium/media/remoting/courier_renderer_unittest.cc
index 10ab067088a..1ea0be41393 100644
--- a/chromium/media/remoting/courier_renderer_unittest.cc
+++ b/chromium/media/remoting/courier_renderer_unittest.cc
@@ -94,6 +94,7 @@ class RendererClientImpl final : public RendererClient {
// RendererClient implementation.
void OnError(PipelineStatus status) override {}
+ void OnFallback(PipelineStatus status) override {}
void OnEnded() override {}
MOCK_METHOD1(OnStatisticsUpdate, void(const PipelineStatistics& stats));
MOCK_METHOD2(OnBufferingStateChange,
diff --git a/chromium/media/remoting/demuxer_stream_adapter.cc b/chromium/media/remoting/demuxer_stream_adapter.cc
index 566e8f22c72..b1b3026a8b0 100644
--- a/chromium/media/remoting/demuxer_stream_adapter.cc
+++ b/chromium/media/remoting/demuxer_stream_adapter.cc
@@ -71,7 +71,7 @@ DemuxerStreamAdapter::DemuxerStreamAdapter(
stream_sender_.Bind(std::move(stream_sender_remote));
stream_sender_.set_disconnect_handler(
base::BindOnce(&DemuxerStreamAdapter::OnFatalError,
- weak_factory_.GetWeakPtr(), MOJO_PIPE_ERROR));
+ weak_factory_.GetWeakPtr(), MOJO_DISCONNECTED));
}
DemuxerStreamAdapter::~DemuxerStreamAdapter() {
@@ -237,12 +237,25 @@ void DemuxerStreamAdapter::ReadUntil(
void DemuxerStreamAdapter::EnableBitstreamConverter() {
DCHECK(media_task_runner_->BelongsToCurrentThread());
DEMUXER_VLOG(2) << "Received RPC_DS_ENABLEBITSTREAMCONVERTER";
+ bool is_command_sent = true;
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
demuxer_stream_->EnableBitstreamConverter();
#else
+ is_command_sent = false;
DEMUXER_VLOG(1) << "Ignoring EnableBitstreamConverter() RPC: Proprietary "
"codecs not enabled in this Chromium build.";
#endif
+
+ if (remote_callback_handle_ != RpcMessenger::kInvalidHandle) {
+ auto rpc = std::make_unique<openscreen::cast::RpcMessage>();
+ rpc->set_handle(remote_callback_handle_);
+ rpc->set_proc(
+ openscreen::cast::RpcMessage::RPC_DS_ENABLEBITSTREAMCONVERTER_CALLBACK);
+ rpc->set_boolean_value(is_command_sent);
+ main_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&RpcMessenger::SendMessageToRemote,
+ rpc_messenger_, *rpc));
+ }
}
void DemuxerStreamAdapter::RequestBuffer() {
@@ -320,7 +333,7 @@ void DemuxerStreamAdapter::WriteFrame() {
void DemuxerStreamAdapter::OnFrameWritten(bool success) {
if (!success) {
- OnFatalError(MOJO_PIPE_ERROR);
+ OnFatalError(DATA_PIPE_WRITE_ERROR);
return;
}
diff --git a/chromium/media/remoting/demuxer_stream_adapter_unittest.cc b/chromium/media/remoting/demuxer_stream_adapter_unittest.cc
index cdb3304b2ce..72bf24ed162 100644
--- a/chromium/media/remoting/demuxer_stream_adapter_unittest.cc
+++ b/chromium/media/remoting/demuxer_stream_adapter_unittest.cc
@@ -299,18 +299,36 @@ TEST_F(DemuxerStreamAdapterTest, DuplicateInitializeCausesFatalError) {
EXPECT_EQ(PEERS_OUT_OF_SYNC, errors[0]);
}
-TEST_F(DemuxerStreamAdapterTest, ClosingPipeCausesFatalError) {
+TEST_F(DemuxerStreamAdapterTest, ClosingMessagePipeCausesMojoDisconnected) {
std::vector<StopTrigger> errors;
demuxer_stream_adapter_->TakeErrors(&errors);
ASSERT_TRUE(errors.empty());
- // Closes one end of mojo message and data pipes.
+ // Closes one end of mojo message pipes.
data_stream_sender_.reset();
RunPendingTasks(); // Allow notification from mojo to propagate.
demuxer_stream_adapter_->TakeErrors(&errors);
ASSERT_EQ(1u, errors.size());
- EXPECT_EQ(MOJO_PIPE_ERROR, errors[0]);
+ EXPECT_EQ(MOJO_DISCONNECTED, errors[0]);
+}
+
+TEST_F(DemuxerStreamAdapterTest, ClosingDataPipeCausesWriteError) {
+ EXPECT_CALL(*demuxer_stream_, Read(_)).Times(1);
+
+ std::vector<StopTrigger> errors;
+ demuxer_stream_adapter_->TakeErrors(&errors);
+ ASSERT_TRUE(errors.empty());
+
+ // Closes the consumer end of the data pipe.
+ data_stream_sender_->CloseDataPipe();
+ demuxer_stream_->CreateFakeFrame(100, true /* key frame */, 1 /* pts */);
+ demuxer_stream_adapter_->FakeReadUntil(1, 999);
+ RunPendingTasks(); // Allow notification from mojo to propagate.
+
+ demuxer_stream_adapter_->TakeErrors(&errors);
+ ASSERT_EQ(1u, errors.size());
+ EXPECT_EQ(DATA_PIPE_WRITE_ERROR, errors[0]);
}
} // namespace remoting
diff --git a/chromium/media/remoting/fake_remoter.cc b/chromium/media/remoting/fake_remoter.cc
index 18ec3a2bd60..fa91911568d 100644
--- a/chromium/media/remoting/fake_remoter.cc
+++ b/chromium/media/remoting/fake_remoter.cc
@@ -94,6 +94,10 @@ bool FakeRemotingDataStreamSender::ValidateFrameBuffer(size_t index,
#endif // BUILDFLAG(ENABLE_MEDIA_REMOTING_RPC)
}
+void FakeRemotingDataStreamSender::CloseDataPipe() {
+ data_pipe_reader_.Close();
+}
+
void FakeRemotingDataStreamSender::SendFrame(uint32_t frame_size) {
next_frame_data_.resize(frame_size);
data_pipe_reader_.Read(
@@ -103,7 +107,8 @@ void FakeRemotingDataStreamSender::SendFrame(uint32_t frame_size) {
}
void FakeRemotingDataStreamSender::OnFrameRead(bool success) {
- EXPECT_TRUE(success);
+ if (!success)
+ return;
++send_frame_count_;
received_frame_list.push_back(std::move(next_frame_data_));
diff --git a/chromium/media/remoting/fake_remoter.h b/chromium/media/remoting/fake_remoter.h
index 8f321fad72b..666035a1861 100644
--- a/chromium/media/remoting/fake_remoter.h
+++ b/chromium/media/remoting/fake_remoter.h
@@ -37,6 +37,7 @@ class FakeRemotingDataStreamSender : public mojom::RemotingDataStreamSender {
size_t size,
bool key_frame,
int pts_ms);
+ void CloseDataPipe();
private:
// mojom::RemotingDataStreamSender implementation.
diff --git a/chromium/media/remoting/receiver.cc b/chromium/media/remoting/receiver.cc
index bba671ddfd6..2cba44ee20b 100644
--- a/chromium/media/remoting/receiver.cc
+++ b/chromium/media/remoting/receiver.cc
@@ -243,6 +243,10 @@ void Receiver::OnError(PipelineStatus status) {
SendRpcMessageOnMainThread(std::move(rpc));
}
+void Receiver::OnFallback(PipelineStatus status) {
+ NOTREACHED();
+}
+
void Receiver::OnEnded() {
auto rpc = cast_streaming::remoting::CreateMessageForMediaEnded();
rpc->set_handle(remote_handle_);
diff --git a/chromium/media/remoting/receiver.h b/chromium/media/remoting/receiver.h
index d43a5f20c2f..d1f0881b1bf 100644
--- a/chromium/media/remoting/receiver.h
+++ b/chromium/media/remoting/receiver.h
@@ -73,6 +73,7 @@ class Receiver final
// RendererClient implementation.
void OnError(PipelineStatus status) override;
+ void OnFallback(PipelineStatus status) override;
void OnEnded() override;
void OnStatisticsUpdate(const PipelineStatistics& stats) override;
void OnBufferingStateChange(BufferingState state,
diff --git a/chromium/media/remoting/renderer_controller.cc b/chromium/media/remoting/renderer_controller.cc
index 4492bfe45b4..002f8aa09fe 100644
--- a/chromium/media/remoting/renderer_controller.cc
+++ b/chromium/media/remoting/renderer_controller.cc
@@ -87,7 +87,8 @@ MediaObserverClient::ReasonToSwitchToLocal GetSwitchReason(
case PEERS_OUT_OF_SYNC:
case RPC_INVALID:
case DATA_PIPE_CREATE_ERROR:
- case MOJO_PIPE_ERROR:
+ case MOJO_DISCONNECTED:
+ case DATA_PIPE_WRITE_ERROR:
case MESSAGE_SEND_FAILED:
case DATA_SEND_FAILED:
case UNEXPECTED_FAILURE:
diff --git a/chromium/media/remoting/triggers.h b/chromium/media/remoting/triggers.h
index 5c2b7ecab90..8d4a07ebd09 100644
--- a/chromium/media/remoting/triggers.h
+++ b/chromium/media/remoting/triggers.h
@@ -41,8 +41,10 @@ enum StartTrigger {
//
// NOTE: Never re-number or re-use numbers for different triggers. These are
// used in UMA histograms, and must remain backwards-compatible for all time.
-// However, *do* change STOP_TRIGGER_MAX to one after the greatest value when
-// adding new ones. Also, don't forget to update histograms.xml!
+//
+// ADDITIONAL NOTE: The values are intentionally out-of-order to maintain a
+// logical grouping. When adding a new value, add one to STOP_TRIGGER_MAX, then
+// update STOP_TRIGGER_MAX. Also, don't forget to update enums.xml!
enum StopTrigger {
UNKNOWN_STOP_TRIGGER = 0,
@@ -70,7 +72,9 @@ enum StopTrigger {
PEERS_OUT_OF_SYNC = 15, // The local state disagrees with the remote.
RPC_INVALID = 16, // An RPC field value is missing or has bad data.
DATA_PIPE_CREATE_ERROR = 17, // Mojo data pipe creation failed (OOM?).
- MOJO_PIPE_ERROR = 18, // Mojo message/data pipe operation failed.
+ MOJO_DISCONNECTED = 18, // Mojo message pipe was disconnected; e.g, the
+ // browser shut down.
+ DATA_PIPE_WRITE_ERROR = 24, // Failure to write the mojo data pipe.
// Message/Data sending errors forcing shutdown.
MESSAGE_SEND_FAILED = 19, // Failed to send a RPC message to the sink.
@@ -82,7 +86,7 @@ enum StopTrigger {
USER_DISABLED = 23, // Media Remoting was disabled by user.
// Change this to the highest value.
- STOP_TRIGGER_MAX = 23,
+ STOP_TRIGGER_MAX = 24,
};
} // namespace remoting
diff --git a/chromium/media/renderers/BUILD.gn b/chromium/media/renderers/BUILD.gn
index e240678c18f..b409e49a610 100644
--- a/chromium/media/renderers/BUILD.gn
+++ b/chromium/media/renderers/BUILD.gn
@@ -72,6 +72,8 @@ source_set("renderers") {
"win/media_foundation_renderer.cc",
"win/media_foundation_renderer.h",
"win/media_foundation_renderer_extension.h",
+ "win/media_foundation_rendering_mode.cc",
+ "win/media_foundation_rendering_mode.h",
"win/media_foundation_source_wrapper.cc",
"win/media_foundation_source_wrapper.h",
"win/media_foundation_stream_wrapper.cc",
diff --git a/chromium/media/renderers/audio_renderer_impl.cc b/chromium/media/renderers/audio_renderer_impl.cc
index 1f840f68034..7c47c375ab5 100644
--- a/chromium/media/renderers/audio_renderer_impl.cc
+++ b/chromium/media/renderers/audio_renderer_impl.cc
@@ -33,6 +33,7 @@
#include "media/base/media_client.h"
#include "media/base/media_log.h"
#include "media/base/media_switches.h"
+#include "media/base/media_util.h"
#include "media/base/renderer_client.h"
#include "media/base/timestamp_constants.h"
#include "media/filters/audio_clock.h"
@@ -40,26 +41,6 @@
namespace media {
-namespace {
-
-AudioParameters::Format ConvertCodecToBitstreamFormat(AudioCodec codec) {
- switch (codec) {
- case AudioCodec::kAC3:
- return AudioParameters::Format::AUDIO_BITSTREAM_AC3;
- case AudioCodec::kEAC3:
- return AudioParameters::Format::AUDIO_BITSTREAM_EAC3;
- case AudioCodec::kDTS:
- return AudioParameters::Format::AUDIO_BITSTREAM_DTS;
- // No support for DTS_HD yet as this section is related to the incoming
- // stream type. DTS_HD support is only added for audio track output to
- // support audiosink reporting DTS_HD support.
- default:
- return AudioParameters::Format::AUDIO_FAKE;
- }
-}
-
-} // namespace
-
AudioRendererImpl::AudioRendererImpl(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
AudioRendererSink* sink,
@@ -457,7 +438,7 @@ void AudioRendererImpl::OnDeviceInfoReceived(
AudioCodec codec = stream->audio_decoder_config().codec();
if (auto* mc = GetMediaClient()) {
- const auto format = ConvertCodecToBitstreamFormat(codec);
+ const auto format = ConvertAudioCodecToBitstreamFormat(codec);
is_passthrough_ = mc->IsSupportedBitstreamAudioCodec(codec) &&
hw_params.IsFormatSupportedByHardware(format);
} else {
@@ -502,6 +483,9 @@ void AudioRendererImpl::OnDeviceInfoReceived(
} else if (codec == AudioCodec::kDTS) {
format = AudioParameters::AUDIO_BITSTREAM_DTS;
target_output_sample_format = kSampleFormatDts;
+ } else if (codec == AudioCodec::kDTSXP2) {
+ format = AudioParameters::AUDIO_BITSTREAM_DTSX_P2;
+ target_output_sample_format = kSampleFormatDtsxP2;
} else {
NOTREACHED();
}
diff --git a/chromium/media/renderers/audio_renderer_impl_unittest.cc b/chromium/media/renderers/audio_renderer_impl_unittest.cc
index cc46128a84c..d79fbfd0250 100644
--- a/chromium/media/renderers/audio_renderer_impl_unittest.cc
+++ b/chromium/media/renderers/audio_renderer_impl_unittest.cc
@@ -219,6 +219,7 @@ class AudioRendererImplTest : public ::testing::Test,
// RendererClient implementation.
MOCK_METHOD1(OnError, void(PipelineStatus));
+ void OnFallback(PipelineStatus status) override { NOTREACHED(); }
void OnEnded() override {
CHECK(!ended_);
ended_ = true;
diff --git a/chromium/media/renderers/default_decoder_factory.cc b/chromium/media/renderers/default_decoder_factory.cc
index 430ec53038a..f052f86cd39 100644
--- a/chromium/media/renderers/default_decoder_factory.cc
+++ b/chromium/media/renderers/default_decoder_factory.cc
@@ -22,10 +22,6 @@
#include "media/filters/decrypting_video_decoder.h"
#endif
-#if BUILDFLAG(IS_FUCHSIA)
-#include "media/filters/fuchsia/fuchsia_video_decoder.h"
-#endif
-
#if BUILDFLAG(ENABLE_DAV1D_DECODER)
#include "media/filters/dav1d_video_decoder.h"
#endif
@@ -160,27 +156,6 @@ void DefaultDecoderFactory::CreateVideoDecoders(
std::move(request_overlay_info_cb), target_color_space, video_decoders);
}
-#if BUILDFLAG(IS_FUCHSIA)
- // TODO(crbug.com/1122116): Minimize Fuchsia-specific code paths.
- if (gpu_factories && gpu_factories->IsGpuVideoDecodeAcceleratorEnabled()) {
- auto* context_provider = gpu_factories->GetMediaContextProvider();
-
- // GetMediaContextProvider() may return nullptr when the context was lost
- // (e.g. after GPU process crash). To handle this case RenderThreadImpl
- // creates a new GpuVideoAcceleratorFactories with a new ContextProvider
- // instance, but there is no way to get it here. For now just don't add
- // FuchsiaVideoDecoder in that scenario.
- //
- // TODO(crbug.com/580386): Handle context loss properly.
- if (context_provider) {
- video_decoders->push_back(FuchsiaVideoDecoder::Create(context_provider));
- } else {
- DLOG(ERROR)
- << "Can't create FuchsiaVideoDecoder due to GPU context loss.";
- }
- }
-#endif
-
#if BUILDFLAG(ENABLE_LIBVPX)
video_decoders->push_back(std::make_unique<OffloadingVpxVideoDecoder>());
#endif
diff --git a/chromium/media/renderers/paint_canvas_video_renderer.cc b/chromium/media/renderers/paint_canvas_video_renderer.cc
index 5e3c4145d0f..534f9e9e9e3 100644
--- a/chromium/media/renderers/paint_canvas_video_renderer.cc
+++ b/chromium/media/renderers/paint_canvas_video_renderer.cc
@@ -796,6 +796,7 @@ class VideoTextureBacking : public cc::TextureBacking {
sk_image_info_(sk_image_->imageInfo()),
mailbox_(mailbox),
wraps_video_frame_texture_(wraps_video_frame_texture) {
+ DCHECK(sk_image_->isTextureBacked());
raster_context_provider_ = std::move(raster_context_provider);
}
@@ -832,23 +833,19 @@ class VideoTextureBacking : public cc::TextureBacking {
// Used only for recycling this TextureBacking - where we need to keep the
// texture/mailbox alive, but replace the SkImage.
void ReplaceAcceleratedSkImage(sk_sp<SkImage> sk_image) {
+ DCHECK(sk_image->isTextureBacked());
sk_image_ = sk_image;
sk_image_info_ = sk_image->imageInfo();
}
sk_sp<SkImage> GetSkImageViaReadback() override {
- if (sk_image_)
- return sk_image_->makeNonTextureImage();
-
sk_sp<SkData> image_pixels =
SkData::MakeUninitialized(sk_image_info_.computeMinByteSize());
- uint8_t* writable_pixels =
- static_cast<uint8_t*>(image_pixels->writable_data());
- gpu::raster::RasterInterface* ri =
- raster_context_provider_->RasterInterface();
- ri->ReadbackImagePixels(mailbox_, sk_image_info_,
- sk_image_info_.minRowBytes(), 0, 0,
- writable_pixels);
+ if (!readPixels(sk_image_info_, image_pixels->writable_data(),
+ sk_image_info_.minRowBytes(), 0, 0)) {
+ DLOG(ERROR) << "VideoTextureBacking::GetSkImageViaReadback failed.";
+ return nullptr;
+ }
return SkImage::MakeRasterData(sk_image_info_, std::move(image_pixels),
sk_image_info_.minRowBytes());
}
@@ -858,12 +855,19 @@ class VideoTextureBacking : public cc::TextureBacking {
size_t dst_row_bytes,
int src_x,
int src_y) override {
+ gpu::raster::RasterInterface* ri =
+ raster_context_provider_->RasterInterface();
if (sk_image_) {
+ GrGLTextureInfo texture_info;
+ if (!sk_image_->getBackendTexture(/*flushPendingGrContextIO=*/true)
+ .getGLTextureInfo(&texture_info)) {
+ DLOG(ERROR) << "Failed to getGLTextureInfo for VideoTextureBacking.";
+ return false;
+ }
+ ScopedSharedImageAccess scoped_access(ri, texture_info.fID, mailbox_);
return sk_image_->readPixels(dst_info, dst_pixels, dst_row_bytes, src_x,
src_y);
}
- gpu::raster::RasterInterface* ri =
- raster_context_provider_->RasterInterface();
ri->ReadbackImagePixels(mailbox_, dst_info, dst_info.minRowBytes(), src_x,
src_y, dst_pixels);
return true;
diff --git a/chromium/media/renderers/paint_canvas_video_renderer_unittest.cc b/chromium/media/renderers/paint_canvas_video_renderer_unittest.cc
index 98363ec7874..08ba9055106 100644
--- a/chromium/media/renderers/paint_canvas_video_renderer_unittest.cc
+++ b/chromium/media/renderers/paint_canvas_video_renderer_unittest.cc
@@ -8,6 +8,7 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/memory/aligned_memory.h"
+#include "base/memory/raw_ptr.h"
#include "base/sys_byteorder.h"
#include "base/test/task_environment.h"
#include "build/build_config.h"
@@ -1047,7 +1048,7 @@ class PaintCanvasVideoRendererWithGLTest : public testing::Test {
using GetColorCallback = base::RepeatingCallback<SkColor(int, int)>;
void SetUp() override {
- gl::GLSurfaceTestSupport::InitializeOneOff();
+ display_ = gl::GLSurfaceTestSupport::InitializeOneOff();
enable_pixels_.emplace();
media_context_ = base::MakeRefCounted<viz::TestInProcessContextProvider>(
viz::TestContextType::kGpuRaster, /*support_locking=*/false);
@@ -1074,7 +1075,7 @@ class PaintCanvasVideoRendererWithGLTest : public testing::Test {
media_context_.reset();
enable_pixels_.reset();
viz::TestGpuServiceHolder::ResetInstance();
- gl::GLSurfaceTestSupport::ShutdownGL();
+ gl::GLSurfaceTestSupport::ShutdownGL(display_);
}
// Uses CopyVideoFrameTexturesToGLTexture to copy |frame| into a GL texture,
@@ -1237,6 +1238,7 @@ class PaintCanvasVideoRendererWithGLTest : public testing::Test {
PaintCanvasVideoRenderer renderer_;
scoped_refptr<VideoFrame> cropped_frame_;
base::test::TaskEnvironment task_environment_;
+ raw_ptr<gl::GLDisplay> display_ = nullptr;
};
TEST_F(PaintCanvasVideoRendererWithGLTest, CopyVideoFrameYUVDataToGLTexture) {
diff --git a/chromium/media/renderers/renderer_impl.cc b/chromium/media/renderers/renderer_impl.cc
index 9d0974bea8e..cb62e2d7785 100644
--- a/chromium/media/renderers/renderer_impl.cc
+++ b/chromium/media/renderers/renderer_impl.cc
@@ -41,6 +41,9 @@ class RendererImpl::RendererClientInternal final : public RendererClient {
}
void OnError(PipelineStatus error) override { renderer_->OnError(error); }
+ void OnFallback(PipelineStatus error) override {
+ renderer_->OnFallback(std::move(error).AddHere());
+ }
void OnEnded() override { renderer_->OnRendererEnded(type_); }
void OnStatisticsUpdate(const PipelineStatistics& stats) override {
renderer_->OnStatisticsUpdate(stats);
@@ -904,6 +907,10 @@ void RendererImpl::RunEndedCallbackIfNeeded() {
client_->OnEnded();
}
+void RendererImpl::OnFallback(PipelineStatus fallback) {
+ client_->OnFallback(std::move(fallback).AddHere());
+}
+
void RendererImpl::OnError(PipelineStatus error) {
DVLOG(1) << __func__ << "(" << error << ")";
DCHECK(task_runner_->BelongsToCurrentThread());
diff --git a/chromium/media/renderers/renderer_impl.h b/chromium/media/renderers/renderer_impl.h
index 5bad0196b44..5ede0182d54 100644
--- a/chromium/media/renderers/renderer_impl.h
+++ b/chromium/media/renderers/renderer_impl.h
@@ -197,6 +197,10 @@ class MEDIA_EXPORT RendererImpl final : public Renderer {
// Callback executed when a runtime error happens.
void OnError(PipelineStatus error);
+ // Callback executed when there is a fallback somewhere in the pipeline which
+ // should be recorded for metrics analysis.
+ void OnFallback(PipelineStatus fallback);
+
void OnWaiting(WaitingReason reason);
void OnVideoNaturalSizeChange(const gfx::Size& size);
void OnAudioConfigChange(const AudioDecoderConfig& config);
diff --git a/chromium/media/renderers/video_frame_rgba_to_yuva_converter.cc b/chromium/media/renderers/video_frame_rgba_to_yuva_converter.cc
index 61e85ae455d..80fa9431cd7 100644
--- a/chromium/media/renderers/video_frame_rgba_to_yuva_converter.cc
+++ b/chromium/media/renderers/video_frame_rgba_to_yuva_converter.cc
@@ -118,13 +118,27 @@ bool CopyRGBATextureToVideoFrame(viz::RasterContextProvider* provider,
const gfx::ColorSpace& src_color_space,
GrSurfaceOrigin src_surface_origin,
const gpu::MailboxHolder& src_mailbox_holder,
- VideoFrame* dst_video_frame,
- gpu::SyncToken& completion_sync_token) {
+ VideoFrame* dst_video_frame) {
DCHECK_EQ(dst_video_frame->format(), PIXEL_FORMAT_NV12);
auto* ri = provider->RasterInterface();
DCHECK(ri);
+ // If context is lost for any reason e.g. creating shared image failed, we
+ // cannot distinguish between OOP and non-OOP raster based on GrContext().
+ if (ri->GetGraphicsResetStatusKHR() != GL_NO_ERROR) {
+ DLOG(ERROR) << "Raster context lost.";
+ return false;
+ }
+
+#if BUILDFLAG(IS_WIN)
+ // CopyToGpuMemoryBuffer is only supported for D3D shared images on Windows.
+ if (!provider->ContextCapabilities().shared_image_d3d) {
+ DLOG(ERROR) << "CopyToGpuMemoryBuffer not supported.";
+ return false;
+ }
+#endif // BUILDFLAG(IS_WIN)
+
if (!provider->GrContext()) {
SkYUVAInfo yuva_info =
VideoFrameYUVMailboxesHolder::VideoFrameGetSkYUVAInfo(dst_video_frame);
@@ -179,34 +193,35 @@ bool CopyRGBATextureToVideoFrame(viz::RasterContextProvider* provider,
const size_t num_planes = dst_video_frame->layout().num_planes();
+#if BUILDFLAG(IS_WIN)
// For shared memory GMBs on Windows we needed to explicitly request a copy
- // from the shared image GPU texture to the GMB. Set `completion_sync_token`
- // to mark the completion of the copy.
- if (dst_video_frame->HasGpuMemoryBuffer() &&
- dst_video_frame->GetGpuMemoryBuffer()->GetType() ==
- gfx::SHARED_MEMORY_BUFFER) {
- auto* sii = provider->SharedImageInterface();
-
- gpu::SyncToken blit_done_sync_token;
- ri->GenUnverifiedSyncTokenCHROMIUM(blit_done_sync_token.GetData());
-
- for (size_t plane = 0; plane < num_planes; ++plane) {
- const auto& mailbox = dst_video_frame->mailbox_holder(plane).mailbox;
- sii->CopyToGpuMemoryBuffer(blit_done_sync_token, mailbox);
- }
-
- auto copy_to_gmb_done_sync_token = sii->GenUnverifiedSyncToken();
- ri->WaitSyncTokenCHROMIUM(copy_to_gmb_done_sync_token.GetData());
+ // from the shared image GPU texture to the GMB.
+ DCHECK(dst_video_frame->HasGpuMemoryBuffer());
+ DCHECK_EQ(dst_video_frame->GetGpuMemoryBuffer()->GetType(),
+ gfx::SHARED_MEMORY_BUFFER);
+
+ gpu::SyncToken blit_done_sync_token;
+ ri->GenUnverifiedSyncTokenCHROMIUM(blit_done_sync_token.GetData());
+
+ auto* sii = provider->SharedImageInterface();
+ for (size_t plane = 0; plane < num_planes; ++plane) {
+ const auto& mailbox = dst_video_frame->mailbox_holder(plane).mailbox;
+ sii->CopyToGpuMemoryBuffer(blit_done_sync_token, mailbox);
}
- // We want to generate a SyncToken from the RasterInterface since callers may
- // be using RasterInterface::Finish() to ensure synchronization in cases where
- // SignalSyncToken can't be used.
- ri->GenSyncTokenCHROMIUM(completion_sync_token.GetData());
+ // Synchronize RasterInterface with SharedImageInterface. We want to generate
+ // the final SyncToken from the RasterInterface since callers might be using
+ // RasterInterface::Finish() to ensure synchronization in cases where
+ // SignalSyncToken can't be used (e.g. webrtc video frame adapter).
+ auto copy_to_gmb_done_sync_token = sii->GenUnverifiedSyncToken();
+ ri->WaitSyncTokenCHROMIUM(copy_to_gmb_done_sync_token.GetData());
+#endif // BUILDFLAG(IS_WIN)
// Make access to the `dst_video_frame` wait on copy completion. We also
// update the ReleaseSyncToken here since it's used when the underlying
// GpuMemoryBuffer and SharedImage resources are returned to the pool.
+ gpu::SyncToken completion_sync_token;
+ ri->GenSyncTokenCHROMIUM(completion_sync_token.GetData());
SimpleSyncTokenClient simple_client(completion_sync_token);
for (size_t plane = 0; plane < num_planes; ++plane)
dst_video_frame->UpdateMailboxHolderSyncToken(plane, &simple_client);
diff --git a/chromium/media/renderers/video_frame_rgba_to_yuva_converter.h b/chromium/media/renderers/video_frame_rgba_to_yuva_converter.h
index 5ae5a302ec8..3e04b990616 100644
--- a/chromium/media/renderers/video_frame_rgba_to_yuva_converter.h
+++ b/chromium/media/renderers/video_frame_rgba_to_yuva_converter.h
@@ -16,7 +16,6 @@ class Size;
namespace gpu {
struct MailboxHolder;
-struct SyncToken;
} // namespace gpu
namespace viz {
@@ -39,8 +38,7 @@ MEDIA_EXPORT bool CopyRGBATextureToVideoFrame(
const gfx::ColorSpace& src_color_space,
GrSurfaceOrigin src_surface_origin,
const gpu::MailboxHolder& src_mailbox_holder,
- media::VideoFrame* dst_video_frame,
- gpu::SyncToken& completion_sync_token);
+ media::VideoFrame* dst_video_frame);
} // namespace media
diff --git a/chromium/media/renderers/video_renderer_impl.cc b/chromium/media/renderers/video_renderer_impl.cc
index 9b0e9cbd799..6ddc1bd3a70 100644
--- a/chromium/media/renderers/video_renderer_impl.cc
+++ b/chromium/media/renderers/video_renderer_impl.cc
@@ -181,6 +181,8 @@ void VideoRendererImpl::Initialize(
task_runner_, create_video_decoders_cb_, media_log_);
video_decoder_stream_->set_config_change_observer(base::BindRepeating(
&VideoRendererImpl::OnConfigChange, weak_factory_.GetWeakPtr()));
+ video_decoder_stream_->set_fallback_observer(base::BindRepeating(
+ &VideoRendererImpl::OnFallback, weak_factory_.GetWeakPtr()));
if (gpu_memory_buffer_pool_) {
video_decoder_stream_->SetPrepareCB(base::BindRepeating(
&GpuMemoryBufferVideoFramePool::MaybeCreateHardwareFrame,
@@ -385,6 +387,11 @@ void VideoRendererImpl::OnConfigChange(const VideoDecoderConfig& config) {
}
}
+void VideoRendererImpl::OnFallback(PipelineStatus status) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ client_->OnFallback(std::move(status).AddHere());
+}
+
void VideoRendererImpl::SetTickClockForTesting(
const base::TickClock* tick_clock) {
tick_clock_ = tick_clock;
@@ -575,12 +582,16 @@ void VideoRendererImpl::FrameReady(VideoDecoderStream::ReadResult result) {
default:
// Anything other than `kOk` or `kAborted` is treated as an error.
DCHECK(result.has_error());
- auto status = result.code() == DecoderStatus::Codes::kDisconnected
- ? PIPELINE_ERROR_DISCONNECTED
- : PIPELINE_ERROR_DECODE;
+
+ PipelineStatus::Codes code =
+ result.code() == DecoderStatus::Codes::kDisconnected
+ ? PIPELINE_ERROR_DISCONNECTED
+ : PIPELINE_ERROR_DECODE;
+ PipelineStatus status = {code, std::move(result).error()};
task_runner_->PostTask(
- FROM_HERE, base::BindOnce(&VideoRendererImpl::OnPlaybackError,
- weak_factory_.GetWeakPtr(), status));
+ FROM_HERE,
+ base::BindOnce(&VideoRendererImpl::OnPlaybackError,
+ weak_factory_.GetWeakPtr(), std::move(status)));
return;
}
diff --git a/chromium/media/renderers/video_renderer_impl.h b/chromium/media/renderers/video_renderer_impl.h
index a101a2a16e3..1c760273371 100644
--- a/chromium/media/renderers/video_renderer_impl.h
+++ b/chromium/media/renderers/video_renderer_impl.h
@@ -115,6 +115,10 @@ class MEDIA_EXPORT VideoRendererImpl
// RenderClient of the new config.
void OnConfigChange(const VideoDecoderConfig& config);
+ // Called when the decoder stream and selector have fallen back to another
+ // decoder after a failed decode.
+ void OnFallback(PipelineStatus status);
+
// Callback for |video_decoder_stream_| to deliver decoded video frames and
// report video decoding status.
void FrameReady(VideoDecoderStream::ReadResult result);
diff --git a/chromium/media/renderers/video_renderer_impl_unittest.cc b/chromium/media/renderers/video_renderer_impl_unittest.cc
index 4cde8bace29..dbc8adf8822 100644
--- a/chromium/media/renderers/video_renderer_impl_unittest.cc
+++ b/chromium/media/renderers/video_renderer_impl_unittest.cc
@@ -611,6 +611,7 @@ TEST_F(VideoRendererImplTest, DecodeError_DuringStartPlayingFrom) {
Initialize();
QueueFrames("error");
EXPECT_CALL(mock_cb_, OnError(HasStatusCode(PIPELINE_ERROR_DECODE)));
+ EXPECT_CALL(mock_cb_, OnFallback(HasStatusCode(PIPELINE_ERROR_DECODE)));
StartPlayingFrom(0);
Destroy();
}
diff --git a/chromium/media/renderers/video_resource_updater.cc b/chromium/media/renderers/video_resource_updater.cc
index 968513a37e8..de46e680b03 100644
--- a/chromium/media/renderers/video_resource_updater.cc
+++ b/chromium/media/renderers/video_resource_updater.cc
@@ -870,10 +870,11 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForHardwarePlanes(
VideoFrameExternalResources external_resources;
gfx::ColorSpace resource_color_space = video_frame->ColorSpace();
- const auto& copy_mode = video_frame->metadata().copy_mode;
+ const bool copy_required = video_frame->metadata().copy_required;
+
GLuint target = video_frame->mailbox_holder(0).texture_target;
- // If texture copy is required, then we will copy into a GL_TEXTURE_2D target.
- if (copy_mode == VideoFrameMetadata::CopyMode::kCopyToNewTexture)
+ // If |copy_required| then we will copy into a GL_TEXTURE_2D target.
+ if (copy_required)
target = GL_TEXTURE_2D;
gfx::BufferFormat buffer_formats[VideoFrame::kMaxPlanes];
@@ -897,35 +898,24 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForHardwarePlanes(
const gpu::MailboxHolder& mailbox_holder = video_frame->mailbox_holder(i);
if (mailbox_holder.mailbox.IsZero())
break;
- if (copy_mode == VideoFrameMetadata::CopyMode::kCopyToNewTexture) {
+
+ if (copy_required) {
CopyHardwarePlane(video_frame.get(), resource_color_space, mailbox_holder,
&external_resources);
} else {
- gpu::SyncToken sync_token = mailbox_holder.sync_token;
- gpu::Mailbox mailbox = mailbox_holder.mailbox;
- if (copy_mode == VideoFrameMetadata::CopyMode::kCopyMailboxesOnly) {
- auto* sii = SharedImageInterface();
- uint32_t usage =
- gpu::SHARED_IMAGE_USAGE_DISPLAY | gpu::SHARED_IMAGE_USAGE_GLES2;
- mailbox = sii->CreateSharedImageWithAHB(mailbox_holder.mailbox, usage,
- mailbox_holder.sync_token);
- // Insert a sync token at this point and update video frame release sync
- // token with it.
- SyncTokenClientImpl client(nullptr /* GLES2Interface */, sii,
- gpu::SyncToken());
- sync_token = video_frame->UpdateReleaseSyncToken(&client);
- }
-
const size_t width = video_frame->columns(i);
const size_t height = video_frame->rows(i);
const gfx::Size plane_size(width, height);
auto transfer_resource = viz::TransferableResource::MakeGL(
- mailbox, GL_LINEAR, mailbox_holder.texture_target, sync_token,
- plane_size, video_frame->metadata().allow_overlay);
+ mailbox_holder.mailbox, GL_LINEAR, mailbox_holder.texture_target,
+ mailbox_holder.sync_token, plane_size,
+ video_frame->metadata().allow_overlay);
transfer_resource.color_space = resource_color_space;
transfer_resource.hdr_metadata = video_frame->hdr_metadata();
- transfer_resource.read_lock_fences_enabled =
- video_frame->metadata().read_lock_fences_enabled;
+ if (video_frame->metadata().read_lock_fences_enabled) {
+ transfer_resource.synchronization_type = viz::TransferableResource::
+ SynchronizationType::kGpuCommandsCompleted;
+ }
transfer_resource.format = viz::GetResourceFormat(buffer_formats[i]);
transfer_resource.ycbcr_info = video_frame->ycbcr_info();
@@ -940,21 +930,9 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForHardwarePlanes(
#endif
external_resources.resources.push_back(std::move(transfer_resource));
- if (copy_mode == VideoFrameMetadata::CopyMode::kCopyMailboxesOnly) {
- // Adding a ref on |video_frame| to make sure lifetime of |video frame|
- // is same as lifetime of this |mailbox|. Releasing |video_frame| before
- // |mailbox| causes renderer to prepare more video frame which in turn
- // causes holding onto multiple AHardwareBuffers by both |mailbox| and
- // |video_frame| which in turn causes higher gpu memory usage and
- // potential memory crashes.
- external_resources.release_callbacks.push_back(base::BindOnce(
- &VideoResourceUpdater::DestroyMailbox,
- weak_ptr_factory_.GetWeakPtr(), mailbox, video_frame));
- } else {
- external_resources.release_callbacks.push_back(
- base::BindOnce(&VideoResourceUpdater::ReturnTexture,
- weak_ptr_factory_.GetWeakPtr(), video_frame));
- }
+ external_resources.release_callbacks.push_back(
+ base::BindOnce(&VideoResourceUpdater::ReturnTexture,
+ weak_ptr_factory_.GetWeakPtr(), video_frame));
}
}
return external_resources;
@@ -1352,19 +1330,6 @@ void VideoResourceUpdater::ReturnTexture(scoped_refptr<VideoFrame> video_frame,
video_frame->UpdateReleaseSyncToken(&client);
}
-void VideoResourceUpdater::DestroyMailbox(gpu::Mailbox mailbox,
- scoped_refptr<VideoFrame> video_frame,
- const gpu::SyncToken& sync_token,
- bool lost_resource) {
- if (lost_resource)
- return;
-
- auto* sii = SharedImageInterface();
- sii->DestroySharedImage(sync_token, mailbox);
- SyncTokenClientImpl client(nullptr, sii, sync_token);
- video_frame->UpdateReleaseSyncToken(&client);
-}
-
void VideoResourceUpdater::RecycleResource(uint32_t plane_resource_id,
const gpu::SyncToken& sync_token,
bool lost_resource) {
@@ -1427,14 +1392,6 @@ bool VideoResourceUpdater::OnMemoryDump(
return true;
}
-gpu::SharedImageInterface* VideoResourceUpdater::SharedImageInterface() const {
- auto* sii = raster_context_provider_
- ? raster_context_provider_->SharedImageInterface()
- : context_provider_->SharedImageInterface();
- DCHECK(sii);
- return sii;
-}
-
VideoResourceUpdater::FrameResource::FrameResource() = default;
VideoResourceUpdater::FrameResource::FrameResource(viz::ResourceId id,
diff --git a/chromium/media/renderers/video_resource_updater.h b/chromium/media/renderers/video_resource_updater.h
index 7cc33853c89..f717343ca86 100644
--- a/chromium/media/renderers/video_resource_updater.h
+++ b/chromium/media/renderers/video_resource_updater.h
@@ -30,10 +30,6 @@ class Rect;
class Transform;
} // namespace gfx
-namespace gpu {
-class SharedImageInterface;
-} // namespace gpu
-
namespace viz {
class ClientResourceProvider;
class ContextProvider;
@@ -201,15 +197,10 @@ class MEDIA_EXPORT VideoResourceUpdater
void ReturnTexture(scoped_refptr<VideoFrame> video_frame,
const gpu::SyncToken& sync_token,
bool lost_resource);
- void DestroyMailbox(gpu::Mailbox mailbox,
- scoped_refptr<VideoFrame> video_frame,
- const gpu::SyncToken& sync_token,
- bool lost_resource);
// base::trace_event::MemoryDumpProvider implementation.
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) override;
- gpu::SharedImageInterface* SharedImageInterface() const;
const raw_ptr<viz::ContextProvider> context_provider_;
const raw_ptr<viz::RasterContextProvider> raster_context_provider_;
const raw_ptr<viz::SharedBitmapReporter> shared_bitmap_reporter_;
diff --git a/chromium/media/renderers/video_resource_updater_unittest.cc b/chromium/media/renderers/video_resource_updater_unittest.cc
index fe788ab4ada..3cd5e2bdf89 100644
--- a/chromium/media/renderers/video_resource_updater_unittest.cc
+++ b/chromium/media/renderers/video_resource_updater_unittest.cc
@@ -214,10 +214,10 @@ class VideoResourceUpdaterTest : public testing::Test {
}
scoped_refptr<VideoFrame> CreateTestStreamTextureHardwareVideoFrame(
- absl::optional<VideoFrameMetadata::CopyMode> copy_mode) {
+ bool needs_copy) {
scoped_refptr<VideoFrame> video_frame = CreateTestHardwareVideoFrame(
PIXEL_FORMAT_ARGB, GL_TEXTURE_EXTERNAL_OES);
- video_frame->metadata().copy_mode = std::move(copy_mode);
+ video_frame->metadata().copy_required = needs_copy;
return video_frame;
}
@@ -557,18 +557,27 @@ TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes) {
EXPECT_EQ(VideoFrameResourceType::YUV, resources.type);
EXPECT_EQ(3u, resources.resources.size());
EXPECT_EQ(3u, resources.release_callbacks.size());
- EXPECT_FALSE(resources.resources[0].read_lock_fences_enabled);
- EXPECT_FALSE(resources.resources[1].read_lock_fences_enabled);
- EXPECT_FALSE(resources.resources[2].read_lock_fences_enabled);
+ EXPECT_EQ(resources.resources[0].synchronization_type,
+ viz::TransferableResource::SynchronizationType::kSyncToken);
+ EXPECT_EQ(resources.resources[1].synchronization_type,
+ viz::TransferableResource::SynchronizationType::kSyncToken);
+ EXPECT_EQ(resources.resources[2].synchronization_type,
+ viz::TransferableResource::SynchronizationType::kSyncToken);
video_frame = CreateTestYuvHardwareVideoFrame(PIXEL_FORMAT_I420, 3,
GL_TEXTURE_RECTANGLE_ARB);
video_frame->metadata().read_lock_fences_enabled = true;
resources = updater->CreateExternalResourcesFromVideoFrame(video_frame);
- EXPECT_TRUE(resources.resources[0].read_lock_fences_enabled);
- EXPECT_TRUE(resources.resources[1].read_lock_fences_enabled);
- EXPECT_TRUE(resources.resources[2].read_lock_fences_enabled);
+ EXPECT_EQ(
+ resources.resources[0].synchronization_type,
+ viz::TransferableResource::SynchronizationType::kGpuCommandsCompleted);
+ EXPECT_EQ(
+ resources.resources[1].synchronization_type,
+ viz::TransferableResource::SynchronizationType::kGpuCommandsCompleted);
+ EXPECT_EQ(
+ resources.resources[2].synchronization_type,
+ viz::TransferableResource::SynchronizationType::kGpuCommandsCompleted);
}
TEST_F(VideoResourceUpdaterTest,
@@ -578,7 +587,7 @@ TEST_F(VideoResourceUpdaterTest,
CreateUpdaterForHardware(true);
EXPECT_EQ(0u, GetSharedImageCount());
scoped_refptr<VideoFrame> video_frame =
- CreateTestStreamTextureHardwareVideoFrame(absl::nullopt);
+ CreateTestStreamTextureHardwareVideoFrame(/*needs_copy=*/false);
VideoFrameExternalResources resources =
updater->CreateExternalResourcesFromVideoFrame(video_frame);
@@ -591,8 +600,7 @@ TEST_F(VideoResourceUpdaterTest,
// A copied stream texture should return an RGBA resource in a new
// GL_TEXTURE_2D texture.
- video_frame = CreateTestStreamTextureHardwareVideoFrame(
- VideoFrameMetadata::CopyMode::kCopyToNewTexture);
+ video_frame = CreateTestStreamTextureHardwareVideoFrame(/*needs_copy=*/true);
resources = updater->CreateExternalResourcesFromVideoFrame(video_frame);
EXPECT_EQ(VideoFrameResourceType::RGBA_PREMULTIPLIED, resources.type);
EXPECT_EQ(1u, resources.resources.size());
@@ -602,43 +610,11 @@ TEST_F(VideoResourceUpdaterTest,
EXPECT_EQ(1u, GetSharedImageCount());
}
-TEST_F(VideoResourceUpdaterTest,
- CreateForHardwarePlanes_StreamTexture_CopyMailboxesOnly) {
- // Note that |use_stream_video_draw_quad| is true for this test.
- std::unique_ptr<VideoResourceUpdater> updater =
- CreateUpdaterForHardware(true);
- EXPECT_EQ(0u, GetSharedImageCount());
- scoped_refptr<VideoFrame> video_frame =
- CreateTestStreamTextureHardwareVideoFrame(absl::nullopt);
- VideoFrameExternalResources resources =
- updater->CreateExternalResourcesFromVideoFrame(video_frame);
- EXPECT_EQ(VideoFrameResourceType::STREAM_TEXTURE, resources.type);
- EXPECT_EQ(1u, resources.resources.size());
- EXPECT_EQ((GLenum)GL_TEXTURE_EXTERNAL_OES,
- resources.resources[0].mailbox_holder.texture_target);
- EXPECT_EQ(1u, resources.release_callbacks.size());
- EXPECT_EQ(0u, GetSharedImageCount());
-
- // If mailbox is copied, the texture target should still be
- // GL_TEXTURE_EXTERNAL_OES and resource type should be STREAM_TEXTURE.
- video_frame = CreateTestStreamTextureHardwareVideoFrame(
- VideoFrameMetadata::CopyMode::kCopyMailboxesOnly);
- resources = updater->CreateExternalResourcesFromVideoFrame(video_frame);
- EXPECT_EQ(VideoFrameResourceType::STREAM_TEXTURE, resources.type);
- EXPECT_EQ(1u, resources.resources.size());
- EXPECT_EQ((GLenum)GL_TEXTURE_EXTERNAL_OES,
- resources.resources[0].mailbox_holder.texture_target);
- EXPECT_EQ(1u, resources.release_callbacks.size());
- // This count will be 1 since a new mailbox will be created when mailbox is
- // being copied.
- EXPECT_EQ(1u, GetSharedImageCount());
-}
-
TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_TextureQuad) {
std::unique_ptr<VideoResourceUpdater> updater = CreateUpdaterForHardware();
EXPECT_EQ(0u, GetSharedImageCount());
scoped_refptr<VideoFrame> video_frame =
- CreateTestStreamTextureHardwareVideoFrame(absl::nullopt);
+ CreateTestStreamTextureHardwareVideoFrame(/*needs_copy=*/false);
VideoFrameExternalResources resources =
updater->CreateExternalResourcesFromVideoFrame(video_frame);
@@ -724,8 +700,7 @@ TEST_F(VideoResourceUpdaterTest, GenerateSyncTokenOnTextureCopy) {
std::unique_ptr<VideoResourceUpdater> updater = CreateUpdaterForHardware();
scoped_refptr<VideoFrame> video_frame =
- CreateTestStreamTextureHardwareVideoFrame(
- VideoFrameMetadata::CopyMode::kCopyToNewTexture);
+ CreateTestStreamTextureHardwareVideoFrame(/*needs_copy=*/true);
VideoFrameExternalResources resources =
updater->CreateExternalResourcesFromVideoFrame(video_frame);
diff --git a/chromium/media/renderers/win/media_foundation_audio_stream.cc b/chromium/media/renderers/win/media_foundation_audio_stream.cc
index c3fca8cf272..765b083ef8c 100644
--- a/chromium/media/renderers/win/media_foundation_audio_stream.cc
+++ b/chromium/media/renderers/win/media_foundation_audio_stream.cc
@@ -22,180 +22,6 @@ namespace media {
using Microsoft::WRL::ComPtr;
using Microsoft::WRL::MakeAndInitialize;
-namespace {
-
-// Given an audio format tag |wave_format|, it returns an audio subtype GUID per
-// https://docs.microsoft.com/en-us/windows/win32/medfound/audio-subtype-guids
-// |wave_format| must be one of the WAVE_FORMAT_* constants defined in mmreg.h.
-GUID MediaFoundationSubTypeFromWaveFormat(uint32_t wave_format) {
- GUID format_base = MFAudioFormat_Base;
- format_base.Data1 = wave_format;
- return format_base;
-}
-
-GUID AudioCodecToMediaFoundationSubtype(AudioCodec codec) {
- DVLOG(1) << __func__ << ": codec=" << codec;
-
- switch (codec) {
- case AudioCodec::kAAC:
- return MFAudioFormat_AAC;
- case AudioCodec::kMP3:
- return MFAudioFormat_MP3;
- case AudioCodec::kPCM:
- return MFAudioFormat_PCM;
- case AudioCodec::kVorbis:
- return MFAudioFormat_Vorbis;
- case AudioCodec::kFLAC:
- return MFAudioFormat_FLAC;
- case AudioCodec::kAMR_NB:
- return MFAudioFormat_AMR_NB;
- case AudioCodec::kAMR_WB:
- return MFAudioFormat_AMR_WB;
- case AudioCodec::kPCM_MULAW:
- return MediaFoundationSubTypeFromWaveFormat(WAVE_FORMAT_MULAW);
- case AudioCodec::kGSM_MS:
- return MediaFoundationSubTypeFromWaveFormat(WAVE_FORMAT_GSM610);
- case AudioCodec::kPCM_S16BE:
- return MFAudioFormat_PCM;
- case AudioCodec::kPCM_S24BE:
- return MFAudioFormat_PCM;
- case AudioCodec::kOpus:
- return MFAudioFormat_Opus;
- case AudioCodec::kEAC3:
- return MFAudioFormat_Dolby_DDPlus;
- case AudioCodec::kPCM_ALAW:
- return MediaFoundationSubTypeFromWaveFormat(WAVE_FORMAT_ALAW);
- case AudioCodec::kALAC:
- return MFAudioFormat_ALAC;
- case AudioCodec::kAC3:
- return MFAudioFormat_Dolby_AC3;
- default:
- return GUID_NULL;
- }
-}
-
-bool IsUncompressedAudio(AudioCodec codec) {
- switch (codec) {
- case AudioCodec::kPCM:
- case AudioCodec::kPCM_S16BE:
- case AudioCodec::kPCM_S24BE:
- return true;
- default:
- return false;
- }
-}
-
-// Given an AudioDecoderConfig, get its corresponding IMFMediaType format.
-// Note:
-// IMFMediaType is derived from IMFAttributes and hence all the of information
-// in a media type is store as attributes.
-// https://docs.microsoft.com/en-us/windows/win32/medfound/media-type-attributes
-// has a list of media type attributes.
-HRESULT GetDefaultAudioType(const AudioDecoderConfig decoder_config,
- IMFMediaType** media_type_out) {
- DVLOG(1) << __func__;
-
- ComPtr<IMFMediaType> media_type;
- RETURN_IF_FAILED(MFCreateMediaType(&media_type));
- RETURN_IF_FAILED(media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Audio));
-
- GUID mf_subtype = AudioCodecToMediaFoundationSubtype(decoder_config.codec());
- if (mf_subtype == GUID_NULL) {
- DLOG(ERROR) << "Unsupported codec type: " << decoder_config.codec();
- return MF_E_TOPO_CODEC_NOT_FOUND;
- }
- RETURN_IF_FAILED(media_type->SetGUID(MF_MT_SUBTYPE, mf_subtype));
-
- bool uncompressed = IsUncompressedAudio(decoder_config.codec());
-
- if (uncompressed) {
- RETURN_IF_FAILED(media_type->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1));
- } else {
- RETURN_IF_FAILED(media_type->SetUINT32(MF_MT_COMPRESSED, 1));
- }
-
- int channels = decoder_config.channels();
- if (channels > 0) {
- RETURN_IF_FAILED(media_type->SetUINT32(MF_MT_AUDIO_NUM_CHANNELS, channels));
- }
-
- int samples_per_second = decoder_config.samples_per_second();
- if (samples_per_second > 0) {
- RETURN_IF_FAILED(media_type->SetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND,
- samples_per_second));
- }
-
- int bits_per_sample = decoder_config.bytes_per_frame() * 8;
- if (bits_per_sample > 0) {
- RETURN_IF_FAILED(
- media_type->SetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE, bits_per_sample));
- }
-
- if (uncompressed) {
- unsigned long block_alignment = channels * (bits_per_sample / 8);
- if (block_alignment > 0) {
- RETURN_IF_FAILED(
- media_type->SetUINT32(MF_MT_AUDIO_BLOCK_ALIGNMENT, block_alignment));
- }
- unsigned long average_bps = samples_per_second * (bits_per_sample / 8);
- if (average_bps > 0) {
- RETURN_IF_FAILED(
- media_type->SetUINT32(MF_MT_AUDIO_AVG_BYTES_PER_SECOND, average_bps));
- }
- }
- *media_type_out = media_type.Detach();
- return S_OK;
-}
-
-#if BUILDFLAG(USE_PROPRIETARY_CODECS)
-HRESULT GetAacAudioType(const AudioDecoderConfig decoder_config,
- IMFMediaType** media_type_out) {
- DVLOG(1) << __func__;
-
- ComPtr<IMFMediaType> media_type;
- RETURN_IF_FAILED(GetDefaultAudioType(decoder_config, &media_type));
-
- // On Windows `extra_data` is not populated for AAC in `decoder_config`. Use
- // `aac_extra_data` instead. See crbug.com/1245123.
- const auto& extra_data = decoder_config.aac_extra_data();
-
- size_t wave_format_size = sizeof(HEAACWAVEINFO) + extra_data.size();
- std::vector<uint8_t> wave_format_buffer(wave_format_size);
- HEAACWAVEINFO* aac_wave_format =
- reinterpret_cast<HEAACWAVEINFO*>(wave_format_buffer.data());
-
- aac_wave_format->wfx.wFormatTag = WAVE_FORMAT_MPEG_HEAAC;
- aac_wave_format->wfx.nChannels = decoder_config.channels();
- aac_wave_format->wfx.wBitsPerSample = decoder_config.bytes_per_channel() * 8;
- aac_wave_format->wfx.nSamplesPerSec = decoder_config.samples_per_second();
- aac_wave_format->wfx.nAvgBytesPerSec =
- decoder_config.samples_per_second() * decoder_config.bytes_per_frame();
- aac_wave_format->wfx.nBlockAlign = 1;
-
- size_t extra_size = wave_format_size - sizeof(WAVEFORMATEX);
- aac_wave_format->wfx.cbSize = static_cast<WORD>(extra_size);
- aac_wave_format->wPayloadType = 0; // RAW AAC
- aac_wave_format->wAudioProfileLevelIndication =
- 0xFE; // no audio profile specified
- aac_wave_format->wStructType = 0; // audio specific config follows
- aac_wave_format->wReserved1 = 0;
- aac_wave_format->dwReserved2 = 0;
-
- if (!extra_data.empty()) {
- memcpy(reinterpret_cast<uint8_t*>(aac_wave_format) + sizeof(HEAACWAVEINFO),
- extra_data.data(), extra_data.size());
- }
-
- RETURN_IF_FAILED(MFInitMediaTypeFromWaveFormatEx(
- media_type.Get(), reinterpret_cast<const WAVEFORMATEX*>(aac_wave_format),
- wave_format_size));
- *media_type_out = media_type.Detach();
- return S_OK;
-}
-#endif // BUILDFLAG(USE_PROPRIETARY_CODECS)
-
-} // namespace
-
/*static*/
HRESULT MediaFoundationAudioStream::Create(
int stream_id,
diff --git a/chromium/media/renderers/win/media_foundation_audio_stream.h b/chromium/media/renderers/win/media_foundation_audio_stream.h
index 1d281a3f862..21f3586ee2e 100644
--- a/chromium/media/renderers/win/media_foundation_audio_stream.h
+++ b/chromium/media/renderers/win/media_foundation_audio_stream.h
@@ -8,6 +8,7 @@
#include <mfapi.h>
#include <mfidl.h>
+#include "media/filters/win/media_foundation_utils.h"
#include "media/renderers/win/media_foundation_stream_wrapper.h"
#include "media/base/media_log.h"
diff --git a/chromium/media/renderers/win/media_foundation_renderer.cc b/chromium/media/renderers/win/media_foundation_renderer.cc
index 12ebf396740..2279fb47132 100644
--- a/chromium/media/renderers/win/media_foundation_renderer.cc
+++ b/chromium/media/renderers/win/media_foundation_renderer.cc
@@ -27,6 +27,7 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/cdm_context.h"
#include "media/base/media_log.h"
+#include "media/base/media_switches.h"
#include "media/base/timestamp_constants.h"
#include "media/base/win/dxgi_device_manager.h"
#include "media/base/win/mf_helpers.h"
@@ -154,21 +155,29 @@ void MediaFoundationRenderer::Initialize(MediaResource* media_resource,
renderer_client_ = client;
- // If the content is not protected then we need to start off in
- // frame server mode so that the first frame's image data is
- // available to Chromium, quite a few web tests need that image.
- bool start_in_dcomp_mode = false;
+ // Check the rendering strategy & whether we're operating on clear or
+ // protected content to determine the starting 'rendering_mode_'.
+ // If the Direct Composition strategy is specified or if we're operating on
+ // protected content then start in Direct Composition mode, else start in
+ // Frame Server mode. This behavior must match the logic in
+ // MediaFoundationRendererClient::Initialize.
+ auto rendering_strategy = kMediaFoundationClearRenderingStrategyParam.Get();
+ rendering_mode_ =
+ rendering_strategy ==
+ MediaFoundationClearRenderingStrategy::kDirectComposition
+ ? MediaFoundationRenderingMode::DirectComposition
+ : MediaFoundationRenderingMode::FrameServer;
for (DemuxerStream* stream : media_resource->GetAllStreams()) {
if (stream->type() == DemuxerStream::Type::VIDEO &&
stream->video_decoder_config().is_encrypted()) {
- // This conditional must match the conditional in
- // MediaFoundationRendererClient::Initialize
- start_in_dcomp_mode = true;
+ // This is protected content, which only supports Direct Composition mode,
+ // so update 'rendering_mode_' accordingly.
+ rendering_mode_ = MediaFoundationRenderingMode::DirectComposition;
}
}
- rendering_mode_ = start_in_dcomp_mode ? RenderingMode::DirectComposition
- : RenderingMode::FrameServer;
+ MEDIA_LOG(INFO, media_log_)
+ << "Starting MediaFoundationRenderingMode: " << rendering_mode_;
HRESULT hr = CreateMediaEngine(media_resource);
if (FAILED(hr)) {
@@ -247,7 +256,7 @@ HRESULT MediaFoundationRenderer::CreateMediaEngine(
// TODO(crbug.com/1276067): We'll investigate scenarios to see if we can use
// the on-screen video window size and not the native video size.
- if (rendering_mode_ == RenderingMode::FrameServer) {
+ if (rendering_mode_ == MediaFoundationRenderingMode::FrameServer) {
gfx::Size max_video_size;
bool has_video = false;
for (auto* stream : media_resource->GetAllStreams()) {
@@ -459,38 +468,42 @@ void MediaFoundationRenderer::Flush(base::OnceClosure flush_cb) {
std::move(flush_cb).Run();
}
-void MediaFoundationRenderer::SetRenderingMode(RenderingMode render_mode) {
+void MediaFoundationRenderer::SetMediaFoundationRenderingMode(
+ MediaFoundationRenderingMode render_mode) {
ComPtr<IMFMediaEngineEx> mf_media_engine_ex;
HRESULT hr = mf_media_engine_.As(&mf_media_engine_ex);
if (mf_media_engine_->HasVideo()) {
- if (render_mode == RenderingMode::FrameServer) {
+ if (render_mode == MediaFoundationRenderingMode::FrameServer) {
// Make sure we reinitialize the texture pool
hr = InitializeTexturePool(native_video_size_);
- } else if (render_mode == RenderingMode::DirectComposition) {
+ } else if (render_mode == MediaFoundationRenderingMode::DirectComposition) {
// If needed renegotiate the DComp visual and send it to the client for
// presentation
} else {
DVLOG(1) << "Rendering mode: " << static_cast<int>(render_mode)
<< " is unsupported";
MEDIA_LOG(ERROR, media_log_)
- << "MediaFoundationRenderer SetRenderingMode: " << (int)render_mode
+ << "MediaFoundationRenderer SetMediaFoundationRenderingMode: "
+ << (int)render_mode
<< " is not defined. No change to the rendering mode.";
hr = E_NOT_SET;
}
if (SUCCEEDED(hr)) {
hr = mf_media_engine_ex->EnableWindowlessSwapchainMode(
- render_mode == RenderingMode::DirectComposition);
+ render_mode == MediaFoundationRenderingMode::DirectComposition);
if (SUCCEEDED(hr)) {
rendering_mode_ = render_mode;
+ MEDIA_LOG(INFO, media_log_)
+ << "Set MediaFoundationRenderingMode: " << rendering_mode_;
}
}
}
}
bool MediaFoundationRenderer::InFrameServerMode() {
- return rendering_mode_ == RenderingMode::FrameServer;
+ return rendering_mode_ == MediaFoundationRenderingMode::FrameServer;
}
void MediaFoundationRenderer::StartPlayingFrom(base::TimeDelta time) {
@@ -632,7 +645,7 @@ HRESULT MediaFoundationRenderer::UpdateVideoStream(const gfx::Rect& rect) {
RECT dest_rect = {0, 0, rect.width(), rect.height()};
RETURN_IF_FAILED(mf_media_engine_ex->UpdateVideoStream(
/*pSrc=*/nullptr, &dest_rect, /*pBorderClr=*/nullptr));
- if (rendering_mode_ == RenderingMode::FrameServer) {
+ if (rendering_mode_ == MediaFoundationRenderingMode::FrameServer) {
RETURN_IF_FAILED(InitializeTexturePool(native_video_size_));
}
return S_OK;
@@ -877,7 +890,7 @@ void MediaFoundationRenderer::OnVideoNaturalSizeChange() {
std::ignore = UpdateVideoStream(test_rect);
}
- if (rendering_mode_ == RenderingMode::FrameServer) {
+ if (rendering_mode_ == MediaFoundationRenderingMode::FrameServer) {
InitializeTexturePool(native_video_size_);
}
@@ -927,7 +940,7 @@ void MediaFoundationRenderer::RequestNextFrameBetweenTimestamps(
base::TimeTicks deadline_min,
base::TimeTicks deadline_max) {
DCHECK(task_runner_->RunsTasksInCurrentSequence());
- if (rendering_mode_ != RenderingMode::FrameServer) {
+ if (rendering_mode_ != MediaFoundationRenderingMode::FrameServer) {
return;
}
diff --git a/chromium/media/renderers/win/media_foundation_renderer.h b/chromium/media/renderers/win/media_foundation_renderer.h
index 6566282dc90..6fa22afee29 100644
--- a/chromium/media/renderers/win/media_foundation_renderer.h
+++ b/chromium/media/renderers/win/media_foundation_renderer.h
@@ -101,7 +101,8 @@ class MEDIA_EXPORT MediaFoundationRenderer
void NotifyFrameReleased(const base::UnguessableToken& frame_token) override;
void RequestNextFrameBetweenTimestamps(base::TimeTicks deadline_min,
base::TimeTicks deadline_max) override;
- void SetRenderingMode(RenderingMode render_mode) override;
+ void SetMediaFoundationRenderingMode(
+ MediaFoundationRenderingMode render_mode) override;
// Testing verification
bool InFrameServerMode();
@@ -211,8 +212,9 @@ class MEDIA_EXPORT MediaFoundationRenderer
// Composition mode.
MediaFoundationTexturePool texture_pool_;
- // The represents the rendering mode of the Media Engine.
- RenderingMode rendering_mode_ = RenderingMode::DirectComposition;
+ // Rendering mode the Media Engine will use.
+ MediaFoundationRenderingMode rendering_mode_ =
+ MediaFoundationRenderingMode::DirectComposition;
bool has_reported_significant_playback_ = false;
diff --git a/chromium/media/renderers/win/media_foundation_renderer_extension.h b/chromium/media/renderers/win/media_foundation_renderer_extension.h
index d7170681016..768e4a317b4 100644
--- a/chromium/media/renderers/win/media_foundation_renderer_extension.h
+++ b/chromium/media/renderers/win/media_foundation_renderer_extension.h
@@ -8,18 +8,11 @@
#include "base/callback.h"
#include "base/win/scoped_handle.h"
#include "media/base/media_export.h"
+#include "media/renderers/win/media_foundation_rendering_mode.h"
#include "ui/gfx/geometry/rect.h"
namespace media {
-// This C++ enum is the equivalent to mojom::RenderingMode
-enum class RenderingMode : int32_t {
- DirectComposition = 0,
- FrameServer = 1,
- kMinValue = 0,
- kMaxValue = 1,
-};
-
// C++ interface equivalent to mojom::MediaFoundationRendererExtension.
// This interface allows MediaFoundationRenderer to support video rendering
// using Direct Compositon.
@@ -54,7 +47,8 @@ class MEDIA_EXPORT MediaFoundationRendererExtension {
base::TimeTicks deadline_max) = 0;
// Change which mode we are using for video frame rendering.
- virtual void SetRenderingMode(RenderingMode mode) = 0;
+ virtual void SetMediaFoundationRenderingMode(
+ MediaFoundationRenderingMode mode) = 0;
};
} // namespace media
diff --git a/chromium/media/renderers/win/media_foundation_renderer_unittest.cc b/chromium/media/renderers/win/media_foundation_renderer_unittest.cc
index 1f8b972c75d..185d18952cf 100644
--- a/chromium/media/renderers/win/media_foundation_renderer_unittest.cc
+++ b/chromium/media/renderers/win/media_foundation_renderer_unittest.cc
@@ -11,10 +11,12 @@
#include "base/memory/scoped_refptr.h"
#include "base/task/single_thread_task_runner.h"
#include "base/test/mock_callback.h"
+#include "base/test/scoped_feature_list.h"
#include "base/test/task_environment.h"
#include "base/win/scoped_com_initializer.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/demuxer_stream.h"
+#include "media/base/media_switches.h"
#include "media/base/media_util.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
@@ -241,6 +243,11 @@ TEST_F(MediaFoundationRendererTest, ClearStartsInFrameServer) {
if (!MediaFoundationRenderer::IsSupported())
return;
+ base::test::ScopedFeatureList feature_list;
+ feature_list.InitAndEnableFeatureWithParameters(
+ media::kMediaFoundationClearRendering, {{"strategy", "dynamic"}});
+ ;
+
AddStream(DemuxerStream::AUDIO, /*encrypted=*/false);
AddStream(DemuxerStream::VIDEO, /*encrypted=*/false);
diff --git a/chromium/media/renderers/win/media_foundation_rendering_mode.cc b/chromium/media/renderers/win/media_foundation_rendering_mode.cc
new file mode 100644
index 00000000000..a1c4d509c6b
--- /dev/null
+++ b/chromium/media/renderers/win/media_foundation_rendering_mode.cc
@@ -0,0 +1,31 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/renderers/win/media_foundation_rendering_mode.h"
+
+#include "base/strings/string_number_conversions.h"
+
+#include <string>
+
+namespace media {
+
+std::ostream& operator<<(std::ostream& os,
+ const MediaFoundationRenderingMode& render_mode) {
+ std::string mode;
+ switch (render_mode) {
+ case (MediaFoundationRenderingMode::FrameServer):
+ mode = "Frame Server";
+ break;
+ case (MediaFoundationRenderingMode::DirectComposition):
+ mode = "Direct Composition";
+ break;
+ default:
+ mode = "UNEXPECTED RENDERING MODE " +
+ base::NumberToString(static_cast<int>(render_mode));
+ }
+
+ return os << mode;
+}
+
+} // namespace media
diff --git a/chromium/media/renderers/win/media_foundation_rendering_mode.h b/chromium/media/renderers/win/media_foundation_rendering_mode.h
new file mode 100644
index 00000000000..e82ca89d0c9
--- /dev/null
+++ b/chromium/media/renderers/win/media_foundation_rendering_mode.h
@@ -0,0 +1,24 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_RENDERERS_WIN_MEDIA_FOUNDATION_RENDERING_MODE_H_
+#define MEDIA_RENDERERS_WIN_MEDIA_FOUNDATION_RENDERING_MODE_H_
+
+#include <ostream>
+
+namespace media {
+
+// This C++ enum is the equivalent to mojom::MediaFoundationRenderingMode
+enum class MediaFoundationRenderingMode : int32_t {
+ DirectComposition = 0,
+ FrameServer = 1,
+ kMaxValue = 1,
+};
+
+std::ostream& operator<<(std::ostream& os,
+ const MediaFoundationRenderingMode& render_mode);
+
+} // namespace media
+
+#endif // MEDIA_RENDERERS_WIN_MEDIA_FOUNDATION_RENDERING_MODE_H_
diff --git a/chromium/media/renderers/win/media_foundation_source_wrapper.cc b/chromium/media/renderers/win/media_foundation_source_wrapper.cc
index a6b2cc8279a..8d5d492a201 100644
--- a/chromium/media/renderers/win/media_foundation_source_wrapper.cc
+++ b/chromium/media/renderers/win/media_foundation_source_wrapper.cc
@@ -339,6 +339,9 @@ HRESULT MediaFoundationSourceWrapper::GetInputTrustAuthority(
IUnknown** object_out) {
DVLOG_FUNC(1);
+ if (state_ == State::kShutdown)
+ return MF_E_SHUTDOWN;
+
if (stream_id >= StreamCount())
return E_INVALIDARG;
diff --git a/chromium/media/renderers/win/media_foundation_stream_wrapper.cc b/chromium/media/renderers/win/media_foundation_stream_wrapper.cc
index 0d7783695af..2102ff8cc00 100644
--- a/chromium/media/renderers/win/media_foundation_stream_wrapper.cc
+++ b/chromium/media/renderers/win/media_foundation_stream_wrapper.cc
@@ -307,9 +307,9 @@ void MediaFoundationStreamWrapper::ProcessRequestsIfPossible() {
return;
}
- if (!demuxer_stream_ || pending_stream_read_) {
+ if (!demuxer_stream_ || pending_stream_read_)
return;
- }
+
demuxer_stream_->Read(
base::BindOnce(&MediaFoundationStreamWrapper::OnDemuxerStreamRead,
weak_factory_.GetWeakPtr()));
@@ -326,6 +326,8 @@ HRESULT MediaFoundationStreamWrapper::ServiceSampleRequest(
if (buffer->end_of_stream()) {
if (!enabled_) {
DVLOG_FUNC(2) << "Ignoring EOS for disabled stream";
+ // token not dropped to reflect an outstanding request that stream wrapper
+ // should service when the stream is enabled
return S_OK;
}
DVLOG_FUNC(2) << "End of stream";
@@ -348,6 +350,9 @@ HRESULT MediaFoundationStreamWrapper::ServiceSampleRequest(
RETURN_IF_FAILED(mf_media_event_queue_->QueueEventParamUnk(
MEMediaSample, GUID_NULL, S_OK, mf_sample.Get()));
}
+
+ pending_sample_request_tokens_.pop();
+
return S_OK;
}
@@ -369,7 +374,6 @@ bool MediaFoundationStreamWrapper::ServicePostFlushSampleRequest() {
return false;
}
- pending_sample_request_tokens_.pop();
post_flush_buffers_.pop();
return true;
}
@@ -417,7 +421,6 @@ void MediaFoundationStreamWrapper::OnDemuxerStreamRead(
<< ": ServiceSampleRequest failed: " << PrintHr(hr);
return;
}
- pending_sample_request_tokens_.pop();
}
} else if (status == DemuxerStream::Status::kConfigChanged) {
DVLOG_FUNC(2) << "Stream config changed, AreFormatChangesEnabled="
diff --git a/chromium/media/renderers/win/media_foundation_texture_pool_unittest.cc b/chromium/media/renderers/win/media_foundation_texture_pool_unittest.cc
index f4b7939a86f..e776a2535e7 100644
--- a/chromium/media/renderers/win/media_foundation_texture_pool_unittest.cc
+++ b/chromium/media/renderers/win/media_foundation_texture_pool_unittest.cc
@@ -4,6 +4,7 @@
#include "media/renderers/win/media_foundation_texture_pool.h"
+#include "base/memory/raw_ptr.h"
#include "base/test/mock_callback.h"
#include "base/test/task_environment.h"
#include "media/base/mock_filters.h"
@@ -83,7 +84,7 @@ class MockD3D11Resource final : public IDXGIResource1 {
SetPrivateDataInterface(REFGUID guid, const IUnknown* pData) override;
private:
- MockD3D11Texture2D* parent_;
+ raw_ptr<MockD3D11Texture2D> parent_;
volatile ULONG refcount_ = 1;
};
@@ -516,8 +517,8 @@ TEST_F(MediaFoundationTexturePoolTest, VerifyTextureInitialization) {
class SpecialCallback {
private:
- base::WaitableEvent* wait_event_;
- gfx::Size* frame_size_;
+ raw_ptr<base::WaitableEvent> wait_event_;
+ raw_ptr<gfx::Size> frame_size_;
public:
SpecialCallback(base::WaitableEvent* wait_event, gfx::Size* frame_size)
diff --git a/chromium/media/renderers/win/media_foundation_video_stream.cc b/chromium/media/renderers/win/media_foundation_video_stream.cc
index 03d7fa191f7..a2d575118db 100644
--- a/chromium/media/renderers/win/media_foundation_video_stream.cc
+++ b/chromium/media/renderers/win/media_foundation_video_stream.cc
@@ -26,6 +26,10 @@ namespace {
// This is supported by Media Foundation.
DEFINE_MEDIATYPE_GUID(MFVideoFormat_THEORA, FCC('theo'))
+// MF_MT_MIN_MASTERING_LUMINANCE values are in 1/10000th of a nit (0.0001 nit).
+// https://docs.microsoft.com/en-us/windows/win32/api/dxgi1_5/ns-dxgi1_5-dxgi_hdr_metadata_hdr10
+constexpr int kMasteringDispLuminanceScale = 10000;
+
GUID VideoCodecToMFSubtype(VideoCodec codec, VideoCodecProfile profile) {
switch (codec) {
case VideoCodec::kH264:
@@ -160,6 +164,22 @@ MFVideoTransferFunction VideoTransferFunctionToMF(
return MFVideoTransFunc_Unknown;
}
+MT_CUSTOM_VIDEO_PRIMARIES CustomVideoPrimaryToMF(
+ gfx::ColorVolumeMetadata color_volume_metadata) {
+ // MT_CUSTOM_VIDEO_PRIMARIES stores value in float no scaling factor needed
+ // https://docs.microsoft.com/en-us/windows/win32/api/mfapi/ns-mfapi-mt_custom_video_primaries
+ MT_CUSTOM_VIDEO_PRIMARIES primaries = {0};
+ primaries.fRx = color_volume_metadata.primary_r.x();
+ primaries.fRy = color_volume_metadata.primary_r.y();
+ primaries.fGx = color_volume_metadata.primary_g.x();
+ primaries.fGy = color_volume_metadata.primary_g.y();
+ primaries.fBx = color_volume_metadata.primary_b.x();
+ primaries.fBy = color_volume_metadata.primary_b.y();
+ primaries.fWx = color_volume_metadata.white_point.x();
+ primaries.fWy = color_volume_metadata.white_point.y();
+ return primaries;
+}
+
#if BUILDFLAG(ENABLE_PLATFORM_DOLBY_VISION)
// To MediaFoundation, DolbyVision renderer profile strings are always 7
// characters. For HEVC based profiles, it's in the format "dvhe.xx". For AVC
@@ -261,6 +281,31 @@ HRESULT GetVideoType(const VideoDecoderConfig& config,
RETURN_IF_FAILED(
media_type->SetUINT32(MF_MT_VIDEO_PRIMARIES, mf_video_primary));
+ UINT32 video_nominal_range =
+ config.color_space_info().range == gfx::ColorSpace::RangeID::FULL
+ ? MFNominalRange_0_255
+ : MFNominalRange_16_235;
+ RETURN_IF_FAILED(
+ media_type->SetUINT32(MF_MT_VIDEO_NOMINAL_RANGE, video_nominal_range));
+
+ if (config.hdr_metadata().has_value()) {
+ UINT32 max_display_mastering_luminance =
+ config.hdr_metadata()->color_volume_metadata.luminance_max;
+ RETURN_IF_FAILED(media_type->SetUINT32(MF_MT_MAX_MASTERING_LUMINANCE,
+ max_display_mastering_luminance));
+
+ UINT32 min_display_mastering_luminance =
+ config.hdr_metadata()->color_volume_metadata.luminance_min *
+ kMasteringDispLuminanceScale;
+ RETURN_IF_FAILED(media_type->SetUINT32(MF_MT_MIN_MASTERING_LUMINANCE,
+ min_display_mastering_luminance));
+
+ MT_CUSTOM_VIDEO_PRIMARIES primaries =
+ CustomVideoPrimaryToMF(config.hdr_metadata()->color_volume_metadata);
+ RETURN_IF_FAILED(media_type->SetBlob(MF_MT_CUSTOM_VIDEO_PRIMARIES,
+ reinterpret_cast<UINT8*>(&primaries),
+ sizeof(MT_CUSTOM_VIDEO_PRIMARIES)));
+ }
base::UmaHistogramEnumeration(
"Media.MediaFoundation.VideoColorSpace.TransferID",
config.color_space_info().transfer);
diff --git a/chromium/media/video/BUILD.gn b/chromium/media/video/BUILD.gn
index bda5d803cf4..9b7658a09b4 100644
--- a/chromium/media/video/BUILD.gn
+++ b/chromium/media/video/BUILD.gn
@@ -53,7 +53,7 @@ source_set("video") {
"h265_nalu_parser.cc",
"h265_nalu_parser.h",
]
- if (enable_platform_hevc_decoding) {
+ if (enable_hevc_parser_and_hw_decoder) {
sources += [
"h265_parser.cc",
"h265_parser.h",
@@ -150,8 +150,11 @@ source_set("unit_tests") {
]
if (enable_platform_hevc) {
sources += [ "h265_nalu_parser_unittest.cc" ]
- if (enable_platform_hevc_decoding) {
- sources += [ "h265_parser_unittest.cc" ]
+ if (enable_hevc_parser_and_hw_decoder) {
+ sources += [
+ "h265_parser_unittest.cc",
+ "h265_poc_unittest.cc",
+ ]
}
}
@@ -180,7 +183,7 @@ fuzzer_test("media_h264_parser_fuzzer") {
]
}
-if (enable_platform_hevc_decoding) {
+if (enable_hevc_parser_and_hw_decoder) {
fuzzer_test("media_h265_parser_fuzzer") {
sources = [ "h265_parser_fuzzertest.cc" ]
deps = [
diff --git a/chromium/media/video/av1_video_encoder.cc b/chromium/media/video/av1_video_encoder.cc
index 368e7a47a5d..50121a0cbe9 100644
--- a/chromium/media/video/av1_video_encoder.cc
+++ b/chromium/media/video/av1_video_encoder.cc
@@ -380,7 +380,7 @@ void Av1VideoEncoder::Encode(scoped_refptr<VideoFrame> frame,
return;
}
- TRACE_EVENT0("media", "aom_codec_encode");
+ TRACE_EVENT1("media", "aom_codec_encode", "timestamp", frame->timestamp());
// Use artificial timestamps, so the encoder will not be misled by frame's
// fickle timestamps when doing rate control.
auto error =
diff --git a/chromium/media/video/fake_video_encode_accelerator.cc b/chromium/media/video/fake_video_encode_accelerator.cc
index 3ea82727ceb..aee0e86a6ef 100644
--- a/chromium/media/video/fake_video_encode_accelerator.cc
+++ b/chromium/media/video/fake_video_encode_accelerator.cc
@@ -40,6 +40,7 @@ FakeVideoEncodeAccelerator::GetSupportedProfiles() {
profile.max_resolution.SetSize(1920, 1088);
profile.max_framerate_numerator = 30;
profile.max_framerate_denominator = 1;
+ profile.rate_control_modes = media::VideoEncodeAccelerator::kConstantMode;
profile.profile = media::H264PROFILE_MAIN;
profiles.push_back(profile);
diff --git a/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc b/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc
index b619ccb44fa..a42246b1241 100644
--- a/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc
+++ b/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc
@@ -6,6 +6,7 @@
#include <memory>
#include "base/bind.h"
+#include "base/memory/raw_ptr.h"
#include "base/test/simple_test_tick_clock.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -237,10 +238,10 @@ class GpuMemoryBufferVideoFramePoolTest : public ::testing::Test {
static constexpr uint8_t kUValue = 50;
static constexpr uint8_t kVValue = 150;
- uint8_t* y_data_ = nullptr;
- uint8_t* u_data_ = nullptr;
- uint8_t* v_data_ = nullptr;
- uint8_t* uv_data_ = nullptr;
+ raw_ptr<uint8_t> y_data_ = nullptr;
+ raw_ptr<uint8_t> u_data_ = nullptr;
+ raw_ptr<uint8_t> v_data_ = nullptr;
+ raw_ptr<uint8_t> uv_data_ = nullptr;
base::SimpleTestTickClock test_clock_;
std::unique_ptr<MockGpuVideoAcceleratorFactories> mock_gpu_factories_;
diff --git a/chromium/media/video/h264_level_limits.cc b/chromium/media/video/h264_level_limits.cc
index f4f60c2bd97..6261632c9f1 100644
--- a/chromium/media/video/h264_level_limits.cc
+++ b/chromium/media/video/h264_level_limits.cc
@@ -5,6 +5,7 @@
#include "media/video/h264_level_limits.h"
#include "base/logging.h"
+#include "base/numerics/checked_math.h"
#include "media/video/h264_parser.h"
namespace media {
diff --git a/chromium/media/video/h265_parser.cc b/chromium/media/video/h265_parser.cc
index 4969e15163e..46e840fd3ee 100644
--- a/chromium/media/video/h265_parser.cc
+++ b/chromium/media/video/h265_parser.cc
@@ -650,40 +650,44 @@ H265Parser::Result H265Parser::ParseSPS(int* sps_id) {
sps->pic_height_in_luma_samples);
}
- bool sps_extension_present_flag;
- bool sps_range_extension_flag = false;
- bool sps_multilayer_extension_flag = false;
- bool sps_3d_extension_flag = false;
- bool sps_scc_extension_flag = false;
- READ_BOOL_OR_RETURN(&sps_extension_present_flag);
- if (sps_extension_present_flag) {
- READ_BOOL_OR_RETURN(&sps_range_extension_flag);
- READ_BOOL_OR_RETURN(&sps_multilayer_extension_flag);
- READ_BOOL_OR_RETURN(&sps_3d_extension_flag);
- READ_BOOL_OR_RETURN(&sps_scc_extension_flag);
+ READ_BOOL_OR_RETURN(&sps->sps_extension_present_flag);
+ if (sps->sps_extension_present_flag) {
+ READ_BOOL_OR_RETURN(&sps->sps_range_extension_flag);
+ READ_BOOL_OR_RETURN(&sps->sps_multilayer_extension_flag);
+ READ_BOOL_OR_RETURN(&sps->sps_3d_extension_flag);
+ READ_BOOL_OR_RETURN(&sps->sps_scc_extension_flag);
SKIP_BITS_OR_RETURN(4); // sps_extension_4bits
}
- if (sps_range_extension_flag) {
- DVLOG(1) << "HEVC range extension not supported";
- return kInvalidStream;
- }
- if (sps_multilayer_extension_flag) {
+ if (sps->sps_range_extension_flag) {
+ READ_BOOL_OR_RETURN(&sps->transform_skip_rotation_enabled_flag);
+ READ_BOOL_OR_RETURN(&sps->transform_skip_context_enabled_flag);
+ READ_BOOL_OR_RETURN(&sps->implicit_rdpcm_enabled_flag);
+ READ_BOOL_OR_RETURN(&sps->explicit_rdpcm_enabled_flag);
+ READ_BOOL_OR_RETURN(&sps->extended_precision_processing_flag);
+ READ_BOOL_OR_RETURN(&sps->intra_smoothing_disabled_flag);
+ READ_BOOL_OR_RETURN(&sps->high_precision_offsets_enabled_flag);
+ READ_BOOL_OR_RETURN(&sps->persistent_rice_adaptation_enabled_flag);
+ READ_BOOL_OR_RETURN(&sps->cabac_bypass_alignment_enabled_flag);
+ }
+ if (sps->sps_multilayer_extension_flag) {
DVLOG(1) << "HEVC multilayer extension not supported";
return kInvalidStream;
}
- if (sps_3d_extension_flag) {
+ if (sps->sps_3d_extension_flag) {
DVLOG(1) << "HEVC 3D extension not supported";
return kInvalidStream;
}
- if (sps_scc_extension_flag) {
+ if (sps->sps_scc_extension_flag) {
DVLOG(1) << "HEVC SCC extension not supported";
return kInvalidStream;
}
- // NOTE: The below 2 values are dependent upon the range extension if that is
- // ever implemented.
- sps->wp_offset_half_range_y = 1 << 7;
- sps->wp_offset_half_range_c = 1 << 7;
+ sps->wp_offset_half_range_y = 1 << (sps->high_precision_offsets_enabled_flag
+ ? sps->bit_depth_luma_minus8 + 7
+ : 7);
+ sps->wp_offset_half_range_c = 1 << (sps->high_precision_offsets_enabled_flag
+ ? sps->bit_depth_chroma_minus8 + 7
+ : 7);
// If an SPS with the same id already exists, replace it.
*sps_id = sps->sps_seq_parameter_set_id;
@@ -808,33 +812,51 @@ H265Parser::Result H265Parser::ParsePPS(const H265NALU& nalu, int* pps_id) {
IN_RANGE_OR_RETURN(pps->log2_parallel_merge_level_minus2, 0,
sps->ctb_log2_size_y - 2);
READ_BOOL_OR_RETURN(&pps->slice_segment_header_extension_present_flag);
- bool pps_extension_present_flag;
- READ_BOOL_OR_RETURN(&pps_extension_present_flag);
- bool pps_range_extension_flag = false;
- bool pps_multilayer_extension_flag = false;
- bool pps_3d_extension_flag = false;
- bool pps_scc_extension_flag = false;
- if (pps_extension_present_flag) {
- READ_BOOL_OR_RETURN(&pps_range_extension_flag);
- READ_BOOL_OR_RETURN(&pps_multilayer_extension_flag);
- READ_BOOL_OR_RETURN(&pps_3d_extension_flag);
- READ_BOOL_OR_RETURN(&pps_scc_extension_flag);
+ READ_BOOL_OR_RETURN(&pps->pps_extension_present_flag);
+ if (pps->pps_extension_present_flag) {
+ READ_BOOL_OR_RETURN(&pps->pps_range_extension_flag);
+ READ_BOOL_OR_RETURN(&pps->pps_multilayer_extension_flag);
+ READ_BOOL_OR_RETURN(&pps->pps_3d_extension_flag);
+ READ_BOOL_OR_RETURN(&pps->pps_scc_extension_flag);
SKIP_BITS_OR_RETURN(4); // pps_extension_4bits
}
- if (pps_range_extension_flag) {
- DVLOG(1) << "HEVC range extension not supported";
- return kInvalidStream;
- }
- if (pps_multilayer_extension_flag) {
+ if (pps->pps_range_extension_flag) {
+ if (pps->transform_skip_enabled_flag) {
+ READ_UE_OR_RETURN(&pps->log2_max_transform_skip_block_size_minus2);
+ IN_RANGE_OR_RETURN(pps->log2_max_transform_skip_block_size_minus2, 0, 3);
+ }
+ READ_BOOL_OR_RETURN(&pps->cross_component_prediction_enabled_flag);
+ READ_BOOL_OR_RETURN(&pps->chroma_qp_offset_list_enabled_flag);
+ if (pps->chroma_qp_offset_list_enabled_flag) {
+ READ_UE_OR_RETURN(&pps->diff_cu_chroma_qp_offset_depth);
+ IN_RANGE_OR_RETURN(pps->diff_cu_chroma_qp_offset_depth, 0,
+ sps->log2_diff_max_min_luma_coding_block_size);
+ READ_UE_OR_RETURN(&pps->chroma_qp_offset_list_len_minus1);
+ IN_RANGE_OR_RETURN(pps->chroma_qp_offset_list_len_minus1, 0, 5);
+ for (int i = 0; i <= pps->chroma_qp_offset_list_len_minus1; i++) {
+ READ_SE_OR_RETURN(&pps->cb_qp_offset_list[i]);
+ IN_RANGE_OR_RETURN(pps->cb_qp_offset_list[i], -12, 12);
+ READ_SE_OR_RETURN(&pps->cr_qp_offset_list[i]);
+ IN_RANGE_OR_RETURN(pps->cr_qp_offset_list[i], -12, 12);
+ }
+ }
+ READ_UE_OR_RETURN(&pps->log2_sao_offset_scale_luma);
+ IN_RANGE_OR_RETURN(pps->log2_sao_offset_scale_luma, 0,
+ std::max(sps->bit_depth_luma_minus8 - 2, 0));
+ READ_UE_OR_RETURN(&pps->log2_sao_offset_scale_chroma);
+ IN_RANGE_OR_RETURN(pps->log2_sao_offset_scale_chroma, 0,
+ std::max(sps->bit_depth_chroma_minus8 - 2, 0));
+ }
+ if (pps->pps_multilayer_extension_flag) {
DVLOG(1) << "HEVC multilayer extension not supported";
return kInvalidStream;
}
- if (pps_3d_extension_flag) {
+ if (pps->pps_3d_extension_flag) {
DVLOG(1) << "HEVC 3D extension not supported";
return kInvalidStream;
}
- if (pps_scc_extension_flag) {
+ if (pps->pps_scc_extension_flag) {
DVLOG(1) << "HEVC SCC extension not supported";
return kInvalidStream;
}
@@ -1136,8 +1158,8 @@ H265Parser::Result H265Parser::ParseSliceHeader(const H265NALU& nalu,
// pps_slice_act_qp_offsets_present_flag is zero, we don't support SCC ext.
- // chroma_qp_offset_list_enabled_flag is zero, we don't support range ext.
-
+ if (pps->chroma_qp_offset_list_enabled_flag)
+ SKIP_BITS_OR_RETURN(1); // cu_chroma_qp_offset_enabled_flag
bool deblocking_filter_override_flag = false;
if (pps->deblocking_filter_override_enabled_flag)
READ_BOOL_OR_RETURN(&deblocking_filter_override_flag);
@@ -1229,6 +1251,22 @@ VideoCodecProfile H265Parser::ProfileIDCToVideoCodecProfile(int profile_idc) {
return HEVCPROFILE_MAIN10;
case H265ProfileTierLevel::kProfileIdcMainStill:
return HEVCPROFILE_MAIN_STILL_PICTURE;
+ case H265ProfileTierLevel::kProfileIdcRangeExtensions:
+ return HEVCPROFILE_REXT;
+ case H265ProfileTierLevel::kProfileIdcHighThroughput:
+ return HEVCPROFILE_HIGH_THROUGHPUT;
+ case H265ProfileTierLevel::kProfileIdcMultiviewMain:
+ return HEVCPROFILE_MULTIVIEW_MAIN;
+ case H265ProfileTierLevel::kProfileIdcScalableMain:
+ return HEVCPROFILE_SCALABLE_MAIN;
+ case H265ProfileTierLevel::kProfileIdc3dMain:
+ return HEVCPROFILE_3D_MAIN;
+ case H265ProfileTierLevel::kProfileIdcScreenContentCoding:
+ return HEVCPROFILE_SCREEN_EXTENDED;
+ case H265ProfileTierLevel::kProfileIdcScalableRangeExtensions:
+ return HEVCPROFILE_SCALABLE_REXT;
+ case H265ProfileTierLevel::kProfileIdcHighThroughputScreenContentCoding:
+ return HEVCPROFILE_HIGH_THROUGHPUT_SCREEN_EXTENDED;
default:
DVLOG(1) << "unknown video profile: " << profile_idc;
return VIDEO_CODEC_PROFILE_UNKNOWN;
diff --git a/chromium/media/video/h265_parser.h b/chromium/media/video/h265_parser.h
index 3553ecaaaf8..0dce696e1cd 100644
--- a/chromium/media/video/h265_parser.h
+++ b/chromium/media/video/h265_parser.h
@@ -35,14 +35,17 @@ enum {
struct MEDIA_EXPORT H265ProfileTierLevel {
H265ProfileTierLevel();
- // From Annex A.3.
enum H265ProfileIdc {
kProfileIdcMain = 1,
kProfileIdcMain10 = 2,
kProfileIdcMainStill = 3,
kProfileIdcRangeExtensions = 4,
kProfileIdcHighThroughput = 5,
+ kProfileIdcMultiviewMain = 6,
+ kProfileIdcScalableMain = 7,
+ kProfileIdc3dMain = 8,
kProfileIdcScreenContentCoding = 9,
+ kProfileIdcScalableRangeExtensions = 10,
kProfileIdcHighThroughputScreenContentCoding = 11,
};
@@ -179,6 +182,22 @@ struct MEDIA_EXPORT H265SPS {
bool strong_intra_smoothing_enabled_flag;
H265VUIParameters vui_parameters;
+ // Extension extra elements.
+ bool sps_extension_present_flag;
+ bool sps_range_extension_flag;
+ bool sps_multilayer_extension_flag;
+ bool sps_3d_extension_flag;
+ bool sps_scc_extension_flag;
+ bool transform_skip_rotation_enabled_flag;
+ bool transform_skip_context_enabled_flag;
+ bool implicit_rdpcm_enabled_flag;
+ bool explicit_rdpcm_enabled_flag;
+ bool extended_precision_processing_flag;
+ bool intra_smoothing_disabled_flag;
+ bool high_precision_offsets_enabled_flag;
+ bool persistent_rice_adaptation_enabled_flag;
+ bool cabac_bypass_alignment_enabled_flag;
+
// Calculated fields.
int chroma_array_type;
int sub_width_c;
@@ -251,6 +270,22 @@ struct MEDIA_EXPORT H265PPS {
int log2_parallel_merge_level_minus2;
bool slice_segment_header_extension_present_flag;
+ // Extension extra elements.
+ bool pps_extension_present_flag;
+ bool pps_range_extension_flag;
+ bool pps_multilayer_extension_flag;
+ bool pps_3d_extension_flag;
+ bool pps_scc_extension_flag;
+ int log2_max_transform_skip_block_size_minus2;
+ bool cross_component_prediction_enabled_flag;
+ bool chroma_qp_offset_list_enabled_flag;
+ int diff_cu_chroma_qp_offset_depth;
+ int chroma_qp_offset_list_len_minus1;
+ int cb_qp_offset_list[6];
+ int cr_qp_offset_list[6];
+ int log2_sao_offset_scale_luma;
+ int log2_sao_offset_scale_chroma;
+
// Calculated fields.
int qp_bd_offset_y;
};
diff --git a/chromium/media/video/h265_poc_unittest.cc b/chromium/media/video/h265_poc_unittest.cc
new file mode 100644
index 00000000000..85a856dc003
--- /dev/null
+++ b/chromium/media/video/h265_poc_unittest.cc
@@ -0,0 +1,393 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "media/video/h265_parser.h"
+#include "media/video/h265_poc.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class H265POCTest : public testing::Test {
+ public:
+ H265POCTest() : sps_(), pps_(), slice_hdr_() {}
+
+ H265POCTest(const H265POCTest&) = delete;
+ H265POCTest& operator=(const H265POCTest&) = delete;
+
+ protected:
+ void ComputePOC() {
+ poc_ = h265_poc_.ComputePicOrderCnt(&sps_, &pps_, slice_hdr_);
+ }
+
+ int32_t poc_;
+
+ H265SPS sps_;
+ H265PPS pps_;
+ H265SliceHeader slice_hdr_;
+ H265POC h265_poc_;
+};
+
+TEST_F(H265POCTest, PicOrderCnt) {
+ sps_.log2_max_pic_order_cnt_lsb_minus4 = 7;
+ pps_.temporal_id = 0;
+
+ // Initial I frame with POC 0.
+ slice_hdr_.nal_unit_type = H265NALU::IDR_N_LP;
+ slice_hdr_.irap_pic = true;
+ slice_hdr_.slice_pic_order_cnt_lsb = 0;
+ ComputePOC();
+ ASSERT_EQ(0, poc_);
+
+ // P frame with POC lsb 4.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 4;
+ ComputePOC();
+ ASSERT_EQ(4, poc_);
+
+ // B frame with POC lsb 2.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 2;
+ ComputePOC();
+
+ ASSERT_EQ(2, poc_);
+
+ // B frame with POC lsb 1.
+ slice_hdr_.nal_unit_type = H265NALU::TSA_N;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 1;
+ ComputePOC();
+ ASSERT_EQ(1, poc_);
+
+ // B frame with POC lsb 3.
+ slice_hdr_.nal_unit_type = H265NALU::TSA_N;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 3;
+ ComputePOC();
+ ASSERT_EQ(3, poc_);
+
+ // P frame with POC lsb 8.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 8;
+ ComputePOC();
+ ASSERT_EQ(8, poc_);
+
+ // B Ref frame with POC lsb 6.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 6;
+ ComputePOC();
+ ASSERT_EQ(6, poc_);
+
+ // B frame with POC lsb 5.
+ slice_hdr_.nal_unit_type = H265NALU::TSA_N;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 5;
+ ComputePOC();
+ ASSERT_EQ(5, poc_);
+
+ // B frame with POC lsb 7.
+ slice_hdr_.nal_unit_type = H265NALU::TSA_N;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 7;
+ ComputePOC();
+ ASSERT_EQ(7, poc_);
+
+ // P frame with POC lsb 12.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 12;
+ ComputePOC();
+ ASSERT_EQ(12, poc_);
+
+ // B frame with POC lsb 10.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 10;
+ ComputePOC();
+ ASSERT_EQ(10, poc_);
+
+ // B frame with POC lsb 9.
+ slice_hdr_.nal_unit_type = H265NALU::TSA_N;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 9;
+ ComputePOC();
+ ASSERT_EQ(9, poc_);
+
+ // B frame with POC lsb 11.
+ slice_hdr_.nal_unit_type = H265NALU::TSA_N;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 11;
+ ComputePOC();
+ ASSERT_EQ(11, poc_);
+
+ // P frame with POC lsb 16.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 16;
+ ComputePOC();
+ ASSERT_EQ(16, poc_);
+
+ // B frame with POC lsb 14.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 14;
+ ComputePOC();
+ ASSERT_EQ(14, poc_);
+
+ // B frame with POC lsb 13.
+ slice_hdr_.nal_unit_type = H265NALU::TSA_N;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 13;
+ ComputePOC();
+ ASSERT_EQ(13, poc_);
+
+ // B frame with POC lsb 15.
+ slice_hdr_.nal_unit_type = H265NALU::TSA_N;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 15;
+ ComputePOC();
+ ASSERT_EQ(15, poc_);
+};
+
+TEST_F(H265POCTest, PicOrderCntInOrder) {
+ sps_.log2_max_pic_order_cnt_lsb_minus4 = 12;
+ pps_.temporal_id = 0;
+
+ // Initial I frame with POC 0.
+ slice_hdr_.nal_unit_type = H265NALU::IDR_W_RADL;
+ slice_hdr_.irap_pic = true;
+ slice_hdr_.slice_pic_order_cnt_lsb = 0;
+ ComputePOC();
+ ASSERT_EQ(0, poc_);
+
+ // P frame with POC lsb 1.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 1;
+ ComputePOC();
+ ASSERT_EQ(1, poc_);
+
+ // P frame with POC lsb 2.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 2;
+ ComputePOC();
+ ASSERT_EQ(2, poc_);
+
+ // P frame with POC lsb 3.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 3;
+ ComputePOC();
+ ASSERT_EQ(3, poc_);
+
+ // P frame with POC lsb 4.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 4;
+ ComputePOC();
+ ASSERT_EQ(4, poc_);
+
+ // P frame with POC lsb 5.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 5;
+ ComputePOC();
+ ASSERT_EQ(5, poc_);
+
+ // P frame with POC lsb 6.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 6;
+ ComputePOC();
+ ASSERT_EQ(6, poc_);
+
+ // P frame with POC lsb 7.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 7;
+ ComputePOC();
+ ASSERT_EQ(7, poc_);
+
+ // P frame with POC lsb 8.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 8;
+ ComputePOC();
+ ASSERT_EQ(8, poc_);
+
+ // P frame with POC lsb 9.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 9;
+ ComputePOC();
+ ASSERT_EQ(9, poc_);
+
+ // P frame with POC lsb 10.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 10;
+ ComputePOC();
+ ASSERT_EQ(10, poc_);
+
+ // P frame with POC lsb 11.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 11;
+ ComputePOC();
+ ASSERT_EQ(11, poc_);
+
+ // P frame with POC lsb 12.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 12;
+ ComputePOC();
+ ASSERT_EQ(12, poc_);
+
+ // P frame with POC lsb 13.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 13;
+ ComputePOC();
+ ASSERT_EQ(13, poc_);
+
+ // P frame with POC lsb 14.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 14;
+ ComputePOC();
+ ASSERT_EQ(14, poc_);
+
+ // P frame with POC lsb 15.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 15;
+ ComputePOC();
+ ASSERT_EQ(15, poc_);
+
+ // P frame with POC lsb 16.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 16;
+ ComputePOC();
+ ASSERT_EQ(16, poc_);
+
+ // P frame with POC lsb 17.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 17;
+ ComputePOC();
+ ASSERT_EQ(17, poc_);
+
+ // P frame with POC lsb 18.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 18;
+ ComputePOC();
+ ASSERT_EQ(18, poc_);
+
+ // P frame with POC lsb 19.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 19;
+ ComputePOC();
+ ASSERT_EQ(19, poc_);
+
+ // P frame with POC lsb 20.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 20;
+ ComputePOC();
+ ASSERT_EQ(20, poc_);
+
+ // P frame with POC lsb 21.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 21;
+ ComputePOC();
+ ASSERT_EQ(21, poc_);
+
+ // P frame with POC lsb 22.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 22;
+ ComputePOC();
+ ASSERT_EQ(22, poc_);
+
+ // P frame with POC lsb 23.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 23;
+ ComputePOC();
+ ASSERT_EQ(23, poc_);
+
+ // P frame with POC lsb 24.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 24;
+ ComputePOC();
+ ASSERT_EQ(24, poc_);
+
+ // P frame with POC lsb 25.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 25;
+ ComputePOC();
+ ASSERT_EQ(25, poc_);
+
+ // P frame with POC lsb 26.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 26;
+ ComputePOC();
+ ASSERT_EQ(26, poc_);
+
+ // P frame with POC lsb 27.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 27;
+ ComputePOC();
+ ASSERT_EQ(27, poc_);
+
+ // P frame with POC lsb 28.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 28;
+ ComputePOC();
+ ASSERT_EQ(28, poc_);
+
+ // P frame with POC lsb 29.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 29;
+ ComputePOC();
+ ASSERT_EQ(29, poc_);
+
+ // I frame with POC 0.
+ slice_hdr_.nal_unit_type = H265NALU::IDR_W_RADL;
+ slice_hdr_.irap_pic = true;
+ slice_hdr_.slice_pic_order_cnt_lsb = 0;
+ ComputePOC();
+ ASSERT_EQ(0, poc_);
+
+ // P frame with POC lsb 1.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 1;
+ ComputePOC();
+ ASSERT_EQ(1, poc_);
+
+ // P frame with POC lsb 2.
+ slice_hdr_.nal_unit_type = H265NALU::TRAIL_R;
+ slice_hdr_.irap_pic = false;
+ slice_hdr_.slice_pic_order_cnt_lsb = 2;
+ ComputePOC();
+ ASSERT_EQ(2, poc_);
+};
+} // namespace media \ No newline at end of file
diff --git a/chromium/media/video/openh264_video_encoder.cc b/chromium/media/video/openh264_video_encoder.cc
index 92f45f7f43d..23142afbf48 100644
--- a/chromium/media/video/openh264_video_encoder.cc
+++ b/chromium/media/video/openh264_video_encoder.cc
@@ -328,7 +328,8 @@ void OpenH264VideoEncoder::Encode(scoped_refptr<VideoFrame> frame,
}
SFrameBSInfo frame_info = {};
- TRACE_EVENT0("media", "OpenH264::EncodeFrame");
+ TRACE_EVENT1("media", "OpenH264::EncodeFrame", "timestamp",
+ frame->timestamp());
if (int err = codec_->EncodeFrame(&picture, &frame_info)) {
std::move(done_cb).Run(
EncoderStatus(EncoderStatus::Codes::kEncoderFailedEncode,
diff --git a/chromium/media/video/picture.cc b/chromium/media/video/picture.cc
index 046aeb195a4..216db308485 100644
--- a/chromium/media/video/picture.cc
+++ b/chromium/media/video/picture.cc
@@ -98,7 +98,8 @@ Picture::Picture(int32_t picture_buffer_id,
read_lock_fences_enabled_(false),
size_changed_(false),
texture_owner_(false),
- wants_promotion_hint_(false) {}
+ wants_promotion_hint_(false),
+ is_webgpu_compatible_(false) {}
Picture::Picture(const Picture& other) = default;
diff --git a/chromium/media/video/picture.h b/chromium/media/video/picture.h
index 0b2173d5d0b..088a1c9817a 100644
--- a/chromium/media/video/picture.h
+++ b/chromium/media/video/picture.h
@@ -173,6 +173,12 @@ class MEDIA_EXPORT Picture {
return scoped_shared_images_[plane];
}
+ void set_is_webgpu_compatible(bool is_webgpu_compatible) {
+ is_webgpu_compatible_ = is_webgpu_compatible;
+ }
+
+ bool is_webgpu_compatible() { return is_webgpu_compatible_; }
+
private:
int32_t picture_buffer_id_;
int32_t bitstream_buffer_id_;
@@ -183,6 +189,7 @@ class MEDIA_EXPORT Picture {
bool size_changed_;
bool texture_owner_;
bool wants_promotion_hint_;
+ bool is_webgpu_compatible_;
std::array<scoped_refptr<ScopedSharedImage>, VideoFrame::kMaxPlanes>
scoped_shared_images_;
};
diff --git a/chromium/media/video/renderable_gpu_memory_buffer_video_frame_pool.cc b/chromium/media/video/renderable_gpu_memory_buffer_video_frame_pool.cc
index b714eb8fd44..dbf2b3e1f42 100644
--- a/chromium/media/video/renderable_gpu_memory_buffer_video_frame_pool.cc
+++ b/chromium/media/video/renderable_gpu_memory_buffer_video_frame_pool.cc
@@ -179,6 +179,8 @@ bool FrameResources::Initialize() {
return false;
}
+ gpu_memory_buffer_->SetColorSpace(color_space_);
+
// Bind SharedImages to each plane.
constexpr size_t kNumPlanes = 2;
constexpr gfx::BufferPlane kPlanes[kNumPlanes] = {gfx::BufferPlane::Y,
diff --git a/chromium/media/video/software_video_encoder_test.cc b/chromium/media/video/software_video_encoder_test.cc
index 4c8e81626dd..97eecc2dabe 100644
--- a/chromium/media/video/software_video_encoder_test.cc
+++ b/chromium/media/video/software_video_encoder_test.cc
@@ -8,6 +8,7 @@
#include <string>
#include "base/callback_helpers.h"
+#include "base/feature_list.h"
#include "base/logging.h"
#include "base/memory/scoped_refptr.h"
#include "base/strings/string_number_conversions.h"
@@ -19,7 +20,9 @@
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/base/decoder_buffer.h"
+#include "media/base/media_switches.h"
#include "media/base/mock_media_log.h"
+#include "media/base/video_decoder.h"
#include "media/base/video_encoder.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
@@ -47,6 +50,10 @@
#include "media/filters/dav1d_video_decoder.h"
#endif
+#if BUILDFLAG(ENABLE_LIBGAV1_DECODER)
+#include "media/filters/gav1_video_decoder.h"
+#endif
+
namespace media {
struct SwVideoTestParams {
@@ -93,9 +100,16 @@ class SoftwareVideoEncoderTest
decoder_ = std::make_unique<VpxVideoDecoder>();
#endif
} else if (codec_ == VideoCodec::kAV1) {
+#if BUILDFLAG(ENABLE_LIBGAV1_DECODER)
+ if (base::FeatureList::IsEnabled(kGav1VideoDecoder)) {
+ decoder_ = std::make_unique<Gav1VideoDecoder>(&media_log_);
+ } else
+#endif
+ {
#if BUILDFLAG(ENABLE_DAV1D_DECODER)
- decoder_ = std::make_unique<Dav1dVideoDecoder>(&media_log_);
+ decoder_ = std::make_unique<Dav1dVideoDecoder>(&media_log_);
#endif
+ }
}
EXPECT_NE(decoder_, nullptr);
@@ -118,10 +132,10 @@ class SoftwareVideoEncoderTest
frame->data(VideoFrame::kYPlane), frame->stride(VideoFrame::kYPlane),
frame->data(VideoFrame::kUPlane), frame->stride(VideoFrame::kUPlane),
frame->data(VideoFrame::kVPlane), frame->stride(VideoFrame::kVPlane),
- 0, // left
- 0, // top
- frame->visible_rect().width(), // right
- frame->visible_rect().height(), // bottom
+ frame->visible_rect().x(), // x
+ frame->visible_rect().y(), // y
+ frame->visible_rect().width(), // width
+ frame->visible_rect().height(), // height
y, // Y color
u, // U color
v); // V color
@@ -147,10 +161,10 @@ class SoftwareVideoEncoderTest
libyuv::ARGBRect(frame->data(VideoFrame::kARGBPlane),
frame->stride(VideoFrame::kARGBPlane),
- 0, // left
- 0, // top
- frame->visible_rect().width(), // right
- frame->visible_rect().height(), // bottom
+ frame->visible_rect().x(), // dst_x
+ frame->visible_rect().y(), // dst_y
+ frame->visible_rect().width(), // width
+ frame->visible_rect().height(), // height
color);
return frame;
@@ -259,7 +273,7 @@ class SoftwareVideoEncoderTest
uint8_t tolerance = 10;
if (frame1.format() != frame2.format() ||
- frame1.visible_rect() != frame2.visible_rect()) {
+ frame1.visible_rect().size() != frame2.visible_rect().size()) {
return frame1.coded_size().GetArea();
}
@@ -470,7 +484,8 @@ TEST_P(SoftwareVideoEncoderTest, EncodeAndDecode) {
auto original_frame = frames_to_encode[i];
auto decoded_frame = decoded_frames[i];
EXPECT_EQ(decoded_frame->timestamp(), original_frame->timestamp());
- EXPECT_EQ(decoded_frame->visible_rect(), original_frame->visible_rect());
+ EXPECT_EQ(decoded_frame->visible_rect().size(),
+ original_frame->visible_rect().size());
EXPECT_EQ(decoded_frame->format(), PIXEL_FORMAT_I420);
if (decoded_frame->format() == original_frame->format()) {
EXPECT_LE(CountDifferentPixels(*decoded_frame, *original_frame),
diff --git a/chromium/media/video/video_encode_accelerator.cc b/chromium/media/video/video_encode_accelerator.cc
index 0d93cff2259..9c04460ab99 100644
--- a/chromium/media/video/video_encode_accelerator.cc
+++ b/chromium/media/video/video_encode_accelerator.cc
@@ -152,11 +152,13 @@ VideoEncodeAccelerator::SupportedProfile::SupportedProfile(
const gfx::Size& max_resolution,
uint32_t max_framerate_numerator,
uint32_t max_framerate_denominator,
+ SupportedRateControlMode rc_modes,
const std::vector<SVCScalabilityMode>& scalability_modes)
: profile(profile),
max_resolution(max_resolution),
max_framerate_numerator(max_framerate_numerator),
max_framerate_denominator(max_framerate_denominator),
+ rate_control_modes(rc_modes),
scalability_modes(scalability_modes) {}
VideoEncodeAccelerator::SupportedProfile::SupportedProfile(
@@ -180,7 +182,7 @@ bool VideoEncodeAccelerator::IsFlushSupported() {
}
bool VideoEncodeAccelerator::IsGpuFrameResizeSupported() {
-#if BUILDFLAG(IS_CHROMEOS_ASH)
+#if BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_WIN)
// TODO(crbug.com/1166889) Add proper method overrides in
// MojoVideoEncodeAccelerator and other subclasses that might return true.
return true;
@@ -202,6 +204,7 @@ bool operator==(const VideoEncodeAccelerator::SupportedProfile& l,
l.max_resolution == r.max_resolution &&
l.max_framerate_numerator == r.max_framerate_numerator &&
l.max_framerate_denominator == r.max_framerate_denominator &&
+ l.rate_control_modes == r.rate_control_modes &&
l.scalability_modes == r.scalability_modes;
}
diff --git a/chromium/media/video/video_encode_accelerator.h b/chromium/media/video/video_encode_accelerator.h
index 8c9e816b306..4711981843d 100644
--- a/chromium/media/video/video_encode_accelerator.h
+++ b/chromium/media/video/video_encode_accelerator.h
@@ -141,6 +141,13 @@ struct MEDIA_EXPORT BitstreamBufferMetadata final {
// Video encoder interface.
class MEDIA_EXPORT VideoEncodeAccelerator {
public:
+ // Bitmask values for supported rate control modes.
+ enum SupportedRateControlMode : uint8_t {
+ kNoMode = 0, // for uninitialized profiles only
+ kConstantMode = 0b0001,
+ kVariableMode = 0b0010,
+ };
+
// Specification of an encoding profile supported by an encoder.
struct MEDIA_EXPORT SupportedProfile {
SupportedProfile();
@@ -149,6 +156,7 @@ class MEDIA_EXPORT VideoEncodeAccelerator {
const gfx::Size& max_resolution,
uint32_t max_framerate_numerator = 0u,
uint32_t max_framerate_denominator = 1u,
+ SupportedRateControlMode rc_modes = kConstantMode,
const std::vector<SVCScalabilityMode>& scalability_modes = {});
SupportedProfile(const SupportedProfile& other);
SupportedProfile& operator=(const SupportedProfile& other) = default;
@@ -158,6 +166,7 @@ class MEDIA_EXPORT VideoEncodeAccelerator {
gfx::Size max_resolution;
uint32_t max_framerate_numerator{0};
uint32_t max_framerate_denominator{0};
+ SupportedRateControlMode rate_control_modes = kNoMode;
std::vector<SVCScalabilityMode> scalability_modes;
};
using SupportedProfiles = std::vector<SupportedProfile>;
@@ -445,6 +454,35 @@ MEDIA_EXPORT bool operator==(
const VideoEncodeAccelerator::Config::SpatialLayer& r);
MEDIA_EXPORT bool operator==(const VideoEncodeAccelerator::Config& l,
const VideoEncodeAccelerator::Config& r);
+
+MEDIA_EXPORT inline VideoEncodeAccelerator::SupportedRateControlMode operator|(
+ VideoEncodeAccelerator::SupportedRateControlMode lhs,
+ VideoEncodeAccelerator::SupportedRateControlMode rhs) {
+ return static_cast<VideoEncodeAccelerator::SupportedRateControlMode>(
+ static_cast<uint8_t>(lhs) | static_cast<uint8_t>(rhs));
+}
+
+MEDIA_EXPORT inline VideoEncodeAccelerator::SupportedRateControlMode&
+operator|=(VideoEncodeAccelerator::SupportedRateControlMode& lhs,
+ VideoEncodeAccelerator::SupportedRateControlMode rhs) {
+ lhs = lhs | rhs;
+ return lhs;
+}
+
+MEDIA_EXPORT inline VideoEncodeAccelerator::SupportedRateControlMode operator&(
+ VideoEncodeAccelerator::SupportedRateControlMode lhs,
+ VideoEncodeAccelerator::SupportedRateControlMode rhs) {
+ return static_cast<VideoEncodeAccelerator::SupportedRateControlMode>(
+ static_cast<uint8_t>(lhs) & static_cast<uint8_t>(rhs));
+}
+
+MEDIA_EXPORT inline VideoEncodeAccelerator::SupportedRateControlMode&
+operator&=(VideoEncodeAccelerator::SupportedRateControlMode& lhs,
+ VideoEncodeAccelerator::SupportedRateControlMode rhs) {
+ lhs = lhs & rhs;
+ return lhs;
+}
+
} // namespace media
namespace std {
diff --git a/chromium/media/video/video_encoder_fallback.cc b/chromium/media/video/video_encoder_fallback.cc
index 0197d62b32c..12c78924dfd 100644
--- a/chromium/media/video/video_encoder_fallback.cc
+++ b/chromium/media/video/video_encoder_fallback.cc
@@ -65,6 +65,7 @@ void VideoEncoderFallback::Encode(scoped_refptr<VideoFrame> frame,
bool key_frame,
EncoderStatusCB done_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(!init_done_cb_);
if (use_fallback_) {
if (fallback_initialized_) {
diff --git a/chromium/media/video/video_encoder_fallback_test.cc b/chromium/media/video/video_encoder_fallback_test.cc
index 8787118373b..36cbecdf2cf 100644
--- a/chromium/media/video/video_encoder_fallback_test.cc
+++ b/chromium/media/video/video_encoder_fallback_test.cc
@@ -300,18 +300,19 @@ TEST_F(VideoEncoderFallbackTest, SecondaryFailureOnInitialize) {
EncoderStatus::Codes::kEncoderUnsupportedCodec);
}));
- EXPECT_CALL(*main_video_encoder_, Encode(_, _, _))
- .WillRepeatedly(
- Invoke([&, this](scoped_refptr<VideoFrame> frame, bool key_frame,
- VideoEncoder::EncoderStatusCB done_cb) {
- RunStatusCallbackAync(
- std::move(done_cb),
- EncoderStatus::Codes::kEncoderInitializeNeverCompleted);
- }));
-
fallback_encoder_->Initialize(
profile, options, std::move(output_cb),
ValidatingStatusCB(EncoderStatus::Codes::kEncoderUnsupportedCodec));
+ RunLoop();
+
+ EXPECT_CALL(*secondary_video_encoder_, Encode(_, _, _))
+ .Times(kFrameCount)
+ .WillRepeatedly(Invoke([&, this](scoped_refptr<VideoFrame> frame,
+ bool key_frame,
+ VideoEncoder::EncoderStatusCB done_cb) {
+ RunStatusCallbackAync(std::move(done_cb),
+ EncoderStatus::Codes::kEncoderUnsupportedCodec);
+ }));
for (int i = 0; i < kFrameCount; i++) {
auto frame = VideoFrame::CreateFrame(PIXEL_FORMAT_I420, kFrameSize,
@@ -329,4 +330,96 @@ TEST_F(VideoEncoderFallbackTest, SecondaryFailureOnInitialize) {
EXPECT_TRUE(FallbackHappened());
}
+// Test how VideoEncoderFallback reports errors when encoding with the secondary
+// encoder.
+TEST_F(VideoEncoderFallbackTest, SecondaryFailureOnEncode) {
+ int outputs = 0;
+ VideoEncoder::Options options;
+ VideoCodecProfile profile = VIDEO_CODEC_PROFILE_UNKNOWN;
+ VideoEncoder::OutputCB output_cb =
+ BindToCurrentLoop(base::BindLambdaForTesting(
+ [&](VideoEncoderOutput,
+ absl::optional<VideoEncoder::CodecDescription>) { outputs++; }));
+ VideoEncoder::OutputCB primary_output_cb;
+ VideoEncoder::OutputCB secondary_output_cb;
+
+ // Initialize() on the main encoder should succeed
+ EXPECT_CALL(*main_video_encoder_, Initialize(_, _, _, _))
+ .WillOnce(Invoke([&, this](VideoCodecProfile profile,
+ const VideoEncoder::Options& options,
+ VideoEncoder::OutputCB output_cb,
+ VideoEncoder::EncoderStatusCB done_cb) {
+ primary_output_cb = std::move(output_cb);
+ RunStatusCallbackAync(std::move(done_cb));
+ }));
+
+ // Initialize() on the second encoder should succeed as well
+ EXPECT_CALL(*secondary_video_encoder_, Initialize(_, _, _, _))
+ .WillOnce(Invoke([&, this](VideoCodecProfile profile,
+ const VideoEncoder::Options& options,
+ VideoEncoder::OutputCB output_cb,
+ VideoEncoder::EncoderStatusCB done_cb) {
+ secondary_output_cb = std::move(output_cb);
+ RunStatusCallbackAync(std::move(done_cb));
+ }));
+
+ // Start failing encodes after half of the frames.
+ auto encoder_switch_time = base::Seconds(kFrameCount / 2);
+ EXPECT_CALL(*main_video_encoder_, Encode(_, _, _))
+ .WillRepeatedly(Invoke([&, this](scoped_refptr<VideoFrame> frame,
+ bool key_frame,
+ VideoEncoder::EncoderStatusCB done_cb) {
+ EXPECT_TRUE(frame);
+ EXPECT_TRUE(done_cb);
+ if (frame->timestamp() > encoder_switch_time) {
+ std::move(done_cb).Run(EncoderStatus::Codes::kEncoderFailedEncode);
+ return;
+ }
+
+ VideoEncoderOutput output;
+ output.timestamp = frame->timestamp();
+ primary_output_cb.Run(std::move(output), {});
+ RunStatusCallbackAync(std::move(done_cb));
+ }));
+
+ // All encodes should come to the secondary encoder. Again fail encoding
+ // once we reach 3/4 the total frame count.
+ auto second_encoder_fail_time = base::Seconds(3 * kFrameCount / 4);
+ LOG(ERROR) << second_encoder_fail_time << "!!!!";
+ EXPECT_CALL(*secondary_video_encoder_, Encode(_, _, _))
+ .WillRepeatedly(Invoke([&, this](scoped_refptr<VideoFrame> frame,
+ bool key_frame,
+ VideoEncoder::EncoderStatusCB done_cb) {
+ EXPECT_TRUE(frame);
+ EXPECT_TRUE(done_cb);
+ EXPECT_GT(frame->timestamp(), encoder_switch_time);
+ if (frame->timestamp() > second_encoder_fail_time) {
+ std::move(done_cb).Run(EncoderStatus::Codes::kEncoderFailedEncode);
+ return;
+ }
+ VideoEncoderOutput output;
+ output.timestamp = frame->timestamp();
+ secondary_output_cb.Run(std::move(output), {});
+ RunStatusCallbackAync(std::move(done_cb));
+ }));
+
+ fallback_encoder_->Initialize(profile, options, std::move(output_cb),
+ ValidatingStatusCB());
+ RunLoop();
+
+ for (int i = 1; i <= kFrameCount; i++) {
+ auto frame = VideoFrame::CreateFrame(PIXEL_FORMAT_I420, kFrameSize,
+ gfx::Rect(kFrameSize), kFrameSize,
+ base::Seconds(i));
+ auto done_cb =
+ ValidatingStatusCB((frame->timestamp() <= second_encoder_fail_time)
+ ? EncoderStatus::Codes::kOk
+ : EncoderStatus::Codes::kEncoderFailedEncode);
+ fallback_encoder_->Encode(frame, true, std::move(done_cb));
+ }
+ RunLoop();
+ EXPECT_TRUE(FallbackHappened());
+ EXPECT_EQ(outputs, 3 * kFrameCount / 4);
+}
+
} // namespace media
diff --git a/chromium/media/video/vpx_video_encoder.cc b/chromium/media/video/vpx_video_encoder.cc
index 5397f6a54b6..2ed71b09323 100644
--- a/chromium/media/video/vpx_video_encoder.cc
+++ b/chromium/media/video/vpx_video_encoder.cc
@@ -540,7 +540,7 @@ void VpxVideoEncoder::Encode(scoped_refptr<VideoFrame> frame,
}
}
- TRACE_EVENT0("media", "vpx_codec_encode");
+ TRACE_EVENT1("media", "vpx_codec_encode", "timestamp", frame->timestamp());
auto vpx_error = vpx_codec_encode(codec_.get(), &vpx_image_, timestamp_us,
duration_us, flags, deadline);
diff --git a/chromium/media/webrtc/audio_processor.cc b/chromium/media/webrtc/audio_processor.cc
index bc57e649440..586fea2161c 100644
--- a/chromium/media/webrtc/audio_processor.cc
+++ b/chromium/media/webrtc/audio_processor.cc
@@ -249,6 +249,13 @@ AudioProcessor::AudioProcessor(
DCHECK_EQ(output_format_.sample_rate() / 100,
output_format_.frames_per_buffer());
}
+ if (input_format_.sample_rate() % 100 != 0 ||
+ output_format_.sample_rate() % 100 != 0) {
+ SendLogMessage(base::StringPrintf(
+ "%s: WARNING: Sample rate not divisible by 100, processing is provided "
+ "on a best-effort basis. input rate=[%d], output rate=[%d]",
+ __func__, input_format_.sample_rate(), output_format_.sample_rate()));
+ }
SendLogMessage(base::StringPrintf(
"%s({input_format_=[%s], output_format_=[%s]})", __func__,
input_format_.AsHumanReadableString().c_str(),
@@ -548,14 +555,8 @@ absl::optional<AudioParameters> AudioProcessor::ComputeInputFormat(
return absl::nullopt;
}
- // The audio processor code assumes that sample rates are divisible by 100.
- if (device_format.sample_rate() % 100 != 0) {
- return absl::nullopt;
- }
-
AudioParameters params(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- device_format.sample_rate(),
+ device_format.format(), channel_layout, device_format.sample_rate(),
GetCaptureBufferSize(
audio_processing_settings.NeedWebrtcAudioProcessing(),
device_format));
@@ -625,9 +626,9 @@ AudioParameters AudioProcessor::GetDefaultOutputFormat(
output_frames = input_format.frames_per_buffer();
}
- media::AudioParameters output_format = media::AudioParameters(
- media::AudioParameters::AUDIO_PCM_LOW_LATENCY, output_channel_layout,
- output_sample_rate, output_frames);
+ media::AudioParameters output_format =
+ media::AudioParameters(input_format.format(), output_channel_layout,
+ output_sample_rate, output_frames);
if (output_channel_layout == media::CHANNEL_LAYOUT_DISCRETE) {
// Explicitly set number of channels for discrete channel layouts.
output_format.set_channels_for_discrete(input_format.channels());
diff --git a/chromium/media/webrtc/audio_processor.h b/chromium/media/webrtc/audio_processor.h
index 7258c614e1a..7186a2e53e4 100644
--- a/chromium/media/webrtc/audio_processor.h
+++ b/chromium/media/webrtc/audio_processor.h
@@ -66,7 +66,9 @@ class COMPONENT_EXPORT(MEDIA_WEBRTC) AudioProcessor {
// |input_format| specifies the format of the incoming capture data.
// |output_format| specifies the output format. If
// |settings|.NeedWebrtcAudioProcessing() is true, then the output must be in
- // 10 ms chunks.
+ // 10 ms chunks: the formats must specify |sample rate|/100 samples per buffer
+ // (rounded down). Sample rates which are not divisible by 100 are supported
+ // on a best-effort basis, audio quality and stability may suffer.
static std::unique_ptr<AudioProcessor> Create(
DeliverProcessedAudioCallback deliver_processed_audio_callback,
LogCallback log_callback,
diff --git a/chromium/media/webrtc/audio_processor_test.cc b/chromium/media/webrtc/audio_processor_test.cc
index 6b62c394193..2cb5a351642 100644
--- a/chromium/media/webrtc/audio_processor_test.cc
+++ b/chromium/media/webrtc/audio_processor_test.cc
@@ -165,8 +165,14 @@ class AudioProcessorTest : public ::testing::Test {
EXPECT_TRUE(config.high_pass_filter.enabled);
EXPECT_TRUE(config.noise_suppression.enabled);
EXPECT_EQ(config.noise_suppression.level, config.noise_suppression.kHigh);
+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
+ BUILDFLAG(IS_CHROMEOS)
+ EXPECT_TRUE(config.gain_controller1.analog_gain_controller
+ .clipping_predictor.enabled);
+#else
EXPECT_FALSE(config.gain_controller1.analog_gain_controller
.clipping_predictor.enabled);
+#endif
#if BUILDFLAG(IS_ANDROID)
EXPECT_TRUE(config.echo_canceller.mobile_mode);
EXPECT_EQ(config.gain_controller1.mode,
diff --git a/chromium/media/webrtc/helpers_unittests.cc b/chromium/media/webrtc/helpers_unittests.cc
index 59f91b806bb..4b3bd1a0b89 100644
--- a/chromium/media/webrtc/helpers_unittests.cc
+++ b/chromium/media/webrtc/helpers_unittests.cc
@@ -87,7 +87,12 @@ TEST(CreateWebRtcAudioProcessingModuleTest, CheckDefaultAgcConfig) {
// zero.
EXPECT_EQ(agc1_analog_config.startup_min_volume, 0);
#endif
+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
+ BUILDFLAG(IS_CHROMEOS)
+ EXPECT_TRUE(agc1_analog_config.clipping_predictor.enabled);
+#else
EXPECT_FALSE(agc1_analog_config.clipping_predictor.enabled);
+#endif
// TODO(bugs.webrtc.org/7909): Uncomment below once fixed.
// #endif
diff --git a/chromium/media/webrtc/webrtc_features.cc b/chromium/media/webrtc/webrtc_features.cc
index 1267b52c692..52e9dd55335 100644
--- a/chromium/media/webrtc/webrtc_features.cc
+++ b/chromium/media/webrtc/webrtc_features.cc
@@ -18,6 +18,15 @@ constexpr base::FeatureState kWebRtcHybridAgcState =
#endif
} // namespace
+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
+ BUILDFLAG(IS_CHROMEOS)
+constexpr base::FeatureState kWebRtcAnalogAgcClippingControlState =
+ base::FEATURE_ENABLED_BY_DEFAULT;
+#else
+constexpr base::FeatureState kWebRtcAnalogAgcClippingControlState =
+ base::FEATURE_DISABLED_BY_DEFAULT;
+#endif
+
// When enabled we will tell WebRTC that we want to use the
// Windows.Graphics.Capture API based DesktopCapturer, if it is available.
const base::Feature kWebRtcAllowWgcDesktopCapturer{
@@ -35,7 +44,7 @@ const base::Feature kWebRtcHybridAgc{"WebRtcHybridAgc", kWebRtcHybridAgcState};
// Enables and configures the clipping control in the WebRTC analog AGC.
const base::Feature kWebRtcAnalogAgcClippingControl{
- "WebRtcAnalogAgcClippingControl", base::FEATURE_DISABLED_BY_DEFAULT};
+ "WebRtcAnalogAgcClippingControl", kWebRtcAnalogAgcClippingControlState};
// Enables the override for the default minimum starting volume of the Automatic
// Gain Control algorithm in WebRTC.